repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
ToontownUprising/src | toontown/safezone/TreasurePlannerAI.py | 4 | 4944 | from direct.distributed.ClockDelta import *
from direct.showbase import DirectObject
from direct.directnotify import DirectNotifyGlobal
from direct.task import Task
from DistributedTreasureAI import DistributedTreasureAI
import random
class TreasurePlannerAI(DirectObject.DirectObject):
    """Spawns treasures at fixed spawn points in a zone and validates grabs.

    Subclasses populate self.spawnPoints by overriding initSpawnPoints().
    self.treasures is parallel to self.spawnPoints; an entry is None when the
    corresponding spawn point is currently empty.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('TreasurePlannerAI')

    def __init__(self, zoneId, treasureType, callback = None):
        self.zoneId = zoneId
        self.treasureType = treasureType
        # Optional callable invoked with the grabbing avatar's id after a
        # successful grab.
        self.callback = callback
        self.initSpawnPoints()
        # One slot per spawn point; None marks an empty point.
        self.treasures = [None] * len(self.spawnPoints)
        self.deleteTaskNames = set()
        # State for the per-avatar grab-rate sanity check in grabAttempt().
        self.lastRequestId = None
        self.requestStartTime = None
        self.requestCount = None

    def initSpawnPoints(self):
        """Populate and return the spawn point list; subclasses override."""
        self.spawnPoints = []
        return self.spawnPoints

    def numTreasures(self):
        """Return the number of occupied spawn points."""
        return sum(1 for treasure in self.treasures if treasure)

    def countEmptySpawnPoints(self):
        """Return the number of empty spawn points."""
        # Fix: identity comparison with None (was '== None').
        return sum(1 for treasure in self.treasures if treasure is None)

    def nthEmptyIndex(self, n):
        """Return the index of the (n+1)-th empty spawn point (0-based n)."""
        emptyCounter = -1
        spawnPointCounter = -1
        while emptyCounter < n:
            spawnPointCounter += 1
            if self.treasures[spawnPointCounter] is None:
                emptyCounter += 1
        return spawnPointCounter

    def findIndexOfTreasureId(self, treasureId):
        """Return the slot index of the treasure with the given doId, or None."""
        for index, treasure in enumerate(self.treasures):
            if treasure is not None and treasureId == treasure.getDoId():
                return index
        return None

    def placeAllTreasures(self):
        """Fill every empty spawn point with a new treasure."""
        for index, treasure in enumerate(self.treasures):
            if not treasure:
                self.placeTreasure(index)

    def placeTreasure(self, index):
        """Create and generate a treasure at the given spawn point index."""
        spawnPoint = self.spawnPoints[index]
        treasure = DistributedTreasureAI(simbase.air, self, self.treasureType, spawnPoint[0], spawnPoint[1], spawnPoint[2])
        treasure.generateWithRequired(self.zoneId)
        self.treasures[index] = treasure

    def validAvatar(self, treasure, av):
        """Return whether av may grab treasure; delegates to the treasure."""
        return treasure.validAvatar(av)

    def grabAttempt(self, avId, treasureId):
        """Handle a client grab request: rate-check, then award or reject."""
        if self.lastRequestId == avId:
            # Same avatar as the previous request: track its grab rate.
            self.requestCount += 1
            now = globalClock.getFrameTime()
            elapsed = now - self.requestStartTime
            if elapsed > 10:
                # Measurement window expired; restart it.
                self.requestCount = 1
                self.requestStartTime = now
            else:
                secondsPerGrab = elapsed / self.requestCount
                if self.requestCount >= 3 and secondsPerGrab <= 0.4:
                    # Sustained sub-0.4s grabs look like a hacked client.
                    simbase.air.writeServerEvent('suspicious', avId, 'TreasurePlannerAI.grabAttempt %s treasures in %s seconds' % (self.requestCount, elapsed))
        else:
            self.lastRequestId = avId
            self.requestCount = 1
            self.requestStartTime = globalClock.getFrameTime()
        index = self.findIndexOfTreasureId(treasureId)
        if index is None:
            # Treasure already grabbed or never existed; ignore silently.
            return
        av = simbase.air.doId2do.get(avId)
        if av is None:
            simbase.air.writeServerEvent('suspicious', avId, 'TreasurePlannerAI.grabAttempt unknown avatar')
            self.notify.warning('avid: %s does not exist' % avId)
            return
        treasure = self.treasures[index]
        if self.validAvatar(treasure, av):
            self.treasures[index] = None
            if self.callback:
                self.callback(avId)
            treasure.d_setGrab(avId)
            self.deleteTreasureSoon(treasure)
        else:
            treasure.d_setReject()

    def deleteTreasureSoon(self, treasure):
        """Schedule deletion of a grabbed treasure after a 5-second delay."""
        taskName = treasure.uniqueName('deletingTreasure')
        taskMgr.doMethodLater(5, self.__deleteTreasureNow, taskName, extraArgs=(treasure, taskName))
        self.deleteTaskNames.add(taskName)

    def deleteAllTreasuresNow(self):
        """Delete all live treasures and cancel pending delayed deletions."""
        for treasure in self.treasures:
            if treasure:
                treasure.requestDelete()
        for taskName in self.deleteTaskNames:
            tasks = taskMgr.getTasksNamed(taskName)
            if len(tasks):
                # Delayed-delete task has not fired yet: delete its treasure
                # now and cancel the task.
                treasure = tasks[0].getArgs()[0]
                treasure.requestDelete()
                taskMgr.remove(taskName)
        self.deleteTaskNames = set()
        self.treasures = [None] * len(self.spawnPoints)

    def __deleteTreasureNow(self, treasure, taskName):
        # doMethodLater callback: finish the delete and drop the bookkeeping.
        treasure.requestDelete()
        self.deleteTaskNames.remove(taskName)
| mit |
KarrLab/obj_model | tests/fixtures/migrate/wc_lang_fixture/wc_lang/transform/split_reversible_reactions.py | 1 | 5346 | """ Transform models.
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2018-06-19
:Copyright: 2018, Karr Lab
:License: MIT
"""
from .core import Transform
from wc_lang import Model, Reaction, RateLawDirection
from wc_onto import onto
from wc_utils.util.ontology import are_terms_equivalent
import copy
import re
class SplitReversibleReactionsTransform(Transform):
    """ Split reversible reactions in non-dFBA submodels into separate forward and backward reactions """

    class Meta(object):
        # Transform registry metadata.
        id = 'SplitReversibleReactions'
        label = 'Split reversible reactions into separate forward and backward reactions'

    def run(self, model):
        """ Split reversible reactions in non-dFBA submodels into separate forward and backward reactions

        Args:
            model (:obj:`Model`): model definition

        Returns:
            :obj:`Model`: same model definition, but with reversible reactions split into separate forward and backward reactions
        """
        for submodel in model.submodels:
            # dFBA submodels are left untouched; only other frameworks are split.
            if not are_terms_equivalent(submodel.framework, onto['WC:dynamic_flux_balance_analysis']):
                # Iterate over a copy because reactions are removed in the loop.
                for rxn in list(submodel.reactions):
                    if rxn.reversible:
                        # remove reversible reaction
                        model.reactions.remove(rxn)
                        submodel.reactions.remove(rxn)

                        # create separate forward and reverse reactions
                        rxn_for = submodel.reactions.create(
                            model=model,
                            id='{}_forward'.format(rxn.id),
                            name='{} (forward)'.format(rxn.name),
                            reversible=False,
                            evidence=rxn.evidence,
                            conclusions=rxn.conclusions,
                            identifiers=rxn.identifiers,
                            comments=rxn.comments,
                            references=rxn.references,
                        )
                        rxn_bck = submodel.reactions.create(
                            model=model,
                            id='{}_backward'.format(rxn.id),
                            name='{} (backward)'.format(rxn.name),
                            reversible=False,
                            evidence=rxn.evidence,
                            conclusions=rxn.conclusions,
                            identifiers=rxn.identifiers,
                            comments=rxn.comments,
                            references=rxn.references,
                        )

                        # Detach shared metadata from the removed reaction so it
                        # belongs only to the two new reactions.
                        rxn.evidence = []
                        rxn.conclusions = []
                        rxn.identifiers = []
                        rxn.references = []

                        # copy participants and negate for backward reaction
                        for part in rxn.participants:
                            rxn_for.participants.append(part)
                            part_back = part.species.species_coefficients.get_one(coefficient=-1 * part.coefficient)
                            if part_back:
                                # Reuse an existing negated coefficient object.
                                rxn_bck.participants.append(part_back)
                            else:
                                rxn_bck.participants.create(species=part.species, coefficient=-1 * part.coefficient)
                        rxn.participants = []

                        # copy rate laws
                        law_for = rxn.rate_laws.get_one(direction=RateLawDirection.forward)
                        law_bck = rxn.rate_laws.get_one(direction=RateLawDirection.backward)

                        if law_for:
                            law_for.reaction = rxn_for
                            law_for.direction = RateLawDirection.forward
                            law_for.id = law_for.gen_id()

                        if law_bck:
                            law_bck.reaction = rxn_bck
                            # The old backward law becomes the *forward* law of
                            # the new irreversible backward reaction.
                            law_bck.direction = RateLawDirection.forward
                            law_bck.id = law_bck.gen_id()

                        # copy dFBA objective: unreachable because only non-dFBA reactions are split
                        if rxn.dfba_obj_expression:
                            dfba_obj_expr = rxn.dfba_obj_expression  # pragma: no cover
                            parsed_expr = dfba_obj_expr._parsed_expression  # pragma: no cover

                            dfba_obj_expr.expression = parsed_expr.expression = re.sub(
                                r'\b' + rxn.id + r'\b',
                                '({} - {})'.format(rxn_for.id, rxn_bck.id),
                                dfba_obj_expr.expression)  # pragma: no cover

                            parsed_expr._objs[Reaction].pop(rxn.id)  # pragma: no cover
                            parsed_expr._objs[Reaction][rxn_for.id] = rxn_for  # pragma: no cover
                            parsed_expr._objs[Reaction][rxn_bck.id] = rxn_bck  # pragma: no cover
                            parsed_expr.tokenize()  # pragma: no cover

                            rxn.dfba_obj_expression = None  # pragma: no cover
                            rxn_for.dfba_obj_expression = dfba_obj_expr  # pragma: no cover
                            rxn_bck.dfba_obj_expression = dfba_obj_expr  # pragma: no cover

        return model
| mit |
MadManRises/Madgine | shared/assimp/contrib/gtest/scripts/pump.py | 2471 | 23673 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
# Lexer table: (compiled regex, token type) pairs. FindFirstInLine tries them
# in order and keeps the match that starts earliest in the line, so more
# specific '$'-directives must precede the bare '$' entry.
TOKEN_TABLE = [
    (re.compile(r'\$var\s+'), '$var'),
    (re.compile(r'\$elif\s+'), '$elif'),
    (re.compile(r'\$else\s+'), '$else'),
    (re.compile(r'\$for\s+'), '$for'),
    (re.compile(r'\$if\s+'), '$if'),
    (re.compile(r'\$range\s+'), '$range'),
    (re.compile(r'\$[_A-Za-z]\w*'), '$id'),
    (re.compile(r'\$\(\$\)'), '$($)'),
    (re.compile(r'\$'), '$'),
    (re.compile(r'\[\[\n?'), '[['),
    (re.compile(r'\]\]\n?'), ']]'),
    ]
class Cursor:
  """Represents a position (line and column) in a text file."""

  def __init__(self, line=-1, column=-1):
    self.line = line
    self.column = column

  def __eq__(self, rhs):
    return (self.line, self.column) == (rhs.line, rhs.column)

  def __ne__(self, rhs):
    return not self == rhs

  def __lt__(self, rhs):
    # Lexicographic order: earlier line wins, then earlier column.
    return (self.line, self.column) < (rhs.line, rhs.column)

  def __le__(self, rhs):
    return self < rhs or self == rhs

  def __gt__(self, rhs):
    return rhs < self

  def __ge__(self, rhs):
    return rhs <= self

  def __str__(self):
    if self == Eof():
      return 'EOF'
    return '%s(%s)' % (self.line + 1, self.column)

  def __add__(self, offset):
    return Cursor(self.line, self.column + offset)

  def __sub__(self, offset):
    return Cursor(self.line, self.column - offset)

  def Clone(self):
    """Returns an independent copy of this cursor."""
    return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
  """Returns the special cursor to denote the end-of-file."""
  # Note: any Cursor(-1, -1) compares equal to Eof().
  return Cursor(-1, -1)
class Token:
  """Represents a token in a Pump source file."""

  def __init__(self, start=None, end=None, value=None, token_type=None):
    # Missing endpoints default to the EOF sentinel cursor.
    self.start = Eof() if start is None else start
    self.end = Eof() if end is None else end
    self.value = value
    self.token_type = token_type

  def __str__(self):
    return 'Token @%s: \'%s\' type=%s' % (
        self.start, self.value, self.token_type)

  def Clone(self):
    """Returns a copy of self."""
    return Token(self.start.Clone(), self.end.Clone(), self.value,
                 self.token_type)
def StartsWith(lines, pos, string):
  """Returns True iff the given position in lines starts with 'string'."""
  remainder = lines[pos.line][pos.column:]
  return remainder.startswith(string)
def FindFirstInLine(line, token_table):
  """Returns (start, length, token_type) for the match that begins earliest
  in line, or None when nothing in token_table matches. Ties go to the
  entry that appears first in token_table."""
  best = None
  for regex, token_type in token_table:
    m = regex.search(line)
    if m is None:
      continue
    if best is None or m.start() < best[0]:
      best = (m.start(), m.end() - m.start(), token_type)
  return best
def FindFirst(lines, token_table, cursor):
  """Finds the first occurrence of any string in strings in lines.

  Scans forward from cursor and returns a Token for the earliest match of
  any regex in token_table, or None if nothing matches through end-of-file.
  """
  start = cursor.Clone()
  cur_line_number = cursor.line
  for line in lines[start.line:]:
    if cur_line_number == start.line:
      # On the cursor's own line, skip the text before the cursor.
      line = line[start.column:]
    m = FindFirstInLine(line, token_table)
    if m:
      # We found a regex in line.
      (start_column, length, token_type) = m
      if cur_line_number == start.line:
        # Translate back to an absolute column on the original line.
        start_column += start.column
      found_start = Cursor(cur_line_number, start_column)
      found_end = found_start + length
      return MakeToken(lines, found_start, found_end, token_type)
    cur_line_number += 1
  # We failed to find str in lines
  return None
def SubString(lines, start, end):
  """Returns a substring in lines.

  end == Eof() means through the end of the last line; returns '' when
  start is at or past end.
  """
  if end == Eof():
    end = Cursor(len(lines) - 1, len(lines[-1]))

  if start >= end:
    return ''

  if start.line == end.line:
    return lines[start.line][start.column:end.column]

  # The span crosses lines: first partial line + whole middle lines +
  # last partial line.
  result_lines = ([lines[start.line][start.column:]] +
                  lines[start.line + 1:end.line] +
                  [lines[end.line][:end.column]])
  return ''.join(result_lines)
def StripMetaComments(str):
  """Strip meta comments from each line in the given string."""
  # Without re.MULTILINE, '^' anchors at the start of the whole string, so
  # this removes a leading line that is nothing but a meta comment
  # (including its trailing newline).
  without_comment_line = re.sub(r'^\s*\$\$.*\n', '', str)
  # Chop trailing $$-comments off contentful lines ('.' stops at newlines).
  return re.sub(r'\s*\$\$.*', '', without_comment_line)
def MakeToken(lines, start, end, token_type):
  """Creates a new instance of Token."""
  # The token's value is the literal text spanned by [start, end).
  return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
  """Parses a token of token_type whose regex must match exactly at pos;
  prints an error and exits otherwise."""
  line = lines[pos.line][pos.column:]
  m = regex.search(line)
  if m and not m.start():
    return MakeToken(lines, pos, pos + m.end(), token_type)
  else:
    # py3 fix: 'print x' statement converted to the print() function
    # (also valid on Python 2 for a single argument).
    print('ERROR: %s expected at %s.' % (token_type, pos))
    sys.exit(1)
# Regexes used by the directive parser below.
ID_REGEX = re.compile(r'[_A-Za-z]\w*')             # a meta variable identifier
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')  # up to EOL or a $$ comment
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')                # the '..' in $range lo..hi
def Skip(lines, pos, regex):
  """Returns the position just past a match of regex anchored at pos.

  If regex does not match exactly at pos, returns pos unchanged.
  """
  line = lines[pos.line][pos.column:]
  m = re.search(regex, line)
  if m and not m.start():
    return pos + m.end()
  else:
    return pos
def SkipUntil(lines, pos, regex, token_type):
  """Returns the position of the first match of regex at or after pos on the
  same line; prints an error naming token_type and exits if none."""
  line = lines[pos.line][pos.column:]
  m = re.search(regex, line)
  if m:
    return pos + m.start()
  else:
    # py3 fix: 'print (...)' statement form converted to the print()
    # function (identical output on Python 2 and 3 for one argument).
    print('ERROR: %s expected on line %s after column %s.' %
          (token_type, pos.line + 1, pos.column))
    sys.exit(1)
def ParseExpTokenInParens(lines, pos):
  """Parses a parenthesized expression starting at pos into an 'exp' Token.

  Nested parentheses are handled by the mutually recursive helpers below.
  """
  def ParseInParens(pos):
    # Consume optional whitespace, '(', the body, then ')'.
    pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
    pos = Skip(lines, pos, r'\(')
    pos = Parse(pos)
    pos = Skip(lines, pos, r'\)')
    return pos

  def Parse(pos):
    # Advance to the next paren; recurse on '(' to skip a nested group.
    pos = SkipUntil(lines, pos, r'\(|\)', ')')
    if SubString(lines, pos, pos + 1) == '(':
      pos = Parse(pos + 1)
      pos = Skip(lines, pos, r'\)')
      return Parse(pos)
    else:
      return pos

  start = pos.Clone()
  pos = ParseInParens(pos)
  return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
  """Returns token with a single trailing newline removed from its value;
  returns the token unchanged when it has no trailing newline."""
  if not token.value.endswith('\n'):
    return token
  return Token(token.start, token.end, token.value[:-1], token.token_type)
def TokenizeLines(lines, pos):
  """Generator yielding the tokens of lines starting at pos.

  Raw text between directives is yielded as 'code' tokens; the newline
  immediately before most directives is stripped so directives do not leave
  blank lines in the output.
  """
  while True:
    found = FindFirst(lines, TOKEN_TABLE, pos)
    if not found:
      # No more directives: the rest of the input is one 'code' token.
      yield MakeToken(lines, pos, Eof(), 'code')
      return

    if found.start == pos:
      prev_token = None
      prev_token_rstripped = None
    else:
      prev_token = MakeToken(lines, pos, found.start, 'code')
      prev_token_rstripped = RStripNewLineFromToken(prev_token)

    if found.token_type == '$var':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
      yield eq_token
      pos = Skip(lines, eq_token.end, r'\s*')

      if SubString(lines, pos, pos + 2) != '[[':
        # Single-line form: the value is the rest of the line.
        exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
        yield exp_token
        pos = Cursor(exp_token.end.line + 1, 0)
    elif found.token_type == '$for':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
    elif found.token_type == '$range':
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
      yield id_token
      pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)

      # Emit lower-bound exp, the '..' separator, then upper-bound exp.
      dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
      yield MakeToken(lines, pos, dots_pos, 'exp')
      yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
      pos = dots_pos + 2
      new_pos = Cursor(pos.line + 1, 0)
      yield MakeToken(lines, pos, new_pos, 'exp')
      pos = new_pos
    elif found.token_type == '$':
      if prev_token:
        yield prev_token
      yield found
      exp_token = ParseExpTokenInParens(lines, found.end)
      yield exp_token
      pos = exp_token.end
    elif (found.token_type == ']]' or found.token_type == '$if' or
          found.token_type == '$elif' or found.token_type == '$else'):
      if prev_token_rstripped:
        yield prev_token_rstripped
      yield found
      pos = found.end
    else:
      if prev_token:
        yield prev_token
      yield found
      pos = found.end
def Tokenize(s):
  """A generator that yields the tokens in the given string."""
  if s != '':
    # keepends=True so tokens retain their trailing newlines.
    lines = s.splitlines(True)
    for token in TokenizeLines(lines, Cursor(0, 0)):
      yield token
class CodeNode:
  """AST node: an ordered sequence of atomic code nodes."""

  def __init__(self, atomic_code_list=None):
    self.atomic_code = atomic_code_list
class VarNode:
  """AST node: a $var definition; atomic_code is the value (exp or code)."""

  def __init__(self, identifier=None, atomic_code=None):
    self.identifier = identifier
    self.atomic_code = atomic_code
class RangeNode:
  """AST node: a $range declaration with inclusive bounds exp1..exp2."""

  def __init__(self, identifier=None, exp1=None, exp2=None):
    self.identifier = identifier
    self.exp1 = exp1
    self.exp2 = exp2
class ForNode:
  """AST node: a $for loop; sep is the optional text emitted between
  iterations."""

  def __init__(self, identifier=None, sep=None, code=None):
    self.identifier = identifier
    self.sep = sep
    self.code = code
class ElseNode:
  """AST node: an $else branch."""

  def __init__(self, else_branch=None):
    self.else_branch = else_branch
class IfNode:
  """AST node: an $if with optional else branch ($elif chains nest here)."""

  def __init__(self, exp=None, then_branch=None, else_branch=None):
    self.exp = exp
    self.then_branch = then_branch
    self.else_branch = else_branch
class RawCodeNode:
  """AST node: literal output text held in a 'code' token."""

  def __init__(self, token=None):
    self.raw_code = token
class LiteralDollarNode:
  """AST node: a literal '$' produced by the $($) escape."""

  def __init__(self, token):
    self.token = token
class ExpNode:
  """AST node: a meta expression; python_exp is its eval()-able Python form."""

  def __init__(self, token, python_exp):
    self.token = token
    self.python_exp = python_exp
def PopFront(a_list):
  """Removes and returns the first element of a_list."""
  return a_list.pop(0)
def PushFront(a_list, elem):
  """Inserts elem at the front of a_list in place."""
  a_list.insert(0, elem)
def PopToken(a_list, token_type=None):
  """Pops and returns the next token; when token_type is given, exits with
  an error if the popped token is not of that type."""
  token = PopFront(a_list)
  if token_type is not None and token.token_type != token_type:
    # py3 fix: print statements converted to the print() function.
    print('ERROR: %s expected at %s' % (token_type, token.start))
    print('ERROR: %s found instead' % (token,))
    sys.exit(1)

  return token
def PeekToken(a_list):
  """Returns the first token without consuming it, or None if the list is
  empty."""
  return a_list[0] if a_list else None
def ParseExpNode(token):
  """Parses a meta-expression token into an ExpNode.

  Rewrites every identifier N to self.GetValue("N") so the expression can
  later be eval()-ed against an Env (see Env.EvalExp).
  """
  python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
  return ExpNode(token, python_exp)
def ParseElseNode(tokens):
  """Parses an optional $else / $elif continuation; returns the branch's
  code node, or None when there is no continuation."""
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  next = PeekToken(tokens)
  if not next:
    return None
  if next.token_type == '$else':
    Pop('$else')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  elif next.token_type == '$elif':
    Pop('$elif')
    exp = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    inner_else_node = ParseElseNode(tokens)
    # An $elif desugars into a nested IfNode inside the else branch.
    return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
  elif not next.value.strip():
    # Whitespace-only code token between branches: skip it and retry.
    Pop('code')
    return ParseElseNode(tokens)
  else:
    return None
def ParseAtomicCodeNode(tokens):
  """Parses one atomic construct from tokens and returns its AST node, or
  None (with the token pushed back) when the next token starts no atomic
  construct (e.g. a ']]' closing the enclosing block)."""
  def Pop(token_type=None):
    return PopToken(tokens, token_type)

  head = PopFront(tokens)
  t = head.token_type
  if t == 'code':
    return RawCodeNode(head)
  elif t == '$var':
    id_token = Pop('id')
    Pop('=')
    next = PeekToken(tokens)
    if next.token_type == 'exp':
      # Single-line form: $var ID = EXPRESSION
      exp_token = Pop()
      return VarNode(id_token, ParseExpNode(exp_token))
    # Block form: $var ID = [[ CODE ]]
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return VarNode(id_token, code_node)
  elif t == '$for':
    id_token = Pop('id')
    next_token = PeekToken(tokens)
    if next_token.token_type == 'code':
      # The raw text before '[[' is the loop separator.
      sep_token = next_token
      Pop('code')
    else:
      sep_token = None
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return ForNode(id_token, sep_token, code_node)
  elif t == '$if':
    exp_token = Pop('code')
    Pop('[[')
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    else_node = ParseElseNode(tokens)
    return IfNode(ParseExpNode(exp_token), code_node, else_node)
  elif t == '$range':
    id_token = Pop('id')
    exp1_token = Pop('exp')
    Pop('..')
    exp2_token = Pop('exp')
    return RangeNode(id_token, ParseExpNode(exp1_token),
                     ParseExpNode(exp2_token))
  elif t == '$id':
    # Strip the leading '$' to get the bare identifier expression.
    return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
  elif t == '$($)':
    return LiteralDollarNode(head)
  elif t == '$':
    exp_token = Pop('exp')
    return ParseExpNode(exp_token)
  elif t == '[[':
    code_node = ParseCodeNode(tokens)
    Pop(']]')
    return code_node
  else:
    # Not an atomic node: push the token back and signal the caller.
    PushFront(tokens, head)
    return None
def ParseCodeNode(tokens):
  """Greedily parses atomic nodes from tokens until none can be parsed;
  returns them wrapped in a CodeNode."""
  atomic_code_list = []
  while True:
    if not tokens:
      break
    atomic_code_node = ParseAtomicCodeNode(tokens)
    if atomic_code_node:
      atomic_code_list.append(atomic_code_node)
    else:
      break
  return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
  """Convert the given Pump source text into an AST."""
  tokens = list(Tokenize(pump_src_text))
  code_node = ParseCodeNode(tokens)
  return code_node
class Env:
  """Evaluation environment: stacks of meta variables and meta ranges.

  Both stacks grow at the front, so the most recent binding of a name
  shadows older ones; Pop* removes only the newest entry.
  """

  def __init__(self):
    self.variables = []
    self.ranges = []

  def Clone(self):
    """Returns an independent copy (shallow-copies both stacks)."""
    clone = Env()
    clone.variables = self.variables[:]
    clone.ranges = self.ranges[:]
    return clone

  def PushVariable(self, var, value):
    # If value looks like an int, store it as an int.
    try:
      int_value = int(value)
      if ('%s' % int_value) == value:
        value = int_value
    except Exception:
      pass
    self.variables[:0] = [(var, value)]

  def PopVariable(self):
    self.variables[:1] = []

  def PushRange(self, var, lower, upper):
    self.ranges[:0] = [(var, lower, upper)]

  def PopRange(self):
    self.ranges[:1] = []

  def GetValue(self, identifier):
    """Returns the newest binding of identifier; exits if undefined."""
    for (var, value) in self.variables:
      if identifier == var:
        return value

    # py3 fix: print statement converted to the print() function.
    print('ERROR: meta variable %s is undefined.' % (identifier,))
    sys.exit(1)

  def EvalExp(self, exp):
    """Evaluates an ExpNode's Python form against this Env; exits on error."""
    try:
      result = eval(exp.python_exp)
    except Exception as e:  # py3 fix: 'except Exception, e' is Python-2-only
      print('ERROR: caught exception %s: %s' % (e.__class__.__name__, e))
      print('ERROR: failed to evaluate meta expression %s at %s' %
            (exp.python_exp, exp.token.start))
      sys.exit(1)
    return result

  def GetRange(self, identifier):
    """Returns the newest (lower, upper) range of identifier; exits if undefined."""
    for (var, lower, upper) in self.ranges:
      if identifier == var:
        return (lower, upper)

    print('ERROR: range %s is undefined.' % (identifier,))
    sys.exit(1)
class Output:
  """Accumulates generated output text."""

  def __init__(self):
    self.string = ''

  def GetLastLine(self):
    """Returns the text after the final newline, or '' when the buffer
    contains no newline at all."""
    _, newline, tail = self.string.rpartition('\n')
    return tail if newline else ''

  def Append(self, s):
    self.string += s
def RunAtomicCode(env, node, output):
  """Executes a single AST node against env, appending generated text to
  output. Exits on an unrecognized node type."""
  if isinstance(node, VarNode):
    identifier = node.identifier.value.strip()
    # Render the variable's body into a string, then bind it.
    result = Output()
    RunAtomicCode(env.Clone(), node.atomic_code, result)
    value = result.string
    env.PushVariable(identifier, value)
  elif isinstance(node, RangeNode):
    identifier = node.identifier.value.strip()
    lower = int(env.EvalExp(node.exp1))
    upper = int(env.EvalExp(node.exp2))
    env.PushRange(identifier, lower, upper)
  elif isinstance(node, ForNode):
    identifier = node.identifier.value.strip()
    if node.sep is None:
      sep = ''
    else:
      sep = node.sep.value
    (lower, upper) = env.GetRange(identifier)
    for i in range(lower, upper + 1):
      new_env = env.Clone()
      new_env.PushVariable(identifier, i)
      RunCode(new_env, node.code, output)
      if i != upper:
        # Separator goes between iterations, not after the last one.
        output.Append(sep)
  elif isinstance(node, RawCodeNode):
    output.Append(node.raw_code.value)
  elif isinstance(node, IfNode):
    cond = env.EvalExp(node.exp)
    if cond:
      RunCode(env.Clone(), node.then_branch, output)
    elif node.else_branch is not None:
      RunCode(env.Clone(), node.else_branch, output)
  elif isinstance(node, ExpNode):
    value = env.EvalExp(node)
    output.Append('%s' % (value,))
  elif isinstance(node, LiteralDollarNode):
    output.Append('$')
  elif isinstance(node, CodeNode):
    RunCode(env.Clone(), node, output)
  else:
    # py3 fix: print statements converted to the print() function.
    print('BAD')
    print(node)
    sys.exit(1)
def RunCode(env, code_node, output):
  """Executes every atomic node in code_node against env, in order."""
  for atomic_code in code_node.atomic_code:
    RunAtomicCode(env, atomic_code, output)
def IsSingleLineComment(cur_line):
  """Returns True when the line contains a // comment marker."""
  return cur_line.find('//') >= 0
def IsInPreprocessorDirective(prev_lines, cur_line):
  """True when cur_line starts a '#' directive or continues one started on
  the previous emitted line (which then ends with a backslash)."""
  if cur_line.lstrip()[:1] == '#':
    return True
  return prev_lines and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
  """Wraps an overlong '//' comment line to 80 columns, appending the
  resulting lines to output."""
  loc = line.find('//')
  before_comment = line[:loc].rstrip()
  if before_comment == '':
    indent = loc
  else:
    # Code before the comment stays on its own line; the comment is
    # re-indented to the code's indentation.
    output.append(before_comment)
    indent = len(before_comment) - len(before_comment.lstrip())
  prefix = indent*' ' + '// '
  max_len = 80 - len(prefix)
  comment = line[loc + 2:].strip()
  # Split into word-ish segments so wrapping happens at word boundaries.
  segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
  cur_line = ''
  for seg in segs:
    if len((cur_line + seg).rstrip()) < max_len:
      cur_line += seg
    else:
      if cur_line.strip() != '':
        output.append(prefix + cur_line.rstrip())
      cur_line = seg.lstrip()
  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
  """Wraps an overlong code line to 80 columns, appending line_concat to
  each wrapped line and indenting continuations by 4 extra spaces."""
  indent = len(line) - len(line.lstrip())
  prefix = indent*' '  # Prefix of the current line
  max_len = 80 - indent - len(line_concat)  # Maximum length of the current line
  new_prefix = prefix + 4*' '  # Prefix of a continuation line
  new_max_len = max_len - 4  # Maximum length of a continuation line
  # Prefers to wrap a line after a ',' or ';'.
  segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
  cur_line = ''  # The current line without leading spaces.
  for seg in segs:
    # If the line is still too long, wrap at a space.
    while cur_line == '' and len(seg.strip()) > max_len:
      seg = seg.lstrip()
      split_at = seg.rfind(' ', 0, max_len)
      output.append(prefix + seg[:split_at].strip() + line_concat)
      seg = seg[split_at + 1:]
      prefix = new_prefix
      max_len = new_max_len

    if len((cur_line + seg).rstrip()) < max_len:
      cur_line = (cur_line + seg).lstrip()
    else:
      output.append(prefix + cur_line.rstrip() + line_concat)
      prefix = new_prefix
      max_len = new_max_len
      cur_line = seg.lstrip()

  if cur_line.strip() != '':
    output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
  """Wraps a long preprocessor directive; continuations end with ' \\'."""
  WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
  """Wraps a long non-directive code line with no continuation marker."""
  WrapCode(line, '', output)
def IsMultiLineIWYUPragma(line):
  """Returns a match object (truthy) when line contains a multi-line
  IWYU pragma comment, else None."""
  return re.search(r'/\* IWYU pragma: ', line)
def IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
  """Truthy for header guards, #includes, and one-line IWYU pragmas —
  lines that must never be wrapped."""
  return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
          re.match(r'^#include\s', line) or
          # Don't break IWYU pragmas, either; that causes iwyu.py problems.
          re.search(r'// IWYU pragma: ', line))
def WrapLongLine(line, output):
  """Appends line to output, wrapping it to 80 columns when necessary and
  choosing the wrap style by line kind (comment / directive / code)."""
  line = line.rstrip()
  if len(line) <= 80:
    output.append(line)
  elif IsSingleLineComment(line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # The style guide made an exception to allow long header guard lines,
      # includes and IWYU pragmas.
      output.append(line)
    else:
      WrapComment(line, output)
  elif IsInPreprocessorDirective(output, line):
    if IsHeaderGuardIncludeOrOneLineIWYUPragma(line):
      # The style guide made an exception to allow long header guard lines,
      # includes and IWYU pragmas.
      output.append(line)
    else:
      WrapPreprocessorDirective(line, output)
  elif IsMultiLineIWYUPragma(line):
    output.append(line)
  else:
    WrapPlainCode(line, output)
def BeautifyCode(string):
  """Wraps overlong lines, strips trailing whitespace, and returns the
  result terminated by a single newline."""
  wrapped = []
  for source_line in string.splitlines():
    WrapLongLine(source_line, wrapped)
  return '\n'.join(out_line.rstrip() for out_line in wrapped) + '\n'
def ConvertFromPumpSource(src_text):
  """Return the text generated from the given Pump source text."""
  # Pipeline: strip $$ comments -> parse -> run -> wrap/clean the output.
  ast = ParseToAST(StripMetaComments(src_text))
  output = Output()
  RunCode(Env(), ast, output)
  return BeautifyCode(output.string)
def main(argv):
  """Command-line entry point: converts the .pump file named by the last
  argument; writes FOO.cc for FOO.cc.pump, else prints to stdout."""
  if len(argv) == 1:
    # py3 fix: 'print __doc__' statement converted to the print() function.
    print(__doc__)
    sys.exit(1)

  file_path = argv[-1]
  # py3 fix: the file() builtin is gone; open() works on Python 2 and 3,
  # and the with-block guarantees the handle is closed.
  with open(file_path, 'r') as source_file:
    output_str = ConvertFromPumpSource(source_file.read())
  if file_path.endswith('.pump'):
    output_file_path = file_path[:-5]
  else:
    output_file_path = '-'
  if output_file_path == '-':
    # The original 'print output_str,' suppressed the extra newline;
    # sys.stdout.write() is the portable equivalent.
    sys.stdout.write(output_str)
  else:
    with open(output_file_path, 'w') as output_file:
      output_file.write('// This file was GENERATED by command:\n')
      output_file.write('// %s %s\n' %
                        (os.path.basename(__file__), os.path.basename(file_path)))
      output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
      output_file.write(output_str)
# Script entry point.
if __name__ == '__main__':
  main(sys.argv)
| mit |
guardicore/monkey | monkey/tests/unit_tests/infection_monkey/system_info/windows_cred_collector/test_pypykatz_handler.py | 1 | 5948 | from unittest import TestCase
from infection_monkey.system_info.windows_cred_collector.pypykatz_handler import (
_get_creds_from_pypykatz_session,
)
class TestPypykatzHandler(TestCase):
    """Tests credential extraction from a pypykatz session dict."""

    # Made up credentials, but structure of dict should be roughly the same
    PYPYKATZ_SESSION = {
        "authentication_id": 555555,
        "session_id": 3,
        "username": "Monkey",
        "domainname": "ReAlDoMaIn",
        "logon_server": "ReAlDoMaIn",
        "logon_time": "2020-06-02T04:53:45.256562+00:00",
        "sid": "S-1-6-25-260123139-3611579848-5589493929-3021",
        "luid": 123086,
        "msv_creds": [
            {
                "username": "monkey",
                "domainname": "ReAlDoMaIn",
                "NThash": b"1\xb7<Y\xd7\xe0\xc0\x89\xc01\xd6\xcf\xe0\xd1j\xe9",
                "LMHash": None,
                "SHAHash": b"\x18\x90\xaf\xd8\x07\t\xda9\xa3\xee^kK\r2U\xbf\xef\x95`",
            }
        ],
        "wdigest_creds": [
            {
                "credtype": "wdigest",
                "username": "monkey",
                "domainname": "ReAlDoMaIn",
                "password": "canyoufindme",
                "luid": 123086,
            }
        ],
        "ssp_creds": [
            {
                "credtype": "wdigest",
                "username": "monkey123",
                "domainname": "ReAlDoMaIn",
                "password": "canyoufindme123",
                "luid": 123086,
            }
        ],
        "livessp_creds": [
            {
                "credtype": "wdigest",
                "username": "monk3y",
                "domainname": "ReAlDoMaIn",
                "password": "canyoufindm3",
                "luid": 123086,
            }
        ],
        "dpapi_creds": [
            {
                "credtype": "dpapi",
                "key_guid": "9123-123ae123de4-121239-3123-421f",
                "masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b7294"
                "7f5e80920034d1275d8613532025975e"
                "f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
                "sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
                "luid": 123086,
            },
            {
                "credtype": "dpapi",
                "key_guid": "9123-123ae123de4-121239-3123-421f",
                "masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b729"
                "47f5e80920034d1275d8613532025975e"
                "f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
                "sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
                "luid": 123086,
            },
            {
                "credtype": "dpapi",
                "key_guid": "9123-123ae123de4-121239-3123-421f",
                "masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b72"
                "947f5e80920034d1275d8613532025975e"
                "f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
                "sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
                "luid": 123086,
            },
            {
                "credtype": "dpapi",
                "key_guid": "9123-123ae123de4-121239-3123-421f",
                "masterkey": "6e81d0cfd5e9ec083cfbdaf4d25b9cc9cc6b729"
                "47f5e80920034d1275d8613532025975e"
                "f051e891c30e6e9af6db54500fedfed1c968389bf6262c77fbaa68c9",
                "sha1_masterkey": "bbdabc3cd2f6bcbe3e2cee6ce4ce4cebcef4c6da",
                "luid": 123086,
            },
            # Deliberately incomplete entry: no masterkey fields.
            {"credtype": "dpapi", "key_guid": "9123-123ae123de4-121239-3123-421f"},
        ],
        "kerberos_creds": [
            {
                "credtype": "kerberos",
                "username": "monkey_kerb",
                "password": None,
                "domainname": "ReAlDoMaIn",
                "luid": 123086,
                "tickets": [],
            }
        ],
        "credman_creds": [
            {
                "credtype": "credman",
                "username": "monkey",
                "domainname": "monkey.ad.monkey.com",
                "password": "canyoufindme2",
                "luid": 123086,
            },
            {
                "credtype": "credman",
                "username": "monkey@monkey.com",
                "domainname": "moneky.monkey.com",
                "password": "canyoufindme1",
                "luid": 123086,
            },
            {
                "credtype": "credman",
                "username": "test",
                "domainname": "test.test.ts",
                "password": "canyoufindit",
                "luid": 123086,
            },
        ],
        "tspkg_creds": [],
    }

    def test__get_creds_from_pypykatz_session(self):
        results = _get_creds_from_pypykatz_session(TestPypykatzHandler.PYPYKATZ_SESSION)

        test_dicts = [
            {
                "username": "monkey",
                "ntlm_hash": "31b73c59d7e0c089c031d6cfe0d16ae9",
                "password": "",
                "lm_hash": "",
            },
            {"username": "monkey", "ntlm_hash": "", "password": "canyoufindme", "lm_hash": ""},
            {
                "username": "monkey123",
                "ntlm_hash": "",
                "password": "canyoufindme123",
                "lm_hash": "",
            },
            {"username": "monk3y", "ntlm_hash": "", "password": "canyoufindm3", "lm_hash": ""},
            {"username": "monkey", "ntlm_hash": "", "password": "canyoufindme2", "lm_hash": ""},
            {
                "username": "monkey@monkey.com",
                "ntlm_hash": "",
                "password": "canyoufindme1",
                "lm_hash": "",
            },
            {"username": "test", "ntlm_hash": "", "password": "canyoufindit", "lm_hash": ""},
        ]
        results = [result.to_dict() for result in results]
        # Fix: the original ran assertTrue inside a list comprehension, which
        # abuses a comprehension for side effects and reports a bare
        # 'False is not true' on failure. assertIn in a plain loop names the
        # missing credential dict in the failure message.
        for test_dict in test_dicts:
            self.assertIn(test_dict, results)
| gpl-3.0 |
import os

# toolchains options
ARCH='arm'
CPU='cortex-m0'
CROSS_TOOL='gcc'

# bsp lib config
BSP_LIBRARY_TYPE = None

# Environment overrides: RTT_CC selects the toolchain, RTT_ROOT points at
# the RT-Thread source tree.
if os.getenv('RTT_CC'):
    CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
    RTT_ROOT = os.getenv('RTT_ROOT')

# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
    PLATFORM = 'gcc'
    EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
    PLATFORM = 'armcc'
    EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
    PLATFORM = 'iar'
    EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'

# RTT_EXEC_PATH overrides the default toolchain install location above.
if os.getenv('RTT_EXEC_PATH'):
    EXEC_PATH = os.getenv('RTT_EXEC_PATH')

BUILD = 'debug'

if PLATFORM == 'gcc':
    # toolchains
    PREFIX = 'arm-none-eabi-'
    CC = PREFIX + 'gcc'
    AS = PREFIX + 'gcc'
    AR = PREFIX + 'ar'
    CXX = PREFIX + 'g++'
    LINK = PREFIX + 'gcc'
    TARGET_EXT = 'elf'
    SIZE = PREFIX + 'size'
    OBJDUMP = PREFIX + 'objdump'
    OBJCPY = PREFIX + 'objcopy'

    # DEVICE flags are shared by the compiler, assembler and linker.
    DEVICE = ' -mcpu=cortex-m0plus -mthumb -ffunction-sections -fdata-sections'
    CFLAGS = DEVICE + ' -Dgcc'
    AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
    LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'

    CPATH = ''
    LPATH = ''

    if BUILD == 'debug':
        CFLAGS += ' -O0 -gdwarf-2 -g'
        AFLAGS += ' -gdwarf-2'
    else:
        CFLAGS += ' -O2'

    CXXFLAGS = CFLAGS

    # Convert the ELF to a raw binary and print the section sizes.
    POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'

elif PLATFORM == 'armcc':
    # toolchains
    CC = 'armcc'
    CXX = 'armcc'
    AS = 'armasm'
    AR = 'armar'
    LINK = 'armlink'
    TARGET_EXT = 'axf'

    DEVICE = ' --cpu Cortex-M0 '
    CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
    AFLAGS = DEVICE + ' --apcs=interwork '
    LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
    CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
    LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'

    # Use the MicroLIB C library to reduce code size.
    CFLAGS += ' -D__MICROLIB '
    AFLAGS += ' --pd "__MICROLIB SETA 1" '
    LFLAGS += ' --library_type=microlib '
    EXEC_PATH += '/ARM/ARMCC/bin/'

    if BUILD == 'debug':
        CFLAGS += ' -g -O0'
        AFLAGS += ' -g'
    else:
        CFLAGS += ' -O2'

    CXXFLAGS = CFLAGS
    # C99 applies to C compilation only, so it is added after CXXFLAGS is taken.
    CFLAGS += ' -std=c99'

    POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'

elif PLATFORM == 'iar':
    # toolchains
    CC = 'iccarm'
    CXX = 'iccarm'
    AS = 'iasmarm'
    AR = 'iarchive'
    LINK = 'ilinkarm'
    TARGET_EXT = 'out'

    DEVICE = '-Dewarm'

    CFLAGS = DEVICE
    CFLAGS += ' --diag_suppress Pa050'
    # Disable IAR optimizations that interfere with debugging.
    CFLAGS += ' --no_cse'
    CFLAGS += ' --no_unroll'
    CFLAGS += ' --no_inline'
    CFLAGS += ' --no_code_motion'
    CFLAGS += ' --no_tbaa'
    CFLAGS += ' --no_clustering'
    CFLAGS += ' --no_scheduling'
    CFLAGS += ' --endian=little'
    CFLAGS += ' --cpu=Cortex-M0'
    CFLAGS += ' -e'
    CFLAGS += ' --fpu=None'
    CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
    CFLAGS += ' --silent'

    AFLAGS = DEVICE
    AFLAGS += ' -s+'
    AFLAGS += ' -w+'
    AFLAGS += ' -r'
    AFLAGS += ' --cpu Cortex-M0'
    AFLAGS += ' --fpu None'
    AFLAGS += ' -S'

    if BUILD == 'debug':
        CFLAGS += ' --debug'
        CFLAGS += ' -On'
    else:
        CFLAGS += ' -Oh'

    LFLAGS = ' --config "board/linker_scripts/link.icf"'
    LFLAGS += ' --entry __iar_program_start'

    CXXFLAGS = CFLAGS

    EXEC_PATH = EXEC_PATH + '/arm/bin/'
    POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
def dist_handle(BSP_ROOT, dist_dir):
    """Build a distributable copy of this BSP into *dist_dir*.

    Delegates to the shared ``sdk_dist`` helper that lives in the
    ``tools`` directory next to the BSP tree.
    """
    import sys
    # Make <repo>/tools importable relative to the BSP directory.
    sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
    from sdk_dist import dist_do_building
    dist_do_building(BSP_ROOT, dist_dir)
    # NOTE: the original also captured os.getcwd() into an unused local
    # (cwd_path); it was dead code and has been removed.
| gpl-2.0 |
pelya/commandergenius | project/jni/python/src/Demo/tix/samples/Balloon.py | 37 | 2310 | # -*-mode: python; fill-column: 75; tab-width: 8; coding: iso-latin-1-unix -*-
#
# $Id: Balloon.py 36560 2004-07-18 06:16:08Z tim_one $
#
# Tix Demostration Program
#
# This sample program is structured in such a way so that it can be
# executed from the Tix demo program "tixwidgets.py": it must have a
# procedure called "RunSample". It should also have the "if" statment
# at the end of this file so that it can be run as a standalone
# program.
# This file demonstrates the use of the tixBalloon widget, which provides
# a interesting way to give help tips about elements in your user interface.
# Your can display the help message in a "balloon" and a status bar widget.
#
import Tix
# Flag value for tk.dooneevent(): process any pending Tcl event.
TCL_ALL_EVENTS = 0
def RunSample(root):
    """Run the balloon demo as a modal loop on *root*, then tear it down."""
    demo = DemoBalloon(root)
    demo.mainloop()
    demo.destroy()
class DemoBalloon:
    """Demo window wiring a Tix.Balloon to two buttons plus a status bar."""

    def __init__(self, w):
        self.root = w
        self.exit = -1          # stays negative until quitcmd() signals exit

        z = w.winfo_toplevel()
        # Route the window-manager close button through quitcmd().
        z.wm_protocol("WM_DELETE_WINDOW", lambda self=self: self.quitcmd())

        # Status bar shared with the balloon widget below.
        status = Tix.Label(w, width=40, relief=Tix.SUNKEN, bd=1)
        status.pack(side=Tix.BOTTOM, fill=Tix.Y, padx=2, pady=1)

        # Create two mysterious widgets that need balloon help
        button1 = Tix.Button(w, text='Something Unexpected',
                             command=self.quitcmd)
        button2 = Tix.Button(w, text='Something Else Unexpected')
        # Default-arg lambda binds button2 now (avoids late-binding pitfalls).
        button2['command'] = lambda w=button2: w.destroy()
        button1.pack(side=Tix.TOP, expand=1)
        button2.pack(side=Tix.TOP, expand=1)

        # Create the balloon widget and associate it with the widgets that we want
        # to provide tips for:
        b = Tix.Balloon(w, statusbar=status)

        b.bind_widget(button1, balloonmsg='Close Window',
                      statusmsg='Press this button to close this window')
        b.bind_widget(button2, balloonmsg='Self-destruct button',
                      statusmsg='Press this button and it will destroy itself')

    def quitcmd(self):
        # Signal mainloop() below to stop iterating.
        self.exit = 0

    def mainloop(self):
        # Hand-rolled event loop (instead of Tk.mainloop) so the demo can be
        # stopped via self.exit; processes one Tcl event per iteration.
        foundEvent = 1
        while self.exit < 0 and foundEvent > 0:
            foundEvent = self.root.tk.dooneevent(TCL_ALL_EVENTS)

    def destroy(self):
        self.root.destroy()
if __name__ == '__main__':
    # Standalone entry point: create a Tix root window and run the demo.
    root = Tix.Tk()
    RunSample(root)
| lgpl-2.1 |
Anik1199/android_kernel_mediatek_sprout | tools/perf/tests/attr.py | 3174 | 9441 | #! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
    """Raised when a test's recorded events do not match the expectation."""

    def __init__(self, test, msg):
        self.test = test
        self.msg = msg

    def getMsg(self):
        """Format the failure as "'<test path>' - <message>"."""
        return "'%s' - %s" % (self.test.path, self.msg)
class Unsup(Exception):
    """Raised when a test cannot run in the current environment."""

    def __init__(self, test):
        self.test = test

    def getMsg(self):
        """Format the unsupported test as "'<test path>'"."""
        return "'%s'" % self.test.path
class Event(dict):
    """One perf event: a dict of perf_event_attr fields plus match helpers."""

    # perf_event_attr fields that participate in matching.
    terms = [
        'cpu',
        'flags',
        'type',
        'size',
        'config',
        'sample_period',
        'sample_type',
        'read_format',
        'disabled',
        'inherit',
        'pinned',
        'exclusive',
        'exclude_user',
        'exclude_kernel',
        'exclude_hv',
        'exclude_idle',
        'mmap',
        'comm',
        'freq',
        'inherit_stat',
        'enable_on_exec',
        'task',
        'watermark',
        'precise_ip',
        'mmap_data',
        'sample_id_all',
        'exclude_host',
        'exclude_guest',
        'exclude_callchain_kernel',
        'exclude_callchain_user',
        'wakeup_events',
        'bp_type',
        'config1',
        'config2',
        'branch_sample_type',
        'sample_regs_user',
        'sample_stack_user',
    ]

    def add(self, data):
        """Merge (key, value) pairs into this event."""
        for key, val in data:
            log.debug(" %s = %s" % (key, val))
            self[key] = val

    def __init__(self, name, data, base):
        log.debug(" Event %s" % name)
        self.name = name
        self.group = ''
        # Base (parent) values first, then the event's own overrides.
        self.add(base)
        self.add(data)

    def compare_data(self, a, b):
        """Compare two field values; '|' separates alternatives, '*' matches anything."""
        # Allow multiple values in assignment separated by '|'
        a_list = a.split('|')
        b_list = b.split('|')
        for a_item in a_list:
            for b_item in b_list:
                if (a_item == b_item):
                    return True
                elif (a_item == '*') or (b_item == '*'):
                    return True
        return False

    def equal(self, other):
        """True if both events define and match every term in Event.terms."""
        for t in Event.terms:
            # Check presence first: the debug line below indexes both dicts
            # and would raise KeyError for a missing term (the original
            # logged before checking).  'in' replaces Py2-only has_key().
            if t not in self or t not in other:
                return False
            log.debug(" [%s] %s %s" % (t, self[t], other[t]))
            if not self.compare_data(self[t], other[t]):
                return False
        return True

    def diff(self, other):
        """Log every term whose values differ between self and other."""
        for t in Event.terms:
            if t not in self or t not in other:
                continue
            if not self.compare_data(self[t], other[t]):
                log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
#   - just single instance in file
#   - needs to specify:
#     'command' - perf command name
#     'args' - special command arguments
#     'ret' - expected command return value (0 by default)
#
# [eventX:base]
#   - one or multiple instances in file
#   - expected values assignments
class Test(object):
    """One attr-test file: runs a perf command and matches recorded events."""

    def __init__(self, path, options):
        parser = ConfigParser.SafeConfigParser()
        parser.read(path)

        log.warning("running '%s'" % path)

        self.path = path
        self.test_dir = options.test_dir
        self.perf = options.perf
        self.command = parser.get('config', 'command')
        self.args = parser.get('config', 'args')

        try:
            self.ret = parser.get('config', 'ret')
        except:
            # 'ret' is optional; default to expecting success (0).
            self.ret = 0

        self.expect = {}   # event name -> expected Event
        self.result = {}   # event name -> recorded Event

        log.debug(" loading expected events");
        self.load_events(path, self.expect)

    def is_event(self, name):
        # Section names describing events contain the word 'event'.
        if name.find("event") == -1:
            return False
        else:
            return True

    def load_events(self, path, events):
        """Parse *path* and store one Event per event section into *events*."""
        parser_event = ConfigParser.SafeConfigParser()
        parser_event.read(path)

        # The event record section header contains 'event' word,
        # optionaly followed by ':' allowing to load 'parent
        # event' first as a base
        for section in filter(self.is_event, parser_event.sections()):

            parser_items = parser_event.items(section);
            base_items = {}

            # Read parent event if there's any
            if (':' in section):
                base = section[section.index(':') + 1:]
                parser_base = ConfigParser.SafeConfigParser()
                parser_base.read(self.test_dir + '/' + base)
                base_items = parser_base.items('event')

            e = Event(section, parser_items, base_items)
            events[section] = e

    def run_cmd(self, tempdir):
        """Run the perf command; raise Unsup if it returns an unexpected code."""
        cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
              self.perf, self.command, tempdir, self.args)
        ret = os.WEXITSTATUS(os.system(cmd))

        log.info(" '%s' ret %d " % (cmd, ret))

        if ret != int(self.ret):
            raise Unsup(self)

    def compare(self, expect, result):
        """Match every event in *expect* against *result*; raise Fail on mismatch."""
        match = {}

        log.debug(" compare");

        # For each expected event find all matching
        # events in result. Fail if there's not any.
        for exp_name, exp_event in expect.items():
            exp_list = []
            log.debug(" matching [%s]" % exp_name)
            for res_name, res_event in result.items():
                log.debug(" to [%s]" % res_name)
                if (exp_event.equal(res_event)):
                    exp_list.append(res_name)
                    log.debug(" ->OK")
                else:
                    log.debug(" ->FAIL");

            log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))

            # we did not any matching event - fail
            if (not exp_list):
                # NOTE(review): res_event here is the last value of the inner
                # loop; it is undefined when *result* is empty — confirm.
                exp_event.diff(res_event)
                raise Fail(self, 'match failure');

            match[exp_name] = exp_list

        # For each defined group in the expected events
        # check we match the same group in the result.
        for exp_name, exp_event in expect.items():
            group = exp_event.group

            if (group == ''):
                continue

            for res_name in match[exp_name]:
                res_group = result[res_name].group
                if res_group not in match[group]:
                    raise Fail(self, 'group failure')

                log.debug(" group: [%s] matches group leader %s" %
                          (exp_name, str(match[group])))

        log.debug(" matched")

    def resolve_groups(self, events):
        """Replace each event's numeric group_fd with its leader's event name."""
        for name, event in events.items():
            group_fd = event['group_fd'];
            if group_fd == '-1':
                continue;

            for iname, ievent in events.items():
                if (ievent['fd'] == group_fd):
                    event.group = iname
                    log.debug('[%s] has group leader [%s]' % (name, iname))
                    break;

    def run(self):
        """Execute the test end-to-end inside a throwaway temp directory."""
        tempdir = tempfile.mkdtemp();

        try:
            # run the test script
            self.run_cmd(tempdir);

            # load events expectation for the test
            log.debug(" loading result events");
            for f in glob.glob(tempdir + '/event*'):
                self.load_events(f, self.result);

            # resolve group_fd to event names
            self.resolve_groups(self.expect);
            self.resolve_groups(self.result);

            # do the expectation - results matching - both ways
            self.compare(self.expect, self.result)
            self.compare(self.result, self.expect)

        finally:
            # cleanup
            shutil.rmtree(tempdir)
def run_tests(options):
    """Run every test file matching options.test under options.test_dir.

    Unsupported tests (the perf command returned an unexpected code) are
    logged and skipped rather than failing the whole run.
    """
    for f in glob.glob(options.test_dir + '/' + options.test):
        try:
            Test(f, options).run()
        # 'except X as y' works on Python 2.6+ and 3.x, unlike 'except X, y'.
        except Unsup as obj:
            log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
    """Configure the global 'test' logger from the -v count.

    Levels: None/0 -> CRITICAL, 1 -> WARNING, 2 -> INFO, >=3 -> DEBUG.
    """
    global log
    # optparse leaves a count option at None when -v is never given;
    # comparing None with >= would raise TypeError on Python 3.
    verbose = verbose or 0

    level = logging.CRITICAL

    if verbose == 1:
        level = logging.WARNING
    if verbose == 2:
        level = logging.INFO
    if verbose >= 3:
        level = logging.DEBUG

    log = logging.getLogger('test')
    log.setLevel(level)
    ch = logging.StreamHandler()
    ch.setLevel(level)
    formatter = logging.Formatter('%(message)s')
    ch.setFormatter(formatter)
    log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
    """Parse command-line options and run the attr test suite (Python 2)."""
    parser = optparse.OptionParser(usage=USAGE)

    parser.add_option("-t", "--test",
                      action="store", type="string", dest="test")
    parser.add_option("-d", "--test-dir",
                      action="store", type="string", dest="test_dir")
    parser.add_option("-p", "--perf",
                      action="store", type="string", dest="perf")
    parser.add_option("-v", "--verbose",
                      action="count", dest="verbose")

    options, args = parser.parse_args()
    if args:
        # parser.error() exits; the return below is unreachable in practice.
        parser.error('FAILED wrong arguments %s' % ' '.join(args))
        return -1

    setup_log(options.verbose)

    if not options.test_dir:
        print 'FAILED no -d option specified'
        sys.exit(-1)

    if not options.test:
        # Default: run every file named test* in the test directory.
        options.test = 'test*'

    try:
        run_tests(options)

    except Fail, obj:
        print "FAILED %s" % obj.getMsg();
        sys.exit(-1)

    sys.exit(0)

if __name__ == '__main__':
    main()
| gpl-2.0 |
foss-transportationmodeling/rettina-server | flask/local/lib/python2.7/site-packages/sqlalchemy/testing/util.py | 21 | 5304 | # testing/util.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from ..util import jython, pypy, defaultdict, decorator, py2k
import decimal
import gc
import time
import random
import sys
import types
# Pick a gc strategy per interpreter: VMs that do not free on refcount == 0
# (Jython, PyPy) need explicit, repeated collections in tests.
if jython:
    def jython_gc_collect(*args):
        """aggressive gc.collect for tests."""
        gc.collect()
        time.sleep(0.1)
        gc.collect()
        gc.collect()
        return 0

    # "lazy" gc, for VM's that don't GC on refcount == 0
    gc_collect = lazy_gc = jython_gc_collect

elif pypy:
    def pypy_gc_collect(*args):
        # Two passes; presumably one collection is not always enough on
        # PyPy's GC — see the Jython branch above.
        gc.collect()
        gc.collect()
    gc_collect = lazy_gc = pypy_gc_collect

else:
    # assume CPython - straight gc.collect, lazy_gc() is a pass
    gc_collect = gc.collect

    def lazy_gc():
        pass
def picklers():
    """Yield (loads, dumps) pairs for every available pickle module and
    each of protocols -1, 0, 1 and 2."""
    picklers = set()
    if py2k:
        try:
            import cPickle
            picklers.add(cPickle)
        except ImportError:
            pass

    import pickle
    picklers.add(pickle)

    # yes, this thing needs this much testing
    for pickle_ in picklers:
        for protocol in -1, 0, 1, 2:
            # Bind the loop variables as defaults: the original lambda
            # captured pickle_/protocol late, so consumers that collected
            # all pairs before calling them saw only the last values.
            yield (pickle_.loads,
                   lambda d, pickle_=pickle_, protocol=protocol:
                       pickle_.dumps(d, protocol))
def round_decimal(value, prec):
    """Round *value* down to *prec* decimal places (floats use round())."""
    if isinstance(value, float):
        return round(value, prec)

    # can also use shift() here but that is 2.6 only
    quantum = decimal.Decimal("1" + "0" * prec)
    floored = (value * quantum).to_integral(decimal.ROUND_FLOOR)
    return floored / pow(10, prec)
class RandomSet(set):
    """A set whose iteration order and pop() choice are deliberately random."""

    def __iter__(self):
        items = list(set.__iter__(self))
        random.shuffle(items)
        return iter(items)

    def pop(self):
        # Remove and return a uniformly chosen element.
        victims = list(set.__iter__(self))
        choice = victims[random.randint(0, len(victims) - 1)]
        self.remove(choice)
        return choice

    def union(self, other):
        return RandomSet(set.union(self, other))

    def difference(self, other):
        return RandomSet(set.difference(self, other))

    def intersection(self, other):
        return RandomSet(set.intersection(self, other))

    def copy(self):
        return RandomSet(self)
def conforms_partial_ordering(tuples, sorted_elements):
    """True if the given sorting conforms to the given partial ordering."""
    deps = defaultdict(set)
    for parent, child in tuples:
        deps[parent].add(child)

    # No element may depend on (be a child of) anything placed after it.
    for index, node in enumerate(sorted_elements):
        if any(node in deps[later] for later in sorted_elements[index:]):
            return False
    return True
def all_partial_orderings(tuples, elements):
    """Yield every ordering of *elements* consistent with (parent, child) tuples."""
    edges = defaultdict(set)
    for parent, child in tuples:
        edges[child].add(parent)

    def _generate(remaining):
        if len(remaining) == 1:
            yield list(remaining)
        else:
            for candidate in remaining:
                rest = set(remaining).difference([candidate])
                # candidate may come first only if none of its parents remain
                if not rest.intersection(edges[candidate]):
                    for tail in _generate(rest):
                        yield [candidate] + tail

    return iter(_generate(elements))
def function_named(fn, name):
    """Return a function whose ``__name__`` is *name*.

    Assigns ``__name__`` in place when the implementation allows it;
    otherwise rebuilds an equivalent function object with the new name.

    This function should be phased out as much as possible
    in favor of @decorator. Tests that "generate" many named tests
    should be modernized.
    """
    try:
        fn.__name__ = name
    except TypeError:
        # Immutable __name__ on this implementation: rebuild the function.
        fn = types.FunctionType(fn.__code__, fn.__globals__, name,
                                fn.__defaults__, fn.__closure__)
    return fn
def run_as_contextmanager(ctx, fn, *arg, **kw):
    """Run *fn* under context manager *ctx*, simulating a ``with`` block.

    Exists to support Python versions without the ``with`` statement;
    *fn* receives the value returned by ``ctx.__enter__()`` first.
    """
    obj = ctx.__enter__()
    try:
        retval = fn(obj, *arg, **kw)
        ctx.__exit__(None, None, None)
        return retval
    except:
        exc_info = sys.exc_info()
        suppressed = ctx.__exit__(*exc_info)
        if suppressed is None:
            # __exit__ did not suppress the exception: re-raise it.
            raise
        else:
            return suppressed
def rowset(results):
    """Converts the results of sql execution into a plain set of column tuples.

    Useful for asserting the results of an unordered query.
    """
    return {tuple(row) for row in results}
def fail(msg):
    """Unconditionally fail the current test with *msg*.

    Raises AssertionError explicitly rather than using ``assert False``,
    so the failure is not stripped away under ``python -O``.
    """
    raise AssertionError(msg)
@decorator
def provide_metadata(fn, *args, **kw):
    """Provide bound MetaData for a single test, dropping afterwards."""
    # Imported here rather than at module level — presumably to avoid an
    # import cycle at load time; confirm before hoisting.
    from . import config
    from sqlalchemy import schema

    metadata = schema.MetaData(config.db)
    self = args[0]
    # Save any pre-existing self.metadata so it can be restored afterwards.
    prev_meta = getattr(self, 'metadata', None)
    self.metadata = metadata
    try:
        return fn(*args, **kw)
    finally:
        # Drop all tables created during the test, then restore state.
        metadata.drop_all()
        self.metadata = prev_meta
class adict(dict):
    """Dict keys available as attributes.  Shadows."""

    def __getattribute__(self, key):
        # Prefer dictionary entries; fall back to real attributes (methods,
        # dunder lookups) only when the key is absent.
        try:
            return self[key]
        except KeyError:
            return dict.__getattribute__(self, key)

    def get_all(self, *keys):
        """Return the values for *keys*, in order, as a tuple."""
        return tuple(self[key] for key in keys)
mihaic/brainiak | tests/io/test_io.py | 7 | 3078 | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Iterable, Sequence
import nibabel as nib
import numpy as np
import pytest
from brainiak import io
@pytest.fixture
def in_dir() -> Path:
    """Directory containing the test data files."""
    return Path(__file__).parent / "data"


@pytest.fixture
def expected_image_data_shape() -> Sequence[int]:
    """Expected data shape of each test image."""
    return (64, 64, 26, 10)


@pytest.fixture
def mask_path(in_dir: Path) -> Path:
    """Path to the test mask image."""
    return in_dir / "mask.nii.gz"


@pytest.fixture
def labels_path(in_dir: Path) -> Path:
    """Path to the serialized epoch labels."""
    return in_dir / "epoch_labels.npy"


@pytest.fixture
def expected_condition_spec_shape() -> Sequence[int]:
    """Expected shape of each condition spec loaded from the labels."""
    return (2, 2, 10)


@pytest.fixture
def expected_n_subjects() -> int:
    """Number of subject images present in the test data."""
    return 2


@pytest.fixture
def image_paths(in_dir: Path) -> Iterable[Path]:
    """Paths of the per-subject test images."""
    return (in_dir / "subject1_bet.nii.gz", in_dir / "subject2_bet.nii.gz")
def test_load_images_from_dir_data_shape(
        in_dir: Path,
        expected_image_data_shape: Sequence[int],
        expected_n_subjects: int
        ) -> None:
    """Every image matched by the suffix has the expected shape and count."""
    for i, image in enumerate(io.load_images_from_dir(in_dir, "bet.nii.gz")):
        assert image.get_data().shape == (64, 64, 26, 10)
    assert i + 1 == expected_n_subjects


def test_load_images_data_shape(
        image_paths: Iterable[Path],
        expected_image_data_shape: Sequence[int],
        expected_n_subjects: int
        ) -> None:
    """Loading explicit paths yields the same shapes and subject count."""
    for i, image in enumerate(io.load_images(image_paths)):
        assert image.get_data().shape == (64, 64, 26, 10)
    assert i + 1 == expected_n_subjects


def test_load_boolean_mask(mask_path: Path) -> None:
    # The mask must come back as a boolean array.
    mask = io.load_boolean_mask(mask_path)
    assert mask.dtype == np.bool


def test_load_boolean_mask_predicate(mask_path: Path) -> None:
    """A custom predicate is applied to the raw mask values."""
    mask = io.load_boolean_mask(mask_path, lambda x: np.logical_not(x))
    expected_mask = np.logical_not(io.load_boolean_mask(mask_path))
    assert np.array_equal(mask, expected_mask)


def test_load_labels(labels_path: Path,
                     expected_condition_spec_shape: Sequence[int],
                     expected_n_subjects: int) -> None:
    """Each subject yields one condition spec of the expected shape."""
    condition_specs = io.load_labels(labels_path)
    i = 0
    for condition_spec in condition_specs:
        assert condition_spec.shape == expected_condition_spec_shape
        i += 1
    assert i == expected_n_subjects


def test_save_as_nifti_file(tmpdir) -> None:
    """Round-trip: saved NIfTI data keeps its shape when reloaded."""
    out_file = str(tmpdir / "nifti.nii")
    shape = (4, 4, 4)
    io.save_as_nifti_file(np.ones(shape), np.eye(4), out_file)
    assert nib.load(out_file).get_data().shape == shape
| apache-2.0 |
rc/sfepy | sfepy/discrete/fem/utils.py | 4 | 9529 | from __future__ import absolute_import
import numpy as nm
import sfepy.linalg as la
from sfepy.discrete.integrals import Integral
from sfepy.discrete import PolySpace
from six.moves import range
def prepare_remap(indices, n_full):
    """
    Prepare vector for remapping range `[0, n_full]` to its subset given
    by `indices`.

    Positions not listed in `indices` map to -1.
    """
    remap = nm.full(n_full, -1, dtype=nm.int32)
    remap[indices] = nm.arange(indices.shape[0], dtype=nm.int32)
    return remap
def invert_remap(remap):
    """
    Return the inverse of `remap`, i.e. a mapping from a sub-range
    indices to a full range, see :func:`prepare_remap()`.
    """
    if remap is None:
        return None
    # Full-range positions that were kept are exactly the non-negative slots.
    return nm.where(remap >= 0)[0].astype(nm.int32)
def prepare_translate(old_indices, new_indices):
    """
    Prepare vector for translating `old_indices` to `new_indices`.

    Returns
    -------
    translate : array
        The translation vector. Then `new_ar = translate[old_ar]`.
    """
    old_indices = nm.asarray(old_indices)
    new_indices = nm.asarray(new_indices)

    # Unlisted old indices translate to 0.
    size = old_indices.max() + 1
    translate = nm.zeros(size, dtype=new_indices.dtype)
    translate[old_indices] = new_indices
    return translate
def compute_nodal_normals(nodes, region, field, return_imap=False):
    """
    Nodal normals are computed by simple averaging of element normals of
    elements every node is contained in.

    Parameters
    ----------
    nodes : array
        Global node indices the normals are computed for.
    region : Region instance
        The surface region the nodes belong to.
    field : Field instance
        Field providing the approximation and surface mappings.
    return_imap : bool
        If True, also return the map from global node indices to rows of
        the returned `normals` array.
    """
    dim = region.dim
    field.domain.create_surface_group(region)
    field.setup_surface_data(region)

    # Custom integral with quadrature points in nodes.
    ps = PolySpace.any_from_args('', field.gel.surface_facet,
                                 field.approx_order)
    qp_coors = ps.node_coors

    # Unit normals -> weights = ones.
    qp_weights = nm.ones(qp_coors.shape[0], dtype=nm.float64)
    integral = Integral('aux', coors=qp_coors, weights=qp_weights)

    normals = nm.zeros((nodes.shape[0], dim), dtype=nm.float64)
    mask = nm.zeros((nodes.max() + 1,), dtype=nm.int32)
    imap = nm.empty_like(mask)
    imap.fill(nodes.shape[0])  # out-of-range index for normals.
    imap[nodes] = nm.arange(nodes.shape[0], dtype=nm.int32)

    cmap, _ = field.get_mapping(region, integral, 'surface')
    e_normals = cmap.normal[..., 0]

    sd = field.surface_data[region.name]
    econn = sd.get_connectivity()
    mask[econn] += 1
    # normals[imap[econn]] += e_normals
    # Accumulate in a loop: the fancy-indexed += above would not add up
    # repeated node indices within one element.
    im = imap[econn]
    for ii, en in enumerate(e_normals):
        normals[im[ii]] += en

    # All nodes must have a normal.
    if not nm.all(mask[nodes] > 0):
        raise ValueError('region %s has not complete faces!' % region.name)

    # Normalize the averaged normals to unit length.
    norm = la.norm_l2_along_axis(normals)[:, nm.newaxis]
    if (norm < 1e-15).any():
        raise ValueError('zero nodal normal! (a node in volume?)')
    normals /= norm

    if return_imap:
        return normals, imap

    else:
        return normals
def _get_edge_path(graph, seed, mask, cycle=False):
    """
    Get a path in an edge graph starting with seed. The mask is incremented by
    one at positions of the path vertices.

    With `cycle` set, a path that closes on itself gets the seed appended
    again so the cycle is explicit.
    """
    if mask[seed]:
        # Seed already visited - nothing to walk.
        return []

    path = [seed]
    mask[seed] = 1

    # Neighbours of the current vertex (graph assumed CSR-like: .indices).
    row = graph[seed].indices
    nv = len(row)
    while nv:
        if nv == 2:
            # Interior vertex: continue through the unvisited neighbour.
            if mask[row[0]]:
                if mask[row[1]]:
                    # Both neighbours seen: close the cycle or stop.
                    if cycle:
                        path.append(seed)
                    break

                else:
                    vert = row[1]

            else:
                vert = row[0]

        elif mask[row[0]]:
            # Degree-1 vertex whose only neighbour was already visited.
            break

        else:
            vert = row[0]
        path.append(vert)
        mask[vert] = 1
        row = graph[vert].indices
        nv = len(row)

    path = nm.array(path, dtype=nm.int32)
    return path
def get_edge_paths(graph, mask):
    """
    Get all edge paths in a graph with non-masked vertices. The mask is
    updated.

    Raises ValueError when any vertex has more than two incident edges.
    """
    nodes = nm.unique(graph.indices)
    # Number of neighbours per vertex (graph assumed CSR-like: .indptr).
    npv = nm.diff(graph.indptr)

    if npv.max() > 2:
        raise ValueError('more than 2 edges sharing a vertex!')

    # Path end-points are the vertices with a single neighbour.
    seeds = nm.where(npv == 1)[0]

    # 1. get paths.
    paths = []
    for seed in seeds:
        path = _get_edge_path(graph, seed, mask)
        if len(path):
            paths.append(path)

    # 2. get possible remaing cycles.
    while 1:
        ii = nm.where(mask[nodes] == 0)[0]
        if not len(ii):
            break

        path = _get_edge_path(graph, nodes[ii[0]], mask, cycle=True)
        if len(path):
            paths.append(path)

    return paths
def compute_nodal_edge_dirs(nodes, region, field, return_imap=False):
    """
    Nodal edge directions are computed by simple averaging of direction vectors
    of edges a node is contained in. Edges are assumed to be straight and a
    node must be on a single edge (a border node) or shared by exactly two
    edges.

    With `return_imap`, also return the map from global node indices to
    rows of the returned `edge_dirs` array.
    """
    coors = region.domain.mesh.coors
    dim = coors.shape[1]

    graph = region.get_edge_graph()

    imap = prepare_remap(nodes, nodes.max() + 1)
    mask = nm.zeros_like(imap)

    try:
        paths = get_edge_paths(graph, mask)

    except ValueError:
        raise ValueError('more than 2 edges sharing a vertex in region %s!'
                         % region.name)

    # All nodes must have an edge direction.
    if not nm.all(mask[nodes]):
        raise ValueError('region %s has not complete edges!' % region.name)

    edge_dirs = nm.zeros((nodes.shape[0], dim), dtype=nm.float64)
    for path in paths:
        # Direction vectors of consecutive edge segments along the path.
        pcoors = coors[path]
        edirs = nm.diff(pcoors, axis=0)
        la.normalize_vectors(edirs, eps=1e-12)

        # Each segment contributes to both of its end nodes.
        im = imap[nm.c_[path[:-1], path[1:]]]
        for ii, edir in enumerate(edirs):
            edge_dirs[im[ii]] += edir

    la.normalize_vectors(edge_dirs, eps=1e-12)

    if return_imap:
        return edge_dirs, imap

    else:
        return edge_dirs
def get_min_value(dofs):
    """
    Get a reasonable minimal value of DOFs suitable for extending over a
    whole domain.

    Vector DOFs (more than one component) extend with 0, scalar DOFs with
    their minimum.
    """
    return 0.0 if dofs.shape[1] > 1 else dofs.min()
def extend_cell_data(data, domain, rname, val=None, is_surface=False,
                     average_surface=True):
    """
    Extend cell data defined in a region to the whole domain.

    Parameters
    ----------
    data : array
        The data defined in the region.
    domain : FEDomain instance
        The FE domain.
    rname : str
        The region name.
    val : float, optional
        The value for filling cells not covered by the region. If not given,
        the smallest value in data is used.
    is_surface : bool
        If True, the data are defined on a surface region. In that case the
        values are averaged or summed into the cells containing the region
        surface faces (a cell can have several faces of the surface), see
        `average_surface`.
    average_surface : bool
        If True, the data defined on a surface region are averaged, otherwise
        the data are summed.

    Returns
    -------
    edata : array
        The data extended to all domain elements.
    """
    n_el = domain.shape.n_el
    # Already defined everywhere - nothing to extend.
    if data.shape[0] == n_el: return data

    if val is None:
        if data.shape[2] > 1: # Vector.
            val = nm.amin(nm.abs(data))
        else: # Scalar.
            val = nm.amin(data)

    edata = nm.empty((n_el,) + data.shape[1:], dtype=data.dtype)
    edata.fill(val)

    region = domain.regions[rname]

    if not is_surface:
        edata[region.get_cells()] = data

    else:
        cells = region.get_cells(true_cells_only=False)
        ucells = nm.unique(cells)

        # Each surface facet must belong to exactly one listed cell here.
        if len(cells) != len(region.facets):
            raise ValueError('region %s has an inner face!'
                             % region.name)

        if average_surface:
            # Per-cell face counts used as averaging weights.
            avg = nm.bincount(cells, minlength=n_el)[ucells]

        else:
            avg = 1.0

        for ic in range(data.shape[2]):
            if nm.isrealobj(data):
                evals = nm.bincount(cells, weights=data[:, 0, ic, 0],
                                    minlength=n_el)[ucells]

            else:
                # bincount cannot take complex weights - accumulate the
                # real and imaginary parts separately.
                evals = (nm.bincount(cells, weights=data[:, 0, ic, 0].real,
                                     minlength=n_el)[ucells]
                         + 1j *
                         nm.bincount(cells, weights=data[:, 0, ic, 0].imag,
                                     minlength=n_el)[ucells])

            edata[ucells, 0, ic, 0] = evals / avg

    return edata
def refine_mesh(filename, level):
    """
    Uniformly refine `level`-times a mesh given by `filename`.

    The refined mesh is saved to a file with name constructed from base
    name of `filename` and `level`-times appended `'_r'` suffix.

    Parameters
    ----------
    filename : str
        The mesh file name.
    level : int
        The refinement level.

    Returns
    -------
    filename : str
        The file name of the refined mesh (the input name when level is 0).
    """
    import os
    from sfepy.base.base import output
    from sfepy.discrete.fem import Mesh, FEDomain

    if level > 0:
        mesh = Mesh.from_file(filename)
        domain = FEDomain(mesh.name, mesh)
        for ii in range(level):
            output('refine %d...' % ii)
            domain = domain.refine()
            output('... %d nodes %d elements'
                   % (domain.shape.n_nod, domain.shape.n_el))

        # domain.name accumulates the '_r' suffixes during refinement;
        # keep the original file extension.
        suffix = os.path.splitext(filename)[1]
        filename = domain.name + suffix

        domain.mesh.write(filename, io='auto')

    return filename
mavenlin/tensorflow | tensorflow/contrib/tensor_forest/hybrid/python/kernel_tests/routing_function_op_test.py | 102 | 2327 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the routing function op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.ops import gen_training_ops
from tensorflow.contrib.tensor_forest.hybrid.python.ops import training_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class RoutingFunctionTest(test_util.TensorFlowTestCase):
    """Tests for the hybrid tensor-forest routing_function op."""

    def setUp(self):
        # Four 2-d input points with one label each.
        self.input_data = [[-1., 0.], [-1., 2.],
                           [1., 0.], [1., -2.]]
        self.input_labels = [0., 1., 2., 3.]
        # Tree arrays (structure, per-node weights and thresholds); the
        # exact row semantics are defined by the routing op, not shown here.
        self.tree = [[1, 0], [-1, 0], [-1, 0]]
        self.tree_weights = [[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]]
        self.tree_thresholds = [0., 0., 0.]
        self.ops = training_ops.Load()

    def testRoutingFunction(self):
        with self.test_session():
            route_tensor = gen_training_ops.routing_function(
                self.input_data, self.tree_weights, self.tree_thresholds, max_nodes=3)

            # One row per input point, one column per tree node.
            route_tensor_shape = route_tensor.get_shape()
            self.assertEquals(len(route_tensor_shape), 2)
            self.assertEquals(route_tensor_shape[0], 4)
            self.assertEquals(route_tensor_shape[1], 3)

            routes = route_tensor.eval()

            # Point 1
            # Node 1 is a decision node => probability = 1.0
            self.assertAlmostEquals(1.0, routes[0, 0])
            # Probability left output = 1.0 / (1.0 + exp(1.0)) = 0.26894142
            self.assertAlmostEquals(0.26894142, routes[0, 1])
            # Probability right = 1 - 0.2689414 = 0.73105858
            self.assertAlmostEquals(0.73105858, routes[0, 2])


if __name__ == '__main__':
    googletest.main()
| apache-2.0 |
p4datasystems/CarnotKE | jyhton/lib-python/2.7/encodings/iso2022_jp_3.py | 816 | 1061 | #
# iso2022_jp_3.py: Python Unicode Codec for ISO2022_JP_3
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
# Look up the C-implemented ISO-2022-JP-3 codec object.
codec = _codecs_iso2022.getcodec('iso2022_jp_3')

class Codec(codecs.Codec):
    # Stateless encode/decode delegate directly to the C codec.
    encode = codec.encode
    decode = codec.decode

# The incremental/stream classes get their behaviour from the multibyte
# codec base classes; they only need the 'codec' class attribute.
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec

class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec

class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec

class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec
def getregentry():
    """Return the CodecInfo record used to register this codec."""
    # A single Codec instance supplies both stateless entry points.
    codec_object = Codec()
    return codecs.CodecInfo(
        name='iso2022_jp_3',
        encode=codec_object.encode,
        decode=codec_object.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| apache-2.0 |
PaddlePaddle/models | dygraph/mobilenet/imagenet_dataset.py | 1 | 1987 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import math
import random
import numpy as np
from paddle.vision.datasets import DatasetFolder
from paddle.vision.transforms import transforms
from paddle import fluid
class ImageNetDataset(DatasetFolder):
    """ImageFolder-style ImageNet dataset yielding (CHW float image, int64 label)."""

    def __init__(self,
                 path,
                 mode='train',
                 image_size=224,
                 resize_short_size=256):
        super(ImageNetDataset, self).__init__(path)
        self.mode = mode
        # ImageNet channel statistics expressed on the 0-255 pixel scale.
        normalize = transforms.Normalize(
            mean=[123.675, 116.28, 103.53], std=[58.395, 57.120, 57.375])
        if self.mode == 'train':
            spatial_ops = [
                transforms.RandomResizedCrop(image_size),
                transforms.RandomHorizontalFlip(),
            ]
        else:
            spatial_ops = [
                transforms.Resize(resize_short_size),
                transforms.CenterCrop(image_size),
            ]
        # HWC -> CHW, then per-channel normalization, after the spatial ops.
        self.transform = transforms.Compose(
            spatial_ops + [transforms.Transpose(order=(2, 0, 1)), normalize])

    def __getitem__(self, idx):
        sample_path, sample_label = self.samples[idx]
        image = cv2.imread(sample_path).astype(np.float32)
        label = np.array([sample_label]).astype(np.int64)
        return self.transform(image), label

    def __len__(self):
        return len(self.samples)
| apache-2.0 |
ShiYw/Sigil | 3rdparty/python/Lib/idlelib/idle_test/mock_tk.py | 8 | 11545 | """Classes that replace tkinter gui objects used by an object being tested.
A gui object is anything with a master or parent parameter, which is
typically required in spite of what the doc strings say.
"""
class Event:
    '''Minimal mock with attributes for testing event handlers.

    This is not a gui object, but is used as an argument for callbacks
    that access attributes of the event passed. If a callback ignores
    the event, other than the fact that is happened, pass 'event'.

    Keyboard, mouse, window, and other sources generate Event instances.
    Event instances have the following attributes: serial (number of
    event), time (of event), type (of event as number), widget (in which
    event occurred), and x,y (position of mouse). There are other
    attributes for specific events, such as keycode for key events.
    tkinter.Event.__doc__ has more but is still not complete.
    '''
    def __init__(self, **kwds):
        "Create event with attributes needed for test"
        # Each keyword becomes an attribute of the event instance.
        for attribute, value in kwds.items():
            setattr(self, attribute, value)
class Var:
    "Use for String/Int/BooleanVar: incomplete"
    def __init__(self, master=None, value=None, name=None):
        # Store the construction arguments; only 'value' is used by get/set.
        self.master, self.value, self.name = master, value, name
    def set(self, value):
        "Replace the stored value."
        self.value = value
    def get(self):
        "Return the stored value."
        return self.value
class Mbox_func:
    """Generic mock for messagebox functions, which all share one signature.

    Instead of displaying a message box, calling the mock records the
    arguments as instance attributes, which test functions can then examine.
    For 'ask' functions, a test can preset the value the call should return.
    """
    def __init__(self, result=None):
        # 'show' functions return None; 'ask' functions return self.result.
        self.result = result
    def __call__(self, title, message, *args, **kwds):
        # Record every argument so tests can inspect what was "displayed".
        self.title, self.message = title, message
        self.args, self.kwds = args, kwds
        return self.result
class Mbox:
    """Mock for tkinter.messagebox with an Mbox_func for each function.
    This module was 'tkMessageBox' in 2.x; hence the 'import as' in 3.x.
    Example usage in test_module.py for testing functions in module.py:
    ---
    from idlelib.idle_test.mock_tk import Mbox
    import module
    orig_mbox = module.tkMessageBox
    showerror = Mbox.showerror  # example, for attribute access in test methods
    class Test(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            module.tkMessageBox = Mbox
        @classmethod
        def tearDownClass(cls):
            module.tkMessageBox = orig_mbox
    ---
    For 'ask' functions, set func.result return value before calling the method
    that uses the message function. When tkMessageBox functions are the
    only gui calls in a method, this replacement makes the method gui-free.
    """
    # One shared mock per messagebox function; comments give the Tk return type.
    askokcancel = Mbox_func()     # True or False
    askquestion = Mbox_func()     # 'yes' or 'no'
    askretrycancel = Mbox_func()  # True or False
    askyesno = Mbox_func()        # True or False
    askyesnocancel = Mbox_func()  # True, False, or None
    showerror = Mbox_func()    # None
    showinfo = Mbox_func()     # None
    showwarning = Mbox_func()  # None
from _tkinter import TclError
class Text:
    """A semi-functional non-gui replacement for tkinter.Text text editors.

    The mock's data model is that a text is a list of \n-terminated lines.
    The mock adds an empty string at the beginning of the list so that the
    index of actual lines start at 1, as with Tk. The methods never see this.
    Tk initializes files with a terminal \n that cannot be deleted. It is
    invisible in the sense that one cannot move the cursor beyond it.

    This class is only tested (and valid) with strings of ascii chars.
    For testing, we are not concerned with Tk Text's treatment of,
    for instance, 0-width characters or character + accent.
    """
    def __init__(self, master=None, cnf={}, **kw):
        '''Initialize mock, non-gui, text-only Text widget.

        At present, all args are ignored. Almost all affect visual behavior.
        There are just a few Text-only options that affect text behavior.
        '''
        # data[0] is a dummy so real lines are 1-indexed like Tk;
        # data[-1] always ends with the undeletable terminal \n.
        self.data = ['', '\n']

    def index(self, index):
        "Return string version of index decoded according to current text."
        return "%s.%s" % self._decode(index, endflag=1)

    def _decode(self, index, endflag=0):
        """Return a (line, char) tuple of int indexes into self.data.

        This implements .index without converting the result back to a string.
        The result is constrained by the number of lines and linelengths of
        self.data. For many indexes, the result is initially (1, 0).

        The input index may have any of several possible forms:
        * line.char float: converted to 'line.char' string;
        * 'line.char' string, where line and char are decimal integers;
        * 'line.char lineend', where lineend='lineend' (and char is ignored);
        * 'line.end', where end='end' (same as above);
        * 'insert', the position before the terminal \n;
        * 'end', whose meaning depends on the endflag passed to ._endex.
        * 'sel.first' or 'sel.last', where sel is a tag -- not implemented.
        """
        if isinstance(index, (float, bytes)):
            index = str(index)
        try:
            index = index.lower()
        except AttributeError:
            raise TclError('bad text index "%s"' % index) from None

        lastline = len(self.data) - 1  # same as number of text lines
        if index == 'insert':
            return lastline, len(self.data[lastline]) - 1
        elif index == 'end':
            return self._endex(endflag)

        line, char = index.split('.')
        line = int(line)
        # Out of bounds line becomes first or last ('end') index
        if line < 1:
            return 1, 0
        elif line > lastline:
            return self._endex(endflag)

        linelength = len(self.data[line]) - 1  # position before/at \n
        if char.endswith(' lineend') or char == 'end':
            return line, linelength
            # Tk requires that ignored chars before ' lineend' be valid int
        # Out of bounds char becomes first or last index of line
        char = int(char)
        if char < 0:
            char = 0
        elif char > linelength:
            char = linelength
        return line, char

    def _endex(self, endflag):
        '''Return position for 'end' or line overflow corresponding to endflag.

        -1: position before terminal \n; for .insert(), .delete
        0: position after terminal \n; for .get, .delete index 1
        1: same viewed as beginning of non-existent next line (for .index)
        '''
        n = len(self.data)
        if endflag == 1:
            return n, 0
        else:
            n -= 1
            return n, len(self.data[n]) + endflag

    def insert(self, index, chars):
        "Insert chars before the character at index."
        if not chars:  # ''.splitlines() is [], not ['']
            return
        chars = chars.splitlines(True)
        if chars[-1][-1] == '\n':
            chars.append('')
        line, char = self._decode(index, -1)
        before = self.data[line][:char]
        after = self.data[line][char:]
        self.data[line] = before + chars[0]
        self.data[line+1:line+1] = chars[1:]
        self.data[line+len(chars)-1] += after

    def get(self, index1, index2=None):
        "Return slice from index1 to index2 (default is 'index1+1')."
        startline, startchar = self._decode(index1)
        if index2 is None:
            endline, endchar = startline, startchar+1
        else:
            endline, endchar = self._decode(index2)

        if startline == endline:
            return self.data[startline][startchar:endchar]
        else:
            lines = [self.data[startline][startchar:]]
            for i in range(startline+1, endline):
                lines.append(self.data[i])
            lines.append(self.data[endline][:endchar])
            return ''.join(lines)

    def delete(self, index1, index2=None):
        '''Delete slice from index1 to index2 (default is 'index1+1').

        Adjust default index2 ('index+1) for line ends.
        Do not delete the terminal \n at the very end of self.data ([-1][-1]).
        '''
        startline, startchar = self._decode(index1, -1)
        if index2 is None:
            if startchar < len(self.data[startline])-1:
                # not deleting \n
                endline, endchar = startline, startchar+1
            elif startline < len(self.data) - 1:
                # deleting non-terminal \n, convert 'index1+1 to start of next line
                endline, endchar = startline+1, 0
            else:
                # do not delete terminal \n if index1 == 'insert'
                return
        else:
            endline, endchar = self._decode(index2, -1)
            # restricting end position to insert position excludes terminal \n

        if startline == endline and startchar < endchar:
            self.data[startline] = self.data[startline][:startchar] + \
                                             self.data[startline][endchar:]
        elif startline < endline:
            self.data[startline] = self.data[startline][:startchar] + \
                                   self.data[endline][endchar:]
            startline += 1
            for i in range(startline, endline+1):
                del self.data[startline]

    def compare(self, index1, op, index2):
        "Return the result of comparing two decoded indexes with op."
        line1, char1 = self._decode(index1)
        line2, char2 = self._decode(index2)
        if op == '<':
            return line1 < line2 or line1 == line2 and char1 < char2
        elif op == '<=':
            return line1 < line2 or line1 == line2 and char1 <= char2
        elif op == '>':
            return line1 > line2 or line1 == line2 and char1 > char2
        elif op == '>=':
            return line1 > line2 or line1 == line2 and char1 >= char2
        elif op == '==':
            return line1 == line2 and char1 == char2
        elif op == '!=':
            return line1 != line2 or char1 != char2
        else:
            raise TclError('''bad comparison operator "%s":'''
                           '''must be <, <=, ==, >=, >, or !=''' % op)

    # The following Text methods normally do something and return None.
    # Whether doing nothing is sufficient for a test will depend on the test.

    def mark_set(self, name, index):
        "Set mark *name* before the character at index."
        pass

    def mark_unset(self, *markNames):
        "Delete all marks in markNames."

    def tag_remove(self, tagName, index1, index2=None):
        "Remove tag tagName from all characters between index1 and index2."
        pass

    # The following Text methods affect the graphics screen and return None.
    # Doing nothing should always be sufficient for tests.

    def scan_dragto(self, x, y):
        "Adjust the view of the text according to scan_mark"

    def scan_mark(self, x, y):
        "Remember the current X, Y coordinates."

    def see(self, index):
        "Scroll screen to make the character at INDEX visible."
        pass

    # The following is a Misc method inherited by Text.
    # It should properly go in a Misc mock, but is included here for now.

    def bind(self, sequence=None, func=None, add=None):
        "Bind to this widget at event sequence a call to function func."
        # Fixed: the original signature omitted 'self', so every instance
        # call shifted the arguments by one position (and a three-argument
        # call raised TypeError).
        pass
| gpl-3.0 |
lillisgary/shiny-shame | theme/admin.py | 1 | 1252 | from django.contrib import admin
from .models import HomePage, Slide, IconBlurb, Portfolio, PortfolioItemImage, PortfolioItem, PortfolioItemCategory, TextSlider, DocumentListItem, DocumentList, DocumentListItemCategory
from mezzanine.core.admin import TabularDynamicInlineAdmin
from mezzanine.pages.admin import PageAdmin
class SlideInline(TabularDynamicInlineAdmin):
    # Inline editor for Slide rows attached to a HomePage.
    model = Slide
class IconBlurbInline(TabularDynamicInlineAdmin):
    # Inline editor for IconBlurb rows attached to a HomePage.
    model = IconBlurb
class PortfolioItemImageInline(TabularDynamicInlineAdmin):
    # Inline editor for the images belonging to a PortfolioItem.
    model = PortfolioItemImage
class TextSliderInline(TabularDynamicInlineAdmin):
    # Inline editor for TextSlider rows attached to a HomePage.
    model = TextSlider
class HomePageAdmin(PageAdmin):
    # HomePage admin combines the slide, icon-blurb and text-slider inlines.
    inlines = (SlideInline, IconBlurbInline, TextSliderInline,)
class PortfolioItemAdmin(PageAdmin):
    inlines = (PortfolioItemImageInline,)
class DocumentListItemInline(TabularDynamicInlineAdmin):
    # Inline editor for DocumentListItem rows attached to a DocumentList.
    model = DocumentListItem
class DocumentListAdmin(PageAdmin):
    inlines = (DocumentListItemInline,)
# Register the page types with their custom admins, and the plain
# category models with the default ModelAdmin.
admin.site.register(HomePage, HomePageAdmin)
admin.site.register(Portfolio, PageAdmin)
admin.site.register(PortfolioItem, PortfolioItemAdmin)
admin.site.register(PortfolioItemCategory)
admin.site.register(DocumentList, DocumentListAdmin)
admin.site.register(DocumentListItemCategory)
| gpl-2.0 |
thientu/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
                     p=2, metric_params=None, include_self=None):
    """Computes the (weighted) graph of k-Neighbors for points in X
    Read more in the :ref:`User Guide <unsupervised_neighbors>`.
    Parameters
    ----------
    X : array-like or BallTree, shape = [n_samples, n_features]
        Sample data, in the form of a numpy array or a precomputed
        :class:`BallTree`.
    n_neighbors : int
        Number of neighbors for each sample.
    mode : {'connectivity', 'distance'}, optional
        Type of returned matrix: 'connectivity' will return the
        connectivity matrix with ones and zeros, in 'distance' the
        edges are Euclidean distance between points.
    metric : string, default 'minkowski'
        The distance metric used to calculate the k-Neighbors for each sample
        point. The DistanceMetric class gives a list of available metrics.
        The default distance is 'euclidean' ('minkowski' metric with the p
        param equal to 2.)
    include_self: bool, default backward-compatible.
        Whether or not to mark each sample as the first nearest neighbor to
        itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
        version 0.18, the default value will be False, irrespective of the
        value of `mode`.
    p : int, default 2
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params: dict, optional
        additional keyword arguments for the metric function.
    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.
    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import kneighbors_graph
    >>> A = kneighbors_graph(X, 2)
    >>> A.toarray()
    array([[ 1.,  0.,  1.],
           [ 0.,  1.,  1.],
           [ 1.,  0.,  1.]])
    See also
    --------
    radius_neighbors_graph
    """
    # Fit a fresh NearestNeighbors estimator unless the caller passed one in.
    if not isinstance(X, KNeighborsMixin):
        X = NearestNeighbors(n_neighbors, metric=metric, p=p,
                             metric_params=metric_params).fit(X)
    else:
        # Caller-supplied estimator: its params must match the arguments.
        _check_params(X, metric, p, metric_params)
    # query is None when each sample should be excluded from its own neighbors.
    query = _query_include_self(X, include_self, mode)
    return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
                           p=2, metric_params=None, include_self=None):
    """Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted the points at a distance lower than
    radius.
    Read more in the :ref:`User Guide <unsupervised_neighbors>`.
    Parameters
    ----------
    X : array-like or BallTree, shape = [n_samples, n_features]
        Sample data, in the form of a numpy array or a precomputed
        :class:`BallTree`.
    radius : float
        Radius of neighborhoods.
    mode : {'connectivity', 'distance'}, optional
        Type of returned matrix: 'connectivity' will return the
        connectivity matrix with ones and zeros, in 'distance' the
        edges are Euclidean distance between points.
    metric : string, default 'minkowski'
        The distance metric used to calculate the neighbors within a
        given radius for each sample point. The DistanceMetric class
        gives a list of available metrics. The default distance is
        'euclidean' ('minkowski' metric with the param equal to 2.)
    include_self: bool, default None
        Whether or not to mark each sample as the first nearest neighbor to
        itself. If `None`, then True is used for mode='connectivity' and False
        for mode='distance' as this will preserve backwards compatibility. From
        version 0.18, the default value will be False, irrespective of the
        value of `mode`.
    p : int, default 2
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params: dict, optional
        additional keyword arguments for the metric function.
    Returns
    -------
    A : sparse matrix in CSR format, shape = [n_samples, n_samples]
        A[i, j] is assigned the weight of edge that connects i to j.
    Examples
    --------
    >>> X = [[0], [3], [1]]
    >>> from sklearn.neighbors import radius_neighbors_graph
    >>> A = radius_neighbors_graph(X, 1.5)
    >>> A.toarray()
    array([[ 1.,  0.,  1.],
           [ 0.,  1.,  0.],
           [ 1.,  0.,  1.]])
    See also
    --------
    kneighbors_graph
    """
    # Fit a fresh NearestNeighbors estimator unless the caller passed one in.
    if not isinstance(X, RadiusNeighborsMixin):
        X = NearestNeighbors(radius=radius, metric=metric, p=p,
                             metric_params=metric_params).fit(X)
    else:
        # Caller-supplied estimator: its params must match the arguments.
        _check_params(X, metric, p, metric_params)
    # query is None when each sample should be excluded from its own neighbors.
    query = _query_include_self(X, include_self, mode)
    return X.radius_neighbors_graph(query, radius, mode)
| bsd-3-clause |
sjdv1982/seamless | tests/lowlevel/module-package.py | 1 | 1611 | import seamless
from seamless.core import macro_mode_on
from seamless.core import context, cell, macro
mod_init = """
from .mod3 import testvalue
"""
mod1 = """
from . import testvalue
def func():
return testvalue
"""
mod2 = """
from .mod1 import func
"""
mod3 = """
testvalue = 42
"""
package = {
"__init__": {
"language": "python",
"code": mod_init,
"dependencies": [".mod3"],
},
"mod1": {
"language": "python",
"code": mod1,
"dependencies": ["__init__"],
},
"mod2": {
"language": "python",
"code": mod2,
"dependencies": [".mod1"],
},
"mod3": {
"language": "python",
"code": mod3,
"dependencies": [],
},
}
testmodule = {
"type": "interpreted",
"language": "python",
"code": package,
}
with macro_mode_on():
ctx = context(toplevel=True)
ctx.param = cell("plain").set(1)
ctx.macro = macro({
"param": "plain",
"testmodule": ("plain", "module"),
})
ctx.param.connect(ctx.macro.param)
ctx.macro_code = cell("macro").set("""
print("macro execute")
from .testmodule import testvalue
from .testmodule.mod1 import func
from .testmodule.mod2 import func as func2
print(testvalue)
print(func is func2)
print(func2())
print(testmodule.testvalue)
from .testmodule import mod3
print(mod3.testvalue)
print("/macro execute")
""")
ctx.macro_code.connect(ctx.macro.code)
ctx.testmodule = cell("plain").set(testmodule)
ctx.testmodule.connect(ctx.macro.testmodule)
print("START")
ctx.compute()
print(ctx.macro.exception)
| mit |
joequery/django | tests/model_fields/test_durationfield.py | 296 | 2724 | import datetime
import json
from django import forms
from django.core import exceptions, serializers
from django.db import models
from django.test import SimpleTestCase, TestCase
from .models import DurationModel, NullDurationModel
class TestSaveLoad(TestCase):
    # DurationField values round-trip through the database unchanged.
    def test_simple_roundtrip(self):
        duration = datetime.timedelta(days=123, seconds=123, microseconds=123)
        DurationModel.objects.create(field=duration)
        loaded = DurationModel.objects.get()
        self.assertEqual(loaded.field, duration)
    def test_create_empty(self):
        # A nullable DurationField defaults to NULL / None.
        NullDurationModel.objects.create()
        loaded = NullDurationModel.objects.get()
        self.assertEqual(loaded.field, None)
class TestQuerying(TestCase):
    @classmethod
    def setUpTestData(cls):
        # One day-scale, one second-scale and one negative duration.
        cls.objs = [
            DurationModel.objects.create(field=datetime.timedelta(days=1)),
            DurationModel.objects.create(field=datetime.timedelta(seconds=1)),
            DurationModel.objects.create(field=datetime.timedelta(seconds=-1)),
        ]
    def test_exact(self):
        self.assertSequenceEqual(
            DurationModel.objects.filter(field=datetime.timedelta(days=1)),
            [self.objs[0]]
        )
    def test_gt(self):
        # Only the two positive durations exceed a zero timedelta.
        self.assertSequenceEqual(
            DurationModel.objects.filter(field__gt=datetime.timedelta(days=0)),
            [self.objs[0], self.objs[1]]
        )
class TestSerialization(SimpleTestCase):
    # Serialized durations use the '[DD] [HH:[MM:]]ss[.uuuuuu]' string format.
    test_data = '[{"fields": {"field": "1 01:00:00"}, "model": "model_fields.durationmodel", "pk": null}]'
    def test_dumping(self):
        instance = DurationModel(field=datetime.timedelta(days=1, hours=1))
        data = serializers.serialize('json', [instance])
        self.assertEqual(json.loads(data), json.loads(self.test_data))
    def test_loading(self):
        instance = list(serializers.deserialize('json', self.test_data))[0].object
        self.assertEqual(instance.field, datetime.timedelta(days=1, hours=1))
class TestValidation(SimpleTestCase):
    def test_invalid_string(self):
        # An unparsable value raises ValidationError with code 'invalid'.
        field = models.DurationField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('not a datetime', None)
        self.assertEqual(cm.exception.code, 'invalid')
        self.assertEqual(
            cm.exception.message % cm.exception.params,
            "'not a datetime' value has an invalid format. "
            "It must be in [DD] [HH:[MM:]]ss[.uuuuuu] format."
        )
class TestFormField(SimpleTestCase):
    # Tests for forms.DurationField are in the forms_tests app.
    def test_formfield(self):
        field = models.DurationField()
        self.assertIsInstance(field.formfield(), forms.DurationField)
| bsd-3-clause |
twz915/django | django/core/serializers/json.py | 1 | 3709 | """
Serialize data to/from JSON
"""
import datetime
import decimal
import json
import sys
import uuid
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import (
Deserializer as PythonDeserializer, Serializer as PythonSerializer,
)
from django.utils import six
from django.utils.duration import duration_iso_string
from django.utils.functional import Promise
from django.utils.timezone import is_aware
class Serializer(PythonSerializer):
    """
    Convert a queryset to JSON.
    """
    internal_use_only = False
    def _init_options(self):
        # NOTE(review): this compares version components as *strings*, which
        # is lexicographic (e.g. '10' < '2'); presumably adequate for the
        # simplejson versions targeted here -- confirm before reusing.
        if json.__version__.split('.') >= ['2', '1', '3']:
            # Use JS strings to represent Python Decimal instances (ticket #16850)
            self.options.update({'use_decimal': False})
        self._current = None
        # Everything except 'stream' and 'fields' is forwarded to json.dump.
        self.json_kwargs = self.options.copy()
        self.json_kwargs.pop('stream', None)
        self.json_kwargs.pop('fields', None)
        if self.options.get('indent'):
            # Prevent trailing spaces
            self.json_kwargs['separators'] = (',', ': ')
        self.json_kwargs.setdefault('cls', DjangoJSONEncoder)
    def start_serialization(self):
        # Open the top-level JSON array of serialized objects.
        self._init_options()
        self.stream.write("[")
    def end_serialization(self):
        # Close the array; with indenting, keep the brackets on their own lines.
        if self.options.get("indent"):
            self.stream.write("\n")
        self.stream.write("]")
        if self.options.get("indent"):
            self.stream.write("\n")
    def end_object(self, obj):
        # self._current has the field data
        indent = self.options.get("indent")
        if not self.first:
            # Separate objects with "," (plus a space when not indenting).
            self.stream.write(",")
            if not indent:
                self.stream.write(" ")
        if indent:
            self.stream.write("\n")
        json.dump(self.get_dump_object(obj), self.stream, **self.json_kwargs)
        self._current = None
    def getvalue(self):
        # Grand-parent super
        return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
    """
    Deserialize a stream or string of JSON data.
    """
    if not isinstance(stream_or_string, (bytes, str)):
        # File-like object: consume it fully before parsing.
        stream_or_string = stream_or_string.read()
    if isinstance(stream_or_string, bytes):
        stream_or_string = stream_or_string.decode('utf-8')
    try:
        objects = json.loads(stream_or_string)
        for obj in PythonDeserializer(objects, **options):
            yield obj
    except GeneratorExit:
        # Let generator close() propagate untouched.
        raise
    except Exception as e:
        # Map to deserializer error
        six.reraise(DeserializationError, DeserializationError(e), sys.exc_info()[2])
class DjangoJSONEncoder(json.JSONEncoder):
    """
    JSONEncoder subclass that knows how to encode date/time, decimal types and UUIDs.
    """
    def default(self, o):
        # Date/time output follows the "Date Time String Format" from the
        # ECMA-262 specification. datetime must be tested before date,
        # because datetime is a subclass of date.
        if isinstance(o, datetime.datetime):
            encoded = o.isoformat()
            if o.microsecond:
                # Truncate microseconds to milliseconds for ECMA-262.
                encoded = encoded[:23] + encoded[26:]
            if encoded.endswith('+00:00'):
                encoded = encoded[:-6] + 'Z'
            return encoded
        if isinstance(o, datetime.date):
            return o.isoformat()
        if isinstance(o, datetime.time):
            if is_aware(o):
                raise ValueError("JSON can't represent timezone-aware times.")
            encoded = o.isoformat()
            if o.microsecond:
                # Keep only millisecond precision.
                encoded = encoded[:12]
            return encoded
        if isinstance(o, datetime.timedelta):
            return duration_iso_string(o)
        if isinstance(o, (decimal.Decimal, uuid.UUID, Promise)):
            return str(o)
        return super(DjangoJSONEncoder, self).default(o)
| bsd-3-clause |
HOQTEC/MCP | jumpgate/compute/drivers/sl/quota_sets.py | 3 | 1091 | from oslo.config import cfg
class OSQuotaSetsV2(object):
    """Serve OpenStack-style quota-set data from static configuration."""

    # Every quota key except 'id' maps to the 'default_<key>' option in the
    # [compute] configuration section.
    _QUOTA_KEYS = (
        'cores', 'floating_ips', 'injected_file_content_bytes',
        'injected_file_path_bytes', 'injected_files', 'instances',
        'key_pairs', 'metadata_items', 'ram', 'security_group_rules',
        'security_groups',
    )

    def on_get(self, req, resp, tenant_id, account_id=None):
        """Respond with the configured quota set; account_id is ignored."""
        compute_conf = cfg.CONF['compute']
        quota_set = {key: compute_conf['default_' + key]
                     for key in self._QUOTA_KEYS}
        quota_set['id'] = tenant_id
        resp.body = {'quota_set': quota_set}
| mit |
mavit/ansible | lib/ansible/modules/monitoring/zabbix/zabbix_host.py | 12 | 37008 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zabbix_host
short_description: Zabbix host creates/updates/deletes
description:
- This module allows you to create, modify and delete Zabbix host entries and associated group and template data.
version_added: "2.0"
author:
- "(@cove)"
- Tony Minfei Ding
- Harrison Gu (@harrisongu)
- Werner Dijkerman (@dj-wasabi)
- Eike Frost (@eikef)
requirements:
- "python >= 2.6"
- "zabbix-api >= 0.5.3"
options:
host_name:
description:
- Name of the host in Zabbix.
- host_name is the unique identifier used and cannot be updated using this module.
required: true
visible_name:
description:
- Visible name of the host in Zabbix.
version_added: '2.3'
description:
description:
- Description of the host in Zabbix.
version_added: '2.5'
host_groups:
description:
- List of host groups the host is part of.
link_templates:
description:
- List of templates linked to the host.
inventory_mode:
description:
- Configure the inventory mode.
choices: ['automatic', 'manual', 'disabled']
version_added: '2.1'
inventory_zabbix:
description:
- Add Facts for a zabbix inventory (e.g. Tag) (see example below).
- Please review the interface documentation for more information on the supported properties
- 'https://www.zabbix.com/documentation/3.2/manual/api/reference/host/object#host_inventory'
version_added: '2.5'
status:
description:
- Monitoring status of the host.
choices: ['enabled', 'disabled']
default: 'enabled'
state:
description:
- State of the host.
- On C(present), it will create if host does not exist or update the host if the associated data is different.
- On C(absent) will remove a host if it exists.
choices: ['present', 'absent']
default: 'present'
proxy:
description:
- The name of the Zabbix proxy to be used.
interfaces:
description:
- List of interfaces to be created for the host (see example below).
- 'Available keys are: I(dns), I(ip), I(main), I(port), I(type), I(useip), and I(bulk).'
- Please review the interface documentation for more information on the supported properties
- 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface'
- If an interface definition is incomplete, this module will attempt to fill in sensible values.
- I(type) can also be C(agent), C(snmp), C(ipmi), or C(jmx) instead of its numerical value.
default: []
tls_connect:
description:
- Specifies what encryption to use for outgoing connections.
- Possible values, 1 (no encryption), 2 (PSK), 4 (certificate).
- Works only with >= Zabbix 3.0
default: 1
version_added: '2.5'
tls_accept:
description:
- Specifies what types of connections are allowed for incoming connections.
- The tls_accept parameter accepts values of 1 to 7
- Possible values, 1 (no encryption), 2 (PSK), 4 (certificate).
- Values can be combined.
- Works only with >= Zabbix 3.0
default: 1
version_added: '2.5'
tls_psk_identity:
description:
- It is a unique name by which this specific PSK is referred to by Zabbix components
- Do not put sensitive information in the PSK identity string, it is transmitted over the network unencrypted.
- Works only with >= Zabbix 3.0
version_added: '2.5'
tls_psk:
description:
- PSK value is a hard to guess string of hexadecimal digits.
- The preshared key, at least 32 hex digits. Required if either tls_connect or tls_accept has PSK enabled.
- Works only with >= Zabbix 3.0
version_added: '2.5'
tls_issuer:
description:
- Required certificate issuer.
- Works only with >= Zabbix 3.0
version_added: '2.5'
tls_subject:
description:
- Required certificate subject.
- Works only with >= Zabbix 3.0
version_added: '2.5'
ipmi_authtype:
description:
- IPMI authentication algorithm.
- Please review the Host object documentation for more information on the supported properties
- 'https://www.zabbix.com/documentation/3.4/manual/api/reference/host/object'
- Possible values are, C(0) (none), C(1) (MD2), C(2) (MD5), C(4) (straight), C(5) (OEM), C(6) (RMCP+),
with -1 being the API default.
- Please note that the Zabbix API will treat absent settings as default when updating
any of the I(ipmi_)-options; this means that if you attempt to set any of the four
options individually, the rest will be reset to default values.
version_added: '2.5'
ipmi_privilege:
description:
- IPMI privilege level.
- Please review the Host object documentation for more information on the supported properties
- 'https://www.zabbix.com/documentation/3.4/manual/api/reference/host/object'
- Possible values are C(1) (callback), C(2) (user), C(3) (operator), C(4) (admin), C(5) (OEM), with C(2)
being the API default.
- also see the last note in the I(ipmi_authtype) documentation
version_added: '2.5'
ipmi_username:
description:
- IPMI username.
- also see the last note in the I(ipmi_authtype) documentation
version_added: '2.5'
ipmi_password:
description:
- IPMI password.
- also see the last note in the I(ipmi_authtype) documentation
version_added: '2.5'
force:
description:
- Overwrite the host configuration, even if already present.
type: bool
default: 'yes'
version_added: '2.0'
extends_documentation_fragment:
- zabbix
'''
EXAMPLES = '''
- name: Create a new host or update an existing host's info
local_action:
module: zabbix_host
server_url: http://monitor.example.com
login_user: username
login_password: password
host_name: ExampleHost
visible_name: ExampleName
description: My ExampleHost Description
host_groups:
- Example group1
- Example group2
link_templates:
- Example template1
- Example template2
status: enabled
state: present
inventory_mode: manual
inventory_zabbix:
tag: "{{ your_tag }}"
alias: "{{ your_alias }}"
notes: "Special Informations: {{ your_informations | default('None') }}"
location: "{{ your_location }}"
site_rack: "{{ your_site_rack }}"
os: "{{ your_os }}"
hardware: "{{ your_hardware }}"
ipmi_authtype: 2
ipmi_privilege: 4
ipmi_username: username
ipmi_password: password
interfaces:
- type: 1
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 10050
- type: 4
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 12345
proxy: a.zabbix.proxy
- name: Update an existing host's TLS settings
local_action:
module: zabbix_host
server_url: http://monitor.example.com
login_user: username
login_password: password
host_name: ExampleHost
visible_name: ExampleName
host_groups:
- Example group1
tls_psk_identity: test
tls_connect: 2
tls_psk: 123456789abcdef123456789abcdef12
'''
import copy
try:
    from zabbix_api import ZabbixAPI, ZabbixAPISubClass
    # Extend the ZabbixAPI
    # Since the zabbix-api python module too old (version 1.0, no higher version so far),
    # it does not support the 'hostinterface' api calls,
    # so we have to inherit the ZabbixAPI class to add 'hostinterface' support.
    class ZabbixAPIExtends(ZabbixAPI):
        """ZabbixAPI subclass that adds a 'hostinterface' JSON-RPC sub-client."""
        hostinterface = None
        def __init__(self, server, timeout, user, passwd, validate_certs, **kwargs):
            ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd, validate_certs=validate_certs)
            # Sub-client whose calls are sent with the 'hostinterface.' method prefix.
            self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs))
    HAS_ZABBIX_API = True
except ImportError:
    # Checked in main() so the module can fail with an installation hint.
    HAS_ZABBIX_API = False
from ansible.module_utils.basic import AnsibleModule
class Host(object):
    """Thin wrapper around the Zabbix API for managing a single host.

    Holds the AnsibleModule (for ``fail_json``/``exit_json`` and check mode)
    and an authenticated Zabbix API client. All methods fail the module on
    API errors rather than raising.
    """

    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx

    # exist host
    def is_host_exist(self, host_name):
        """Return the (possibly empty) list of hosts whose technical name matches."""
        result = self._zapi.host.get({'filter': {'host': host_name}})
        return result

    # check if host group exists
    def check_host_group_exist(self, group_names):
        """Fail the module unless every named host group exists; return True otherwise."""
        for group_name in group_names:
            result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
            if not result:
                self._module.fail_json(msg="Hostgroup not found: %s" % group_name)
        return True

    def get_template_ids(self, template_list):
        """Resolve a list of template names to template IDs.

        Fails the module on the first unknown template name. Returns an empty
        list when ``template_list`` is None or empty.
        """
        template_ids = []
        if template_list is None or len(template_list) == 0:
            return template_ids
        for template in template_list:
            # BUGFIX: the original rebound the iterated name 'template_list'
            # here; use a distinct name for the API result instead.
            matching_templates = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}})
            if len(matching_templates) < 1:
                self._module.fail_json(msg="Template not found: %s" % template)
            else:
                template_id = matching_templates[0]['templateid']
                template_ids.append(template_id)
        return template_ids

    def add_host(self, host_name, group_ids, status, interfaces, proxy_id, visible_name, description, tls_connect,
                 tls_accept, tls_psk_identity, tls_psk, tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege,
                 ipmi_username, ipmi_password):
        """Create a new host and return its host ID.

        Optional parameters are only sent to the API when provided, so the
        Zabbix server applies its own defaults otherwise.
        """
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status,
                          'tls_connect': tls_connect, 'tls_accept': tls_accept}
            if proxy_id:
                parameters['proxy_hostid'] = proxy_id
            if visible_name:
                parameters['name'] = visible_name
            if tls_psk_identity is not None:
                parameters['tls_psk_identity'] = tls_psk_identity
            if tls_psk is not None:
                parameters['tls_psk'] = tls_psk
            if tls_issuer is not None:
                parameters['tls_issuer'] = tls_issuer
            if tls_subject is not None:
                parameters['tls_subject'] = tls_subject
            if description:
                parameters['description'] = description
            if ipmi_authtype is not None:
                parameters['ipmi_authtype'] = ipmi_authtype
            if ipmi_privilege is not None:
                parameters['ipmi_privilege'] = ipmi_privilege
            if ipmi_username is not None:
                parameters['ipmi_username'] = ipmi_username
            if ipmi_password is not None:
                parameters['ipmi_password'] = ipmi_password
            host_list = self._zapi.host.create(parameters)
            if len(host_list) >= 1:
                return host_list['hostids'][0]
        except Exception as e:
            self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))

    def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id,
                    visible_name, description, tls_connect, tls_accept, tls_psk_identity, tls_psk, tls_issuer, tls_subject, ipmi_authtype,
                    ipmi_privilege, ipmi_username, ipmi_password):
        """Update an existing host's base properties and reconcile its interfaces.

        Interfaces of a type that already exists are updated in place, new
        types are created, and any remaining pre-existing interfaces are
        deleted.
        """
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            parameters = {'hostid': host_id, 'groups': group_ids, 'status': status, 'tls_connect': tls_connect,
                          'tls_accept': tls_accept}
            if proxy_id >= 0:
                parameters['proxy_hostid'] = proxy_id
            if visible_name:
                parameters['name'] = visible_name
            if tls_psk_identity:
                parameters['tls_psk_identity'] = tls_psk_identity
            if tls_psk:
                parameters['tls_psk'] = tls_psk
            if tls_issuer:
                parameters['tls_issuer'] = tls_issuer
            if tls_subject:
                parameters['tls_subject'] = tls_subject
            if description:
                parameters['description'] = description
            if ipmi_authtype:
                parameters['ipmi_authtype'] = ipmi_authtype
            if ipmi_privilege:
                parameters['ipmi_privilege'] = ipmi_privilege
            if ipmi_username:
                parameters['ipmi_username'] = ipmi_username
            if ipmi_password:
                parameters['ipmi_password'] = ipmi_password
            self._zapi.host.update(parameters)
            # NOTE: deliberately an alias, NOT a copy. Removing a matched
            # interface below shrinks the list re-scanned by the inner loop,
            # so each pre-existing interface can be consumed at most once.
            interface_list_copy = exist_interface_list
            if interfaces:
                for interface in interfaces:
                    flag = False
                    interface_str = interface
                    for exist_interface in exist_interface_list:
                        interface_type = int(interface['type'])
                        exist_interface_type = int(exist_interface['type'])
                        if interface_type == exist_interface_type:
                            # update
                            interface_str['interfaceid'] = exist_interface['interfaceid']
                            self._zapi.hostinterface.update(interface_str)
                            flag = True
                            interface_list_copy.remove(exist_interface)
                            break
                    if not flag:
                        # add
                        interface_str['hostid'] = host_id
                        self._zapi.hostinterface.create(interface_str)
            # remove interfaces that were not matched by any desired interface
            remove_interface_ids = []
            for remove_interface in interface_list_copy:
                interface_id = remove_interface['interfaceid']
                remove_interface_ids.append(interface_id)
            if len(remove_interface_ids) > 0:
                self._zapi.hostinterface.delete(remove_interface_ids)
        except Exception as e:
            self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e))

    def delete_host(self, host_id, host_name):
        """Delete the host; ``host_name`` is only used for the error message."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.delete([host_id])
        except Exception as e:
            self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e))

    # get host by host name
    def get_host_by_host_name(self, host_name):
        """Return the full host object (including inventory) or fail the module."""
        host_list = self._zapi.host.get({'output': 'extend', 'selectInventory': 'extend', 'filter': {'host': [host_name]}})
        if len(host_list) < 1:
            self._module.fail_json(msg="Host not found: %s" % host_name)
        else:
            return host_list[0]

    # get proxyid by proxy name
    def get_proxyid_by_proxy_name(self, proxy_name):
        """Return the numeric proxy ID for a proxy name, or fail the module."""
        proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}})
        if len(proxy_list) < 1:
            self._module.fail_json(msg="Proxy not found: %s" % proxy_name)
        else:
            return int(proxy_list[0]['proxyid'])

    # get group ids by group names
    def get_group_ids_by_group_names(self, group_names):
        """Return [{'groupid': id}, ...] for the given group names (all must exist)."""
        group_ids = []
        if self.check_host_group_exist(group_names):
            group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}})
            for group in group_list:
                group_id = group['groupid']
                group_ids.append({'groupid': group_id})
        return group_ids

    # get host templates by host id
    def get_host_templates_by_host_id(self, host_id):
        """Return the list of template IDs currently linked to the host."""
        template_ids = []
        template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id})
        for template in template_list:
            template_ids.append(template['templateid'])
        return template_ids

    # get host groups by host id
    def get_host_groups_by_host_id(self, host_id):
        """Return the list of host-group names the host currently belongs to."""
        exist_host_groups = []
        host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id})
        if len(host_groups_list) >= 1:
            for host_groups_name in host_groups_list:
                exist_host_groups.append(host_groups_name['name'])
        return exist_host_groups

    # check the exist_interfaces whether it equals the interfaces or not
    def check_interface_properties(self, exist_interface_list, interfaces):
        """Return True when the desired interfaces differ from the existing ones.

        Interfaces are matched by port number; for matching ports every key
        provided in the desired interface is compared as a string.
        """
        interfaces_port_list = []
        if interfaces is not None:
            if len(interfaces) >= 1:
                for interface in interfaces:
                    interfaces_port_list.append(int(interface['port']))
        exist_interface_ports = []
        if len(exist_interface_list) >= 1:
            for exist_interface in exist_interface_list:
                exist_interface_ports.append(int(exist_interface['port']))
        if set(interfaces_port_list) != set(exist_interface_ports):
            return True
        for exist_interface in exist_interface_list:
            exit_interface_port = int(exist_interface['port'])
            for interface in interfaces:
                interface_port = int(interface['port'])
                if interface_port == exit_interface_port:
                    for key in interface.keys():
                        if str(exist_interface[key]) != str(interface[key]):
                            return True
        return False

    # get the status of host by host
    def get_host_status_by_host(self, host):
        """Return the host's status field as stored by the API (a string)."""
        return host['status']

    # check all the properties before link or clear template
    def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids,
                             exist_interfaces, host, proxy_id, visible_name, description, host_name,
                             inventory_mode, inventory_zabbix, tls_accept, tls_psk_identity, tls_psk,
                             tls_issuer, tls_subject, tls_connect, ipmi_authtype, ipmi_privilege,
                             ipmi_username, ipmi_password):
        """Return True as soon as any property differs from the host's current state."""
        # get the existing host's groups
        exist_host_groups = self.get_host_groups_by_host_id(host_id)
        if set(host_groups) != set(exist_host_groups):
            return True
        # get the existing status
        exist_status = self.get_host_status_by_host(host)
        if int(status) != int(exist_status):
            return True
        # check the exist_interfaces whether it equals the interfaces or not
        if self.check_interface_properties(exist_interfaces, interfaces):
            return True
        # get the existing templates
        exist_template_ids = self.get_host_templates_by_host_id(host_id)
        if set(list(template_ids)) != set(exist_template_ids):
            return True
        if int(host['proxy_hostid']) != int(proxy_id):
            return True
        # Check whether the visible_name has changed; Zabbix defaults to the technical hostname if not set.
        if visible_name:
            if host['name'] != visible_name and host['name'] != host_name:
                return True
        # Only compare description if it is given as a module parameter
        if description:
            if host['description'] != description:
                return True
        if inventory_mode:
            if host['inventory']:
                if int(host['inventory']['inventory_mode']) != self.inventory_mode_numeric(inventory_mode):
                    return True
            elif inventory_mode != 'disabled':
                return True
        if inventory_zabbix:
            proposed_inventory = copy.deepcopy(host['inventory'])
            proposed_inventory.update(inventory_zabbix)
            if proposed_inventory != host['inventory']:
                return True
        if tls_accept is not None and 'tls_accept' in host:
            if int(host['tls_accept']) != tls_accept:
                return True
        if tls_psk_identity is not None and 'tls_psk_identity' in host:
            if host['tls_psk_identity'] != tls_psk_identity:
                return True
        if tls_psk is not None and 'tls_psk' in host:
            if host['tls_psk'] != tls_psk:
                return True
        if tls_issuer is not None and 'tls_issuer' in host:
            if host['tls_issuer'] != tls_issuer:
                return True
        if tls_subject is not None and 'tls_subject' in host:
            if host['tls_subject'] != tls_subject:
                return True
        if tls_connect is not None and 'tls_connect' in host:
            if int(host['tls_connect']) != tls_connect:
                return True
        if ipmi_authtype is not None:
            if int(host['ipmi_authtype']) != ipmi_authtype:
                return True
        if ipmi_privilege is not None:
            if int(host['ipmi_privilege']) != ipmi_privilege:
                return True
        if ipmi_username is not None:
            if host['ipmi_username'] != ipmi_username:
                return True
        if ipmi_password is not None:
            if host['ipmi_password'] != ipmi_password:
                return True
        return False

    # link or clear template of the host
    def link_or_clear_template(self, host_id, template_id_list, tls_connect, tls_accept, tls_psk_identity, tls_psk,
                               tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password):
        """Link the given templates to the host and unlink+clear all others."""
        # get host's exist template ids
        exist_template_id_list = self.get_host_templates_by_host_id(host_id)
        exist_template_ids = set(exist_template_id_list)
        template_ids = set(template_id_list)
        template_id_list = list(template_ids)
        # get unlink and clear templates
        templates_clear = exist_template_ids.difference(template_ids)
        templates_clear_list = list(templates_clear)
        request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list,
                       'tls_connect': tls_connect, 'tls_accept': tls_accept, 'ipmi_authtype': ipmi_authtype,
                       'ipmi_privilege': ipmi_privilege, 'ipmi_username': ipmi_username, 'ipmi_password': ipmi_password}
        if tls_psk_identity is not None:
            request_str['tls_psk_identity'] = tls_psk_identity
        if tls_psk is not None:
            request_str['tls_psk'] = tls_psk
        if tls_issuer is not None:
            request_str['tls_issuer'] = tls_issuer
        if tls_subject is not None:
            request_str['tls_subject'] = tls_subject
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.update(request_str)
        except Exception as e:
            self._module.fail_json(msg="Failed to link template to host: %s" % e)

    def inventory_mode_numeric(self, inventory_mode):
        """Map the module's inventory_mode choice to the API's numeric code.

        Unrecognized values are returned unchanged (they may already be numeric).
        """
        if inventory_mode == "automatic":
            return int(1)
        elif inventory_mode == "manual":
            return int(0)
        elif inventory_mode == "disabled":
            return int(-1)
        return inventory_mode

    # Update the host inventory_mode
    def update_inventory_mode(self, host_id, inventory_mode):
        """Set the host's inventory mode; no-op when inventory_mode is falsy."""
        # nothing was set, do nothing
        if not inventory_mode:
            return
        inventory_mode = self.inventory_mode_numeric(inventory_mode)
        # watch for - https://support.zabbix.com/browse/ZBX-6033
        request_str = {'hostid': host_id, 'inventory_mode': inventory_mode}
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.update(request_str)
        except Exception as e:
            self._module.fail_json(msg="Failed to set inventory_mode to host: %s" % e)

    def update_inventory_zabbix(self, host_id, inventory):
        """Merge the given inventory fields into the host; no-op when falsy."""
        if not inventory:
            return
        request_str = {'hostid': host_id, 'inventory': inventory}
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.update(request_str)
        except Exception as e:
            self._module.fail_json(msg="Failed to set inventory to host: %s" % e)
def main():
    """Ansible module entry point.

    Parses module parameters, logs in to the Zabbix server, then creates,
    updates, or deletes the named host depending on ``state`` and whether
    the host already exists. Always terminates via exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            host_name=dict(type='str', required=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            validate_certs=dict(type='bool', required=False, default=True),
            host_groups=dict(type='list', required=False),
            link_templates=dict(type='list', required=False),
            status=dict(default="enabled", choices=['enabled', 'disabled']),
            state=dict(default="present", choices=['present', 'absent']),
            inventory_mode=dict(required=False, choices=['automatic', 'manual', 'disabled']),
            ipmi_authtype=dict(type='int', default=None),
            ipmi_privilege=dict(type='int', default=None),
            ipmi_username=dict(type='str', required=False, default=None),
            ipmi_password=dict(type='str', required=False, default=None, no_log=True),
            tls_connect=dict(type='int', default=1),
            tls_accept=dict(type='int', default=1),
            tls_psk_identity=dict(type='str', required=False),
            tls_psk=dict(type='str', required=False),
            tls_issuer=dict(type='str', required=False),
            tls_subject=dict(type='str', required=False),
            inventory_zabbix=dict(required=False, type='dict'),
            timeout=dict(type='int', default=10),
            interfaces=dict(type='list', required=False),
            force=dict(type='bool', default=True),
            proxy=dict(type='str', required=False),
            visible_name=dict(type='str', required=False),
            description=dict(type='str', required=False)
        ),
        supports_check_mode=True
    )
    if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    validate_certs = module.params['validate_certs']
    host_name = module.params['host_name']
    visible_name = module.params['visible_name']
    description = module.params['description']
    host_groups = module.params['host_groups']
    link_templates = module.params['link_templates']
    inventory_mode = module.params['inventory_mode']
    ipmi_authtype = module.params['ipmi_authtype']
    ipmi_privilege = module.params['ipmi_privilege']
    ipmi_username = module.params['ipmi_username']
    ipmi_password = module.params['ipmi_password']
    tls_connect = module.params['tls_connect']
    tls_accept = module.params['tls_accept']
    tls_psk_identity = module.params['tls_psk_identity']
    tls_psk = module.params['tls_psk']
    tls_issuer = module.params['tls_issuer']
    tls_subject = module.params['tls_subject']
    inventory_zabbix = module.params['inventory_zabbix']
    status = module.params['status']
    state = module.params['state']
    timeout = module.params['timeout']
    interfaces = module.params['interfaces']
    force = module.params['force']
    proxy = module.params['proxy']
    # convert enabled to 0; disabled to 1
    status = 1 if status == "disabled" else 0
    zbx = None
    # login to zabbix
    try:
        # HTTP auth credentials (http_login_*) are separate from the API login below.
        zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password,
                               validate_certs=validate_certs)
        zbx.login(login_user, login_password)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
    host = Host(module, zbx)
    template_ids = []
    if link_templates:
        template_ids = host.get_template_ids(link_templates)
    group_ids = []
    if host_groups:
        group_ids = host.get_group_ids_by_group_names(host_groups)
    ip = ""
    if interfaces:
        # ensure interfaces are well-formed: normalize type, fill defaults for
        # useip/dns/ip/main/port, and remember the agent interface's IP for output
        for interface in interfaces:
            if 'type' not in interface:
                module.fail_json(msg="(interface) type needs to be specified for interface '%s'." % interface)
            interfacetypes = {'agent': 1, 'snmp': 2, 'ipmi': 3, 'jmx': 4}
            if interface['type'] in interfacetypes.keys():
                interface['type'] = interfacetypes[interface['type']]
            if interface['type'] < 1 or interface['type'] > 4:
                module.fail_json(msg="Interface type can only be 1-4 for interface '%s'." % interface)
            if 'useip' not in interface:
                interface['useip'] = 0
            if 'dns' not in interface:
                if interface['useip'] == 0:
                    module.fail_json(msg="dns needs to be set if useip is 0 on interface '%s'." % interface)
                interface['dns'] = ''
            if 'ip' not in interface:
                if interface['useip'] == 1:
                    module.fail_json(msg="ip needs to be set if useip is 1 on interface '%s'." % interface)
                interface['ip'] = ''
            if 'main' not in interface:
                interface['main'] = 0
            if 'port' not in interface:
                # per-type default ports: agent/snmp/ipmi/jmx
                if interface['type'] == 1:
                    interface['port'] = "10050"
                elif interface['type'] == 2:
                    interface['port'] = "161"
                elif interface['type'] == 3:
                    interface['port'] = "623"
                elif interface['type'] == 4:
                    interface['port'] = "12345"
            if interface['type'] == 1:
                ip = interface['ip']
    # Use proxy specified, or set to 0
    if proxy:
        proxy_id = host.get_proxyid_by_proxy_name(proxy)
    else:
        proxy_id = 0
    # check if host exist
    is_host_exist = host.is_host_exist(host_name)
    if is_host_exist:
        # get host id by host name
        zabbix_host_obj = host.get_host_by_host_name(host_name)
        host_id = zabbix_host_obj['hostid']
        # If proxy is not specified as a module parameter, use the existing setting
        if proxy is None:
            proxy_id = int(zabbix_host_obj['proxy_hostid'])
        if state == "absent":
            # remove host
            host.delete_host(host_id, host_name)
            module.exit_json(changed=True, result="Successfully delete host %s" % host_name)
        else:
            if not host_groups:
                # if host_groups have not been specified when updating an existing host, just
                # get the group_ids from the existing host without updating them.
                host_groups = host.get_host_groups_by_host_id(host_id)
                group_ids = host.get_group_ids_by_group_names(host_groups)
            # get existing host's interfaces
            exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
            # if no interfaces were specified with the module, start with an empty list
            if not interfaces:
                interfaces = []
            # When force=no is specified, append existing interfaces to interfaces to update. When
            # no interfaces have been specified, copy existing interfaces as specified from the API.
            # Do the same with templates and host groups.
            if not force or not interfaces:
                for interface in copy.deepcopy(exist_interfaces):
                    # remove values not used during hostinterface.add/update calls
                    for key in tuple(interface.keys()):
                        if key in ['interfaceid', 'hostid', 'bulk']:
                            interface.pop(key, None)
                    for index in interface.keys():
                        if index in ['useip', 'main', 'type', 'port']:
                            interface[index] = int(interface[index])
                    if interface not in interfaces:
                        interfaces.append(interface)
            if not force or link_templates is None:
                template_ids = list(set(template_ids + host.get_host_templates_by_host_id(host_id)))
            if not force:
                for group_id in host.get_group_ids_by_group_names(host.get_host_groups_by_host_id(host_id)):
                    if group_id not in group_ids:
                        group_ids.append(group_id)
            # update host only when some property actually differs
            if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
                                         exist_interfaces, zabbix_host_obj, proxy_id, visible_name,
                                         description, host_name, inventory_mode, inventory_zabbix,
                                         tls_accept, tls_psk_identity, tls_psk, tls_issuer, tls_subject, tls_connect,
                                         ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password):
                host.update_host(host_name, group_ids, status, host_id,
                                 interfaces, exist_interfaces, proxy_id, visible_name, description, tls_connect, tls_accept,
                                 tls_psk_identity, tls_psk, tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password)
                host.link_or_clear_template(host_id, template_ids, tls_connect, tls_accept, tls_psk_identity,
                                            tls_psk, tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege,
                                            ipmi_username, ipmi_password)
                host.update_inventory_mode(host_id, inventory_mode)
                host.update_inventory_zabbix(host_id, inventory_zabbix)
                module.exit_json(changed=True,
                                 result="Successfully update host %s (%s) and linked with template '%s'"
                                        % (host_name, ip, link_templates))
            else:
                module.exit_json(changed=False)
    else:
        if state == "absent":
            # the host is already deleted.
            module.exit_json(changed=False)
        if not group_ids:
            module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)
        if not interfaces or (interfaces and len(interfaces) == 0):
            module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)
        # create host
        host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id, visible_name, description, tls_connect,
                                tls_accept, tls_psk_identity, tls_psk, tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege,
                                ipmi_username, ipmi_password)
        host.link_or_clear_template(host_id, template_ids, tls_connect, tls_accept, tls_psk_identity,
                                    tls_psk, tls_issuer, tls_subject, ipmi_authtype, ipmi_privilege, ipmi_username, ipmi_password)
        host.update_inventory_mode(host_id, inventory_mode)
        host.update_inventory_zabbix(host_id, inventory_zabbix)
        module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
            host_name, ip, link_templates))
# Run the module only when executed directly (Ansible invokes it this way).
if __name__ == '__main__':
    main()
| gpl-3.0 |
jusdng/odoo | openerp/tools/config.py | 178 | 36937 | #openerp.loggers.handlers. -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ConfigParser
import optparse
import os
import sys
import openerp
import openerp.conf
import openerp.loglevels as loglevels
import logging
import openerp.release as release
import appdirs
class MyOption (optparse.Option, object):
    """optparse Option carrying an extra ``my_default`` attribute.

    The command-line option list is reused to build the configuration-file
    option list, and values read from the file must not be clobbered by
    optparse defaults echoed back from parse_args. Storing defaults on a
    custom attribute (instead of optparse's own ``default``) keeps
    "user-supplied" and "defaulted" values distinguishable; ``my_default``
    is later copied when seeding the configuration-file defaults.
    """
    def __init__(self, *opts, **attrs):
        # Strip our custom keyword before optparse validates the kwargs.
        my_default = attrs.pop('my_default', None)
        optparse.Option.__init__(self, *opts, **attrs)
        self.my_default = my_default
# Default logging handler spec ("<logger>:<LEVEL>"); empty logger = root at INFO.
DEFAULT_LOG_HANDLER = ':INFO'
def _check_ssl():
try:
from OpenSSL import SSL
import socket
return hasattr(socket, 'ssl') and hasattr(SSL, "Connection")
except:
return False
def _get_default_datadir():
    """Return the default data directory for sessions and the filestore.

    Uses the per-user appdirs location when a home directory exists;
    otherwise falls back to the system-wide location (or a hard-coded
    /var/lib path on non-Windows/macOS platforms).
    """
    if os.path.exists(os.path.expanduser('~')):
        func = appdirs.user_data_dir
    elif sys.platform in ['win32', 'darwin']:
        func = appdirs.site_data_dir
    else:
        func = lambda **kwarg: "/var/lib/%s" % kwarg['appname'].lower()
    # No "version" kwarg as session and filestore paths are shared against series
    return func(appname=release.product_name, appauthor=release.author)
def _deduplicate_loggers(loggers):
""" Avoid saving multiple logging levels for the same loggers to a save
file, that just takes space and the list can potentially grow unbounded
if for some odd reason people use :option`odoo.py --save`` all the time.
"""
# dict(iterable) -> the last item of iterable for any given key wins,
# which is what we want and expect. Output order should not matter as
# there are no duplicates within the output sequence
return (
'{}:{}'.format(logger, level)
for logger, level in dict(it.split(':') for it in loggers).iteritems()
)
class configmanager(object):
    def __init__(self, fname=None):
        """Constructor.

        :param fname: a shortcut allowing to instantiate :class:`configmanager`
                      from Python code without resorting to environment
                      variable
        """
        # Options not exposed on the command line. Command line options will be added
        # from optparse's parser.
        self.options = {
            'admin_passwd': 'admin',
            'csv_internal_sep': ',',
            'publisher_warranty_url': 'http://services.openerp.com/publisher-warranty/',
            'reportgz': False,
            'root_path': None,
        }

        # Not exposed in the configuration file.
        self.blacklist_for_save = set([
            'publisher_warranty_url', 'load_language', 'root_path',
            'init', 'save', 'config', 'update', 'stop_after_init'
        ])

        # dictionary mapping option destination (keys in self.options) to MyOptions.
        self.casts = {}

        # Extra (non-[options]) config-file sections end up here, keyed by section.
        self.misc = {}
        self.config_file = fname
        self.has_ssl = _check_ssl()

        # Map legacy loglevels constants to the stdlib logging levels.
        self._LOGLEVELS = dict([
            (getattr(loglevels, 'LOG_%s' % x), getattr(logging, x))
            for x in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET')
        ])

        version = "%s %s" % (release.description, release.version)
        # option_class=MyOption gives every option the 'my_default' attribute
        # used below to build self.options without polluting optparse defaults.
        self.parser = parser = optparse.OptionParser(version=version, option_class=MyOption)

        # Server startup config
        group = optparse.OptionGroup(parser, "Common options")
        group.add_option("-c", "--config", dest="config", help="specify alternate config file")
        group.add_option("-s", "--save", action="store_true", dest="save", default=False,
                         help="save configuration to ~/.openerp_serverrc")
        group.add_option("-i", "--init", dest="init", help="install one or more modules (comma-separated list, use \"all\" for all modules), requires -d")
        group.add_option("-u", "--update", dest="update",
                         help="update one or more modules (comma-separated list, use \"all\" for all modules). Requires -d.")
        group.add_option("--without-demo", dest="without_demo",
                         help="disable loading demo data for modules to be installed (comma-separated, use \"all\" for all modules). Requires -d and -i. Default is %default",
                         my_default=False)
        group.add_option("-P", "--import-partial", dest="import_partial", my_default='',
                         help="Use this for big data importation, if it crashes you will be able to continue at the current state. Provide a filename to store intermediate importation states.")
        group.add_option("--pidfile", dest="pidfile", help="file where the server pid will be stored")
        group.add_option("--addons-path", dest="addons_path",
                         help="specify additional addons paths (separated by commas).",
                         action="callback", callback=self._check_addons_path, nargs=1, type="string")
        group.add_option("--load", dest="server_wide_modules", help="Comma-separated list of server-wide modules default=web")
        group.add_option("-D", "--data-dir", dest="data_dir", my_default=_get_default_datadir(),
                         help="Directory where to store Odoo data")
        parser.add_option_group(group)

        # XML-RPC / HTTP
        group = optparse.OptionGroup(parser, "XML-RPC Configuration")
        group.add_option("--xmlrpc-interface", dest="xmlrpc_interface", my_default='',
                         help="Specify the TCP IP address for the XML-RPC protocol. The empty string binds to all interfaces.")
        group.add_option("--xmlrpc-port", dest="xmlrpc_port", my_default=8069,
                         help="specify the TCP port for the XML-RPC protocol", type="int")
        group.add_option("--no-xmlrpc", dest="xmlrpc", action="store_false", my_default=True,
                         help="disable the XML-RPC protocol")
        group.add_option("--proxy-mode", dest="proxy_mode", action="store_true", my_default=False,
                         help="Enable correct behavior when behind a reverse proxy")
        group.add_option("--longpolling-port", dest="longpolling_port", my_default=8072,
                         help="specify the TCP port for longpolling requests", type="int")
        parser.add_option_group(group)

        # XML-RPC / HTTPS
        title = "XML-RPC Secure Configuration"
        if not self.has_ssl:
            title += " (disabled as ssl is unavailable)"
        group = optparse.OptionGroup(parser, title)
        group.add_option("--xmlrpcs-interface", dest="xmlrpcs_interface", my_default='',
                         help="Specify the TCP IP address for the XML-RPC Secure protocol. The empty string binds to all interfaces.")
        group.add_option("--xmlrpcs-port", dest="xmlrpcs_port", my_default=8071,
                         help="specify the TCP port for the XML-RPC Secure protocol", type="int")
        group.add_option("--no-xmlrpcs", dest="xmlrpcs", action="store_false", my_default=True,
                         help="disable the XML-RPC Secure protocol")
        group.add_option("--cert-file", dest="secure_cert_file", my_default='server.cert',
                         help="specify the certificate file for the SSL connection")
        group.add_option("--pkey-file", dest="secure_pkey_file", my_default='server.pkey',
                         help="specify the private key file for the SSL connection")
        parser.add_option_group(group)

        # WEB
        group = optparse.OptionGroup(parser, "Web interface Configuration")
        group.add_option("--db-filter", dest="dbfilter", my_default='.*',
                         help="Filter listed database", metavar="REGEXP")
        parser.add_option_group(group)

        # Testing Group
        group = optparse.OptionGroup(parser, "Testing Configuration")
        group.add_option("--test-file", dest="test_file", my_default=False,
                         help="Launch a python or YML test file.")
        group.add_option("--test-report-directory", dest="test_report_directory", my_default=False,
                         help="If set, will save sample of all reports in this directory.")
        group.add_option("--test-enable", action="store_true", dest="test_enable",
                         my_default=False, help="Enable YAML and unit tests.")
        group.add_option("--test-commit", action="store_true", dest="test_commit",
                         my_default=False, help="Commit database changes performed by YAML or XML tests.")
        parser.add_option_group(group)

        # Logging Group
        group = optparse.OptionGroup(parser, "Logging Configuration")
        group.add_option("--logfile", dest="logfile", help="file where the server log will be stored")
        group.add_option("--logrotate", dest="logrotate", action="store_true", my_default=False, help="enable logfile rotation")
        group.add_option("--syslog", action="store_true", dest="syslog", my_default=False, help="Send the log to the syslog server")
        # NOTE: --log-handler uses optparse's real 'default' (not my_default)
        # because action="append" needs an actual list to append to; see the
        # matching special case in _parse_config().
        group.add_option('--log-handler', action="append", default=[], my_default=DEFAULT_LOG_HANDLER, metavar="PREFIX:LEVEL", help='setup a handler at LEVEL for a given PREFIX. An empty PREFIX indicates the root logger. This option can be repeated. Example: "openerp.orm:DEBUG" or "werkzeug:CRITICAL" (default: ":INFO")')
        group.add_option('--log-request', action="append_const", dest="log_handler", const="openerp.http.rpc.request:DEBUG", help='shortcut for --log-handler=openerp.http.rpc.request:DEBUG')
        group.add_option('--log-response', action="append_const", dest="log_handler", const="openerp.http.rpc.response:DEBUG", help='shortcut for --log-handler=openerp.http.rpc.response:DEBUG')
        group.add_option('--log-web', action="append_const", dest="log_handler", const="openerp.http:DEBUG", help='shortcut for --log-handler=openerp.http:DEBUG')
        group.add_option('--log-sql', action="append_const", dest="log_handler", const="openerp.sql_db:DEBUG", help='shortcut for --log-handler=openerp.sql_db:DEBUG')
        group.add_option('--log-db', dest='log_db', help="Logging database", my_default=False)
        group.add_option('--log-db-level', dest='log_db_level', my_default='warning', help="Logging database level")
        # For backward-compatibility, map the old log levels to something
        # quite close.
        levels = [
            'info', 'debug_rpc', 'warn', 'test', 'critical',
            'debug_sql', 'error', 'debug', 'debug_rpc_answer', 'notset'
        ]
        group.add_option('--log-level', dest='log_level', type='choice',
                         choices=levels, my_default='info',
                         help='specify the level of the logging. Accepted values: %s.' % (levels,))
        parser.add_option_group(group)

        # SMTP Group
        group = optparse.OptionGroup(parser, "SMTP Configuration")
        group.add_option('--email-from', dest='email_from', my_default=False,
                         help='specify the SMTP email address for sending email')
        group.add_option('--smtp', dest='smtp_server', my_default='localhost',
                         help='specify the SMTP server for sending email')
        group.add_option('--smtp-port', dest='smtp_port', my_default=25,
                         help='specify the SMTP port', type="int")
        group.add_option('--smtp-ssl', dest='smtp_ssl', action='store_true', my_default=False,
                         help='if passed, SMTP connections will be encrypted with SSL (STARTTLS)')
        group.add_option('--smtp-user', dest='smtp_user', my_default=False,
                         help='specify the SMTP username for sending email')
        group.add_option('--smtp-password', dest='smtp_password', my_default=False,
                         help='specify the SMTP password for sending email')
        parser.add_option_group(group)

        group = optparse.OptionGroup(parser, "Database related options")
        group.add_option("-d", "--database", dest="db_name", my_default=False,
                         help="specify the database name")
        group.add_option("-r", "--db_user", dest="db_user", my_default=False,
                         help="specify the database user name")
        group.add_option("-w", "--db_password", dest="db_password", my_default=False,
                         help="specify the database password")
        group.add_option("--pg_path", dest="pg_path", help="specify the pg executable path")
        group.add_option("--db_host", dest="db_host", my_default=False,
                         help="specify the database host")
        group.add_option("--db_port", dest="db_port", my_default=False,
                         help="specify the database port", type="int")
        group.add_option("--db_maxconn", dest="db_maxconn", type='int', my_default=64,
                         help="specify the the maximum number of physical connections to posgresql")
        group.add_option("--db-template", dest="db_template", my_default="template1",
                         help="specify a custom database template to create a new database")
        parser.add_option_group(group)

        group = optparse.OptionGroup(parser, "Internationalisation options",
            "Use these options to translate Odoo to another language."
            "See i18n section of the user manual. Option '-d' is mandatory."
            "Option '-l' is mandatory in case of importation"
            )
        group.add_option('--load-language', dest="load_language",
                         help="specifies the languages for the translations you want to be loaded")
        group.add_option('-l', "--language", dest="language",
                         help="specify the language of the translation file. Use it with --i18n-export or --i18n-import")
        group.add_option("--i18n-export", dest="translate_out",
                         help="export all sentences to be translated to a CSV file, a PO file or a TGZ archive and exit")
        group.add_option("--i18n-import", dest="translate_in",
                         help="import a CSV or a PO file with translations and exit. The '-l' option is required.")
        group.add_option("--i18n-overwrite", dest="overwrite_existing_translations", action="store_true", my_default=False,
                         help="overwrites existing translation terms on updating a module or importing a CSV or a PO file.")
        group.add_option("--modules", dest="translate_modules",
                         help="specify modules to export. Use in combination with --i18n-export")
        parser.add_option_group(group)

        security = optparse.OptionGroup(parser, 'Security-related options')
        security.add_option('--no-database-list', action="store_false", dest='list_db', my_default=True,
                            help="disable the ability to return the list of databases")
        parser.add_option_group(security)

        # Advanced options
        group = optparse.OptionGroup(parser, "Advanced options")
        if os.name == 'posix':
            group.add_option('--auto-reload', dest='auto_reload', action='store_true', my_default=False, help='enable auto reload')
        group.add_option('--debug', dest='debug_mode', action='store_true', my_default=False, help='enable debug mode')
        group.add_option("--stop-after-init", action="store_true", dest="stop_after_init", my_default=False,
                         help="stop the server after its initialization")
        group.add_option("-t", "--timezone", dest="timezone", my_default=False,
                         help="specify reference timezone for the server (e.g. Europe/Brussels")
        group.add_option("--osv-memory-count-limit", dest="osv_memory_count_limit", my_default=False,
                         help="Force a limit on the maximum number of records kept in the virtual "
                              "osv_memory tables. The default is False, which means no count-based limit.",
                         type="int")
        group.add_option("--osv-memory-age-limit", dest="osv_memory_age_limit", my_default=1.0,
                         help="Force a limit on the maximum age of records kept in the virtual "
                              "osv_memory tables. This is a decimal value expressed in hours, "
                              "and the default is 1 hour.",
                         type="float")
        group.add_option("--max-cron-threads", dest="max_cron_threads", my_default=2,
                         help="Maximum number of threads processing concurrently cron jobs (default 2).",
                         type="int")
        group.add_option("--unaccent", dest="unaccent", my_default=False, action="store_true",
                         help="Use the unaccent function provided by the database when available.")
        group.add_option("--geoip-db", dest="geoip_database", my_default='/usr/share/GeoIP/GeoLiteCity.dat',
                         help="Absolute path to the GeoIP database file.")
        parser.add_option_group(group)

        if os.name == 'posix':
            group = optparse.OptionGroup(parser, "Multiprocessing options")
            # TODO sensible default for the three following limits.
            group.add_option("--workers", dest="workers", my_default=0,
                             help="Specify the number of workers, 0 disable prefork mode.",
                             type="int")
            group.add_option("--limit-memory-soft", dest="limit_memory_soft", my_default=2048 * 1024 * 1024,
                             help="Maximum allowed virtual memory per worker, when reached the worker be reset after the current request (default 671088640 aka 640MB).",
                             type="int")
            group.add_option("--limit-memory-hard", dest="limit_memory_hard", my_default=2560 * 1024 * 1024,
                             help="Maximum allowed virtual memory per worker, when reached, any memory allocation will fail (default 805306368 aka 768MB).",
                             type="int")
            group.add_option("--limit-time-cpu", dest="limit_time_cpu", my_default=60,
                             help="Maximum allowed CPU time per request (default 60).",
                             type="int")
            group.add_option("--limit-time-real", dest="limit_time_real", my_default=120,
                             help="Maximum allowed Real time per request (default 120).",
                             type="int")
            group.add_option("--limit-request", dest="limit_request", my_default=8192,
                             help="Maximum number of request to be processed per worker (default 8192).",
                             type="int")
            parser.add_option_group(group)

        # Copy all optparse options (i.e. MyOption) into self.options.
        for group in parser.option_groups:
            for option in group.option_list:
                if option.dest not in self.options:
                    self.options[option.dest] = option.my_default
                    self.casts[option.dest] = option

        # generate default config
        self._parse_config()
    def parse_config(self, args=None):
        """ Parse the configuration file (if any) and the command-line
        arguments.

        This method initializes openerp.tools.config and openerp.conf (the
        former should be removed in the future) with library-wide
        configuration values.

        This method must be called before proper usage of this library can be
        made.

        Typical usage of this method:

            openerp.tools.config.parse_config(sys.argv[1:])
        """
        self._parse_config(args)
        # Logging can only be configured once the options (logfile, syslog,
        # log_handler, log_level...) are known; same for sys.path setup.
        openerp.netsvc.init_logger()
        openerp.modules.module.initialize_sys_path()
    def _parse_config(self, args=None):
        """Compute ``self.options`` from (in increasing priority) the
        hard-coded defaults, the configuration file and the *args* command
        line, then propagate a few values to the global ``openerp.conf``.

        :param args: list of command-line argument strings (defaults to [])
        """
        if args is None:
            args = []
        opt, args = self.parser.parse_args(args)

        def die(cond, msg):
            # Abort option parsing with *msg* when *cond* is truthy.
            if cond:
                self.parser.error(msg)

        # Ensures no illegitimate argument is silently discarded (avoids insidious "hyphen to dash" problem)
        die(args, "unrecognized parameters: '%s'" % " ".join(args))

        die(bool(opt.syslog) and bool(opt.logfile),
            "the syslog and logfile options are exclusive")

        die(opt.translate_in and (not opt.language or not opt.db_name),
            "the i18n-import option cannot be used without the language (-l) and the database (-d) options")

        die(opt.overwrite_existing_translations and not (opt.translate_in or opt.update),
            "the i18n-overwrite option cannot be used without the i18n-import option or without the update option")

        die(opt.translate_out and (not opt.db_name),
            "the i18n-export option cannot be used without the database (-d) option")

        # Check if the config file exists (-c used, but not -s)
        die(not opt.save and opt.config and not os.access(opt.config, os.R_OK),
            "The config file '%s' selected with -c/--config doesn't exist or is not readable, "\
            "use -s/--save if you want to generate it"% opt.config)

        # place/search the config file on Win32 near the server installation
        # (../etc from the server)
        # if the server is run by an unprivileged user, he has to specify location of a config file where he has the rights to write,
        # else he won't be able to save the configurations, or even to start the server...
        # TODO use appdirs
        if os.name == 'nt':
            rcfilepath = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), 'openerp-server.conf')
        else:
            rcfilepath = os.path.expanduser('~/.openerp_serverrc')

        self.rcfile = os.path.abspath(
            self.config_file or opt.config or os.environ.get('OPENERP_SERVER') or rcfilepath)
        self.load()

        # Verify that we want to log or not, if not the output will go to stdout
        if self.options['logfile'] in ('None', 'False'):
            self.options['logfile'] = False
        # the same for the pidfile
        if self.options['pidfile'] in ('None', 'False'):
            self.options['pidfile'] = False

        # if defined dont take the configfile value even if the defined value is None
        keys = ['xmlrpc_interface', 'xmlrpc_port', 'longpolling_port',
                'db_name', 'db_user', 'db_password', 'db_host',
                'db_port', 'db_template', 'logfile', 'pidfile', 'smtp_port',
                'email_from', 'smtp_server', 'smtp_user', 'smtp_password',
                'db_maxconn', 'import_partial', 'addons_path',
                'xmlrpc', 'syslog', 'without_demo', 'timezone',
                'xmlrpcs_interface', 'xmlrpcs_port', 'xmlrpcs',
                'secure_cert_file', 'secure_pkey_file', 'dbfilter', 'log_level', 'log_db',
                'log_db_level', 'geoip_database',
        ]

        for arg in keys:
            # Copy the command-line argument (except the special case for log_handler, due to
            # action=append requiring a real default, so we cannot use the my_default workaround)
            if getattr(opt, arg):
                self.options[arg] = getattr(opt, arg)
            # ... or keep, but cast, the config file value.
            elif isinstance(self.options[arg], basestring) and self.casts[arg].type in optparse.Option.TYPE_CHECKER:
                self.options[arg] = optparse.Option.TYPE_CHECKER[self.casts[arg].type](self.casts[arg], arg, self.options[arg])

        # log_handler from the config file is a comma-separated string; the
        # command-line occurrences are appended on top of it.
        if isinstance(self.options['log_handler'], basestring):
            self.options['log_handler'] = self.options['log_handler'].split(',')
        self.options['log_handler'].extend(opt.log_handler)

        # if defined but None take the configfile value
        keys = [
            'language', 'translate_out', 'translate_in', 'overwrite_existing_translations',
            'debug_mode', 'smtp_ssl', 'load_language',
            'stop_after_init', 'logrotate', 'without_demo', 'xmlrpc', 'syslog',
            'list_db', 'xmlrpcs', 'proxy_mode',
            'test_file', 'test_enable', 'test_commit', 'test_report_directory',
            'osv_memory_count_limit', 'osv_memory_age_limit', 'max_cron_threads', 'unaccent',
            'data_dir',
        ]

        posix_keys = [
            'auto_reload', 'workers',
            'limit_memory_hard', 'limit_memory_soft',
            'limit_time_cpu', 'limit_time_real', 'limit_request',
        ]

        if os.name == 'posix':
            keys += posix_keys
        else:
            self.options.update(dict.fromkeys(posix_keys, None))

        # Copy the command-line arguments...
        for arg in keys:
            if getattr(opt, arg) is not None:
                self.options[arg] = getattr(opt, arg)
            # ... or keep, but cast, the config file value.
            elif isinstance(self.options[arg], basestring) and self.casts[arg].type in optparse.Option.TYPE_CHECKER:
                self.options[arg] = optparse.Option.TYPE_CHECKER[self.casts[arg].type](self.casts[arg], arg, self.options[arg])

        self.options['root_path'] = os.path.abspath(os.path.expanduser(os.path.expandvars(os.path.dirname(openerp.__file__))))
        if not self.options['addons_path'] or self.options['addons_path']=='None':
            # No explicit addons path: fall back to the addons directories
            # shipped next to (and above) the openerp package, when present.
            default_addons = []
            base_addons = os.path.join(self.options['root_path'], 'addons')
            if os.path.exists(base_addons):
                default_addons.append(base_addons)
            main_addons = os.path.abspath(os.path.join(self.options['root_path'], '../addons'))
            if os.path.exists(main_addons):
                default_addons.append(main_addons)
            self.options['addons_path'] = ','.join(default_addons)
        else:
            self.options['addons_path'] = ",".join(
                    os.path.abspath(os.path.expanduser(os.path.expandvars(x)))
                      for x in self.options['addons_path'].split(','))

        # init/update become {module_name: 1} mappings; demo mirrors init
        # unless demo data was explicitly disabled.
        self.options['init'] = opt.init and dict.fromkeys(opt.init.split(','), 1) or {}
        self.options["demo"] = not opt.without_demo and self.options['init'] or {}
        self.options['update'] = opt.update and dict.fromkeys(opt.update.split(','), 1) or {}
        self.options['translate_modules'] = opt.translate_modules and map(lambda m: m.strip(), opt.translate_modules.split(',')) or ['all']
        self.options['translate_modules'].sort()

        # TODO checking the type of the parameters should be done for every
        # parameters, not just the timezone.
        # The call to get_server_timezone() sets the timezone; this should
        # probably done here.
        if self.options['timezone']:
            # Prevent the timezone to be True. (The config file parsing changes
            # the string 'True' to the boolean value True. It would be probably
            # be better to remove that conversion.)
            die(not isinstance(self.options['timezone'], basestring),
                "Invalid timezone value in configuration or environment: %r.\n"
                "Please fix this in your configuration." %(self.options['timezone']))

            # If an explicit TZ was provided in the config, make sure it is known
            try:
                import pytz
                pytz.timezone(self.options['timezone'])
            except pytz.UnknownTimeZoneError:
                die(True, "The specified timezone (%s) is invalid" % self.options['timezone'])
            except:
                # If pytz is missing, don't check the provided TZ, it will be ignored anyway.
                pass

        if opt.pg_path:
            self.options['pg_path'] = opt.pg_path

        if self.options.get('language', False):
            if len(self.options['language']) > 5:
                raise Exception('ERROR: The Lang name must take max 5 chars, Eg: -lfr_BE')

        if not self.options['db_user']:
            try:
                import getpass
                self.options['db_user'] = getpass.getuser()
            except:
                self.options['db_user'] = None

        die(not self.options['db_user'], 'ERROR: No user specified for the connection to the database')

        if self.options['db_password']:
            if sys.platform == 'win32' and not self.options['db_host']:
                self.options['db_host'] = 'localhost'
            #if self.options['db_host']:
            #    self._generate_pgpassfile()

        if opt.save:
            self.save()

        openerp.conf.addons_paths = self.options['addons_path'].split(',')
        if opt.server_wide_modules:
            openerp.conf.server_wide_modules = map(lambda m: m.strip(), opt.server_wide_modules.split(','))
        else:
            openerp.conf.server_wide_modules = ['web','web_kanban']
def _generate_pgpassfile(self):
"""
Generate the pgpass file with the parameters from the command line (db_host, db_user,
db_password)
Used because pg_dump and pg_restore can not accept the password on the command line.
"""
is_win32 = sys.platform == 'win32'
if is_win32:
filename = os.path.join(os.environ['APPDATA'], 'pgpass.conf')
else:
filename = os.path.join(os.environ['HOME'], '.pgpass')
text_to_add = "%(db_host)s:*:*:%(db_user)s:%(db_password)s" % self.options
if os.path.exists(filename):
content = [x.strip() for x in file(filename, 'r').readlines()]
if text_to_add in content:
return
fp = file(filename, 'a+')
fp.write(text_to_add + "\n")
fp.close()
if is_win32:
try:
import _winreg
except ImportError:
_winreg = None
x=_winreg.ConnectRegistry(None,_winreg.HKEY_LOCAL_MACHINE)
y = _winreg.OpenKey(x, r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment", 0,_winreg.KEY_ALL_ACCESS)
_winreg.SetValueEx(y,"PGPASSFILE", 0, _winreg.REG_EXPAND_SZ, filename )
_winreg.CloseKey(y)
_winreg.CloseKey(x)
else:
import stat
os.chmod(filename, stat.S_IRUSR + stat.S_IWUSR)
def _is_addons_path(self, path):
for f in os.listdir(path):
modpath = os.path.join(path, f)
if os.path.isdir(modpath):
def hasfile(filename):
return os.path.isfile(os.path.join(modpath, filename))
if hasfile('__init__.py') and (hasfile('__openerp__.py') or hasfile('__terp__.py')):
return True
return False
def _check_addons_path(self, option, opt, value, parser):
ad_paths = []
for path in value.split(','):
path = path.strip()
res = os.path.abspath(os.path.expanduser(path))
if not os.path.isdir(res):
raise optparse.OptionValueError("option %s: no such directory: %r" % (opt, path))
if not self._is_addons_path(res):
raise optparse.OptionValueError("option %s: The addons-path %r does not seem to a be a valid Addons Directory!" % (opt, path))
ad_paths.append(res)
setattr(parser.values, option.dest, ",".join(ad_paths))
def load(self):
p = ConfigParser.ConfigParser()
try:
p.read([self.rcfile])
for (name,value) in p.items('options'):
if value=='True' or value=='true':
value = True
if value=='False' or value=='false':
value = False
self.options[name] = value
#parse the other sections, as well
for sec in p.sections():
if sec == 'options':
continue
if not self.misc.has_key(sec):
self.misc[sec]= {}
for (name, value) in p.items(sec):
if value=='True' or value=='true':
value = True
if value=='False' or value=='false':
value = False
self.misc[sec][name] = value
except IOError:
pass
except ConfigParser.NoSectionError:
pass
def save(self):
p = ConfigParser.ConfigParser()
loglevelnames = dict(zip(self._LOGLEVELS.values(), self._LOGLEVELS.keys()))
p.add_section('options')
for opt in sorted(self.options.keys()):
if opt in ('version', 'language', 'translate_out', 'translate_in', 'overwrite_existing_translations', 'init', 'update'):
continue
if opt in self.blacklist_for_save:
continue
if opt in ('log_level',):
p.set('options', opt, loglevelnames.get(self.options[opt], self.options[opt]))
elif opt == 'log_handler':
p.set('options', opt, ','.join(_deduplicate_loggers(self.options[opt])))
else:
p.set('options', opt, self.options[opt])
for sec in sorted(self.misc.keys()):
p.add_section(sec)
for opt in sorted(self.misc[sec].keys()):
p.set(sec,opt,self.misc[sec][opt])
# try to create the directories and write the file
try:
rc_exists = os.path.exists(self.rcfile)
if not rc_exists and not os.path.exists(os.path.dirname(self.rcfile)):
os.makedirs(os.path.dirname(self.rcfile))
try:
p.write(file(self.rcfile, 'w'))
if not rc_exists:
os.chmod(self.rcfile, 0600)
except IOError:
sys.stderr.write("ERROR: couldn't write the config file\n")
except OSError:
# what to do if impossible?
sys.stderr.write("ERROR: couldn't create the config directory\n")
def get(self, key, default=None):
return self.options.get(key, default)
def get_misc(self, sect, key, default=None):
return self.misc.get(sect,{}).get(key, default)
    def __setitem__(self, key, value):
        # Store the raw value first, then cast string values through the same
        # optparse type checker that validates the command line, so that e.g.
        # config['xmlrpc_port'] = '8069' ends up as the int 8069.
        self.options[key] = value
        if key in self.options and isinstance(self.options[key], basestring) and \
                key in self.casts and self.casts[key].type in optparse.Option.TYPE_CHECKER:
            self.options[key] = optparse.Option.TYPE_CHECKER[self.casts[key].type](self.casts[key], key, self.options[key])
    def __getitem__(self, key):
        # Dictionary-style read access; raises KeyError for unknown options.
        return self.options[key]
@property
def addons_data_dir(self):
d = os.path.join(self['data_dir'], 'addons', release.series)
if not os.path.exists(d):
os.makedirs(d, 0700)
else:
assert os.access(d, os.W_OK), \
"%s: directory is not writable" % d
return d
@property
def session_dir(self):
d = os.path.join(self['data_dir'], 'sessions')
if not os.path.exists(d):
os.makedirs(d, 0700)
else:
assert os.access(d, os.W_OK), \
"%s: directory is not writable" % d
return d
def filestore(self, dbname):
return os.path.join(self['data_dir'], 'filestore', dbname)
# Module-level singleton shared by the whole server (imported as
# openerp.tools.config); constructing it parses the default config file.
config = configmanager()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
LinErinG/foxsi-smex | pyfoxsi/src/pyfoxsi/response/response.py | 4 | 8272 | """
Response is a module to handle the response of the FOXSI telescopes
"""
from __future__ import absolute_import
import pandas as pd
import numpy as np
import warnings
import os
import matplotlib.pyplot as plt
import astropy.units as u
from scipy import interpolate
import pyfoxsi
import h5py
__all__ = ['Response', 'Material']
class Response(object):
    """An object which provides the FOXSI telescope response

    Parameters
    ----------
    shutter_state : int, default 0
        A number representing the state of the shutter (0 - no shutter, 1 - thin shutter, 2 - thick shutter)
    configuration : int, default 1
        Choose the optics configuration
            1 : 15 meters
            2 : 10 meters 3 modules
            3 : 10 meters 2 modules

    Examples
    --------
    >>> from pyfoxsi.response import Response
    >>> resp = Response()
    >>> resp1 = Response(shutter_state=1)
    """
    def __init__(self, shutter_state=0, configuration=1):
        # The effective-area table ships three directory levels above the
        # installed pyfoxsi package, in data/.
        path = os.path.dirname(pyfoxsi.__file__)
        for _ in range(3):
            path = os.path.dirname(path)
        path = os.path.join(path, 'data/')
        filename = 'effective_area_per_module.csv'
        effarea_file = os.path.join(path, filename)
        optics_effective_area = pd.read_csv(effarea_file, index_col=0, skiprows=4)
        # Keep only the column matching the requested optics configuration.
        optics_effective_area = optics_effective_area[optics_effective_area.columns[configuration - 1]]
        if configuration == 1:
            pyfoxsi.focal_length = 15 * u.m
            pyfoxsi.number_of_telescopes = 3
        elif configuration == 2:
            pyfoxsi.focal_length = 10 * u.m
            pyfoxsi.number_of_telescopes = 3
        elif configuration == 3:
            pyfoxsi.focal_length = 10 * u.m
            pyfoxsi.number_of_telescopes = 2
        # 'module' stays per-module; 'total' is rescaled by the telescope
        # count through the number_of_telescopes setter below.
        self.optics_effective_area = pd.DataFrame(dict(total=optics_effective_area.copy(),
                                                       module=optics_effective_area.copy()))
        self.__number_of_telescopes = 1
        self.effective_area = pd.DataFrame(dict(total=self.optics_effective_area['total'].copy(),
                                                module=self.optics_effective_area['module'].copy()))
        self.number_of_telescopes = pyfoxsi.number_of_telescopes
        self._set_default_optical_path()
        if shutter_state > 0:
            self.__optical_path.append(Material('al', pyfoxsi.shutters_thickness[shutter_state]))
        self.__shutter_state = shutter_state
        self._add_optical_path_to_effective_area()

    def plot(self, axes=None):
        """Plot the effective area"""
        if axes is None:
            axes = plt.gca()
        # NOTE(review): pandas' plot keyword is normally ``ax=``; ``axes=``
        # presumably relied on an older pandas API -- confirm before upgrading.
        a = self.effective_area.plot(axes=axes)
        axes.set_title(pyfoxsi.mission_title + ' ' + str(self.number_of_telescopes) + 'x ' + 'Shutter State ' + str(self.shutter_state))
        axes.set_ylabel('Effective area [cm$^2$]')
        axes.set_xlabel('Energy [keV]')

    def _set_default_optical_path(self):
        # Thermal blankets plus the detector itself are always in the path.
        self.__optical_path = [Material('mylar', pyfoxsi.blanket_thickness),
                               Material(pyfoxsi.detector_material, pyfoxsi.detector_thickness)]

    @property
    def number_of_telescopes(self):
        """The total number of telescope modules"""
        return self.__number_of_telescopes

    @number_of_telescopes.setter
    def number_of_telescopes(self, x):
        # Rescale the summed ('total') column; 'module' remains per-module.
        self.optics_effective_area['total'] = self.optics_effective_area['total'] / self.__number_of_telescopes * x
        self.__number_of_telescopes = x

    @property
    def optical_path(self):
        """The materials in the optical path including the detector"""
        return self.__optical_path

    @optical_path.setter
    def optical_path(self, x):
        # Assign the private attribute.  The original assigned the property
        # itself (self.optical_path = x), which re-entered this setter and
        # recursed infinitely.
        self.__optical_path = x
        self._add_optical_path_to_effective_area()

    @property
    def shutter_state(self):
        """The shutter state, allowed values are 0, 1, 2"""
        return self.__shutter_state

    @shutter_state.setter
    def shutter_state(self, x):
        raise AttributeError('Cannot change shutter state. Create new object with desired shutter state')

    def _add_optical_path_to_effective_area(self):
        """Add the effect of the optical path to the effective area"""
        energies = np.array(self.optics_effective_area.index)
        # Remove 10% of flux due to spiders
        factor = np.ones_like(energies) * 0.9
        # Apply all of the materials in the optical path to factor
        for material in self.optical_path:
            if material.name == pyfoxsi.detector_material:
                # if it is the detector then we want the absorption
                factor *= material.absorption(energies)
            else:
                factor *= material.transmission(energies)
        self.effective_area['factor'] = factor
        self.effective_area['total'] = factor * self.optics_effective_area['total']
        self.effective_area['module'] = factor * self.optics_effective_area['module']
class Material(object):
    """An object which provides the optical properties of a material in x-rays

    Parameters
    ----------
    material : str
        A string representing a material (e.g. cdte, be, mylar, si)
    thickness : `astropy.units.Quantity`
        The thickness of the material in the optical path.

    Examples
    --------
    >>> from pyfoxsi.response import Material
    >>> import astropy.units as u
    >>> detector = Material('cdte', 500 * u.um)
    >>> thermal_blankets = Material('mylar', 0.5 * u.mm)
    """
    def __init__(self, material, thickness):
        self.name = material
        self.thickness = thickness

        # The mass-attenuation table ships three directory levels above the
        # installed pyfoxsi package, in data/.
        path = os.path.dirname(pyfoxsi.__file__)
        for _ in range(3):
            path = os.path.dirname(path)
        path = os.path.join(path, 'data/')
        filename = 'mass_attenuation_coefficient.hdf5'
        data_file = os.path.join(path, filename)

        # Read everything we need inside a context manager so the HDF5 file
        # handle is closed; the original kept the file open for the lifetime
        # of the object.
        with h5py.File(data_file, 'r') as h:
            data = h[self.name]
            # in-memory copy of the 2xN table (row 0: energy MeV, row 1: mu/rho)
            self._source_data = data[:]
            self.density = u.Quantity(data.attrs['density'], data.attrs['density unit'])

        # Interpolate log10(mass attenuation coefficient) against
        # log10(energy in keV); outside the table the coefficient is 0.
        data_energy_kev = np.log10(self._source_data[0, :] * 1000)
        data_attenuation_coeff = np.log10(self._source_data[1, :])
        self._f = interpolate.interp1d(data_energy_kev, data_attenuation_coeff, bounds_error=False, fill_value=0.0)
        self._mass_attenuation_coefficient_func = lambda x: 10 ** self._f(np.log10(x))

    def __repr__(self):
        """Returns a human-readable representation."""
        return '<Material ' + str(self.name) + ' ' + str(self.thickness) + '>'

    def transmission(self, energy):
        """Provide the transmission fraction (0 to 1).

        Parameters
        ----------
        energy : `astropy.units.Quantity`
            An array of energies in keV
        """
        coefficients = self._mass_attenuation_coefficient_func(energy) * u.cm ** 2 / u.gram
        # Beer-Lambert law: exp(-mu/rho * rho * thickness)
        transmission = np.exp(- coefficients * self.density * self.thickness)
        return transmission

    def absorption(self, energy):
        """Provides the absorption fraction (0 to 1).

        Parameters
        ----------
        energy : `astropy.units.Quantity`
            An array of energies in keV.
        """
        return 1 - self.transmission(energy)

    def plot(self, axes=None):
        """Plot the transmission and absorption fractions from 1 to 59 keV."""
        if axes is None:
            axes = plt.gca()
        energies = np.arange(1, 60)
        axes.plot(energies, self.transmission(energies), label='Transmission')
        axes.plot(energies, self.absorption(energies), label='Absorption')
        axes.set_ylim(0, 1.2)
        axes.legend()
        axes.set_title(self.name + ' ' + str(self.thickness))
        axes.set_xlabel('Energy [keV]')
| mit |
chirilo/kuma | kuma/users/tests/test_adapters.py | 13 | 4706 | from nose.plugins.attrib import attr
from nose.tools import eq_, ok_
from django.contrib import messages as django_messages
from django.test import RequestFactory
from allauth.exceptions import ImmediateHttpResponse
from allauth.socialaccount.models import SocialLogin, SocialAccount
from kuma.core.urlresolvers import reverse
from kuma.users.adapters import KumaSocialAccountAdapter, KumaAccountAdapter
from . import UserTestCase
class KumaSocialAccountAdapterTestCase(UserTestCase):
    """Tests for KumaSocialAccountAdapter.pre_social_login behavior."""
    rf = RequestFactory()

    def setUp(self):
        """ extra setUp to make a working session """
        super(KumaSocialAccountAdapterTestCase, self).setUp()
        self.adapter = KumaSocialAccountAdapter()

    @attr('bug1055870')
    def test_pre_social_login_overwrites_session_var(self):
        """ https://bugzil.la/1055870 """
        # Set up a pre-existing GitHub sign-in session
        request = self.rf.get('/')
        session = self.client.session
        session['sociallogin_provider'] = 'github'
        session.save()
        request.session = session

        # Set up a Persona SocialLogin
        account = SocialAccount.objects.get(user__username='testuser')
        sociallogin = SocialLogin(account=account)

        # Verify the social_login receiver over-writes the provider
        # stored in the session
        self.adapter.pre_social_login(request, sociallogin)
        eq_(account.provider,
            request.session['sociallogin_provider'],
            "receiver should have over-written sociallogin_provider "
            "session variable")

    @attr('bug1063830')
    def test_pre_social_login_error_for_unmatched_login(self):
        """ https://bugzil.la/1063830 """
        # Set up a GitHub SocialLogin in the session
        github_account = SocialAccount.objects.get(user__username='testuser2')
        github_login = SocialLogin(account=github_account,
                                   user=github_account.user)
        request = self.rf.get('/')
        session = self.client.session
        session['socialaccount_sociallogin'] = github_login.serialize()
        session.save()
        request.session = session
        messages = self.get_messages(request)

        # Set up an un-matching Persona SocialLogin for request.
        # Mismatched logins must abort with an error message rather
        # than silently connecting accounts.
        persona_account = SocialAccount(user=self.user_model(),
                                        provider='persona',
                                        uid='noone@inexistant.com')
        persona_login = SocialLogin(account=persona_account)
        self.assertRaises(ImmediateHttpResponse,
                          self.adapter.pre_social_login, request, persona_login)
        queued_messages = list(messages)
        eq_(len(queued_messages), 1)
        eq_(django_messages.ERROR, queued_messages[0].level)
class KumaAccountAdapterTestCase(UserTestCase):
    """Tests for KumaAccountAdapter message handling."""
    localizing_client = True
    rf = RequestFactory()

    def setUp(self):
        """ extra setUp to make a working session """
        super(KumaAccountAdapterTestCase, self).setUp()
        self.adapter = KumaAccountAdapter()

    @attr('bug1054461')
    def test_account_connected_message(self):
        """ https://bugzil.la/1054461 """
        message_template = 'socialaccount/messages/account_connected.txt'
        request = self.rf.get('/')

        # first check for the case in which the next url in the account
        # connection process is the frontpage, there shouldn't be a message
        session = self.client.session
        session['sociallogin_next_url'] = '/'
        session.save()
        request.session = session
        request.user = self.user_model.objects.get(username='testuser')
        request.locale = 'en-US'
        messages = self.get_messages(request)
        self.adapter.add_message(request, django_messages.INFO,
                                 message_template)
        eq_(len(messages), 0)

        # secondly check for the case in which the next url in the connection
        # process is the profile edit page, there should be a message
        session = self.client.session
        next_url = reverse('users.user_edit',
                           kwargs={'username': request.user.username},
                           locale=request.locale)
        session['sociallogin_next_url'] = next_url
        session.save()
        request.session = session
        messages = self.get_messages(request)
        self.adapter.add_message(request, django_messages.INFO,
                                 message_template)
        queued_messages = list(messages)
        # The adapter is expected to queue exactly one message and to
        # upgrade its level from the requested INFO to SUCCESS.
        eq_(len(queued_messages), 1)
        eq_(django_messages.SUCCESS, queued_messages[0].level)
        ok_('connected' in queued_messages[0].message)
| mpl-2.0 |
hyperized/ansible | lib/ansible/modules/network/ios/ios_lacp.py | 1 | 4046 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for ios_lacp
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'
}
DOCUMENTATION = """
---
module: ios_lacp
version_added: 2.9
short_description: Manage Global Link Aggregation Control Protocol (LACP) on Cisco IOS devices.
description: This module provides declarative management of Global LACP on Cisco IOS network devices.
author: Sumit Jaiswal (@justjais)
notes:
- Tested against Cisco IOSv Version 15.2 on VIRL
- This module works with connection C(network_cli),
See L(IOS Platform Options,../network/user_guide/platform_ios.html).
options:
config:
description: The provided configurations.
type: dict
suboptions:
system:
description: This option sets the default system parameters for LACP.
type: dict
suboptions:
priority:
description:
- LACP priority for the system.
- Refer to vendor documentation for valid values.
type: int
required: True
state:
description:
- The state of the configuration after module completion
type: str
choices:
- merged
- replaced
- deleted
default: merged
"""
EXAMPLES = """
# Using merged
#
# Before state:
# -------------
#
# vios#show lacp sys-id
# 32768, 5e00.0000.8000
- name: Merge provided configuration with device configuration
ios_lacp:
config:
system:
priority: 123
state: merged
# After state:
# ------------
#
# vios#show lacp sys-id
# 123, 5e00.0000.8000
# Using replaced
#
# Before state:
# -------------
#
# vios#show lacp sys-id
# 500, 5e00.0000.8000
- name: Replaces Global LACP configuration
ios_lacp:
config:
system:
priority: 123
state: replaced
# After state:
# ------------
#
# vios#show lacp sys-id
# 123, 5e00.0000.8000
# Using Deleted
#
# Before state:
# -------------
#
# vios#show lacp sys-id
# 500, 5e00.0000.8000
- name: Delete Global LACP attribute
ios_lacp:
state: deleted
# After state:
# -------------
#
# vios#show lacp sys-id
# 32768, 5e00.0000.8000
"""
RETURN = """
before:
description: The configuration as structured data prior to module invocation.
returned: always
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
after:
description: The configuration as structured data after module completion.
returned: when changed
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['lacp system-priority 10']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.ios.argspec.lacp.lacp import LacpArgs
from ansible.module_utils.network.ios.config.lacp.lacp import Lacp
def main():
    """
    Main entry point for module execution

    :returns: the result from module invocation
    """
    # Build the module from the generated LACP argument spec; check mode is
    # supported because Lacp computes diffs before applying changes.
    module = AnsibleModule(argument_spec=LacpArgs.argument_spec,
                           supports_check_mode=True)

    result = Lacp(module).execute_module()
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
vv1133/home_web | tests/urlpatterns_reverse/views.py | 64 | 1511 | from functools import partial, update_wrapper
from django.http import HttpResponse
from django.views.generic import RedirectView
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.decorators import user_passes_test
def empty_view(request, *args, **kwargs):
    """Return an empty 200 response, ignoring all extra arguments."""
    return HttpResponse('')
def kwargs_view(request, arg1=1, arg2=2):
    """URL-resolution fixture with two defaulted keyword arguments."""
    return HttpResponse('')
def absolute_kwargs_view(request, arg1=1, arg2=2):
    """Fixture referenced by absolute dotted path in the test URLconfs."""
    return HttpResponse('')
def defaults_view(request, arg1, arg2):
    """Fixture for testing URL default kwargs; never actually rendered."""
    pass
def nested_view(request):
    """Fixture used by nested/included URLconf tests; body intentionally empty."""
    pass
def erroneous_view(request):
    """Deliberately broken view: raises ImportError when called."""
    import non_existent
def pass_resolver_match_view(request, *args, **kwargs):
    """Return an empty response exposing the request's resolver_match
    so tests can inspect how the URL was resolved."""
    response = HttpResponse('')
    response.resolver_match = request.resolver_match
    return response
uncallable = "Can I be a view? Pleeeease?"
class ViewClass(object):
    """Callable instance used to test that arbitrary callables work as views."""
    def __call__(self, request, *args, **kwargs):
        return HttpResponse('')
view_class_instance = ViewClass()
class LazyRedirectView(RedirectView):
    # reverse_lazy defers URL resolution until the redirect is issued,
    # so the URLconf need not be loaded at import time.
    url = reverse_lazy('named-lazy-url-redirected-to')
@user_passes_test(lambda u: u.is_authenticated(), login_url=reverse_lazy('some-login-page'))
def login_required_view(request):
    """View guarded by authentication; anonymous users are redirected to
    the lazily-reversed 'some-login-page' URL."""
    return HttpResponse('Hello you')
def bad_view(request, *args, **kwargs):
    """View that always raises ValueError, for error-propagation tests."""
    raise ValueError("I don't think I'm getting good value for this view")
# Pre-bound view callables used to test URL resolution of functools.partial views.
empty_view_partial = partial(empty_view, template_name="template.html")
# update_wrapper copies metadata (__name__, __doc__, ...) from empty_view
# onto the partial so it resolves like the original function.
empty_view_wrapped = update_wrapper(
    partial(empty_view, template_name="template.html"), empty_view,
)
| bsd-3-clause |
havt/odoo | addons/account/wizard/account_statement_from_invoice.py | 224 | 4128 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_statement_from_invoice_lines(osv.osv_memory):
    """
    Generate Entries by Statement from Invoices
    """
    _name = "account.statement.from.invoice.lines"
    _description = "Entries by Statement from Invoices"
    _columns = {
        # Selected invoice move lines to turn into statement lines.
        'line_ids': fields.many2many('account.move.line', 'account_move_line_relation', 'move_id', 'line_id', 'Invoices'),
    }

    def populate_statement(self, cr, uid, ids, context=None):
        """Create one bank statement line per selected invoice move line
        on the statement given by context['statement_id'].

        Amounts are converted to the statement currency at the statement
        date; debits become positive amounts, credits negative.
        Always returns an act_window_close action.
        """
        context = dict(context or {})
        statement_id = context.get('statement_id', False)
        if not statement_id:
            return {'type': 'ir.actions.act_window_close'}
        data = self.read(cr, uid, ids, context=context)[0]
        line_ids = data['line_ids']
        if not line_ids:
            return {'type': 'ir.actions.act_window_close'}

        line_obj = self.pool.get('account.move.line')
        statement_obj = self.pool.get('account.bank.statement')
        statement_line_obj = self.pool.get('account.bank.statement.line')
        currency_obj = self.pool.get('res.currency')
        statement = statement_obj.browse(cr, uid, statement_id, context=context)
        line_date = statement.date

        # for each selected move lines
        for line in line_obj.browse(cr, uid, line_ids, context=context):
            ctx = context.copy()
            # take the date for computation of currency => use payment date
            ctx['date'] = line_date
            amount = 0.0

            # Sign convention: debit lines are positive, credit lines negative.
            if line.debit > 0:
                amount = line.debit
            elif line.credit > 0:
                amount = -line.credit

            if line.amount_currency:
                if line.company_id.currency_id.id != statement.currency.id:
                    # In the specific case where the company currency and the statement currency are the same
                    # the debit/credit field already contains the amount in the right currency.
                    # We therefore avoid to re-convert the amount in the currency, to prevent Gain/loss exchanges
                    amount = currency_obj.compute(cr, uid, line.currency_id.id,
                                                  statement.currency.id, line.amount_currency, context=ctx)
            elif (line.invoice and line.invoice.currency_id.id != statement.currency.id):
                amount = currency_obj.compute(cr, uid, line.invoice.currency_id.id,
                                              statement.currency.id, amount, context=ctx)

            # Propagate the source move line/invoice so the statement line's
            # create() can link reconciliation data.
            context.update({'move_line_ids': [line.id],
                            'invoice_id': line.invoice.id})
            statement_line_obj.create(cr, uid, {
                'name': line.name or '?',
                'amount': amount,
                'partner_id': line.partner_id.id,
                'statement_id': statement_id,
                'ref': line.ref,
                'date': statement.date,
                'amount_currency': line.amount_currency,
                'currency_id': line.currency_id.id,
            }, context=context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mikewiebe-ansible/ansible | lib/ansible/module_utils/xenserver.py | 14 | 30814 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, Bojan Vitnik <bvitnik@mainstream.rs>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import atexit
import time
import re
import traceback
XENAPI_IMP_ERR = None
try:
import XenAPI
HAS_XENAPI = True
except ImportError:
HAS_XENAPI = False
XENAPI_IMP_ERR = traceback.format_exc()
from ansible.module_utils.basic import env_fallback, missing_required_lib
from ansible.module_utils.common.network import is_mac
from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
def xenserver_common_argument_spec():
    """Return the argument spec shared by all XenServer modules.

    Every option can also be supplied through an environment variable
    (XENSERVER_HOST, XENSERVER_USER, XENSERVER_PASSWORD,
    XENSERVER_VALIDATE_CERTS) via env_fallback.
    """
    return dict(
        hostname=dict(type='str',
                      aliases=['host', 'pool'],
                      required=False,
                      default='localhost',
                      fallback=(env_fallback, ['XENSERVER_HOST']),
                      ),
        username=dict(type='str',
                      aliases=['user', 'admin'],
                      required=False,
                      default='root',
                      fallback=(env_fallback, ['XENSERVER_USER'])),
        password=dict(type='str',
                      aliases=['pass', 'pwd'],
                      required=False,
                      no_log=True,
                      fallback=(env_fallback, ['XENSERVER_PASSWORD'])),
        validate_certs=dict(type='bool',
                            required=False,
                            default=True,
                            fallback=(env_fallback, ['XENSERVER_VALIDATE_CERTS'])),
    )
def xapi_to_module_vm_power_state(power_state):
    """Translate a XAPI VM power state into the module's vocabulary.

    Returns None for states without a module equivalent.
    """
    if power_state == "running":
        return "poweredon"
    if power_state == "halted":
        return "poweredoff"
    if power_state in ("suspended", "paused"):
        return power_state
    return None
def module_to_xapi_vm_power_state(power_state):
    """Translate a module VM power state into the XAPI vocabulary.

    Returns None for unknown states.
    """
    if power_state in ("poweredon", "restarted", "rebootguest"):
        return "running"
    if power_state in ("poweredoff", "shutdownguest"):
        return "halted"
    if power_state == "suspended":
        return "suspended"
    return None
def is_valid_ip_addr(ip_addr):
    """Validates given string as IPv4 address for given string.

    Args:
        ip_addr (str): string to validate as IPv4 address.

    Returns:
        bool: True if string is valid IPv4 address, else False.
    """
    octets = ip_addr.split('.')

    # Dotted-quad form requires exactly four fields.
    if len(octets) != 4:
        return False

    for octet in octets:
        if not octet.isdigit():
            return False
        if not 0 <= int(octet) <= 255:
            return False

    return True
def is_valid_ip_netmask(ip_netmask):
    """Validates given string as IPv4 netmask.

    Args:
        ip_netmask (str): string to validate as IPv4 netmask.

    Returns:
        bool: True if string is valid IPv4 netmask, else False.
    """
    octets = ip_netmask.split('.')

    if len(octets) != 4:
        return False

    # Each octet of a netmask can only hold one of these bit patterns.
    allowed_octets = ('0', '128', '192', '224', '240', '248', '252', '254', '255')

    for octet in octets:
        if octet not in allowed_octets:
            return False

    # Mask bits must be contiguous: after the first octet that is not 255,
    # every remaining octet has to be 0.
    if octets[0] != '255':
        return octets[1] == '0' and octets[2] == '0' and octets[3] == '0'
    if octets[1] != '255':
        return octets[2] == '0' and octets[3] == '0'
    if octets[2] != '255':
        return octets[3] == '0'

    return True
def is_valid_ip_prefix(ip_prefix):
    """Validates given string as IPv4 prefix.

    Args:
        ip_prefix (str): string to validate as IPv4 prefix.

    Returns:
        bool: True if string is valid IPv4 prefix, else False.
    """
    # Non-numeric strings (including signed values) are rejected outright.
    if not ip_prefix.isdigit():
        return False

    return 0 <= int(ip_prefix) <= 32
def ip_prefix_to_netmask(ip_prefix, skip_check=False):
    """Converts IPv4 prefix to netmask.

    Args:
        ip_prefix (str): IPv4 prefix to convert.
        skip_check (bool): Skip validation of IPv4 prefix
            (default: False). Use if you are sure IPv4 prefix is valid.

    Returns:
        str: IPv4 netmask equivalent to given IPv4 prefix if
        IPv4 prefix is valid, else an empty string.
    """
    if not (skip_check or is_valid_ip_prefix(ip_prefix)):
        return ""

    # Shift a solid run of 32 one-bits left so only the prefix bits remain
    # in the low 32 bits, then slice out each byte from high to low.
    mask_value = 0xffffffff << (32 - int(ip_prefix))
    return '.'.join(str((mask_value >> shift) & 0xff) for shift in (24, 16, 8, 0))
def ip_netmask_to_prefix(ip_netmask, skip_check=False):
    """Converts IPv4 netmask to prefix.

    Args:
        ip_netmask (str): IPv4 netmask to convert.
        skip_check (bool): Skip validation of IPv4 netmask
            (default: False). Use if you are sure IPv4 netmask is valid.

    Returns:
        str: IPv4 prefix equivalent to given IPv4 netmask if
        IPv4 netmask is valid, else an empty string.
    """
    if not (skip_check or is_valid_ip_netmask(ip_netmask)):
        return ""

    # The prefix length is simply the total number of set bits in the mask.
    set_bits = sum(bin(int(octet)).count("1") for octet in ip_netmask.split("."))
    return str(set_bits)
def is_valid_ip6_addr(ip6_addr):
    """Validates given string as IPv6 address.

    Args:
        ip6_addr (str): string to validate as IPv6 address.

    Returns:
        bool: True if string is valid IPv6 address, else False.
    """
    hextets = ip6_addr.lower().split(':')

    # A leading or trailing ':' (part of '::') yields an empty first/last
    # element; drop those before counting groups.
    if hextets[0] == "":
        hextets.pop(0)
    if hextets[-1] == "":
        hextets.pop(-1)

    if len(hextets) > 8:
        return False

    empty_groups = hextets.count("")
    if empty_groups > 1:
        # At most one '::' compression is allowed.
        return False
    elif empty_groups == 1:
        hextets.remove("")
    else:
        # Without '::', all eight groups must be present.
        if len(hextets) != 8:
            return False

    hextet_regex = re.compile('^[0-9a-f]{1,4}$')
    for hextet in hextets:
        if not hextet_regex.match(hextet):
            return False

    return True
def is_valid_ip6_prefix(ip6_prefix):
    """Validates given string as IPv6 prefix.

    Args:
        ip6_prefix (str): string to validate as IPv6 prefix.

    Returns:
        bool: True if string is valid IPv6 prefix, else False.
    """
    # Non-numeric strings (including signed values) are rejected outright.
    if not ip6_prefix.isdigit():
        return False

    return 0 <= int(ip6_prefix) <= 128
def get_object_ref(module, name, uuid=None, obj_type="VM", fail=True, msg_prefix=""):
    """Finds and returns a reference to arbitrary XAPI object.

    An object is searched by using either name (name_label) or UUID
    with UUID taken precedence over name.

    Args:
        module: Reference to Ansible module object.
        name (str): Name (name_label) of an object to search for.
        uuid (str): UUID of an object to search for.
        obj_type (str): Any valid XAPI object type. See XAPI docs.
        fail (bool): Should function fail with error message if object
            is not found or exit silently (default: True). The function
            always fails if multiple objects with same name are found.
        msg_prefix (str): A string error messages should be prefixed
            with (default: "").

    Returns:
        XAPI reference to found object or None if object is not found
        and fail=False.
    """
    xapi_session = XAPI.connect(module)

    # Friendly aliases used in error messages map onto the real XAPI class
    # used for the lookup request.
    if obj_type in ["template", "snapshot"]:
        real_obj_type = "VM"
    elif obj_type == "home server":
        real_obj_type = "host"
    elif obj_type == "ISO image":
        real_obj_type = "VDI"
    else:
        real_obj_type = obj_type

    obj_ref = None

    # UUID has precedence over name.
    if uuid:
        try:
            # Find object by UUID. If no object is found using given UUID,
            # an exception will be generated.
            obj_ref = xapi_session.xenapi_request("%s.get_by_uuid" % real_obj_type, (uuid,))
        except XenAPI.Failure as f:
            if fail:
                module.fail_json(msg="%s%s with UUID '%s' not found!" % (msg_prefix, obj_type, uuid))
    elif name:
        try:
            # Find object by name (name_label).
            obj_ref_list = xapi_session.xenapi_request("%s.get_by_name_label" % real_obj_type, (name,))
        except XenAPI.Failure as f:
            module.fail_json(msg="XAPI ERROR: %s" % f.details)

        # If obj_ref_list is empty.
        if not obj_ref_list:
            if fail:
                module.fail_json(msg="%s%s with name '%s' not found!" % (msg_prefix, obj_type, name))
        # If obj_ref_list contains multiple object references.
        elif len(obj_ref_list) > 1:
            module.fail_json(msg="%smultiple %ss with name '%s' found! Please use UUID." % (msg_prefix, obj_type, name))
        # The obj_ref_list contains only one object reference.
        else:
            obj_ref = obj_ref_list[0]
    else:
        module.fail_json(msg="%sno valid name or UUID supplied for %s!" % (msg_prefix, obj_type))

    return obj_ref
def gather_vm_params(module, vm_ref):
    """Gathers all VM parameters available in XAPI database.

    Args:
        module: Reference to Ansible module object.
        vm_ref (str): XAPI reference to VM.

    Returns:
        dict: VM parameters.
    """
    # We silently return empty vm_params if bad vm_ref was supplied.
    if not vm_ref or vm_ref == "OpaqueRef:NULL":
        return {}

    xapi_session = XAPI.connect(module)

    try:
        vm_params = xapi_session.xenapi.VM.get_record(vm_ref)

        # We need some params like affinity, VBDs, VIFs, VDIs etc. dereferenced.

        # Affinity.
        if vm_params['affinity'] != "OpaqueRef:NULL":
            vm_affinity = xapi_session.xenapi.host.get_record(vm_params['affinity'])
            vm_params['affinity'] = vm_affinity
        else:
            vm_params['affinity'] = {}

        # VBDs.
        vm_vbd_params_list = [xapi_session.xenapi.VBD.get_record(vm_vbd_ref) for vm_vbd_ref in vm_params['VBDs']]

        # List of VBDs is usually sorted by userdevice but we sort just
        # in case. We need this list sorted by userdevice so that we can
        # make positional pairing with module.params['disks'].
        vm_vbd_params_list = sorted(vm_vbd_params_list, key=lambda vm_vbd_params: int(vm_vbd_params['userdevice']))
        vm_params['VBDs'] = vm_vbd_params_list

        # VDIs. Each VBD's VDI reference is replaced in place by its record.
        for vm_vbd_params in vm_params['VBDs']:
            if vm_vbd_params['VDI'] != "OpaqueRef:NULL":
                vm_vdi_params = xapi_session.xenapi.VDI.get_record(vm_vbd_params['VDI'])
            else:
                vm_vdi_params = {}

            vm_vbd_params['VDI'] = vm_vdi_params

        # VIFs.
        vm_vif_params_list = [xapi_session.xenapi.VIF.get_record(vm_vif_ref) for vm_vif_ref in vm_params['VIFs']]

        # List of VIFs is usually sorted by device but we sort just
        # in case. We need this list sorted by device so that we can
        # make positional pairing with module.params['networks'].
        vm_vif_params_list = sorted(vm_vif_params_list, key=lambda vm_vif_params: int(vm_vif_params['device']))
        vm_params['VIFs'] = vm_vif_params_list

        # Networks. Each VIF's network reference is replaced by its record.
        for vm_vif_params in vm_params['VIFs']:
            if vm_vif_params['network'] != "OpaqueRef:NULL":
                vm_network_params = xapi_session.xenapi.network.get_record(vm_vif_params['network'])
            else:
                vm_network_params = {}

            vm_vif_params['network'] = vm_network_params

        # Guest metrics.
        if vm_params['guest_metrics'] != "OpaqueRef:NULL":
            vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_params['guest_metrics'])
            vm_params['guest_metrics'] = vm_guest_metrics
        else:
            vm_params['guest_metrics'] = {}

        # Detect customization agent: XenServer >= 7.0 whose guest tools
        # advertise "feature-static-ip-setting" can use the native agent;
        # anything else falls back to the custom xenstore-based agent.
        xenserver_version = get_xenserver_version(module)

        if (xenserver_version[0] >= 7 and xenserver_version[1] >= 0 and vm_params.get('guest_metrics') and
                "feature-static-ip-setting" in vm_params['guest_metrics']['other']):
            vm_params['customization_agent'] = "native"
        else:
            vm_params['customization_agent'] = "custom"
    except XenAPI.Failure as f:
        module.fail_json(msg="XAPI ERROR: %s" % f.details)

    return vm_params
def gather_vm_facts(module, vm_params):
    """Gathers VM facts.

    Args:
        module: Reference to Ansible module object.
        vm_params (dict): A dictionary with VM parameters as returned
            by gather_vm_params() function.

    Returns:
        dict: VM facts.
    """
    # We silently return empty vm_facts if no vm_params are available.
    if not vm_params:
        return {}

    xapi_session = XAPI.connect(module)

    # Gather facts.
    vm_facts = {
        "state": xapi_to_module_vm_power_state(vm_params['power_state'].lower()),
        "name": vm_params['name_label'],
        "name_desc": vm_params['name_description'],
        "uuid": vm_params['uuid'],
        "is_template": vm_params['is_a_template'],
        "folder": vm_params['other_config'].get('folder', ''),
        "hardware": {
            "num_cpus": int(vm_params['VCPUs_max']),
            "num_cpu_cores_per_socket": int(vm_params['platform'].get('cores-per-socket', '1')),
            # memory_dynamic_max is in bytes; convert to MB.
            "memory_mb": int(int(vm_params['memory_dynamic_max']) / 1048576),
        },
        "disks": [],
        "cdrom": {},
        "networks": [],
        "home_server": vm_params['affinity'].get('name_label', ''),
        "domid": vm_params['domid'],
        "platform": vm_params['platform'],
        "other_config": vm_params['other_config'],
        "xenstore_data": vm_params['xenstore_data'],
        "customization_agent": vm_params['customization_agent'],
    }

    # Disk VBDs become entries in vm_facts['disks']; the (single) CD VBD
    # fills vm_facts['cdrom'].
    for vm_vbd_params in vm_params['VBDs']:
        if vm_vbd_params['type'] == "Disk":
            vm_disk_sr_params = xapi_session.xenapi.SR.get_record(vm_vbd_params['VDI']['SR'])

            vm_disk_params = {
                "size": int(vm_vbd_params['VDI']['virtual_size']),
                "name": vm_vbd_params['VDI']['name_label'],
                "name_desc": vm_vbd_params['VDI']['name_description'],
                "sr": vm_disk_sr_params['name_label'],
                "sr_uuid": vm_disk_sr_params['uuid'],
                "os_device": vm_vbd_params['device'],
                "vbd_userdevice": vm_vbd_params['userdevice'],
            }

            vm_facts['disks'].append(vm_disk_params)
        elif vm_vbd_params['type'] == "CD":
            if vm_vbd_params['empty']:
                vm_facts['cdrom'].update(type="none")
            else:
                vm_facts['cdrom'].update(type="iso")
                vm_facts['cdrom'].update(iso_name=vm_vbd_params['VDI']['name_label'])

    for vm_vif_params in vm_params['VIFs']:
        vm_guest_metrics_networks = vm_params['guest_metrics'].get('networks', {})

        vm_network_params = {
            "name": vm_vif_params['network']['name_label'],
            "mac": vm_vif_params['MAC'],
            "vif_device": vm_vif_params['device'],
            "mtu": vm_vif_params['MTU'],
            "ip": vm_guest_metrics_networks.get("%s/ip" % vm_vif_params['device'], ''),
            "prefix": "",
            "netmask": "",
            "gateway": "",
            "ip6": [vm_guest_metrics_networks[ipv6] for ipv6 in sorted(vm_guest_metrics_networks.keys()) if ipv6.startswith("%s/ipv6/" %
                                                                                                                            vm_vif_params['device'])],
            "prefix6": "",
            "gateway6": "",
        }

        # Network config source depends on the detected customization agent:
        # the native agent exposes it via VIF fields, the custom agent via
        # xenstore keys written at deploy time.
        if vm_params['customization_agent'] == "native":
            if vm_vif_params['ipv4_addresses'] and vm_vif_params['ipv4_addresses'][0]:
                vm_network_params['prefix'] = vm_vif_params['ipv4_addresses'][0].split('/')[1]
                vm_network_params['netmask'] = ip_prefix_to_netmask(vm_network_params['prefix'])

            vm_network_params['gateway'] = vm_vif_params['ipv4_gateway']

            if vm_vif_params['ipv6_addresses'] and vm_vif_params['ipv6_addresses'][0]:
                vm_network_params['prefix6'] = vm_vif_params['ipv6_addresses'][0].split('/')[1]

            vm_network_params['gateway6'] = vm_vif_params['ipv6_gateway']
        elif vm_params['customization_agent'] == "custom":
            vm_xenstore_data = vm_params['xenstore_data']

            for f in ['prefix', 'netmask', 'gateway', 'prefix6', 'gateway6']:
                vm_network_params[f] = vm_xenstore_data.get("vm-data/networks/%s/%s" % (vm_vif_params['device'], f), "")

        vm_facts['networks'].append(vm_network_params)

    return vm_facts
def set_vm_power_state(module, vm_ref, power_state, timeout=300):
    """Controls VM power state.

    Args:
        module: Reference to Ansible module object.
        vm_ref (str): XAPI reference to VM.
        power_state (str): Power state to put VM into. Accepted values:

            - poweredon
            - poweredoff
            - restarted
            - suspended
            - shutdownguest
            - rebootguest

        timeout (int): timeout in seconds (default: 300). A timeout of 0
            means wait indefinitely (guest shutdown/reboot run synchronously).

    Returns:
        tuple (bool, str): Bool element is True if VM power state has
        changed by calling this function, else False. Str element carries
        a value of resulting power state as defined by XAPI - 'running',
        'halted' or 'suspended'.
    """
    # Fail if we don't have a valid VM reference.
    if not vm_ref or vm_ref == "OpaqueRef:NULL":
        module.fail_json(msg="Cannot set VM power state. Invalid VM reference supplied!")

    xapi_session = XAPI.connect(module)

    # Normalize state spelling, e.g. "powered-on"/"powered_on" -> "poweredon".
    power_state = power_state.replace('_', '').replace('-', '').lower()
    vm_power_state_resulting = module_to_xapi_vm_power_state(power_state)

    state_changed = False

    try:
        # Get current state of the VM.
        vm_power_state_current = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower())

        if vm_power_state_current != power_state:
            if power_state == "poweredon":
                if not module.check_mode:
                    # VM can be in either halted, suspended, paused or running state.
                    # For VM to be in running state, start has to be called on halted,
                    # resume on suspended and unpause on paused VM.
                    if vm_power_state_current == "poweredoff":
                        xapi_session.xenapi.VM.start(vm_ref, False, False)
                    elif vm_power_state_current == "suspended":
                        xapi_session.xenapi.VM.resume(vm_ref, False, False)
                    elif vm_power_state_current == "paused":
                        xapi_session.xenapi.VM.unpause(vm_ref)
            elif power_state == "poweredoff":
                if not module.check_mode:
                    # hard_shutdown will halt VM regardless of current state.
                    xapi_session.xenapi.VM.hard_shutdown(vm_ref)
            elif power_state == "restarted":
                # hard_reboot will restart VM only if VM is in paused or running state.
                if vm_power_state_current in ["paused", "poweredon"]:
                    if not module.check_mode:
                        xapi_session.xenapi.VM.hard_reboot(vm_ref)
                else:
                    module.fail_json(msg="Cannot restart VM in state '%s'!" % vm_power_state_current)
            elif power_state == "suspended":
                # running state is required for suspend.
                if vm_power_state_current == "poweredon":
                    if not module.check_mode:
                        xapi_session.xenapi.VM.suspend(vm_ref)
                else:
                    module.fail_json(msg="Cannot suspend VM in state '%s'!" % vm_power_state_current)
            elif power_state == "shutdownguest":
                # running state is required for guest shutdown.
                if vm_power_state_current == "poweredon":
                    if not module.check_mode:
                        if timeout == 0:
                            # Synchronous call waits indefinitely.
                            xapi_session.xenapi.VM.clean_shutdown(vm_ref)
                        else:
                            # Async call + explicit wait so we can enforce timeout.
                            task_ref = xapi_session.xenapi.Async.VM.clean_shutdown(vm_ref)
                            task_result = wait_for_task(module, task_ref, timeout)

                            if task_result:
                                module.fail_json(msg="Guest shutdown task failed: '%s'!" % task_result)
                else:
                    module.fail_json(msg="Cannot shutdown guest when VM is in state '%s'!" % vm_power_state_current)
            elif power_state == "rebootguest":
                # running state is required for guest reboot.
                if vm_power_state_current == "poweredon":
                    if not module.check_mode:
                        if timeout == 0:
                            xapi_session.xenapi.VM.clean_reboot(vm_ref)
                        else:
                            task_ref = xapi_session.xenapi.Async.VM.clean_reboot(vm_ref)
                            task_result = wait_for_task(module, task_ref, timeout)

                            if task_result:
                                module.fail_json(msg="Guest reboot task failed: '%s'!" % task_result)
                else:
                    module.fail_json(msg="Cannot reboot guest when VM is in state '%s'!" % vm_power_state_current)
            else:
                module.fail_json(msg="Requested VM power state '%s' is unsupported!" % power_state)

            state_changed = True
    except XenAPI.Failure as f:
        module.fail_json(msg="XAPI ERROR: %s" % f.details)

    return (state_changed, vm_power_state_resulting)
def wait_for_task(module, task_ref, timeout=300):
    """Waits for async XAPI task to finish.

    Polls the task status every 2 seconds until the task leaves the
    "pending" state or the timeout expires. A timeout of 0 means wait
    indefinitely. The task object is destroyed on the server before
    returning.

    Args:
        module: Reference to Ansible module object.
        task_ref (str): XAPI reference to task.
        timeout (int): timeout in seconds (default: 300). 0 disables
            the timeout entirely.

    Returns:
        str: failure message on failure ("timeout" or the task's final
            status string), else an empty string on success.
    """
    # Fail if we don't have a valid task reference.
    if not task_ref or task_ref == "OpaqueRef:NULL":
        module.fail_json(msg="Cannot wait for task. Invalid task reference supplied!")
    xapi_session = XAPI.connect(module)
    interval = 2
    result = ""
    # If we have to wait indefinitely, make time_left larger than 0 so we can
    # enter while loop.
    if timeout == 0:
        time_left = 1
    else:
        time_left = timeout
    try:
        while time_left > 0:
            task_status = xapi_session.xenapi.task.get_status(task_ref).lower()
            if task_status == "pending":
                # Task is still running.
                time.sleep(interval)
                # We decrease time_left only if we don't wait indefinitely.
                if timeout != 0:
                    time_left -= interval
                continue
            elif task_status == "success":
                # Task is done; result stays "" to signal success.
                break
            else:
                # Task failed; its status string becomes the failure message.
                result = task_status
                break
        else:
            # while-else: runs only when the loop exhausted time_left
            # without hitting a break, i.e. we timed out.
            result = "timeout"
        # Clean up the finished (or abandoned) task object on the server.
        xapi_session.xenapi.task.destroy(task_ref)
    except XenAPI.Failure as f:
        module.fail_json(msg="XAPI ERROR: %s" % f.details)
    return result
def wait_for_vm_ip_address(module, vm_ref, timeout=300):
    """Waits for VM to acquire an IP address.

    Polls the VM's guest metrics every 2 seconds until a "0/ip" entry
    (first interface's IP) shows up in the 'networks' record or the
    timeout expires. A timeout of 0 means wait indefinitely. The VM
    must be powered on, otherwise the module fails immediately.

    Args:
        module: Reference to Ansible module object.
        vm_ref (str): XAPI reference to VM.
        timeout (int): timeout in seconds (default: 300). 0 disables
            the timeout entirely.

    Returns:
        dict: VM guest metrics as retrieved by
            VM_guest_metrics.get_record() XAPI method with info
            on IP address acquired.
    """
    # Fail if we don't have a valid VM reference.
    if not vm_ref or vm_ref == "OpaqueRef:NULL":
        module.fail_json(msg="Cannot wait for VM IP address. Invalid VM reference supplied!")
    xapi_session = XAPI.connect(module)
    vm_guest_metrics = {}
    try:
        # We translate VM power state string so that error message can be
        # consistent with module VM power states.
        vm_power_state = xapi_to_module_vm_power_state(xapi_session.xenapi.VM.get_power_state(vm_ref).lower())
        if vm_power_state != 'poweredon':
            module.fail_json(msg="Cannot wait for VM IP address when VM is in state '%s'!" % vm_power_state)
        interval = 2
        # If we have to wait indefinitely, make time_left larger than 0 so we can
        # enter while loop.
        if timeout == 0:
            time_left = 1
        else:
            time_left = timeout
        while time_left > 0:
            # Guest metrics only exist once the guest agent reports in;
            # until then the reference is the NULL sentinel.
            vm_guest_metrics_ref = xapi_session.xenapi.VM.get_guest_metrics(vm_ref)
            if vm_guest_metrics_ref != "OpaqueRef:NULL":
                vm_guest_metrics = xapi_session.xenapi.VM_guest_metrics.get_record(vm_guest_metrics_ref)
                vm_ips = vm_guest_metrics['networks']
                # "0/ip" is the primary IP of the first virtual interface.
                if "0/ip" in vm_ips:
                    break
            time.sleep(interval)
            # We decrease time_left only if we don't wait indefinitely.
            if timeout != 0:
                time_left -= interval
        else:
            # while-else: loop exhausted time_left without a break,
            # i.e. we timed out.
            module.fail_json(msg="Timed out waiting for VM IP address!")
    except XenAPI.Failure as f:
        module.fail_json(msg="XAPI ERROR: %s" % f.details)
    return vm_guest_metrics
def get_xenserver_version(module):
    """Return the XenServer product version as a list of integers.

    Args:
        module: Reference to Ansible module object.

    Returns:
        list: [major, minor, update] version numbers, or [0, 0, 0] when
            the reported version string cannot be parsed as dotted
            integers.
    """
    xapi_session = XAPI.connect(module)
    host_ref = xapi_session.xenapi.session.get_this_host(xapi_session._session)
    try:
        version_string = xapi_session.xenapi.host.get_software_version(host_ref)['product_version']
        xenserver_version = list(map(int, version_string.split('.')))
    except ValueError:
        # Non-numeric component in the version string - treat as unknown.
        xenserver_version = [0, 0, 0]
    return xenserver_version
class XAPI(object):
    """Class for XAPI session management.

    Holds a single, process-wide XAPI session in the class attribute
    ``_xapi_session`` so that repeated ``connect()`` calls share one
    login instead of opening a new session each time.
    """
    # Cached session shared by all callers (lazily created by connect()).
    _xapi_session = None

    @classmethod
    def connect(cls, module, disconnect_atexit=True):
        """Establishes XAPI connection and returns session reference.

        If no existing session is available, establishes a new one
        and returns it, else returns existing one.

        Args:
            module: Reference to Ansible module object.
            disconnect_atexit (bool): Controls if method should
                register atexit handler to disconnect from XenServer
                on module exit (default: True).

        Returns:
            XAPI session reference.
        """
        # Reuse the cached session if we already logged in.
        if cls._xapi_session is not None:
            return cls._xapi_session
        hostname = module.params['hostname']
        username = module.params['username']
        password = module.params['password']
        ignore_ssl = not module.params['validate_certs']
        if hostname == 'localhost':
            # Local connections go through the local Unix socket and
            # need no credentials.
            cls._xapi_session = XenAPI.xapi_local()
            username = ''
            password = ''
        else:
            # If scheme is not specified we default to http:// because https://
            # is problematic in most setups.
            if not hostname.startswith("http://") and not hostname.startswith("https://"):
                hostname = "http://%s" % hostname
            try:
                # ignore_ssl is supported in XenAPI library from XenServer 7.2
                # SDK onward but there is no way to tell which version we
                # are using. TypeError will be raised if ignore_ssl is not
                # supported. Additionally, ignore_ssl requires Python 2.7.9
                # or newer.
                cls._xapi_session = XenAPI.Session(hostname, ignore_ssl=ignore_ssl)
            except TypeError:
                # Try without ignore_ssl.
                cls._xapi_session = XenAPI.Session(hostname)
            if not password:
                password = ''
        try:
            cls._xapi_session.login_with_password(username, password, ANSIBLE_VERSION, 'Ansible')
        except XenAPI.Failure as f:
            module.fail_json(msg="Unable to log on to XenServer at %s as %s: %s" % (hostname, username, f.details))
        # Disabling atexit should be used in special cases only.
        if disconnect_atexit:
            atexit.register(cls._xapi_session.logout)
        return cls._xapi_session
class XenServerObject(object):
    """Base class for all XenServer objects.

    This class contains active XAPI session reference and common
    attributes with useful info about XenServer host/pool.

    Attributes:
        module: Reference to Ansible module object.
        xapi_session: Reference to XAPI session.
        pool_ref (str): XAPI reference to a pool currently connected to.
        default_sr_ref (str): XAPI reference to a pool default
            Storage Repository.
        xenserver_version (list of int): XenServer [major, minor, update]
            version numbers as returned by get_xenserver_version().

    Note:
        A ``host_ref`` attribute was documented here historically but is
        never set by __init__; do not rely on it.
    """
    def __init__(self, module):
        """Inits XenServerObject using common module parameters.

        Fails the module if the XenAPI client library is missing or if
        the initial pool queries raise a XAPI error.

        Args:
            module: Reference to Ansible module object.
        """
        if not HAS_XENAPI:
            module.fail_json(changed=False, msg=missing_required_lib("XenAPI"), exception=XENAPI_IMP_ERR)
        self.module = module
        self.xapi_session = XAPI.connect(module)
        try:
            # A XAPI connection always belongs to exactly one pool.
            self.pool_ref = self.xapi_session.xenapi.pool.get_all()[0]
            self.default_sr_ref = self.xapi_session.xenapi.pool.get_default_SR(self.pool_ref)
            self.xenserver_version = get_xenserver_version(module)
        except XenAPI.Failure as f:
            self.module.fail_json(msg="XAPI ERROR: %s" % f.details)
| gpl-3.0 |
aferr/TimingCompartments | tests/quick/se/00.hello/test.py | 19 | 1801 | # Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# gem5 test configuration fragment: 'root', 'LiveProcess', 'binpath' and
# 'NULL' are provided by the test framework that executes this file.
hello_process = LiveProcess(cmd='hello', executable=binpath('hello'))
root.system.cpu.workload = hello_process
# A configured checker CPU must execute the same workload as the CPU it checks.
if root.system.cpu.checker != NULL:
    root.system.cpu.checker.workload = hello_process
| bsd-3-clause |
2015fallhw/cptocadp | static/Brython3.2.3-20151122-082712/Lib/encodings/cp1255.py | 37 | 12773 | """ Python Character Mapping Codec cp1255 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1255.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless encoder/decoder for the cp1255 (Windows Hebrew) charmap."""

    def encode(self, input, errors='strict'):
        # charmap_encode performs the per-character table lookup in C.
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # The codec is stateless, so 'final' is irrelevant; encode directly
        # and drop the consumed-length half of the result tuple.
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Stateless charmap decode; the consumed-length part is unused.
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer for cp1255; all behaviour comes from the mixins."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader for cp1255; all behaviour comes from the mixins."""
### encodings module API
def getregentry():
    """Return the CodecInfo record used to register this codec as 'cp1255'."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp1255',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
# One entry per byte value 0x00-0xFF, mapping it to its Unicode character.
# '\ufffe' (an invalid code point) marks byte values that are undefined
# in cp1255. Generated by gencodec.py from the vendor mapping file.
decoding_table = (
    '\x00' # 0x00 -> NULL
    '\x01' # 0x01 -> START OF HEADING
    '\x02' # 0x02 -> START OF TEXT
    '\x03' # 0x03 -> END OF TEXT
    '\x04' # 0x04 -> END OF TRANSMISSION
    '\x05' # 0x05 -> ENQUIRY
    '\x06' # 0x06 -> ACKNOWLEDGE
    '\x07' # 0x07 -> BELL
    '\x08' # 0x08 -> BACKSPACE
    '\t' # 0x09 -> HORIZONTAL TABULATION
    '\n' # 0x0A -> LINE FEED
    '\x0b' # 0x0B -> VERTICAL TABULATION
    '\x0c' # 0x0C -> FORM FEED
    '\r' # 0x0D -> CARRIAGE RETURN
    '\x0e' # 0x0E -> SHIFT OUT
    '\x0f' # 0x0F -> SHIFT IN
    '\x10' # 0x10 -> DATA LINK ESCAPE
    '\x11' # 0x11 -> DEVICE CONTROL ONE
    '\x12' # 0x12 -> DEVICE CONTROL TWO
    '\x13' # 0x13 -> DEVICE CONTROL THREE
    '\x14' # 0x14 -> DEVICE CONTROL FOUR
    '\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
    '\x16' # 0x16 -> SYNCHRONOUS IDLE
    '\x17' # 0x17 -> END OF TRANSMISSION BLOCK
    '\x18' # 0x18 -> CANCEL
    '\x19' # 0x19 -> END OF MEDIUM
    '\x1a' # 0x1A -> SUBSTITUTE
    '\x1b' # 0x1B -> ESCAPE
    '\x1c' # 0x1C -> FILE SEPARATOR
    '\x1d' # 0x1D -> GROUP SEPARATOR
    '\x1e' # 0x1E -> RECORD SEPARATOR
    '\x1f' # 0x1F -> UNIT SEPARATOR
    ' ' # 0x20 -> SPACE
    '!' # 0x21 -> EXCLAMATION MARK
    '"' # 0x22 -> QUOTATION MARK
    '#' # 0x23 -> NUMBER SIGN
    '$' # 0x24 -> DOLLAR SIGN
    '%' # 0x25 -> PERCENT SIGN
    '&' # 0x26 -> AMPERSAND
    "'" # 0x27 -> APOSTROPHE
    '(' # 0x28 -> LEFT PARENTHESIS
    ')' # 0x29 -> RIGHT PARENTHESIS
    '*' # 0x2A -> ASTERISK
    '+' # 0x2B -> PLUS SIGN
    ',' # 0x2C -> COMMA
    '-' # 0x2D -> HYPHEN-MINUS
    '.' # 0x2E -> FULL STOP
    '/' # 0x2F -> SOLIDUS
    '0' # 0x30 -> DIGIT ZERO
    '1' # 0x31 -> DIGIT ONE
    '2' # 0x32 -> DIGIT TWO
    '3' # 0x33 -> DIGIT THREE
    '4' # 0x34 -> DIGIT FOUR
    '5' # 0x35 -> DIGIT FIVE
    '6' # 0x36 -> DIGIT SIX
    '7' # 0x37 -> DIGIT SEVEN
    '8' # 0x38 -> DIGIT EIGHT
    '9' # 0x39 -> DIGIT NINE
    ':' # 0x3A -> COLON
    ';' # 0x3B -> SEMICOLON
    '<' # 0x3C -> LESS-THAN SIGN
    '=' # 0x3D -> EQUALS SIGN
    '>' # 0x3E -> GREATER-THAN SIGN
    '?' # 0x3F -> QUESTION MARK
    '@' # 0x40 -> COMMERCIAL AT
    'A' # 0x41 -> LATIN CAPITAL LETTER A
    'B' # 0x42 -> LATIN CAPITAL LETTER B
    'C' # 0x43 -> LATIN CAPITAL LETTER C
    'D' # 0x44 -> LATIN CAPITAL LETTER D
    'E' # 0x45 -> LATIN CAPITAL LETTER E
    'F' # 0x46 -> LATIN CAPITAL LETTER F
    'G' # 0x47 -> LATIN CAPITAL LETTER G
    'H' # 0x48 -> LATIN CAPITAL LETTER H
    'I' # 0x49 -> LATIN CAPITAL LETTER I
    'J' # 0x4A -> LATIN CAPITAL LETTER J
    'K' # 0x4B -> LATIN CAPITAL LETTER K
    'L' # 0x4C -> LATIN CAPITAL LETTER L
    'M' # 0x4D -> LATIN CAPITAL LETTER M
    'N' # 0x4E -> LATIN CAPITAL LETTER N
    'O' # 0x4F -> LATIN CAPITAL LETTER O
    'P' # 0x50 -> LATIN CAPITAL LETTER P
    'Q' # 0x51 -> LATIN CAPITAL LETTER Q
    'R' # 0x52 -> LATIN CAPITAL LETTER R
    'S' # 0x53 -> LATIN CAPITAL LETTER S
    'T' # 0x54 -> LATIN CAPITAL LETTER T
    'U' # 0x55 -> LATIN CAPITAL LETTER U
    'V' # 0x56 -> LATIN CAPITAL LETTER V
    'W' # 0x57 -> LATIN CAPITAL LETTER W
    'X' # 0x58 -> LATIN CAPITAL LETTER X
    'Y' # 0x59 -> LATIN CAPITAL LETTER Y
    'Z' # 0x5A -> LATIN CAPITAL LETTER Z
    '[' # 0x5B -> LEFT SQUARE BRACKET
    '\\' # 0x5C -> REVERSE SOLIDUS
    ']' # 0x5D -> RIGHT SQUARE BRACKET
    '^' # 0x5E -> CIRCUMFLEX ACCENT
    '_' # 0x5F -> LOW LINE
    '`' # 0x60 -> GRAVE ACCENT
    'a' # 0x61 -> LATIN SMALL LETTER A
    'b' # 0x62 -> LATIN SMALL LETTER B
    'c' # 0x63 -> LATIN SMALL LETTER C
    'd' # 0x64 -> LATIN SMALL LETTER D
    'e' # 0x65 -> LATIN SMALL LETTER E
    'f' # 0x66 -> LATIN SMALL LETTER F
    'g' # 0x67 -> LATIN SMALL LETTER G
    'h' # 0x68 -> LATIN SMALL LETTER H
    'i' # 0x69 -> LATIN SMALL LETTER I
    'j' # 0x6A -> LATIN SMALL LETTER J
    'k' # 0x6B -> LATIN SMALL LETTER K
    'l' # 0x6C -> LATIN SMALL LETTER L
    'm' # 0x6D -> LATIN SMALL LETTER M
    'n' # 0x6E -> LATIN SMALL LETTER N
    'o' # 0x6F -> LATIN SMALL LETTER O
    'p' # 0x70 -> LATIN SMALL LETTER P
    'q' # 0x71 -> LATIN SMALL LETTER Q
    'r' # 0x72 -> LATIN SMALL LETTER R
    's' # 0x73 -> LATIN SMALL LETTER S
    't' # 0x74 -> LATIN SMALL LETTER T
    'u' # 0x75 -> LATIN SMALL LETTER U
    'v' # 0x76 -> LATIN SMALL LETTER V
    'w' # 0x77 -> LATIN SMALL LETTER W
    'x' # 0x78 -> LATIN SMALL LETTER X
    'y' # 0x79 -> LATIN SMALL LETTER Y
    'z' # 0x7A -> LATIN SMALL LETTER Z
    '{' # 0x7B -> LEFT CURLY BRACKET
    '|' # 0x7C -> VERTICAL LINE
    '}' # 0x7D -> RIGHT CURLY BRACKET
    '~' # 0x7E -> TILDE
    '\x7f' # 0x7F -> DELETE
    # 0x80-0x9F: Windows "C1" range of punctuation and symbols.
    '\u20ac' # 0x80 -> EURO SIGN
    '\ufffe' # 0x81 -> UNDEFINED
    '\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
    '\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
    '\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
    '\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
    '\u2020' # 0x86 -> DAGGER
    '\u2021' # 0x87 -> DOUBLE DAGGER
    '\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
    '\u2030' # 0x89 -> PER MILLE SIGN
    '\ufffe' # 0x8A -> UNDEFINED
    '\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    '\ufffe' # 0x8C -> UNDEFINED
    '\ufffe' # 0x8D -> UNDEFINED
    '\ufffe' # 0x8E -> UNDEFINED
    '\ufffe' # 0x8F -> UNDEFINED
    '\ufffe' # 0x90 -> UNDEFINED
    '\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
    '\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
    '\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
    '\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
    '\u2022' # 0x95 -> BULLET
    '\u2013' # 0x96 -> EN DASH
    '\u2014' # 0x97 -> EM DASH
    '\u02dc' # 0x98 -> SMALL TILDE
    '\u2122' # 0x99 -> TRADE MARK SIGN
    '\ufffe' # 0x9A -> UNDEFINED
    '\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    '\ufffe' # 0x9C -> UNDEFINED
    '\ufffe' # 0x9D -> UNDEFINED
    '\ufffe' # 0x9E -> UNDEFINED
    '\ufffe' # 0x9F -> UNDEFINED
    '\xa0' # 0xA0 -> NO-BREAK SPACE
    '\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
    '\xa2' # 0xA2 -> CENT SIGN
    '\xa3' # 0xA3 -> POUND SIGN
    '\u20aa' # 0xA4 -> NEW SHEQEL SIGN
    '\xa5' # 0xA5 -> YEN SIGN
    '\xa6' # 0xA6 -> BROKEN BAR
    '\xa7' # 0xA7 -> SECTION SIGN
    '\xa8' # 0xA8 -> DIAERESIS
    '\xa9' # 0xA9 -> COPYRIGHT SIGN
    '\xd7' # 0xAA -> MULTIPLICATION SIGN
    '\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xac' # 0xAC -> NOT SIGN
    '\xad' # 0xAD -> SOFT HYPHEN
    '\xae' # 0xAE -> REGISTERED SIGN
    '\xaf' # 0xAF -> MACRON
    '\xb0' # 0xB0 -> DEGREE SIGN
    '\xb1' # 0xB1 -> PLUS-MINUS SIGN
    '\xb2' # 0xB2 -> SUPERSCRIPT TWO
    '\xb3' # 0xB3 -> SUPERSCRIPT THREE
    '\xb4' # 0xB4 -> ACUTE ACCENT
    '\xb5' # 0xB5 -> MICRO SIGN
    '\xb6' # 0xB6 -> PILCROW SIGN
    '\xb7' # 0xB7 -> MIDDLE DOT
    '\xb8' # 0xB8 -> CEDILLA
    '\xb9' # 0xB9 -> SUPERSCRIPT ONE
    '\xf7' # 0xBA -> DIVISION SIGN
    '\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    '\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
    '\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
    '\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
    '\xbf' # 0xBF -> INVERTED QUESTION MARK
    # 0xC0-0xD8: Hebrew points (niqqud), punctuation and ligatures.
    '\u05b0' # 0xC0 -> HEBREW POINT SHEVA
    '\u05b1' # 0xC1 -> HEBREW POINT HATAF SEGOL
    '\u05b2' # 0xC2 -> HEBREW POINT HATAF PATAH
    '\u05b3' # 0xC3 -> HEBREW POINT HATAF QAMATS
    '\u05b4' # 0xC4 -> HEBREW POINT HIRIQ
    '\u05b5' # 0xC5 -> HEBREW POINT TSERE
    '\u05b6' # 0xC6 -> HEBREW POINT SEGOL
    '\u05b7' # 0xC7 -> HEBREW POINT PATAH
    '\u05b8' # 0xC8 -> HEBREW POINT QAMATS
    '\u05b9' # 0xC9 -> HEBREW POINT HOLAM
    '\ufffe' # 0xCA -> UNDEFINED
    '\u05bb' # 0xCB -> HEBREW POINT QUBUTS
    '\u05bc' # 0xCC -> HEBREW POINT DAGESH OR MAPIQ
    '\u05bd' # 0xCD -> HEBREW POINT METEG
    '\u05be' # 0xCE -> HEBREW PUNCTUATION MAQAF
    '\u05bf' # 0xCF -> HEBREW POINT RAFE
    '\u05c0' # 0xD0 -> HEBREW PUNCTUATION PASEQ
    '\u05c1' # 0xD1 -> HEBREW POINT SHIN DOT
    '\u05c2' # 0xD2 -> HEBREW POINT SIN DOT
    '\u05c3' # 0xD3 -> HEBREW PUNCTUATION SOF PASUQ
    '\u05f0' # 0xD4 -> HEBREW LIGATURE YIDDISH DOUBLE VAV
    '\u05f1' # 0xD5 -> HEBREW LIGATURE YIDDISH VAV YOD
    '\u05f2' # 0xD6 -> HEBREW LIGATURE YIDDISH DOUBLE YOD
    '\u05f3' # 0xD7 -> HEBREW PUNCTUATION GERESH
    '\u05f4' # 0xD8 -> HEBREW PUNCTUATION GERSHAYIM
    '\ufffe' # 0xD9 -> UNDEFINED
    '\ufffe' # 0xDA -> UNDEFINED
    '\ufffe' # 0xDB -> UNDEFINED
    '\ufffe' # 0xDC -> UNDEFINED
    '\ufffe' # 0xDD -> UNDEFINED
    '\ufffe' # 0xDE -> UNDEFINED
    '\ufffe' # 0xDF -> UNDEFINED
    # 0xE0-0xFA: the Hebrew alphabet proper.
    '\u05d0' # 0xE0 -> HEBREW LETTER ALEF
    '\u05d1' # 0xE1 -> HEBREW LETTER BET
    '\u05d2' # 0xE2 -> HEBREW LETTER GIMEL
    '\u05d3' # 0xE3 -> HEBREW LETTER DALET
    '\u05d4' # 0xE4 -> HEBREW LETTER HE
    '\u05d5' # 0xE5 -> HEBREW LETTER VAV
    '\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN
    '\u05d7' # 0xE7 -> HEBREW LETTER HET
    '\u05d8' # 0xE8 -> HEBREW LETTER TET
    '\u05d9' # 0xE9 -> HEBREW LETTER YOD
    '\u05da' # 0xEA -> HEBREW LETTER FINAL KAF
    '\u05db' # 0xEB -> HEBREW LETTER KAF
    '\u05dc' # 0xEC -> HEBREW LETTER LAMED
    '\u05dd' # 0xED -> HEBREW LETTER FINAL MEM
    '\u05de' # 0xEE -> HEBREW LETTER MEM
    '\u05df' # 0xEF -> HEBREW LETTER FINAL NUN
    '\u05e0' # 0xF0 -> HEBREW LETTER NUN
    '\u05e1' # 0xF1 -> HEBREW LETTER SAMEKH
    '\u05e2' # 0xF2 -> HEBREW LETTER AYIN
    '\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE
    '\u05e4' # 0xF4 -> HEBREW LETTER PE
    '\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI
    '\u05e6' # 0xF6 -> HEBREW LETTER TSADI
    '\u05e7' # 0xF7 -> HEBREW LETTER QOF
    '\u05e8' # 0xF8 -> HEBREW LETTER RESH
    '\u05e9' # 0xF9 -> HEBREW LETTER SHIN
    '\u05ea' # 0xFA -> HEBREW LETTER TAV
    '\ufffe' # 0xFB -> UNDEFINED
    '\ufffe' # 0xFC -> UNDEFINED
    '\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
    '\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
    '\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
# Inverse mapping (Unicode ordinal -> byte), built in C from decoding_table.
encoding_table=codecs.charmap_build(decoding_table)
| agpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/numpy-1.9.2/numpy/distutils/fcompiler/hpux.py | 229 | 1464 | from __future__ import division, absolute_import, print_function
from numpy.distutils.fcompiler import FCompiler
compilers = ['HPUXFCompiler']
class HPUXFCompiler(FCompiler):
    # Key used to select this compiler, e.g. --fcompiler=hpux.
    compiler_type = 'hpux'
    description = 'HP Fortran 90 Compiler'
    # Extracts the version number from the output of 'f90 +version'.
    version_pattern = r'HP F90 (?P<version>[^\s*,]*)'
    # Command lines for each build step; all compilation goes through f90.
    executables = {
        'version_cmd'  : ["f90", "+version"],
        'compiler_f77' : ["f90"],
        'compiler_fix' : ["f90"],
        'compiler_f90' : ["f90"],
        'linker_so'    : ["ld", "-b"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }
    module_dir_switch = None #XXX: fix me
    module_include_switch = None #XXX: fix me
    # +Z: generate position-independent code (required for shared objects).
    pic_flags = ['+Z']

    def get_flags(self):
        # +ppu appends underscores to external names; +DD64 builds 64-bit.
        return self.pic_flags + ['+ppu', '+DD64']

    def get_flags_opt(self):
        return ['-O3']

    def get_libraries(self):
        # Link against the math library.
        return ['m']

    def get_library_dirs(self):
        opt = ['/usr/lib/hpux64']
        return opt

    def get_version(self, force=0, ok_status=[256, 0, 1]):
        # NOTE(review): mutable default argument; safe only as long as
        # ok_status is never mutated downstream - confirm in FCompiler.
        # XXX status==256 may indicate 'unrecognized option' or
        # 'no input file'. So, version_cmd needs more work.
        return FCompiler.get_version(self, force, ok_status)
if __name__ == '__main__':
    # Ad-hoc smoke test: report the detected HP-UX Fortran compiler version.
    from distutils import log
    log.set_verbosity(10)
    from numpy.distutils.fcompiler import new_fcompiler
    fcompiler = new_fcompiler(compiler='hpux')
    fcompiler.customize()
    print(fcompiler.get_version())
| mit |
theicfire/djangofun | djangotoolbox/fields.py | 6 | 11104 | # All fields except for BlobField written by Jonas Haag <jonas@lophus.org>
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.importlib import import_module
__all__ = ('RawField', 'ListField', 'DictField', 'SetField',
'BlobField', 'EmbeddedModelField')
class _HandleAssignment(object):
    """
    Data descriptor installed on the model class so that every assignment
    to the attribute is converted through the field's ``to_python``.
    """
    def __init__(self, field):
        self.field = field

    def __get__(self, obj, type=None):
        if obj is None:
            raise AttributeError('Can only be accessed via an instance.')
        attr_name = self.field.name
        return obj.__dict__[attr_name]

    def __set__(self, obj, value):
        converted = self.field.to_python(value)
        obj.__dict__[self.field.name] = converted
class RawField(models.Field):
    """Generic field that stores a value verbatim - whatever the database
    backend accepts - with no conversion on the Python side."""

    def get_internal_type(self):
        # Nonrel backends map this internal type name to their native
        # "anything goes" column type.
        return 'RawField'
class AbstractIterableField(models.Field):
    """
    Abstract field for fields for storing iterable data type like ``list``,
    ``set`` and ``dict``.

    You can pass an instance of a field as the first argument.
    If you do, the iterable items will be piped through the passed field's
    validation and conversion routines, converting the items to the
    appropriate data type.
    """
    def __init__(self, item_field=None, *args, **kwargs):
        # Without an explicit item field, items are stored unconverted.
        if item_field is None:
            item_field = RawField()
        self.item_field = item_field
        # Default to an empty container unless the field is nullable.
        default = kwargs.get('default', None if kwargs.get('null') else ())
        if default is not None and not callable(default):
            # ensure a new object is created every time the default is accessed
            kwargs['default'] = lambda: self._type(default)
        super(AbstractIterableField, self).__init__(*args, **kwargs)

    def contribute_to_class(self, cls, name):
        # Make the wrapped item field look like a field of the model so
        # its conversion hooks see a sensible model/name pair.
        self.item_field.model = cls
        self.item_field.name = name
        super(AbstractIterableField, self).contribute_to_class(cls, name)
        # If the item field uses SubfieldBase (expects to_python to run on
        # every assignment), emulate that with a data descriptor.
        metaclass = getattr(self.item_field, '__metaclass__', None)
        if issubclass(metaclass, models.SubfieldBase):
            setattr(cls, self.name, _HandleAssignment(self))

    def db_type(self, connection):
        # Encode the item field's db type into ours, e.g. "ListField:text".
        item_db_type = self.item_field.db_type(connection=connection)
        return '%s:%s' % (self.__class__.__name__, item_db_type)

    def _convert(self, func, values, *args, **kwargs):
        # Apply func to every item, rebuilding the container as self._type;
        # non-iterable values (e.g. None) are passed through untouched.
        if isinstance(values, (list, tuple, set)):
            return self._type(func(value, *args, **kwargs) for value in values)
        return values

    def to_python(self, value):
        return self._convert(self.item_field.to_python, value)

    def pre_save(self, model_instance, add):
        # Field.pre_save expects a model instance carrying the value under
        # the field's attname; fake one so the single item field can be
        # reused for every item in the container.
        class fake_instance(object):
            pass
        fake_instance = fake_instance()
        def wrapper(value):
            assert not hasattr(self.item_field, 'attname')
            fake_instance.value = value
            self.item_field.attname = 'value'
            try:
                return self.item_field.pre_save(fake_instance, add)
            finally:
                # Always remove the temporary attname so the assert above
                # holds for the next item.
                del self.item_field.attname
        return self._convert(wrapper, getattr(model_instance, self.attname))

    def get_db_prep_value(self, value, connection, prepared=False):
        return self._convert(self.item_field.get_db_prep_value, value,
            connection=connection, prepared=prepared)

    def get_db_prep_save(self, value, connection):
        return self._convert(self.item_field.get_db_prep_save,
            value, connection=connection)

    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        # TODO/XXX: Remove as_lookup_value() once we have a cleaner solution
        # for dot-notation queries
        if hasattr(value, 'as_lookup_value'):
            value = value.as_lookup_value(self, lookup_type, connection)
        return self.item_field.get_db_prep_lookup(lookup_type, value,
            connection=connection, prepared=prepared)

    def validate(self, values, model_instance):
        # Only check that the value is iterable; item-level validation is
        # delegated to the item field during conversion.
        try:
            iter(values)
        except TypeError:
            raise ValidationError('Value of type %r is not iterable' % type(values))

    def formfield(self, **kwargs):
        raise NotImplementedError('No form field implemented for %r' % type(self))
class ListField(AbstractIterableField):
    """
    Field representing a Python ``list``.

    If the optional keyword argument `ordering` is given, it must be a callable
    that is passed to :meth:`list.sort` as `key` argument. If `ordering` is
    given, the items in the list will be sorted before sending them to the
    database.
    """
    # Container type used by AbstractIterableField for defaults/conversion.
    _type = list

    def __init__(self, *args, **kwargs):
        self.ordering = kwargs.pop('ordering', None)
        if self.ordering is not None and not callable(self.ordering):
            raise TypeError("'ordering' has to be a callable or None, "
                            "not of type %r" % type(self.ordering))
        super(ListField, self).__init__(*args, **kwargs)

    def pre_save(self, model_instance, add):
        values = getattr(model_instance, self.attname)
        if values is None:
            return None
        # Sort in place before the items are piped through the item field.
        if values and self.ordering:
            values.sort(key=self.ordering)
        return super(ListField, self).pre_save(model_instance, add)
class SetField(AbstractIterableField):
    """
    Field representing a Python ``set``.

    Items are piped through ``item_field`` exactly as described in
    :class:`AbstractIterableField`; only the container type differs.
    """
    # Container type used by AbstractIterableField for defaults/conversion.
    _type = set
class DictField(AbstractIterableField):
    """
    Field representing a Python ``dict``.

    The field type conversions described in :class:`AbstractIterableField`
    only affect values of the dictionary, not keys.
    Depending on the backend, keys that aren't strings might not be allowed.
    """
    _type = dict

    def _convert(self, func, values, *args, **kwargs):
        # Unlike the base class, convert values only; keys pass through
        # unchanged and None maps to None.
        if values is None:
            return None
        return dict((key, func(value, *args, **kwargs))
                     for key, value in values.iteritems())

    def validate(self, values, model_instance):
        if not isinstance(values, dict):
            raise ValidationError('Value is of type %r. Should be a dict.' % type(values))
class BlobField(models.Field):
    """
    A field for storing blobs of binary data.

    The value might either be a string (or something that can be converted to
    a string), or a file-like object.
    In the latter case, the object has to provide a ``read`` method from which
    the blob is read.
    """
    def get_internal_type(self):
        return 'BlobField'

    def formfield(self, **kwargs):
        # A file widget is provided, but use model FileField or ImageField
        # for storing specific files most of the time
        from .widgets import BlobWidget
        from django.forms import FileField
        defaults = {'form_class': FileField, 'widget': BlobWidget}
        defaults.update(kwargs)
        return super(BlobField, self).formfield(**defaults)

    def get_db_prep_value(self, value, connection, prepared=False):
        # File-like objects are read in full; everything else is stringified.
        if hasattr(value, 'read'):
            return value.read()
        else:
            return str(value)

    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        # Blobs are opaque to the database, so no lookup type makes sense.
        raise TypeError("BlobFields do not support lookups")

    def value_to_string(self, obj):
        return str(self._get_val_from_obj(obj))
class EmbeddedModelField(models.Field):
    """
    Field that allows you to embed a model instance.

    :param model: (optional) The model class that shall be embedded
        (may also be passed as string similar to relation fields)
    """
    # SubfieldBase makes to_python run on every assignment (Python 2 only).
    __metaclass__ = models.SubfieldBase

    def __init__(self, model=None, *args, **kwargs):
        self.embedded_model = model
        kwargs.setdefault('default', None)
        super(EmbeddedModelField, self).__init__(*args, **kwargs)

    def db_type(self, connection):
        # Stored as a dict of raw values on the backend side.
        return 'DictField:RawField'

    def _set_model(self, model):
        # EmbeddedModelFields are not contribute[d]_to_class if using within
        # ListFields (and friends), so we can only know the model field is
        # used in when the IterableField sets our 'model' attribute in its
        # contribute_to_class method.
        # We need to know the model to generate a valid key for the lookup.
        if model is not None and isinstance(self.embedded_model, basestring):
            # The model argument passed to __init__ was a string, so we need
            # to make sure to resolve that string to the corresponding model
            # class, similar to relation fields. We abuse some of the
            # relation fields' code to do the lookup here:
            def _resolve_lookup(self_, resolved_model, model):
                self.embedded_model = resolved_model
            from django.db.models.fields.related import add_lazy_relation
            add_lazy_relation(model, self, self.embedded_model, _resolve_lookup)
        self._model = model

    model = property(lambda self:self._model, _set_model)

    def pre_save(self, model_instance, add):
        # Returns an (instance, {field_name: prepared_value}) pair that
        # get_db_prep_value unpacks via its tuple parameter below.
        embedded_instance = super(EmbeddedModelField, self).pre_save(model_instance, add)
        if embedded_instance is None:
            return None, None
        model = self.embedded_model or models.Model
        if not isinstance(embedded_instance, model):
            raise TypeError("Expected instance of type %r, not %r" % (
                type(model), type(embedded_instance)))
        data = dict((field.name, field.pre_save(embedded_instance, add))
                    for field in embedded_instance._meta.fields)
        return embedded_instance, data

    # NOTE: the tuple-unpacking parameter below is Python 2-only syntax.
    def get_db_prep_value(self, (embedded_instance, embedded_dict), **kwargs):
        if embedded_dict is None:
            return None
        values = {}
        for name, value in embedded_dict.iteritems():
            field = embedded_instance._meta.get_field(name)
            values[field.column] = field.get_db_prep_value(value, **kwargs)
        if self.embedded_model is None:
            # Untyped embeds remember their class so to_python can restore it.
            values.update({'_module' : embedded_instance.__class__.__module__,
                        '_model'  : embedded_instance.__class__.__name__})
        return values

    # TODO/XXX: Remove this once we have a cleaner solution
    def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
        if hasattr(value, 'as_lookup_value'):
            value = value.as_lookup_value(self, lookup_type, connection)
        return value

    def to_python(self, values):
        # Values loaded from the database arrive as a dict; anything else
        # (e.g. an already-assembled model instance) is passed through.
        if not isinstance(values, dict):
            return values
        module, model = values.pop('_module', None), values.pop('_model', None)
        # TODO/XXX: Workaround for old Python releases. Remove this someday.
        # Let's make sure keys are instances of str
        values = dict([(str(k), v) for k,v in values.items()])
        if module is not None:
            return getattr(import_module(module), model)(**values)
        return self.embedded_model(**values)
| bsd-3-clause |
schambers/civmarket | civmarket/lib/python2.7/site-packages/setuptools/tests/test_markerlib.py | 449 | 2506 | import os
import unittest
from setuptools.tests.py26compat import skipIf
try:
import ast
except ImportError:
pass
class TestMarkerlib(unittest.TestCase):
    """Tests for _markerlib's PEP 345 environment-marker evaluation."""

    @skipIf('ast' not in globals(),
            "ast not available (Python < 2.6?)")
    def test_markers(self):
        from _markerlib import interpret, default_environment, compile
        os_name = os.name

        # Markers that must evaluate to True in the default environment.
        self.assertTrue(interpret(""))

        self.assertTrue(interpret("os.name != 'buuuu'"))
        self.assertTrue(interpret("os_name != 'buuuu'"))
        self.assertTrue(interpret("python_version > '1.0'"))
        self.assertTrue(interpret("python_version < '5.0'"))
        self.assertTrue(interpret("python_version <= '5.0'"))
        self.assertTrue(interpret("python_version >= '1.0'"))
        self.assertTrue(interpret("'%s' in os.name" % os_name))
        self.assertTrue(interpret("'%s' in os_name" % os_name))
        self.assertTrue(interpret("'buuuu' not in os.name"))

        # The inverse markers must evaluate to False.
        self.assertFalse(interpret("os.name == 'buuuu'"))
        self.assertFalse(interpret("os_name == 'buuuu'"))
        self.assertFalse(interpret("python_version < '1.0'"))
        self.assertFalse(interpret("python_version > '5.0'"))
        self.assertFalse(interpret("python_version >= '5.0'"))
        self.assertFalse(interpret("python_version <= '1.0'"))
        self.assertFalse(interpret("'%s' not in os.name" % os_name))
        self.assertFalse(interpret("'buuuu' in os.name and python_version >= '5.0'"))
        self.assertFalse(interpret("'buuuu' in os_name and python_version >= '5.0'"))

        # A custom environment can extend/override the defaults.
        environment = default_environment()
        environment['extra'] = 'test'

        self.assertTrue(interpret("extra == 'test'", environment))
        self.assertFalse(interpret("extra == 'doc'", environment))

        # Invalid markers are rejected: dotted names other than os.name
        # raise NameError and generator expressions raise SyntaxError.
        # assertRaises replaces the previous hand-rolled helper functions,
        # which raised a bare Exception instead of a proper test failure.
        self.assertRaises(NameError, interpret, "python.version == '42'")
        self.assertRaises(SyntaxError, interpret, "(x for x in (4,))")

        # Compiled markers keep their source as the docstring.
        statement = "python_version == '5'"
        self.assertEqual(compile(statement).__doc__, statement)
| apache-2.0 |
Mhynlo/SickRage | tests/sickrage_tests/__init__.py | 22 | 1273 | # coding=utf-8
# This file is part of SickRage.
#
# URL: https://SickRage.GitHub.io
# Git: https://github.com/SickRage/SickRage.git
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for SickRage
"""
from __future__ import print_function
import helper
import media
import providers
import show
import system
import unittest
if __name__ == '__main__':
    print('=====> Running all test in "sickrage_tests" <=====')

    # One sub-package per test area; each is loaded and run as its own suite.
    TEST_MODULES = [helper, media, providers, show, system]

    # Hoist the loader/runner out of the loop; both are reusable across suites.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    for test_module in TEST_MODULES:
        runner.run(loader.loadTestsFromModule(test_module))
| gpl-3.0 |
ramusus/django-vkontakte-groups-migration | vkontakte_groups_migration/migrations/0002_auto__add_field_groupmigration_hidden.py | 1 | 16399 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the boolean ``hidden`` column.

        Adds ``hidden`` (default ``False``) to the legacy
        ``vkontakte_groups_groupstatmembers`` table backing GroupMigration.
        """
        # Adding field 'GroupMigration.hidden'
        db.add_column('vkontakte_groups_groupstatmembers', 'hidden',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
    def backwards(self, orm):
        """Reverse the migration: drop the ``hidden`` column again."""
        # Deleting field 'GroupMigration.hidden'
        db.delete_column('vkontakte_groups_groupstatmembers', 'hidden')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'vkontakte_groups.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '800'}),
'photo': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_big': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['vkontakte_users.User']", 'symmetrical': 'False'})
},
'vkontakte_groups_migration.groupmigration': {
'Meta': {'ordering': "('group', 'time', '-id')", 'unique_together': "(('group', 'time'),)", 'object_name': 'GroupMigration', 'db_table': "'vkontakte_groups_groupstatmembers'"},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'migrations'", 'to': "orm['vkontakte_groups.Group']"}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.TextField', [], {}),
'members_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'members_deactivated_entered': ('django.db.models.fields.TextField', [], {}),
'members_deactivated_entered_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'members_deactivated_entered_ids': ('picklefield.fields.PickledObjectField', [], {'default': '[]'}),
'members_deactivated_left': ('django.db.models.fields.TextField', [], {}),
'members_deactivated_left_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'members_deactivated_left_ids': ('picklefield.fields.PickledObjectField', [], {'default': '[]'}),
'members_entered': ('django.db.models.fields.TextField', [], {}),
'members_entered_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'members_entered_ids': ('picklefield.fields.PickledObjectField', [], {'default': '[]'}),
'members_has_avatar_entered': ('django.db.models.fields.TextField', [], {}),
'members_has_avatar_entered_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'members_has_avatar_entered_ids': ('picklefield.fields.PickledObjectField', [], {'default': '[]'}),
'members_has_avatar_left': ('django.db.models.fields.TextField', [], {}),
'members_has_avatar_left_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'members_has_avatar_left_ids': ('picklefield.fields.PickledObjectField', [], {'default': '[]'}),
'members_ids': ('picklefield.fields.PickledObjectField', [], {'default': '[]'}),
'members_left': ('django.db.models.fields.TextField', [], {}),
'members_left_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'members_left_ids': ('picklefield.fields.PickledObjectField', [], {'default': '[]'}),
'offset': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'vkontakte_places.city': {
'Meta': {'ordering': "['name']", 'object_name': 'City'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cities'", 'null': 'True', 'to': "orm['vkontakte_places.Country']"}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'})
},
'vkontakte_places.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'})
},
'vkontakte_users.user': {
'Meta': {'ordering': "['remote_id']", 'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {}),
'activity': ('django.db.models.fields.TextField', [], {}),
'albums': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'audios': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'bdate': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'books': ('django.db.models.fields.TextField', [], {}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_places.City']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'counters_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_places.Country']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'faculty': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'faculty_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'followers': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followers_users'", 'symmetrical': 'False', 'to': "orm['vkontakte_users.User']"}),
'games': ('django.db.models.fields.TextField', [], {}),
'graduation': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'has_mobile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'home_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interests': ('django.db.models.fields.TextField', [], {}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'livejournal': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'mobile_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'movies': ('django.db.models.fields.TextField', [], {}),
'mutual_friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'notes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'photo': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_big': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'rate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'relation': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'sex': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'skype': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'subscriptions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'sum_counters': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'timezone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'tv': ('django.db.models.fields.TextField', [], {}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'university': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'university_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'user_photos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'user_videos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'videos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'wall_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'vkontakte_wall.comment': {
'Meta': {'ordering': "['post', '-date']", 'object_name': 'Comment'},
'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['contenttypes.ContentType']"}),
'author_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'from_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'wall_comments'", 'to': "orm['vkontakte_wall.Post']"}),
'raw_html': ('django.db.models.fields.TextField', [], {}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}),
'reply_for_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'replies'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'reply_for_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_wall.Comment']", 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wall_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_comments'", 'to': "orm['contenttypes.ContentType']"}),
'wall_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'vkontakte_wall.post': {
'Meta': {'ordering': "['wall_owner_id', '-date']", 'object_name': 'Post'},
'attachments': ('django.db.models.fields.TextField', [], {}),
'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_posts'", 'to': "orm['contenttypes.ContentType']"}),
'author_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'copy_owner_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'copy_post_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'copy_text': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'geo': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'like_users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'like_posts'", 'blank': 'True', 'to': "orm['vkontakte_users.User']"}),
'likes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'media': ('django.db.models.fields.TextField', [], {}),
'online': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'post_source': ('django.db.models.fields.TextField', [], {}),
'raw_html': ('django.db.models.fields.TextField', [], {}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}),
'reply_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'repost_users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'repost_posts'", 'blank': 'True', 'to': "orm['vkontakte_users.User']"}),
'reposts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'signer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wall_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_posts'", 'to': "orm['contenttypes.ContentType']"}),
'wall_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {})
}
}
complete_apps = ['vkontakte_groups_migration'] | bsd-3-clause |
spaceboats/busbus | tests/test_provider_ctran.py | 1 | 1025 | import busbus
from busbus.provider.ctran import CTranProvider
from .conftest import mock_gtfs_zip
import arrow
import pytest
import responses
@pytest.fixture(scope='module')
@responses.activate
def ctran_provider(engine):
    """Module-scoped CTranProvider backed by a local, abridged GTFS feed.

    ``responses`` intercepts the HTTP GET to ``CTranProvider.gtfs_url`` so the
    provider's feed download at construction time never touches the network.
    """
    responses.add(responses.GET, CTranProvider.gtfs_url,
                  body=mock_gtfs_zip('ctran'), status=200,
                  content_type='application/zip')
    return CTranProvider(engine)
def test_len_routes(ctran_provider):
    # Counting 28 routes proves we loaded the local abridged GTFS copy,
    # not the full live feed.
    route_count = sum(1 for _ in ctran_provider.routes)
    assert route_count == 28
@pytest.mark.parametrize('stop_id,count', [
    (u'2058', 4)
])
def test_43_to_eaton_hall(ctran_provider, stop_id, count):
    """Arrivals for route 46 at the given stop within a fixed 2h window."""
    stop = ctran_provider.get(busbus.Stop, stop_id)
    route = ctran_provider.get(busbus.Route, u'46')
    window = ctran_provider.arrivals.where(
        stop=stop, route=route,
        start_time=arrow.get('2015-03-10T14:00:00-05:00'),
        end_time=arrow.get('2015-03-10T16:00:00-05:00'))
    arrivals = list(window)
    assert len(arrivals) == count
| mit |
ArcherSys/ArcherSys | eclipse/plugins/org.python.pydev.jython_4.5.5.201603221110/Lib/encodings/mac_croatian.py | 593 | 13889 | """ Python Character Mapping Codec mac_croatian generated from 'MAPPINGS/VENDORS/APPLE/CROATIAN.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless codec mapping between Unicode and the mac-croatian charset
    via the module-level charmap tables."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap codecs keep no state between calls,
    so each chunk is encoded independently."""
    def encode(self, input, final=False):
        # charmap_encode returns (bytes, length); only the bytes are needed
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; stateless, each chunk is decoded independently."""
    def decode(self, input, final=False):
        # charmap_decode returns (text, length); only the text is needed
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer; encoding behavior comes entirely from Codec."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader; decoding behavior comes entirely from Codec."""
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record registered for 'mac-croatian'."""
    # A single Codec instance serves both directions; its bound methods
    # are handed to the registry.
    codec = Codec()
    return codecs.CodecInfo(
        name='mac-croatian',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\u0160' # 0xA9 -> LATIN CAPITAL LETTER S WITH CARON
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u017d' # 0xAE -> LATIN CAPITAL LETTER Z WITH CARON
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u2206' # 0xB4 -> INCREMENT
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u0161' # 0xB9 -> LATIN SMALL LETTER S WITH CARON
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\u017e' # 0xBE -> LATIN SMALL LETTER Z WITH CARON
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\uf8ff' # 0xD8 -> Apple logo
u'\xa9' # 0xD9 -> COPYRIGHT SIGN
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\xc6' # 0xDE -> LATIN CAPITAL LETTER AE
u'\xbb' # 0xDF -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2013' # 0xE0 -> EN DASH
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u03c0' # 0xF9 -> GREEK SMALL LETTER PI
u'\xcb' # 0xFA -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\xca' # 0xFD -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xe6' # 0xFE -> LATIN SMALL LETTER AE
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit |
doomsterinc/odoo | doc/_extensions/github_link.py | 229 | 3484 | import inspect
import importlib
import os.path
from urlparse import urlunsplit
"""
* adds github_link(mode) context variable: provides URL (in relevant mode) of
current document on github
* if sphinx.ext.linkcode is enabled, automatically generates github linkcode
links (by setting config.linkcode_resolve)
Settings
========
* ``github_user``, username/organisation under which the project lives
* ``github_project``, name of the project on github
* (optional) ``version``, github branch to link to (default: master)
Notes
=====
* provided ``linkcode_resolve`` only supports Python domain
* generates https github links
* explicitly imports ``openerp``, so useless for anyone else
"""
def setup(app):
    """Sphinx extension entry point.

    Registers the ``github_user``/``github_project`` config values, hooks
    :func:`add_doc_link` into HTML page rendering, and installs a
    ``linkcode_resolve`` implementation for ``sphinx.ext.linkcode``.
    """
    app.add_config_value('github_user', None, 'env')
    app.add_config_value('github_project', None, 'env')
    app.connect('html-page-context', add_doc_link)

    def linkcode_resolve(domain, info):
        """ Resolves provided object to corresponding github URL
        """
        # TODO: js?
        if domain != 'py':
            return None
        if not (app.config.github_user and app.config.github_project):
            return None

        module, fullname = info['module'], info['fullname']
        # TODO: attributes/properties don't have modules, maybe try to look
        # them up based on their cached host object?
        if not module:
            return None

        # Walk the dotted attribute path down to the documented object.
        obj = importlib.import_module(module)
        for item in fullname.split('.'):
            obj = getattr(obj, item, None)
        if obj is None:
            return None

        # get original from decorated methods
        try: obj = getattr(obj, '_orig')
        except AttributeError: pass

        try:
            obj_source_path = inspect.getsourcefile(obj)
            _, line = inspect.getsourcelines(obj)
        except (TypeError, IOError):
            # obj doesn't have a module, or something
            return None

        # Link relative to the repository root (the parent of the openerp
        # package directory).
        import openerp
        # FIXME: make finding project root project-independent
        project_root = os.path.join(os.path.dirname(openerp.__file__), '..')
        return make_github_link(
            app,
            os.path.relpath(obj_source_path, project_root),
            line)
    app.config.linkcode_resolve = linkcode_resolve
def make_github_link(app, path, line=None, mode="blob"):
    """Build an https ``github.com`` URL for *path* (relative to repo root).

    :param path: repository-relative file path
    :param line: optional line number, rendered as an ``#L<n>`` fragment
    :param mode: github view mode, e.g. ``blob`` or ``edit``
    """
    config = app.config

    urlpath = "/{user}/{project}/{mode}/{branch}/{path}".format(
        user=config.github_user,
        project=config.github_project,
        branch=config.version or 'master',
        path=path,
        mode=mode,
    )
    return urlunsplit((
        'https',
        'github.com',
        urlpath,
        '',
        '' if line is None else 'L%d' % line
    ))

def add_doc_link(app, pagename, templatename, context, doctree):
    """ Add github_link function linking to the current page on github """
    # Require *both* settings. The previous check
    # `if not app.config.github_user and app.config.github_project:` parsed
    # as `(not user) and project` due to operator precedence, so it failed
    # to bail out whenever github_user was set but github_project was not.
    if not (app.config.github_user and app.config.github_project):
        return

    # FIXME: find other way to recover current document's source suffix
    # in Sphinx 1.3 it's possible to have multiple source suffixes and that
    # may be useful in the future
    source_suffix = app.config.source_suffix
    # source_suffix may be a single suffix or a sequence of them; use the
    # first entry in the latter case. Checking for list/tuple (instead of
    # the Python-2-only `basestring`) keeps this working on Python 2 and 3.
    if isinstance(source_suffix, (list, tuple)):
        source_suffix = source_suffix[0]

    # can't use functools.partial because 3rd positional is line not mode
    context['github_link'] = lambda mode='edit': make_github_link(
        app, 'doc/%s%s' % (pagename, source_suffix), mode=mode)
| agpl-3.0 |
muminoff/fabric-bolt | src/fabric_bolt/accounts/urls.py | 14 | 1297 | from django.conf.urls import url, patterns
from fabric_bolt.accounts import views
urlpatterns = patterns('',
    # Session management
    url(r'^login/$', views.Login.as_view(), name='accounts_user_login'),
    url(r'^logout/$', views.Logout.as_view(), name='accounts_user_logout'),

    # User CRUD
    url(r'^users/$', views.UserList.as_view(), name='accounts_user_list'),
    url(r'^user/add/$', views.UserAdd.as_view(), name='accounts_user_add'),
    url(r'^user/change/(?P<pk>.+)/$', views.UserChange.as_view(), name='accounts_user_change'),
    url(r'^user/view/(?P<pk>.+)/$', views.UserDetail.as_view(), name='accounts_user_view'),
    url(r'^user/delete/(?P<pk>.+)/$', views.UserDelete.as_view(), name='accounts_user_delete'),

    # Password change/reset; the string targets dispatch to the stock
    # django.contrib.auth views.
    url(r'^password_change/$', views.PasswordChange.as_view(), name='accounts_password_change'),
    url(r'^password_change/done/$', 'django.contrib.auth.views.password_change_done'),
    url(r'^password_reset/$', 'django.contrib.auth.views.password_reset'),
    url(r'^password_reset/done/$', 'django.contrib.auth.views.password_reset_done'),
    url(r'^reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$', views.PasswordCreate.as_view(), name='auth_password_reset_confirm'),
    url(r'^reset/done/$', 'django.contrib.auth.views.password_reset_complete'),
)
# NOTE: the closing paren above was restored; the original last line had a
# stray " | mit |" extraction artifact fused onto it, breaking the module.
mdanielwork/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geometry/test_data.py | 364 | 2994 | """
This module has the mock object definitions used to hold reference geometry
for the GEOS and GDAL tests.
"""
import gzip
import os
from django.contrib import gis
from django.utils import simplejson
# This global used to store reference geometry data.
# Populated lazily by TestDataMixin.geometries so the gzipped fixture is
# only parsed once per process.
GEOMETRIES = None
# Path where reference test data is located.
TEST_DATA = os.path.join(os.path.dirname(gis.__file__), 'tests', 'data')
def tuplize(seq):
    "Turn all nested sequences to tuples in given sequence."
    # Non-sequence leaves pass through untouched; lists and tuples are
    # rebuilt recursively as tuples.
    if not isinstance(seq, (list, tuple)):
        return seq
    return tuple(tuplize(item) for item in seq)
def strconvert(d):
    "Converts all keys in dictionary to str type."
    # Use .items() rather than the Python-2-only .iteritems() so this
    # helper runs unchanged on both Python 2 and Python 3; the dicts it
    # sees (JSON keyword arguments) are small, so the py2 list copy is
    # harmless.
    return dict([(str(k), v) for k, v in d.items()])
def get_ds_file(name, ext):
    # Data sets live at TEST_DATA/<name>/<name>.<ext>.
    filename = name + '.%s' % ext
    return os.path.join(TEST_DATA, name, filename)
class TestObj(object):
    """
    Base testing object, turns keyword args into attributes.
    """
    def __init__(self, **kwargs):
        # Plain-object instances carry no descriptors, so a bulk
        # __dict__ update is equivalent to calling setattr per key.
        self.__dict__.update(kwargs)
class TestDS(TestObj):
    """
    Object for testing GDAL data sources.
    """
    def __init__(self, name, **kwargs):
        # Default to the ESRI shapefile extension unless the caller
        # explicitly overrides it with ext=...
        extension = kwargs.pop('ext', 'shp')
        self.ds = get_ds_file(name, extension)
        # Remaining keyword args become attributes (TestObj behaviour).
        super(TestDS, self).__init__(**kwargs)
class TestGeom(TestObj):
    """
    Testing object used for wrapping reference geometry data
    in GEOS/GDAL tests.
    """
    def __init__(self, **kwargs):
        # JSON has no tuple type, so coordinate-bearing fields are
        # converted from (nested) lists to (nested) tuples here so that
        # test cases can compare them directly.  Falsy values are popped
        # but the attribute is deliberately left unset, as before.
        coords = kwargs.pop('coords', None)
        centroid = kwargs.pop('centroid', None)
        ext_ring_cs = kwargs.pop('ext_ring_cs', None)
        if coords:
            self.coords = tuplize(coords)
        if centroid:
            self.centroid = tuple(centroid)
        if ext_ring_cs:
            self.ext_ring_cs = tuplize(ext_ring_cs)
        super(TestGeom, self).__init__(**kwargs)
class TestGeomSet(object):
    """
    Each attribute of this object is a list of `TestGeom` instances.
    """
    def __init__(self, **kwargs):
        # Every keyword maps a name to a list of geometry dicts; wrap
        # each dict in a TestGeom (with str()-ified keys for py2 **kw).
        for attr, geom_dicts in kwargs.items():
            geoms = [TestGeom(**strconvert(g)) for g in geom_dicts]
            setattr(self, attr, geoms)
class TestDataMixin(object):
    """
    Mixin used for GEOS/GDAL test cases that defines a `geometries`
    property, which returns and/or loads the reference geometry data.
    """
    @property
    def geometries(self):
        # Cache in the module-level global so the gzipped JSON fixture
        # is read and decoded at most once per process.
        global GEOMETRIES
        if GEOMETRIES is None:
            # Load up the test geometry data from fixture into global.
            # NOTE(review): gzf is never closed explicitly; relies on GC
            # -- confirm this is acceptable for the test runner.
            gzf = gzip.GzipFile(os.path.join(TEST_DATA, 'geometries.json.gz'))
            geometries = simplejson.loads(gzf.read())
            GEOMETRIES = TestGeomSet(**strconvert(geometries))
        return GEOMETRIES
| apache-2.0 |
nexec/vkcopy2mp3p | vkcopy2mp3p.py | 1 | 4785 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# Standard library
import json
import os
import re
import shutil
import sqlite3 as db
import StringIO
import subprocess
import sys
import urllib
from random import shuffle
# Third-party
import pycurl
# ---- command-line parsing -------------------------------------------------
# Usage: vkcopy2mp3p.py /path/to/dir count_of_songs [PROFILE]
PROFILE = 'default'
argc = len(sys.argv)
if argc < 3 or argc > 4:
    sys.stderr.write('Usage: %s /path/to/dir count_of_songs [PROFILE]\n'%sys.argv[0])
    sys.exit(1)
PATH_TO_SAVE=sys.argv[1]
count_of_songs = int(sys.argv[2])
if argc==4:
    print "update PROFILE"
    PROFILE=sys.argv[3]
#sys.exit(0)
# find needed profile dir and cookiesdb from it
# Picks the first Firefox profile directory whose name contains PROFILE
# (Firefox names them "<hash>.<profile name>").
cookiedbpath = os.environ['HOME']+'/.mozilla/firefox/'
for name in os.listdir(cookiedbpath):
    if os.path.isdir(cookiedbpath+name) and (PROFILE in name):
        cookiedbpath=cookiedbpath+name+'/cookies.sqlite'
        break
what = '.vk.com'
addHash='undef'
# Read the vk.com session cookies out of Firefox's cookie database and
# join them into a single "name=value; name=value" Cookie header value.
# NOTE(review): the SELECT is built by concatenation; `what` is a local
# constant so this is not injectable today, but a parameterized query
# ("WHERE host=?") would be safer if it ever becomes variable.
connection = db.connect(cookiedbpath)
cursor = connection.cursor()
contents = "name, value"
cursor.execute("SELECT " +contents+ " FROM moz_cookies WHERE host='" +what+ "'")
cookiemas=[]
for row in cursor.fetchall():
    cookiemas.append(row[0]+'='+row[1])
connection.close()
cookiestr='; '.join(cookiemas)
# ---- one-time hash bootstrap ----------------------------------------------
# On first run, scrape the logged-in user's page id from the feed and the
# audio-status hash from the profile page, then cache the hash under
# /tmp/add_audio_vk/addhash so later runs skip the extra requests.
tmpdir = '/tmp/add_audio_vk'
songlist=[]
# this is first run, so lets write hash value
if not os.path.isdir(tmpdir):
    mus = pycurl.Curl()
    ans = StringIO.StringIO()
    # let's figure out our pageid
    mus.setopt(pycurl.HTTPHEADER, [str('Cookie: '+cookiestr)])
    mus.setopt(pycurl.URL, 'https://vk.com/feed')
    mus.setopt(pycurl.FOLLOWLOCATION, 1)
    mus.setopt(pycurl.WRITEFUNCTION, ans.write)
    mus.setopt(pycurl.USERAGENT, "Mozilla/5.0 (X11; Linux x86_64; rv:20.0) Gecko/20100101 Firefox/20.0")
    mus.perform()
    mus.close()
    data=ans.getvalue()
    # Screen-scrape the "my profile" link out of the feed markup.
    # NOTE(review): re.search may return None if VK changes its markup;
    # profile.group(1) would then raise AttributeError.
    profile=re.search('<a href=\"/([^\"]+)\" onclick=\"return nav.go\(this, event, {noback: true}\)\" id=\"myprofile\" class=\"left_row\">',data)
    pageid=profile.group(1)
    # figure out our hash
    mus = pycurl.Curl()
    ans = StringIO.StringIO()
    mus.setopt(pycurl.HTTPHEADER, [str('Cookie: '+cookiestr)])
    mus.setopt(pycurl.URL, 'https://vk.com/'+pageid)
    mus.setopt(pycurl.FOLLOWLOCATION, 1)
    mus.setopt(pycurl.VERBOSE, 0)
    mus.setopt(pycurl.WRITEFUNCTION, ans.write)
    mus.setopt(pycurl.USERAGENT, "Mozilla/5.0 (X11; Linux x86_64; rv:20.0) Gecko/20100101 Firefox/20.0")
    mus.perform()
    mus.close()
    data=ans.getvalue()
    addhash=re.search('Page.audioStatusUpdate\(\'([^\']+)\'\)',data).group(1)
    os.mkdir(tmpdir)
    fwrite=open(tmpdir+'/addhash','w')
    fwrite.write(addhash)
    fwrite.close()
# Cached hash is read back on every run (including the first).
fread=open(tmpdir+'/addhash','r')
HASHSUM=fread.read()
fread.close()
# ---- collect the user's audio list ----------------------------------------
# First request fetches the initial (up to 200) tracks from the mobile
# audio page; the embedded QuickSearch JS blob is stripped down to plain
# JSON and parsed into songlist entries of {link, author, song}.
# looking for first match
mus = pycurl.Curl()
ans = StringIO.StringIO()
mus.setopt(pycurl.URL, 'https://m.vk.com/audio')
mus.setopt(pycurl.HTTPHEADER, [str('Cookie: '+cookiestr),'X-Requested-With: XMLHttpRequest'])
mus.setopt(pycurl.POST, 0)
mus.setopt(pycurl.VERBOSE, 0)
mus.setopt(pycurl.FOLLOWLOCATION, 1)
mus.setopt(pycurl.WRITEFUNCTION, ans.write)
mus.perform()
mus.close()
data=ans.getvalue()
js = json.loads(data)
# js[1]/js[4] both false appears to indicate a logged-out session --
# presumably VK's AJAX envelope; verified only empirically.
if js[1]==False and js[4]==False:
    sys.stderr.write('Firefox\'s profile is unauthorized at vk.com\n')
    sys.exit(1)
page = js[5]
page1=page
# Strip the QuickSearch constructor wrapper and unescape the payload so
# the remaining text is parseable JSON; drop per-track mp3 query strings.
page1 = re.sub(r'cur.au_search = new QuickSearch\(extend\(',r'',page1)
page1 = re.sub(r'\)\);extend\(cur,{module:\'audio\'}\);',r'',page1)
page1 = re.sub(r'\\/',r'/',page1)
page1 = re.sub(r'mp3\?([^"]+)',r'mp3',page1)
page1 = re.sub("(\n|\r).*", '', page1)
page1 = re.sub(',"_new":true\}, \{*','}',page1)
mlist = json.loads(page1)
count=0
for index, mas in mlist['_cache'].iteritems():
    #mas[2] - link
    #mas[3] - author
    #mas[4] - song
    songlist.append(dict([('link',mas[2]),('author',mas[3]),('song',mas[4])]))
    count=count+1
##
# If the first page was full (200 tracks), keep POSTing with a growing
# offset until a page comes back empty.
offset=count
if count==200:
    while (count>0):
        count=0
        mus = pycurl.Curl()
        ans = StringIO.StringIO()
        mus.setopt(pycurl.URL, 'https://m.vk.com/audio')
        mus.setopt(pycurl.HTTPHEADER, [str('Cookie: '+cookiestr),'X-Requested-With: XMLHttpRequest'])
        req = '_ajax=1&offset=%d'%(offset)
        mus.setopt(pycurl.POSTFIELDS, req)
        mus.setopt(pycurl.POST, 1)
        mus.setopt(pycurl.VERBOSE, 0)
        mus.setopt(pycurl.FOLLOWLOCATION, 1)
        mus.setopt(pycurl.WRITEFUNCTION, ans.write)
        mus.perform()
        mus.close()
        data=ans.getvalue()
        data = re.sub(r'\\/',r'/',data)
        data = re.sub(r'mp3\?([^"]+)',r'mp3',data)
        mlist = json.loads(data)
        mlist=mlist[3][0]
        if len(mlist)>0:
            for index, mas in mlist.iteritems():
                songlist.append(dict([('link',mas[2]),('author',mas[3]),('song',mas[4])]))
                count=count+1
        offset=offset+count
print "total count: %d"%(len(songlist))
# Randomize so each run copies a different sample of the library.
shuffle(songlist)
# ---- recreate destination dir and download --------------------------------
# SECURITY FIX: the original interpolated the user-supplied PATH_TO_SAVE
# into an `rm -r`/`mkdir` shell line and the remote track URL into a
# `wget` shell line via os.system(), allowing shell injection from either
# the command line or the VK response.  Use shutil/os for the directory
# reset and an argument-list subprocess call (no shell) for wget.
if os.path.islink(PATH_TO_SAVE) or os.path.isfile(PATH_TO_SAVE):
    os.remove(PATH_TO_SAVE)
elif os.path.isdir(PATH_TO_SAVE):
    shutil.rmtree(PATH_TO_SAVE)
os.mkdir(PATH_TO_SAVE)
for i in range(count_of_songs):
    print("%s - %s" % (songlist[i]['author'], songlist[i]['song']))
    subprocess.call(["wget", "-P", PATH_TO_SAVE, songlist[i]['link']])
print("complete")
sys.exit(0)
| gpl-2.0 |
cakeboss893/volatility | volatility/plugins/overlays/mac/mac.py | 44 | 33471 | # Volatility
# Copyright (C) 2010 Brendan Dolan-Gavitt
# Copyright (c) 2011 Michael Cohen <scudette@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import re,copy
import sys, os
import zipfile
import struct
import time
import volatility.plugins as plugins
import volatility.debug as debug
import volatility.obj as obj
import volatility.plugins.overlays.basic as basic
import volatility.addrspace as addrspace
import volatility.scan as scan
import volatility.plugins.addrspaces.amd64 as amd64
import volatility.plugins.addrspaces.intel as intel
import volatility.plugins.overlays.native_types as native_types
import volatility.utils as utils
import volatility.plugins.mac.common as common
# Darwin x86_64 is LP64: 'long' is 8 bytes.  Patch a deep copy of the
# generic x64 native types rather than mutating the shared original.
x64_native_types = copy.deepcopy(native_types.x64_native_types)
x64_native_types['long'] = [8, '<q']
x64_native_types['unsigned long'] = [8, '<Q']
class catfishScan(scan.BaseScanner):
    """ Scanner for Catfish string for Mountain Lion """
    # Populated per-instance in __init__ with a MultiStringFinderCheck
    # over the supplied needles.
    checks = []
    def __init__(self, needles = None):
        self.needles = needles
        self.checks = [ ("MultiStringFinderCheck", {'needles':needles}) ]
        scan.BaseScanner.__init__(self)
    def scan(self, address_space, offset = 0, maxlen = None):
        # Thin generator wrapper over BaseScanner.scan; yields each hit
        # offset of any needle in the address space.
        for offset in scan.BaseScanner.scan(self, address_space, offset, maxlen):
            yield offset
class VolatilityDTB(obj.VolatilityMagic):
    """A scanner for DTB values."""
    def _get_dtb_pre_m_lion(self):
        # DTB location for kernels before Mountain Lion: read the idle
        # page-table symbol straight from the profile's symbol table.
        profile = self.obj_vm.profile
        if self.obj_vm.profile.metadata.get('memory_model', '32bit') == "32bit":
            ret = profile.get_symbol("_IdlePDPT")
            # on 10.5.x the PDTD symbol is a pointer instead of an array like 10.6 and 10.7
            if ret % 0x1000:
                ret = self.obj_vm.read(ret, 4)
                ret = struct.unpack("<I", ret)[0]
        else:
            ret = profile.get_symbol("_IdlePML4")
            # so it seems some kernels don't define this as the physical address, but actually the virtual
            # while others define it as the physical, easy enough to figure out on the fly
            if ret > 0xffffff8000000000:
                ret = ret - 0xffffff8000000000
        return ret
    ## Based off volafox's method for finding vm_kernel_shift through loGlo & hardcoded Catfish
    def _get_dtb_m_lion(self):
        # Mountain Lion+ kernels are KASLR-shifted: derive the shift
        # either from the --shift option or by scanning for the
        # "Catfish " signature, then read IdlePML4 through a temporary
        # AS built on the boot page tables.
        tbl = self.obj_vm.profile.sys_map["kernel"]
        config = self.obj_vm.get_config()
        if config.SHIFT:
            shift_address = config.SHIFT
        else:
            scanner = catfishScan(needles = ["Catfish \x00\x00"])
            for catfish_offset in scanner.scan(self.obj_vm):
                shift_address = catfish_offset - (tbl["_lowGlo"][0][0] % 0xFFFFFF80)
                break
        self.obj_vm.profile.shift_address = shift_address
        bootpml4 = (tbl["_BootPML4"][0][0] % 0xFFFFFF80) + shift_address
        boot_pml4_dtb = amd64.AMD64PagedMemory(self.obj_vm, config, dtb = bootpml4)
        idlepml4_addr = (tbl['_IdlePML4'][0][0]) + shift_address
        idlepml4_ptr = obj.Object("unsigned int", offset = idlepml4_addr, vm = boot_pml4_dtb)
        return idlepml4_ptr.v()
    def generate_suggestions(self):
        # _BootPML4 only exists on Mountain Lion and later; use its
        # presence to pick the lookup strategy.
        profile = self.obj_vm.profile
        bootpml = profile.get_symbol("_BootPML4")
        if bootpml:
            ret = self._get_dtb_m_lion()
        else:
            ret = self._get_dtb_pre_m_lion()
        yield ret
class VolatilityMacIntelValidAS(obj.VolatilityMagic):
    """An object to check that an address space is a valid Mac Intel Paged space"""
    def _set_profile_metadata(self, version):
        # Parse "Darwin Kernel Version <major>.<minor>.<patch>: ..." and
        # stash major/minor on the profile for version-dependent plugins.
        start = version[len("Darwin Kernel Version "):]
        idx = start.find(":")
        (major, minor, _) = [int(x) for x in start[:idx].split(".")]
        setattr(self.obj_vm.profile, '_md_major', major)
        setattr(self.obj_vm.profile, '_md_minor', minor)
    def generate_suggestions(self):
        # The AS is considered valid when the kernel's _version string is
        # readable at its symbol address and starts with "Darwin".
        version_addr = self.obj_vm.profile.get_symbol("_version")
        string = self.obj_vm.read(version_addr, 60)
        if string and string.startswith("Darwin"):
            self._set_profile_metadata(string)
            yield True
        else:
            yield False
class vnode(obj.CType):
    """Accessors for the XNU vnode structure."""
    def _do_calc_path(self, ret, vnodeobj, vname):
        # Recursively collect name pointers from the leaf up to the
        # filesystem root, crossing mount points via mnt_vnodecovered.
        # 0x000001 is the VROOT flag (root of its filesystem).
        if vnodeobj == None:
            return
        if vname:
            ret.append(vname)
        if vnodeobj.v_flag.v() & 0x000001 != 0 and vnodeobj.v_mount.v() != 0:
            if vnodeobj.v_mount.mnt_vnodecovered.v() != 0:
                self._do_calc_path(ret, vnodeobj.v_mount.mnt_vnodecovered, vnodeobj.v_mount.mnt_vnodecovered.v_name)
        else:
            self._do_calc_path(ret, vnodeobj.v_parent, vnodeobj.v_parent.v_name)
    def full_path(self):
        """Return the absolute path of this vnode, or "/" for the system root."""
        # VROOT + mounted + MNT_ROOTFS (0x00004000) means this vnode is
        # the root of the root filesystem itself.
        if self.v_flag.v() & 0x000001 != 0 and self.v_mount.v() != 0 and self.v_mount.mnt_flag.v() & 0x00004000 != 0:
            ret = "/"
        else:
            elements = []
            files = []
            self._do_calc_path(elements, self, self.v_name)
            # Names were collected leaf-first; reverse into root-first order.
            elements.reverse()
            for e in elements:
                files.append(str(e.dereference()))
            ret = "/".join(files)
            if ret:
                ret = "/" + ret
        return ret
class fileglob(obj.CType):
    """Accessors for the BSD fileglob (open file) structure."""
    @property
    def fg_type(self):
        # Older profiles carry the type directly in an fg_type member;
        # newer ones moved it to the operations vector (fg_ops.fo_type).
        ret = self.members.get("fg_type")
        if ret:
            ret = self.m("fg_type")
        else:
            ret = self.fg_ops.fo_type
        ret = str(ret)
        return ret
class proc(obj.CType):
    """Accessors for the BSD `struct proc` process descriptor."""
    @property
    def p_gid(self):
        """Primary group id of the process, or "-" if p_ucred is invalid."""
        cred = self.p_ucred
        if not cred.is_valid():
            return "-"
        # Newer kernels nest the POSIX ids under cr_posix; older ones
        # keep them directly on the ucred structure.
        if hasattr(cred, "cr_posix"):
            ret = cred.cr_posix.cr_groups[0]
        else:
            ret = cred.cr_groups[0]
        return ret
    @property
    def p_uid(self):
        """User id of the process, or "-" if p_ucred is invalid."""
        cred = self.p_ucred
        if not cred.is_valid():
            return "-"
        if hasattr(cred, "cr_posix"):
            ret = cred.cr_posix.cr_uid
        else:
            ret = cred.cr_uid
        return ret
    def get_process_address_space(self):
        """Build this process's address space from its pmap.

        Picks IA32 PAE or AMD64 paging based on pm_task_map and whether
        the host is 64-bit capable; returns a NoneObject for unknown
        task-map values.
        """
        cr3 = self.task.map.pmap.pm_cr3
        map_val = str(self.task.map.pmap.pm_task_map or '')
        # if the machine is 64 bit capable
        is_64bit_cap = common.is_64bit_capable(self.obj_vm)
        if map_val == "TASK_MAP_32BIT" and is_64bit_cap:
            # A 32 bit process on a 64 bit system, requires 64 bit paging
            # Catch exceptions when trying to get a process AS for kernel_task
            # which isn't really even a process. It needs to use the default cr3
            try:
                proc_as = amd64.AMD64PagedMemory(self.obj_vm.base,
                    self.obj_vm.get_config(), dtb = cr3, skip_as_check = True)
            except IOError:
                proc_as = self.obj_vm
        elif map_val == "TASK_MAP_32BIT":
            # A 32 bit process on a 32 bit system need
            # bypass b/c no sharing of address space
            proc_as = intel.IA32PagedMemoryPae(self.obj_vm.base,
                self.obj_vm.get_config(), dtb = cr3,
                skip_as_check = True)
        elif (map_val == "TASK_MAP_64BIT_SHARED" and
                self.obj_vm.profile.metadata.get('memory_model', '32bit') == "32bit"):
            # A 64 bit process running on a 32 bit system
            proc_as = amd64.AMD64PagedMemory(self.obj_vm.base,
                self.obj_vm.get_config(), dtb = cr3,
                skip_as_check = True)
        elif map_val in ["TASK_MAP_64BIT", "TASK_MAP_64BIT_SHARED"]:
            # A 64 bit process on a 64 bit system
            cr3 &= 0xFFFFFFE0
            proc_as = amd64.AMD64PagedMemory(self.obj_vm.base,
                self.obj_vm.get_config(), dtb = cr3,
                skip_as_check = True)
        else:
            proc_as = obj.NoneObject("Cannot get process AS for pm_task_map: {0}".format(map_val))
        return proc_as
    def start_time(self):
        """Return the process start time as a UTC UnixTimeStamp, or ""
        when the stored timestamp cannot be packed into 32 bits."""
        nsecs_per = 1000000
        start_time = self.p_start
        start_secs = start_time.tv_sec + (start_time.tv_usec / nsecs_per)
        # convert the integer as little endian. we catch struct.error
        # here because if the process has exited (i.e. detected with mac_dead_procs)
        # then the timestamp may not be valid. start_secs could be negative
        # or higher than can fit in a 32-bit "I" integer field.
        try:
            data = struct.pack("<I", start_secs)
        except struct.error:
            return ""
        bufferas = addrspace.BufferAddressSpace(self.obj_vm.get_config(), data = data)
        dt = obj.Object("UnixTimeStamp", offset = 0, vm = bufferas, is_utc = True)
        return dt
    def get_proc_maps(self):
        """Yield the task's vm_map entries, bounded by hdr.nentries.

        NOTE: the local `map` shadows the builtin; left unchanged here.
        """
        map = self.task.map.hdr.links.next
        for i in xrange(self.task.map.hdr.nentries):
            if not map:
                break
            yield map
            map = map.links.next
    def search_process_memory(self, s):
        """Search process memory.
        @param s: a list of strings like ["one", "two"]
        """
        # Allow for some overlap in case objects are
        # right on page boundaries
        overlap = 1024
        scan_blk_sz = 1024 * 1024 * 10
        addr_space = self.get_process_address_space()
        for vma in self.get_proc_maps():
            offset = vma.links.start
            out_of_range = vma.links.start + (vma.links.end - vma.links.start)
            while offset < out_of_range:
                # Read some data and match it.
                to_read = min(scan_blk_sz + overlap, out_of_range - offset)
                data = addr_space.zread(offset, to_read)
                if not data:
                    break
                for x in s:
                    for hit in utils.iterfind(data, x):
                        yield offset + hit
                # Advance by the block size only, so the overlap region is
                # re-scanned at the start of the next block.
                offset += min(to_read, scan_blk_sz)
    def get_arguments(self):
        """Reassemble the argv strings stored below the user stack,
        returning them joined by spaces (or "" when unavailable)."""
        proc_as = self.get_process_address_space()
        # We need a valid process AS to continue
        if not proc_as:
            return ""
        argsstart = self.user_stack - self.p_argslen
        # Stack location may be paged out or not contain any args
        if (not proc_as.is_valid_address(argsstart) or
                self.p_argslen == 0 or self.p_argc == 0):
            return ""
        # Add one because the first two are usually duplicates
        argc = self.p_argc + 1
        args = []
        while argc > 0:
            arg = obj.Object("String", offset = argsstart, vm = proc_as, length = 256)
            if not arg:
                break
            # Initial address of the next string
            argsstart += len(str(arg)) + 1
            # Very first one is aligned in some crack ass way
            if len(args) == 0:
                while (proc_as.read(argsstart, 1) == "\x00" and
                        argsstart < self.user_stack):
                    argsstart += 1
                args.append(arg)
            else:
                # Only add this string if its not a duplicate of the first
                if str(arg) != str(args[0]):
                    args.append(arg)
            argc -= 1
        return " ".join([str(s) for s in args])
class rtentry(obj.CType):
    """Accessors for routing-table entries (struct rtentry)."""
    def get_time(self):
        """Route calendar timestamp as a UnixTimeStamp, or "N/A" on
        profiles whose rtentry lacks base_calendartime."""
        if not hasattr(self, "base_calendartime"):
            return "N/A"
        data = struct.pack("<I", self.base_calendartime)
        bufferas = addrspace.BufferAddressSpace(self.obj_vm.get_config(), data = data)
        dt = obj.Object("UnixTimeStamp", offset = 0, vm = bufferas, is_utc = True)
        return dt
    @property
    def sent(self):
        """Transmitted packet count, or "N/A" when rt_stats is absent."""
        if hasattr(self, "rt_stats"):
            ret = self.rt_stats.nstat_txpackets
        else:
            ret = "N/A"
        return ret
    @property
    def rx(self):
        """Received packet count, or "N/A" when rt_stats is absent.

        BUG FIX: the original computed `ret` but never returned it, so
        this property always evaluated to None; it also read rt_expire
        instead of the receive counter.  Mirror `sent` and return the
        nstat rx counter.
        """
        if hasattr(self, "rt_stats"):
            ret = self.rt_stats.nstat_rxpackets
        else:
            ret = "N/A"
        return ret
    @property
    def delta(self):
        """Seconds until route expiry relative to base_uptime (0 = never)."""
        if self.rt_expire == 0:
            ret = 0
        else:
            ret = self.rt_expire - self.base_uptime
        return ret
    @property
    def name(self):
        """Interface name + unit, e.g. "en0"."""
        return "{}{}".format(self.rt_ifp.if_name.dereference(), self.rt_ifp.if_unit)
    @property
    def source_ip(self):
        """Address stored in the radix-node leaf key for this route."""
        return self.rt_nodes[0].rn_u.rn_leaf.rn_Key.dereference_as("sockaddr").get_address()
    @property
    def dest_ip(self):
        """Gateway address of the route."""
        return self.rt_gateway.get_address()
class queue_entry(obj.CType):
    """Accessor for an XNU queue_entry doubly-linked list node."""
    def walk_list(self, list_head):
        # Walk forward then backward from this node, yielding each entry
        # as a `task` until the walk returns to list_head.  Both pointer
        # directions are followed to cover partially-corrupt lists.
        n = self.next.dereference_as("task")
        while n and n.obj_offset != list_head:
            yield n
            n = n.tasks.next.dereference_as("task")
        p = self.prev.dereference_as("task")
        while p and p.obj_offset != list_head:
            yield p
            p = p.tasks.prev.dereference_as("task")
class zone(obj.CType):
    """Accessors for XNU kernel allocator zones."""
    def _do_calc_path(self):
        pass
    def _get_from_active_zones(self):
        # Compute element offsets from the active_zones list.
        # NOTE(review): `sz = 16` is a hard-coded header size (see TODO
        # below) and the walk is capped at 4 entries -- looks like an
        # unfinished implementation; confirm before relying on it.
        ret = []
        first_elem = self.active_zones
        elem = first_elem
        # TODO
        sz = 16
        i = 0
        while elem != first_elem.v() or i == 0:
            a = elem.v()
            b = sz
            off = a + b
            ret.append(off)
            i = i + 1
            if i == 4:
                break
            elem = elem.m("next")
        return ret
    def get_active_elements(self, elem_type, zone_idx=-1):
        """Instantiate `elem_type` objects for the zone's active elements."""
        ret = []
        if hasattr(self, "active_zones"):
            objs = self._get_from_active_zones()
        else:
            # debug.error aborts plugin execution on profiles without
            # active_zones support.
            debug.error("zone does not have active zones.")
        for o in objs:
            val = obj.Object(elem_type, offset = o, vm = self.obj_vm)
            ret.append(val)
        return ret
    def get_free_elements(self, elem_type):
        """Instantiate `elem_type` objects for the zone's free list."""
        ret = []
        nxt = obj.Object("zone_free_element", offset = self.free_elements, vm = self.obj_vm)
        while nxt:
            o = nxt.obj_offset
            val = obj.Object(elem_type, offset = o, vm = self.obj_vm)
            ret.append(val)
            nxt = nxt.m("next")
        return ret
class sysctl_oid(obj.CType):
    """Accessors for sysctl OID entries (struct sysctl_oid)."""
    def get_perms(self):
        """
        # define CTLFLAG_RD 0x80000000 /* Allow reads of variable */
        # define CTLFLAG_WR 0x40000000 /* Allow writes to the variable */
        # define CTLFLAG_LOCKED 0x00800000 /* node will handle locking itself */
        """
        # Render the flag bits above as a fixed-width "RWL"/"---" string.
        ret = ""
        checks = [0x80000000, 0x40000000, 0x00800000]
        perms = ["R", "W", "L"]
        for (i, c) in enumerate(checks):
            if c & self.oid_kind:
                ret = ret + perms[i]
            else:
                ret = ret + "-"
        return ret
    def get_ctltype(self):
        """
        #define CTLTYPE_NODE 1
        #define CTLTYPE_INT 2 /* name describes an integer */
        #define CTLTYPE_STRING 3 /* name describes a string */
        #define CTLTYPE_QUAD 4 /* name describes a 64-bit number */
        #define CTLTYPE_OPAQUE 5 /* name describes a structure */
        #define CTLTYPE_STRUCT CTLTYPE_OPAQUE /* name describes a structure */
        """
        types = {1: 'CTLTYPE_NODE', 2: 'CTLTYPE_INT', 3: 'CTLTYPE_STRING', 4: 'CTLTYPE_QUAD', 5: 'CTLTYPE_OPAQUE'}
        # The low nibble of oid_kind encodes the value type.
        ctltype = self.oid_kind & 0xf
        try:
            return types[ctltype]
        except KeyError:
            return "INVALID -1"
class OSString(obj.CType):
    """IOKit OSString wrapper: stringify via the embedded buffer pointer."""
    def __str__(self):
        # `string` points at the character data; `length` bounds the read.
        string_object = obj.Object("String", offset = self.string, vm = self.obj_vm, length = self.length)
        return str(string_object or '')
class vm_map_entry(obj.CType):
    """Accessors for vm_map_entry (a region of a task's address map)."""
    def get_perms(self):
        """Render the entry's protection bits as an "rwx"-style string."""
        permask = "rwx"
        perms = ""
        # 1 = read, 3 = read|write, 5 = read|execute bit patterns tested
        # against the protection field.
        for (ctr, i) in enumerate([1, 3, 5]):
            if (self.protection & i) == i:
                perms = perms + permask[ctr]
            else:
                perms = perms + "-"
        return perms
    def get_path(self):
        """Return the backing vnode's path, "sub_map", or ""."""
        vnode = self._get_vnode()
        if type(vnode) == str and vnode == "sub_map":
            ret = vnode
        elif vnode:
            # Build the path root-first by walking v_parent links.
            path = []
            while vnode:
                path.append(str(vnode.v_name.dereference() or ''))
                vnode = vnode.v_parent
            path.reverse()
            ret = "/".join(path)
        else:
            ret = ""
        return ret
    def _get_vnode(self):
        # Returns the vnode behind this mapping, the string "sub_map"
        # for sub-map entries, or None for anonymous memory.
        if self.is_sub_map == 1:
            return "sub_map"
        # find_vnode_object
        vnode_object = self.object.vm_object
        # Follow the shadow chain to the bottom-most vm_object.
        while vnode_object.shadow.dereference() != None:
            vnode_object = vnode_object.shadow.dereference()
        ops = vnode_object.pager.mo_pager_ops.v()
        # Only vnode-pager-backed objects map back to a file.
        if ops == self.obj_vm.profile.get_symbol("_vnode_pager_ops"):
            vpager = obj.Object("vnode_pager", offset = vnode_object.pager, vm = self.obj_vm)
            ret = vpager.vnode_handle
        else:
            ret = None
        return ret
class socket(obj.CType):
    """Accessors for BSD sockets."""
    @property
    def family(self):
        # Address family from the protocol's domain (2 = AF_INET,
        # 30 = AF_INET6 on Darwin).
        return self.so_proto.pr_domain.dom_family
    @property
    def protocol(self):
        """"TCP", "UDP", or "" for other IP protocol numbers."""
        proto = self.so_proto.pr_protocol
        if proto == 6:
            ret = "TCP"
        elif proto == 17:
            ret = "UDP"
        else:
            ret = ""
        return ret
    def _get_tcp_state(self):
        # Index the tcpcb's t_state into the canonical state names.
        tcp_states = (
              "CLOSED",
              "LISTEN",
              "SYN_SENT",
              "SYN_RECV",
              "ESTABLISHED",
              "CLOSE_WAIT",
              "FIN_WAIT1",
              "CLOSING",
              "LAST_ACK",
              "FIN_WAIT2",
              "TIME_WAIT")
        inpcb = self.so_pcb.dereference_as("inpcb")
        tcpcb = inpcb.inp_ppcb.dereference_as("tcpcb")
        return tcp_states[tcpcb.t_state]
    @property
    def state(self):
        """TCP connection state name, or "" for non-TCP sockets."""
        if self.so_proto.pr_protocol == 6:
            ret = self._get_tcp_state()
        else:
            ret = ""
        return ret
    def _parse_ipv4(self, pcb):
        # [local ip, local port, remote ip, remote port] from an inpcb.
        lip = pcb.inp_dependladdr.inp46_local.ia46_addr4.s_addr.v()
        lport = pcb.inp_lport
        rip = pcb.inp_dependfaddr.inp46_foreign.ia46_addr4.s_addr.v()
        rport = pcb.inp_fport
        return [lip, lport, rip, rport]
    def _parse_ipv6(self, pcb):
        lip = pcb.inp_dependladdr.inp6_local.__u6_addr.v()
        lport = pcb.inp_lport
        rip = pcb.inp_dependfaddr.inp6_foreign.__u6_addr.v()
        rport = pcb.inp_fport
        return [lip, lport, rip, rport]
    def get_connection_info(self):
        """Return [local ip, local port, remote ip, remote port]."""
        ipcb = self.so_pcb.dereference_as("inpcb")
        if self.family == 2:
            ret = self._parse_ipv4(ipcb)
        else:
            ret = self._parse_ipv6(ipcb)
        return ret
class sockaddr_dl(obj.CType):
    """Link-layer sockaddr: renders the hardware address as hex pairs."""
    def v(self):
        """Get the value of the sockaddr_dl object."""
        # The MAC bytes live in sdl_data after the interface name
        # (sdl_nlen bytes); out-of-range reads render as 00.
        ret = ""
        for i in xrange(self.sdl_alen):
            try:
                e = self.sdl_data[self.sdl_nlen + i]
                e = ord(e.v())
            except IndexError:
                e = 0
            ret = ret + "%.02x:" % e
        # Drop the trailing colon left by the loop.
        if ret and ret[-1] == ":":
            ret = ret[:-1]
        return ret
class sockaddr(obj.CType):
    """Generic sockaddr: dispatches on sa_family to render the address."""
    def get_address(self):
        # Returns an IPv4/IPv6 address object, a MAC string for AF_LINK,
        # or "" for unsupported families.
        family = self.sa_family
        ip = ""
        if family == 2: # AF_INET
            addr_in = obj.Object("sockaddr_in", offset = self.obj_offset, vm = self.obj_vm)
            ip = addr_in.sin_addr.s_addr.v()
        elif family == 30: # AF_INET6
            addr_in6 = obj.Object("sockaddr_in6", offset = self.obj_offset, vm = self.obj_vm)
            ip = addr_in6.sin6_addr.__u6_addr.v()
        elif family == 18: # AF_LINK
            addr_dl = obj.Object("sockaddr_dl", offset = self.obj_offset, vm = self.obj_vm)
            ip = addr_dl.v()
        return ip
def exec_vtypes(filename):
    # Despite the name, `filename` is the *source text* of a .vtypes file
    # (the caller passes profpkg.read(...)).  The code is exec'd with no
    # builtins and must define a `mac_types` dict.
    # SECURITY NOTE: exec of profile-package content runs arbitrary code;
    # only load profile zips from trusted sources.
    env = {}
    exec(filename, dict(__builtins__ = None), env)
    return env["mac_types"]
def parse_dsymutil(data, module):
    """Parse the symbol file.

    `data` is the textual dsymutil symbol-table dump; `module` names the
    bucket the symbols are filed under (normally "kernel").

    Returns (arch, sys_map) where arch is "32bit" or "64bit" and sys_map
    maps module -> symbol name -> [(address, type)].  Returns None when
    no "Symbol table for" header line was seen.

    Changes from the original: the unused `ents = line.split()` local and
    the unreachable `else` after a guaranteed-true membership test were
    removed, and the symbol regex is compiled once instead of per line.
    """
    sys_map = {}
    sys_map[module] = {}
    # Symbols whose LOWER duplicate address must be kept; for everything
    # else the higher address wins.
    want_lower = ["_IdlePML4"]
    arch = ""
    sym_re = re.compile(r"\[.*?\)\s+[0-9A-Fa-z]+\s+\d+\s+([0-9A-Fa-f]+)\s'(\w+)'")
    # get the system map
    for line in data.splitlines():
        match = sym_re.search(line)
        if match:
            (addr, name) = match.groups()
            addr = int(addr, 16)
            if addr == 0:
                continue
            if not name in sys_map[module]:
                sys_map[module][name] = [(0, "default value")]
            # every symbol is in the symbol table twice
            # except for the entries in 'want_lower', we need the higher address for all
            oldaddr = sys_map[module][name][0][0]
            if oldaddr > addr and name not in want_lower:
                pass
            else:
                sys_map[module][name] = [(addr, "sym type?")]
        elif line.find("Symbol table for") != -1:
            if line.find("i386") != -1:
                arch = "32bit"
            else:
                arch = "64bit"
    if arch == "":
        return None
    return arch, sys_map
def MacProfileFactory(profpkg):
    """Build a Mac profile class from a profile zip package.

    `profpkg` is an open zipfile containing a *.symbol.dsymutil symbol
    dump and one or more .vtypes files.  Returns a new obj.Profile
    subclass closing over the parsed data, or None when either the
    symbol map or the vtypes are missing.
    """
    vtypesvar = {}
    sysmapvar = {}
    memmodel, arch = "32bit", "x86"
    profilename = os.path.splitext(os.path.basename(profpkg.filename))[0]
    for f in profpkg.filelist:
        if 'symbol.dsymutil' in f.filename.lower():
            memmodel, sysmap = parse_dsymutil(profpkg.read(f.filename), "kernel")
            if memmodel == "64bit":
                arch = "x64"
            sysmapvar.update(sysmap)
            debug.debug("{2}: Found system file {0} with {1} symbols".format(f.filename, len(sysmapvar.keys()), profilename))
        elif f.filename.endswith(".vtypes"):
            v = exec_vtypes(profpkg.read(f.filename))
            vtypesvar.update(v)
    if not sysmapvar or not vtypesvar:
        # Might be worth throwing an exception here?
        return None
    class AbstractMacProfile(obj.Profile):
        __doc__ = "A Profile for Mac " + profilename + " " + arch
        _md_os = "mac"
        _md_memory_model = memmodel
        # Native type tables per memory model; the x64 table is the
        # Darwin LP64 variant patched at module import time.
        native_mapping = {'32bit': native_types.x86_native_types,
                          '64bit': x64_native_types}
        def __init__(self, *args, **kwargs):
            self.sys_map = {}
            # KASLR slide; set by VolatilityDTB for 10.8+ kernels.
            self.shift_address = 0
            obj.Profile.__init__(self, *args, **kwargs)
        def clear(self):
            """Clear out the system map, and everything else"""
            self.sys_map = {}
            obj.Profile.clear(self)
        def reset(self):
            """Reset the vtypes, sysmap and apply modifications, then compile"""
            self.clear()
            self.load_vtypes()
            self.load_sysmap()
            self.load_modifications()
            self.compile()
        def load_vtypes(self):
            """Loads up the vtypes data"""
            ntvar = self.metadata.get('memory_model', '32bit')
            self.native_types = copy.deepcopy(self.native_mapping.get(ntvar))
            self.vtypes.update(vtypesvar)
        def load_sysmap(self):
            """Loads up the system map data"""
            self.sys_map.update(sysmapvar)
        # Returns a list of (name, addr)
        def get_all_symbols(self, module = "kernel"):
            """ Gets all the symbol tuples for the given module """
            ret = []
            symtable = self.sys_map
            if module in symtable:
                mod = symtable[module]
                for (name, addrs) in mod.items():
                    addr = addrs[0][0]
                    # Apply the KASLR slide to non-zero addresses.
                    if self.shift_address and addr:
                        addr = addr + self.shift_address
                    ret.append([name, addr])
            else:
                debug.info("All symbols requested for non-existent module %s" % module)
            return ret
        def get_all_addresses(self, module = "kernel"):
            """ Gets all the symbol addresses for the given module """
            # returns a hash table for quick looks
            # the main use of this function is to see if an address is known
            ret = {}
            symbols = self.get_all_symbols(module)
            for (_name, addr) in symbols:
                ret[addr] = 1
            return ret
        def get_symbol_by_address(self, module, sym_address):
            # Reverse lookup: name for an (optionally slid) address;
            # returns "" when nothing matches.
            ret = ""
            symtable = self.sys_map
            mod = symtable[module]
            for (name, addrs) in mod.items():
                for (addr, addr_type) in addrs:
                    if sym_address == addr or sym_address == self.shift_address + addr:
                        ret = name
                        break
            return ret
        def get_all_symbol_names(self, module = "kernel"):
            symtable = self.sys_map
            if module in symtable:
                ret = symtable[module].keys()
            else:
                # debug.error aborts execution, so `ret` is never unbound
                # on the fall-through return below.
                debug.error("get_all_symbol_names called on non-existent module")
            return ret
        def get_next_symbol_address(self, sym_name, module = "kernel"):
            """
            This is used to find the address of the next symbol in the profile
            For some data structures, we cannot determine their size automaticlaly so this
            can be used to figure it out on the fly
            """
            high_addr = 0xffffffffffffffff
            table_addr = self.get_symbol(sym_name, module = module)
            addrs = self.get_all_addresses(module = module)
            for addr in addrs.keys():
                if table_addr < addr < high_addr:
                    high_addr = addr
            return high_addr
        def get_symbol(self, sym_name, nm_type = "", module = "kernel"):
            """Gets a symbol out of the profile
            sym_name -> name of the symbol
            nm_tyes -> types as defined by 'nm' (man nm for examples)
            module -> which module to get the symbol from, default is kernel, otherwise can be any name seen in 'lsmod'
            This fixes a few issues from the old static hash table method:
            1) Conflicting symbols can be handled, if a symbol is found to conflict on any profile,
            then the plugin will need to provide the nm_type to differentiate, otherwise the plugin will be errored out
            2) Can handle symbols gathered from modules on disk as well from the static kernel
            symtable is stored as a hash table of:
            symtable[module][sym_name] = [(symbol address, symbol type), (symbol addres, symbol type), ...]
            The function has overly verbose error checking on purpose...
            """
            symtable = self.sys_map
            ret = None
            # check if the module is there...
            if module in symtable:
                mod = symtable[module]
                # check if the requested symbol is in the module
                if sym_name in mod:
                    sym_list = mod[sym_name]
                    # if a symbol has multiple definitions, then the plugin needs to specify the type
                    if len(sym_list) > 1:
                        if nm_type == "":
                            debug.error("Requested symbol {0:s} in module {1:s} has multiple definitions and no type given\n".format(sym_name, module))
                        else:
                            for (addr, stype) in sym_list:
                                if stype == nm_type:
                                    ret = addr
                                    break
                            if ret == None:
                                debug.error("Requested symbol {0:s} in module {1:s} could not be found\n".format(sym_name, module))
                    else:
                        # get the address of the symbol
                        ret = sym_list[0][0]
                else:
                    debug.debug("Requested symbol {0:s} not found in module {1:s}\n".format(sym_name, module))
            else:
                debug.info("Requested module {0:s} not found in symbol table\n".format(module))
            # Apply the KASLR slide to a successful lookup.
            if self.shift_address and ret:
                ret = ret + self.shift_address
            return ret
    cls = AbstractMacProfile
    # Profile class names look like Mac10_8_3x64 etc.
    cls.__name__ = 'Mac' + profilename.replace('.', '_') + arch
    return cls
################################
# Track down the zip files
# Push them through the factory
# Check whether ProfileModifications will work
new_classes = []
for path in set(plugins.__path__):
    for path, _, files in os.walk(path):
        for fn in files:
            if zipfile.is_zipfile(os.path.join(path, fn)):
                # FIX: MacProfileFactory returns None for zips that lack a
                # symbol file or vtypes; the original appended that None to
                # new_classes.  Keep only real profile classes.
                profile_cls = MacProfileFactory(zipfile.ZipFile(os.path.join(path, fn)))
                if profile_cls is not None:
                    new_classes.append(profile_cls)
class MacOverlay(obj.ProfileModification):
    """Applies the mac_overlay vtype fixups to every mac profile."""
    conditions = {'os': lambda x: x == 'mac'}
    before = ['BasicObjectClasses']
    def modification(self, profile):
        profile.merge_overlay(mac_overlay)
class MacObjectClasses(obj.ProfileModification):
    """Registers the wrapper classes defined above on mac profiles."""
    conditions = {'os': lambda x: x == 'mac'}
    before = ['BasicObjectClasses']
    def modification(self, profile):
        profile.object_classes.update({
            'VolatilityDTB': VolatilityDTB,
            'VolatilityMacIntelValidAS' : VolatilityMacIntelValidAS,
            'proc' : proc,
            'fileglob' : fileglob,
            'vnode' : vnode,
            'socket' : socket,
            'zone' : zone,
            'OSString' : OSString,
            'OSString_class' : OSString,
            'sysctl_oid' : sysctl_oid,
            'IpAddress': basic.IpAddress,
            'Ipv6Address': basic.Ipv6Address,
            'sockaddr' : sockaddr,
            'sockaddr_dl' : sockaddr_dl,
            'vm_map_entry' : vm_map_entry,
            'rtentry' : rtentry,
            'queue_entry' : queue_entry,
            })
# Overlay merged on top of the generated vtypes: retypes string-like and
# address-like members to Volatility String/IpAddress objects and installs
# the VOLATILITY_MAGIC entries (DTB scanner, AS validation).
mac_overlay = {
    'VOLATILITY_MAGIC': [None, {
        'DTB' : [ 0x0, ['VolatilityDTB', dict(configname = "DTB")]],
        'IA32ValidAS' : [ 0x0, ['VolatilityMacIntelValidAS']],
        'AMD64ValidAS' : [ 0x0, ['VolatilityMacIntelValidAS']],
        }],
    'session' : [ None, {
        's_login' : [ None , ['String', dict(length = 256)]],
        }],
    'kfs_event' : [ None, {
        'str' : [ None, ['pointer', ['String', dict(length = 256)]]],
        }],
    'zone' : [ None, {
        'zone_name': [ None, ['pointer', ['String', dict(length = 256)]]],
        }],
    'mac_policy_conf' : [ None, {
        'mpc_name' : [ None, ['pointer', ['String', dict(length = 256)]]],
        }],
    'proc' : [ None, {
        'p_comm' : [ None, ['String', dict(length = 17)]],
        'task' : [ None, ['pointer', ['task']]],
        }],
    'ifnet' : [ None, {
        'if_name' : [ None, ['pointer', ['String', dict(length = 256)]]],
        }],
    'vnode' : [ None, {
        'v_name' : [ None, ['pointer', ['String', dict(length = 256)]]],
        }],
    'boot_args' : [ None, {
        'CommandLine' : [ None, ['String', dict(length = 1024)]],
        }],
    'vfsstatfs' : [ None, {
        'f_fstypename' : [ None, ['String', dict(length = 16)]],
        'f_mntonname' : [ None, ['String', dict(length = 1024)]],
        'f_mntfromname' : [ None, ['String', dict(length = 1024)]],
        }],
    'kmod_info' : [ None, {
        'name' : [ None, ['String', dict(length = 64)]],
        'version' : [ None, ['String', dict(length = 64)]],
        }],
    'ipf_filter' : [ None, {
        'name' : [ None, ['pointer', ['String', dict(length = 256)]]],
        }],
    'sysctl_oid' : [ None, {
        'oid_name' : [ None, ['pointer', ['String', dict(length = 256)]]],
        }],
    'sockaddr_un': [ None, {
        'sun_path' : [ None, ['String', dict(length = 104)]],
        }],
    'in_addr' : [ None, {
        's_addr' : [ None, ['IpAddress']],
        }],
    'in6_addr' : [ None, {
        '__u6_addr' : [ None, ['Ipv6Address']],
        }],
    'inpcb' : [ None, {
        'inp_lport' : [ None, ['unsigned be short']],
        'inp_fport' : [ None, ['unsigned be short']],
        }],
}
| gpl-2.0 |
faircloth-lab/conda-recipes | recipes/mafft/run_test.py | 2 | 3625 | #!/usr/bin/env python
# encoding: utf-8
"""
File: run_test.py
Author: Brant Faircloth
Created by Brant Faircloth on 22 December 2013 08:12 PST (-0800)
Copyright (c) 2013 Brant C. Faircloth. All rights reserved.
"""
import os
import pdb
import difflib
import tempfile
import subprocess
import unittest
class TestAlignments(unittest.TestCase):
    """Run the packaged mafft binary with each alignment strategy and diff
    the output against the pre-computed alignments shipped in test/mafft."""

    def setUp(self):
        # Fixtures live under $PREFIX/test/mafft; the binary under $PREFIX/bin.
        self.TEST_DIR = os.path.join(os.environ["PREFIX"], "test", "mafft")
        self.TEST_DATA = os.path.join(self.TEST_DIR, "sample")
        self.BINARY = os.path.join(os.environ["PREFIX"], "bin", "mafft")
        self.expected_results = {}
        alignments = [
            "sample.fftns2",
            "sample.fftnsi",
            "sample.gins1",
            "sample.ginsi",
            "sample.lins1",
            "sample.linsi"
        ]
        for expected in alignments:
            # Map strategy name ("fftns2", ...) -> path of its expected output.
            self.expected_results[expected.replace("sample.", "")] = os.path.join(self.TEST_DIR, expected)

    def compare(self, typ, tmp):
        """Unified-diff the fresh alignment *tmp* against the expected output
        for strategy *typ*; remove *tmp* only when they match."""
        # BUGFIX: open mode 'rU' was deprecated and removed in Python 3.11;
        # plain text mode already gives universal newlines on Python 3.
        with open(tmp, 'r') as fh:
            a = fh.readlines()
        with open(self.expected_results[typ], 'r') as fh:
            b = fh.readlines()
        delta = ''.join(difflib.unified_diff(a, b))
        assert delta == ''
        os.remove(tmp)

    def _run_mafft(self, extra_args, typ):
        """Run mafft with *extra_args* on the sample data, capture stdout to a
        temp file, then compare it against the expected output for *typ*."""
        tmp = tempfile.mkstemp(suffix=".mafft")
        cmd = [self.BINARY] + extra_args + [self.TEST_DATA]
        proc1 = subprocess.Popen(cmd, stdout=tmp[0], stderr=subprocess.PIPE)
        proc1.communicate()
        os.close(tmp[0])
        self.compare(typ, tmp[1])

    def test_fftns2(self):
        """Testing fftns2"""
        self._run_mafft([], "fftns2")

    def test_fftnsi(self):
        """Testing fftnsi"""
        self._run_mafft(["--maxiterate", "100"], "fftnsi")

    def test_gins1(self):
        """Testing gins1"""
        self._run_mafft(["--globalpair"], "gins1")

    def test_ginsi(self):
        """Testing ginsi"""
        self._run_mafft(["--globalpair", "--maxiterate", "100"], "ginsi")

    def test_lins1(self):
        """Testing lins1"""
        self._run_mafft(["--localpair"], "lins1")

    def test_linsi(self):
        """Testing linsi"""
        # BUGFIX: this method was previously also named test_lins1, which
        # shadowed the real lins1 test above, so that test never ran.
        self._run_mafft(["--localpair", "--maxiterate", "100"], "linsi")
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
veveykocute/Spl | splc.py | 1 | 19239 | import sys
import math
"""A Shakespeare Compiler written in Python, splc.py
This is a compiler that implements the majority of the Shakespeare programming language
invented by Kalle Hasselstrom and Jon Aslund, I take no credit for inventing the language.
This software is free to edit or use, and though I doubt anyone would use this for many projects,
I guess I would appreciate some degree of acknowledgment if you do.
(c) V1.2 Sam Donow 2013-2014
sad3@williams.edu
drsam94@gmail.com"""
#missing features
#full support for multi-word nouns/names
#Stacks, who needs them?
# --- Global compiler state -------------------------------------------------
# Word lists populated from include/*.wordlist by loadWordLists():
pos_adj = []
neg_adj = []
pos_comp = []
neg_comp = []
pos_nouns = []
neg_nouns = []
valid_names= []
# Nouns worth zero are hard-coded rather than loaded from a file.
zero_nouns = ['nothing', 'zero']
# Source lines of the play being compiled, and the current line cursor.
src = ""
N = 0
# Declared character (variable) names.
vartable = set([])
# Current speaker and the actor being addressed (the assignment target).
speaker = ""
target = ""
# Actors currently on stage (at most two allowed).
stage = set([])
# Current act number and lookup tables from act/scene descriptions to numbers.
actnum = 0
act_names = {}
scene_names= []
#report a compile-time error, then exit
def Assert(b, s):
    """Compile-time assertion: when b is false, report s together with the
    current source line number on stderr and exit with status 1."""
    global N
    if b:
        return
    sys.stderr.write("%s at line %s\n" % (s, N))
    sys.exit(1)
#Abstraction for writing to file, eased python 2/3 agnosticity,
#and will eventually allow file output instead of stdout if that
#ever is desired
def writeToFile(s):
    """Emit one line of generated C code on stdout (newline appended)."""
    line = str(s) + "\n"
    sys.stdout.write(line)
def isNoun(word):
    """True iff *word* appears in any of the loaded noun wordlists."""
    return any(word in lst for lst in (pos_nouns, neg_nouns, zero_nouns))
def isAdjective(word):
    """True iff *word* appears in either adjective wordlist."""
    return any(word in lst for lst in (pos_adj, neg_adj))
def isComparative(word):
    """True iff *word* appears in either comparative wordlist."""
    return any(word in lst for lst in (pos_comp, neg_comp))
#returns 1 for "nice" and neutral nouns, -1 for nasty ones
def nounValue(word):
    """Value of a noun: +1 for nice/neutral nouns, -1 for nasty ones,
    0 for the zero-nouns.  Aborts if *word* is not a known noun."""
    Assert(isNoun(word), "Tried to find the nounvalue of a non-noun")
    if word in pos_nouns:
        return 1
    if word in neg_nouns:
        return -1
    return 0
#return s with all whitespace characters removed
def trimWhitespace(s):
    """Return *s* with every space, tab, CR and LF character removed."""
    blanks = ('\t', '\r', '\n', ' ')
    return "".join(c for c in s if c not in blanks)
#return s with all whitespace characters before the first non-whitedspace character removed
def trimLeadingWhitespace(s):
    """Return *s* without its leading run of spaces, tabs, CRs and LFs."""
    return s.lstrip("\t\r\n ")
#A whitespace-agnositic beginswith method
def beginsWithNoWhitespace(s, pattern):
    """True iff s, with ALL whitespace removed, starts with pattern."""
    return beginsWith(trimWhitespace(s), pattern)
def beginsWith(s, pattern):
    """True iff *s* starts with *pattern* (an empty pattern always matches)."""
    return s.startswith(pattern)
def loadFileIntoList(filename, list):
    """Append the last space-separated word of every line of *filename*
    (minus the line's final character, the trailing newline) to *list*.

    NOTE: the parameter name `list` shadows the builtin; it is kept
    unchanged for interface compatibility with existing callers.
    """
    # Use a context manager so the file is closed even on error
    # (the original open()/close() pair leaked the handle on exceptions).
    with open(filename, 'r') as f:
        for word in f.readlines():
            list.append(word.split(" ")[-1][:-1])
#load initial noun and adjective lists
def loadWordLists():
    """Populate the global word lists from the bundled include/ wordlists."""
    loadFileIntoList("include/neutral_adjective.wordlist" , pos_adj)
    loadFileIntoList("include/positive_adjective.wordlist", pos_adj)
    loadFileIntoList("include/negative_adjective.wordlist", neg_adj)
    loadFileIntoList("include/positive_noun.wordlist", pos_nouns)
    loadFileIntoList("include/neutral_noun.wordlist" , pos_nouns)
    loadFileIntoList("include/negative_noun.wordlist", neg_nouns)
    loadFileIntoList("include/positive_comparative.wordlist", pos_comp)
    # BUGFIX: negative comparatives were previously loaded from the
    # *positive* comparative wordlist, which made pos_comp and neg_comp
    # identical and every comparison parse as "greater".
    loadFileIntoList("include/negative_comparative.wordlist", neg_comp)
    loadFileIntoList("include/character.wordlist", valid_names)
# Standard Roman numeral digit values.
# BUGFIX: 'C' is worth 100, not 1000 (1000 is 'M').
roman_values = { 'M': 1000, 'D': 500, 'C': 100, 'L': 50, 'X': 10, 'V': 5, 'I': 1 }
def parseRomanNumeral(roman_string):
    """Parse a (case-insensitive) Roman numeral into an int, honouring
    subtractive notation: IV == 4, XC == 90, MCMXC == 1990."""
    roman_string = roman_string.upper()
    strindex = 0
    roman_sum = 0
    # A digit smaller than its successor is subtracted (the I in IV).
    while strindex < len(roman_string) - 1:
        if(roman_values[roman_string[strindex]] < roman_values[roman_string[strindex+1]]):
            roman_sum -= roman_values[roman_string[strindex]]
        else:
            roman_sum += roman_values[roman_string[strindex]]
        strindex += 1
    return roman_sum + roman_values[roman_string[strindex]]
def isNumber(s):
    """True iff some space-separated word of *s* is a known noun
    (nouns are the literals of SPL arithmetic)."""
    return any(isNoun(word) for word in s.split(" "))
#parse a string that is supposed to evaluate to a number
#if failOk is set to true, will return 0 for phrases that do not evaluate to a number
def parseNum(s, failOk = False):
    """Evaluate an SPL noun phrase to an integer.

    The first noun supplies the base value (+1, -1 or 0 via nounValue) and
    every adjective before it doubles that value.  When no noun is present,
    abort with an error, or return 0 if failOk is true.
    """
    words = s.split(" ")
    nounIndex = len(words)
    for i in range(0,len(words)):
        if isNoun(words[i]):
            nounIndex = i
            break
    ok = nounIndex < len(words)
    if not ok and failOk:
        return 0
    Assert (ok, str(words) + "\nExpected a number, but found no noun")
    value = nounValue(words[nounIndex])
    # Each adjective preceding the noun doubles the value.
    for word in words[:nounIndex]:
        if isAdjective(word):
            value *= 2
    return value
def parseEnterOrExit():
    """Handle a bracketed stage direction on the current line src[N].

    Supports "[Enter A and B]", "[Exit A]" and "[Exeunt]", updating the
    global `stage` set accordingly.  Multi-word names are reduced to their
    last word.  Aborts on undeclared actors, actors exiting who are not on
    stage, more than two actors on stage, or an unrecognised direction.
    """
    global stage
    endBracket = src[N].find(']')
    Assert(endBracket >= 0, "[ without matching ]")
    # Text between the brackets, e.g. "Enter Romeo and Juliet".
    enterOrExit = src[N][src[N].find('[')+1:src[N].find(']')]
    if beginsWithNoWhitespace(enterOrExit, "Enter"):
        names = enterOrExit[enterOrExit.find(" ") + 1:].split(" and ")
        for namestr in names:
            name = namestr.split(" ")[-1]
            Assert(name in vartable, "Undeclared actor entering a scene")
            stage.add(name)
        Assert(len(stage) < 3, "Too many actors on stage")
    elif beginsWithNoWhitespace(enterOrExit, "Exit"):
        names = enterOrExit[enterOrExit.find(" ") + 1:].split(" and ")
        for namestr in names:
            name = namestr.split(" ")[-1]
            Assert(name in stage, "Trying to make an actor who is not in the scene exit")
            stage.remove(name)
    elif beginsWithNoWhitespace(enterOrExit, "Exeunt"):
        # Everyone leaves the stage.
        stage = set([])
    else:
        Assert(False, "Bracketed clause without Enter, Exit, or Exeunt")
#returns the index of the leftmost punctuation mark in s
def findPunctuation(s):
    """Index of the leftmost '.', '!' or '?' in *s*, or -1 if none occur."""
    hits = [idx for idx in (s.find('.'), s.find('!'), s.find('?')) if idx >= 0]
    if not hits:
        return -1
    return min(hits)
#returns an array of the punctuation-delimited statements at the current location in the parsing
def getStatements():
    """Collect the sentences of the current speech, starting at src[N].

    Reads lines until one contains ':' (next speaker) or '[' (stage
    direction).  Sentences are delimited by '.', '!' or '?'; a line with no
    punctuation is treated as a continuation of the previous sentence.
    Advances the global line cursor N.  Returns the non-empty sentences.
    """
    global N
    statements = []
    line = trimLeadingWhitespace(src[N])
    unfinished = False
    while line.find(':') < 0 and line.find('[') < 0:
        punctuation = findPunctuation(line)
        if punctuation < 0:
            # No sentence end on this line: buffer it (dropping the trailing
            # newline) and keep reading the next source line.
            if unfinished == False:
                statements.append(line[:-1])
            else:
                statements[-1] += line[:-1]
            N += 1
            line = src[N]
            unfinished = True
        elif punctuation > 0:
            if not unfinished:
                statements.append("")
            statements[-1] += line[:punctuation]
            line = line[punctuation + 1:]
            unfinished = False
        # NOTE(review): when punctuation == 0 (line starts with '.', '!' or
        # '?') neither branch consumes anything, which would loop forever;
        # confirm such input cannot reach this point.
    # Drop sentences that are pure whitespace.
    retval = []
    for stat in statements:
        if len(trimWhitespace(stat)) > 0:
            retval.append(stat)
    return retval
class Tree:
    """Minimal binary expression-tree node.

    value -- operator string, or an operand (number/variable name) for leaves
    left  -- left child Tree, or "" for a leaf
    right -- right child Tree, or "" for a leaf or unary operator
    """
    def __init__(self, v, l, r):
        self.value = v
        self.left = l
        self.right = r
def wordToOperator(op):
    """Map an SPL arithmetic keyword to its C operator symbol; abort on an
    unrecognised keyword."""
    operators = {
        "sum": "+",
        "difference": "-",
        "quotient": "/",
        "product": "*",
    }
    if op in operators:
        return operators[op]
    Assert(False, "Illegal Operator")
# SPL keywords for binary and unary arithmetic operations.
binop = ["sum", "difference", "quotient", "product"]
unop = ["square", "cube", "twice"]
def buildExpressionTree(expr):
    """Recursively parse a prefix SPL expression (a list of words) into a
    Tree, returning (tree, remaining_words).

    Handles "square root", "remainder of the quotient", the binop/unop
    keywords, second-person pronouns (the `target`), first-person pronouns
    (the `speaker`), declared character names, and literal noun phrases.
    """
    Assert (len(expr) > 0, "Ill-formed Expression in " + str(expr))
    if expr[0] == "square":
        # "square root of X" -> (int)sqrt(X); plain "square" falls through
        # to the unop handling below.
        if expr[1] == "root":
            op = "(int)sqrt"
            expr = expr[2:]
            num, expr = buildExpressionTree(expr)
            return Tree(op, num, ""), expr
    elif expr[0] == "remainder":
        # "remainder of the quotient X Y" -> X % Y
        if expr[1] == "of" and expr[2] == "the" and expr[3] == "quotient":
            expr = expr[4:]
            op = "%"
            left, expr = buildExpressionTree(expr)
            right, expr = buildExpressionTree(expr)
            return Tree(op, left, right), expr
    if expr[0] in binop:
        op = wordToOperator(expr[0])
        expr = expr[1:]
        left, expr = buildExpressionTree(expr)
        right, expr = buildExpressionTree(expr)
        return Tree(op, left, right), expr
    elif expr[0] in unop:
        op = expr[0]
        expr = expr[1:]
        num, expr = buildExpressionTree(expr)
        return Tree(op, num, ""), expr
    if True:
        # Operand: scan words (skipping a leading "and") until an operator
        # keyword, pronoun or character name terminates the noun phrase.
        i = 1 if expr[0] == "and" else 0
        numstr = ""
        while expr[i] not in binop and expr[i] not in unop and expr[i] not in ["and", "remainder"]:
            if expr[i] in ["you", "thee", "yourself", "thyself", "thou"]:
                # Second person refers to the actor being addressed.
                expr = expr[i + 1:]
                return Tree(target, "", ""), expr
            elif expr[i] in ["me", "myself", "i"]:
                # First person refers to the speaker.
                expr = expr[i + 1:]
                return Tree(speaker, "", ""), expr
            elif expr[i].capitalize() in vartable:
                name = expr[i]
                expr = expr[i + 1:]
                return Tree(name.capitalize(), "", ""), expr
            elif i == len(expr) - 1:
                numstr += expr[i]
                i = len(expr)
                break
            else:
                numstr += expr[i] + " "
                i += 1
        if i == len(expr):
            expr = []
        else:
            expr = expr[i:]
        if not isNumber(numstr):
            # The collected words contained no noun; retry on the remainder.
            return buildExpressionTree(expr)
        else:
            return Tree(str(parseNum(numstr)), "", ""), expr
def TreeToString(tree):
    """Render an expression Tree as C source text."""
    if tree.left == "":
        # Leaf: a literal number or variable name.
        return str(tree.value)
    inner = TreeToString(tree.left)
    if tree.right == "":
        # Unary operator, rendered as a call/cast around its operand.
        return "%s(%s)" % (str(tree.value), inner)
    # Binary operator, fully parenthesised.
    return "(%s %s %s)" % (inner, str(tree.value), TreeToString(tree.right))
def parseExpr(expr):
    """Compile a space-separated SPL expression into a C expression string."""
    tree = buildExpressionTree(expr.split(" "))[0]
    return TreeToString(tree)
def concatWords(wordArray):
    """Concatenate the given words with no separator."""
    return "".join(wordArray)
def firstWord(statement):
    """Return the first non-empty space-separated word of *statement*, or
    None when the string contains no non-empty word.

    BUGFIX: the parameter was previously spelled `statment` while the body
    read `statement`, so any call raised NameError.
    """
    words = statement.split(" ")
    for word in words:
        if len(word) > 0:
            return word
def parseStatement(stat):
    """Compile one SPL sentence (spoken by `speaker` to `target`) into a
    fragment of C code.  Unrecognised sentences compile to "".

    Relies on module globals: target, speaker, actnum, act_names,
    scene_names, and the loaded word lists.
    """
    statement = trimLeadingWhitespace(stat).lower()
    first = statement.split(" ")[0]
    trimmed = trimWhitespace(statement)
    if first in ["you", "thou"]:
        # Assignment of the form: Pronoun [as adj as] expression
        expr = ""
        if statement.rfind("as") >= 0:
            expr = statement[statement.rfind("as") + 3:]
        else:
            expr = statement[len(first) + 1:]
        return target + " = " + parseExpr(expr) + " ;\n"
    elif trimmed == "openyourheart" or trimmed == "openthyheart":
        # Numerical output
        return 'fprintf(stdout, "%d", ' + target + ');\n'
    elif trimmed == "speakyourmind" or trimmed == "speakthymind":
        # Character output
        return 'fprintf(stdout, "%c", (char)' + target + ');\n'
    elif trimmed == "listentoyourheart" or trimmed == "listentothyheart":
        # Numerical input
        return 'fgets(inputbuffer, BUFSIZ, stdin);\nsscanf(inputbuffer, "%d", &' + target + ');\n'
    elif trimmed == "openyourmind" or trimmed == "openthymind":
        # Character input
        # BUGFIX: the second alternative previously repeated "openyourmind",
        # so the "open thy mind" phrasing was never recognised.
        return target + " = getchar();\n"
    elif first in ["am", "are", "art", "be", "is"]:
        # Questions / comparisons - do not yet support "not"
        left = ""
        kind = ""
        right = ""
        if statement.find("as") >= 0:
            # "... as <adjective> as ..." -> equality test
            left, kind, right = statement.split(" as ")
            Assert(isAdjective(kind), "Ill-formed conditional in " + statement)
            kind = "equal"
        elif statement.find("more") >= 0:
            # "... more <adjective> ..." -> greater/lesser by adjective polarity
            words = statement.split(" ")
            moreloc = 0
            for i in range(0, len(words)):
                if words[i] == "more":
                    moreloc = i
                    break
            Assert(isAdjective(words[moreloc + 1]), "Ill-formed conditional in " + statement)
            kind = "greater" if words[moreloc + 1] in pos_adj else "lesser"
            left, right = statement.split(" more " + words[moreloc + 1] + " ")
        else:
            # A single comparative word ("better", "worse", ...)
            comp = ""
            for word in statement.split(" "):
                if isComparative(word):
                    comp = word
                    break
            Assert(len(comp) > 0, "Ill-formed conditional in " + statement)
            kind = "greater" if comp in pos_comp else "lesser"
            left, right = statement.split(comp)
        return "condition = (" + parseExpr(left) + ") " + (">" if kind == "greater" else "<" if kind == "lesser" else "==") + " (" + parseExpr(right) + ");\n"
    elif beginsWith(statement, "if so,"):
        # Positive conditional guarding the rest of the sentence
        location = statement.find("if so,")
        return "if (condition) {\n " + parseStatement(statement[location + 7:]) + " }\n"
    elif beginsWith(statement, "if not,"):
        # Negative conditional guarding the rest of the sentence
        location = statement.find("if not,")
        return "if (!condition) {\n " + parseStatement(statement[location + 8:]) + " }\n"
    elif beginsWith(statement, "let us") or beginsWith(statement, "we shall") or beginsWith(statement, "we must"):
        words = statement.split(" ")
        nextTwo = words[2] + " " + words[3]
        Assert (nextTwo == "return to" or nextTwo == "proceed to", "Ill-formed goto")
        if words[4] == "scene" or words[4] == "act":
            # Classic goto with an explicit act/scene Roman numeral
            typeword = words[4] if words[4] == "act" else ("act_" + str(actnum) + "_scene")
            return "goto " + typeword + str(parseRomanNumeral(words[5])) + ";\n"
        else:
            # Goto referencing an act/scene by its description
            restOfPhrase = concatWords(words[4:])
            type_ = "scene" if restOfPhrase in scene_names[actnum].keys() \
                else "act" if restOfPhrase in act_names.keys() else "none"
            Assert (type_ != "none", "Goto refers to nonexistant act or scene")
            nameDict = act_names if type_ == "act" else scene_names[actnum]
            # BUGFIX: `act` was a bare undefined name here (NameError at
            # runtime); the label prefix must be the string "act", matching
            # the labels emitted by writeScenes.
            typeword = "act" if type_ == "act" else ("act_" + str(actnum) + "_scene")
            return "goto " + typeword + str(nameDict[restOfPhrase]) + ";\n"
    else:
        return ""
def writeScenes(scenes, isLast):
    """Emit the C code for every scene of the current act.

    Writes an "actN" dispatch label that jumps to the act's first scene,
    then one labelled block per scene.  Each scene falls through via goto
    to the next scene, and the final scene jumps to the next act unless
    isLast is true.
    """
    writeToFile("act" + str(actnum) + ": {\ngoto act_" + str(actnum) + "_scene1;\n}")
    for j in range(0, len(scenes)):
        writeToFile("act_" + str(actnum) + "_scene" + str(j + 1) + ": {")
        writeToFile(scenes[j])
        if j < len(scenes) - 1:
            writeToFile("goto act_" + str(actnum) + "_scene" + str(j + 2) + ";\n")
        elif not isLast:
            writeToFile("goto act" + str(actnum + 1) + ";\n")
        writeToFile("}")
def handleDeclarations():
    """Parse the character declarations that precede Act I.

    Declaration syntax is "Name, description." (possibly spanning lines).
    Emits "int Name = value;" for each and records the name in `vartable`.
    Advances the global cursor N to the first line beginning with 'Act'.
    """
    global N
    global src
    #variables, declaration syntax:
    #Name, value
    declarations = []
    unfinished = False
    while not beginsWithNoWhitespace(src[N], 'Act'):
        Assert(N < len(src) - 1, "File contains no Acts")
        if len(trimWhitespace(src[N])) > 0:
            if not unfinished:
                declarations.append(src[N])
            else:
                # Continuation of a declaration that had no '.' yet.
                declarations[-1] += src[N]
            unfinished = src[N].find('.') < 0
        N += 1
    for dec in declarations:
        commaIndex = dec.find(',')
        Assert(commaIndex > 0, "Improper declaration " + str(declarations))
        # Multi-word names are reduced to their last word.
        wordsInName = trimLeadingWhitespace(dec[:commaIndex]).split(" ")
        varname = wordsInName[-1]
        value = parseNum(dec[commaIndex:-2], True)
        writeToFile("int " + str(varname) + " = " + str(value) + ";")
        # NOTE(review): the validity check runs *after* the declaration has
        # already been emitted; confirm this ordering is intentional.
        Assert(varname in valid_names, "Non-Shakespearean variable name")
        vartable.add(varname)
def getActOrSceneNumber(s, actOrScene):
    """Extract the Roman numeral following the given 'Act'/'Scene' keyword
    in heading *s* (it must be terminated by ':') and return it as an int;
    abort on a malformed heading."""
    num = s[s.find(actOrScene):].split(" ")[1]
    if num.find(':') > 0:
        num = num[:num.find(':')]
    else:
        Assert (False, "Bad " + actOrScene + " heading")
    return parseRomanNumeral(num)
def getActOrSceneDescription(s):
    """Return the normalised description of an Act/Scene heading: the text
    after ':', lower-cased, whitespace removed, truncated at the first
    punctuation mark."""
    desc = trimWhitespace(s[s.find(':')+1:]).lower()
    p = findPunctuation(desc)
    if p > 0:
        desc = desc[:p]
    return desc
# Gets all the names of scenes and acts, and adds them to the respective tables
# This must be done in a preprocessing step, in order to enable gotos to future acts/scenes
def parseAllActAndSceneDescriptions():
    """Pre-scan `src` to build act_names and scene_names, so that gotos may
    reference acts/scenes that appear later in the play.

    scene_names is indexed by act number (entry 0 is a placeholder so act
    numbers start at 1), each entry mapping description -> scene number.
    """
    global scene_names
    global act_names
    current_act = 0
    current_scene = 0
    scene_names = [{}]
    for line in src:
        if beginsWithNoWhitespace(line, "Act"):
            desc = getActOrSceneDescription(line)
            current_act += 1
            act_names[desc] = current_act
            scene_names.append(dict())
            current_scene = 0
        elif beginsWithNoWhitespace(line, "Scene"):
            desc = getActOrSceneDescription(line)
            current_scene += 1
            scene_names[current_act][desc] = current_scene
#-------------------------------Begin Main Program-------------------------#
Assert(len(sys.argv) > 1, "No input file")
filename = sys.argv[1]
f = open(filename, 'r')
src = f.readlines()
f.close()
loadWordLists()
#parse the title - all the text up until the first .
#title is unimportant and is thrown out
while src[N].find('.') < 0:
    N += 1
N += 1
#title is thrown out
# Emit the C prelude for the generated program.
writeToFile("// " + filename + "\n" +
            "// compiled with splc.py (c) Sam Donow 2013-2015\n" +
            "#include <stdio.h>\n" +
            "#include <math.h>\n" +
            '#include "include/mathhelpers.h"\n' +
            "int condition = 0;\n" +
            "char inputbuffer[BUFSIZ];\n" +
            "int main() {\n")
handleDeclarations()
parseAllActAndSceneDescriptions()
scenes = []
unfinished = False
# Main parse loop: walk the play line by line, collecting generated C code
# for each scene and flushing it act by act via writeScenes().
while N < len(src):
    if beginsWithNoWhitespace(src[N], 'Act'):
        Assert (getActOrSceneNumber(src[N], 'Act') == actnum + 1, "Illegal Act numbering")
        if actnum > 0:
            # Flush the finished act before starting the next one.
            writeScenes(scenes, False)
            scenes = []
        actnum += 1
        #act_names[getActOrSceneDescription(src[N])] = actnum
        N += 1
    elif beginsWithNoWhitespace(src[N], 'Scene'):
        Assert (getActOrSceneNumber(src[N], 'Scene') == len(scenes) + 1, "Illegal Scene numbering")
        #scene_names[getActOrSceneDescription(src[N])] = len(scenes) + 1
        N += 1
        speaker = ""
        target = ""
        # Consume the body of this scene up to the next Scene/Act heading.
        while (N < len(src)) and not (beginsWithNoWhitespace(src[N], 'Scene') or beginsWithNoWhitespace(src[N], 'Act')):
            if beginsWithNoWhitespace(src[N], '['):
                parseEnterOrExit()
                if not unfinished:
                    scenes.append(";\n")
                    unfinished = True
                N += 1
            elif src[N].find(':') >= 0:
                # A speech: "<Name>: <sentences...>"; everyone else on stage
                # becomes the addressee (the assignment target).
                name = (src[N][:src[N].find(':')]).split(" ")[-1]
                Assert (name in stage, "An actor who is not on stage is trying to speak")
                for actor in stage:
                    if actor != name:
                        target = actor
                speaker = name
                N += 1
                statements = getStatements()
                scenecode = ""
                for statement in statements:
                    scenecode += parseStatement(statement)
                if not unfinished:
                    scenes.append(scenecode)
                    unfinished = True
                else:
                    scenes[-1] += scenecode
            else:
                N += 1
        unfinished = False
    else:
        N += 1
writeScenes(scenes, True)
writeToFile("}")
| unlicense |
IndyMPO/IndyGeoTools | ConvertGeography/GetAreaConversionMatrix.py | 1 | 3774 | #This script copyright 2017 Indianapolis Metropolitan Planning Organization
from __future__ import division
import arcpy
import os
import pandas as pd
import numpy as np
from subprocess import Popen
import sys
def clear_temp():
    '''
    Clears the temporary directory (C:\\TEMP) that is created when running this tool.
    '''
    import shutil  # local import: only needed for cleanup
    # shutil.rmtree removes the directory and everything inside it, including
    # nested directories; the previous os.remove/os.rmdir pair raised an error
    # if any subdirectory was present.
    shutil.rmtree(r'C:\TEMP')
def main(*args):
    """Build an area-weighted conversion matrix between two polygon layers.

    args: (from_shp_file, from_field, to_shp_file, to_field, outfile,
    show_matrix, remove_temp_if_successful, remove_temp_if_error).
    Intersects the two shapefiles, tabulates intersected areas by
    (to_field, from_field), normalises each column to sum to 1, and writes
    the matrix to *outfile* as CSV.
    """
    #Read in inputs
    from_shp_file = args[0]
    from_field = args[1]
    to_shp_file = args[2]
    to_field = args[3]
    outfile = args[4]
    show_matrix = args[5]
    remove_temp_if_successful = args[6]
    remove_temp_if_error = args[7]
    # Intersecting a layer with itself would duplicate the field name.
    if from_field == to_field:
        to_field += '_1'
    #Check if the outfile is specified as a csv file. If it isn't, do so.
    if outfile[-4:] != '.csv':
        outfile += '.csv'
    #Create temporary directory
    temp_dir = r'C:\TEMP'
    os.mkdir(temp_dir)
    temp_shp = os.path.join(temp_dir, 'TEMP.shp')
    from_shp = os.path.join(temp_dir, 'FROM.shp')
    to_shp = os.path.join(temp_dir, 'TO.shp')
    #Copy input shapefiles into temporary directory
    arcpy.CopyFeatures_management(from_shp_file, from_shp)
    arcpy.CopyFeatures_management(to_shp_file, to_shp)
    #Process the data. If an error occurs, the temporary directory will be deleted, and then the exception will be raised
    try:
        #Intersect the two shapefiles and calculate the area of the intersected shapefile
        arcpy.Intersect_analysis([from_shp, to_shp], temp_shp)
        temp2_shp = temp_shp.replace('.shp', '2.shp')
        arcpy.CalculateAreas_stats(temp_shp, temp2_shp)
        #Create a list of all of the origin and destination polygons
        from_list = []
        to_list = []
        polygons = arcpy.da.SearchCursor(temp_shp, [from_field, to_field])
        for polygon in polygons:
            from_list += [polygon[0]]
            to_list += [polygon[1]]
        del polygons
        # value_counts().index is used simply to obtain the unique codes.
        from_codes = pd.Series(from_list).value_counts().index
        to_codes = pd.Series(to_list).value_counts().index
        #Create matrix with total area of each intersected polygon, arranged by the from polygon and to polygon
        areas = pd.DataFrame(np.zeros((len(to_codes), len(from_codes))), index = to_codes, columns = from_codes)
        polygons = arcpy.da.SearchCursor(temp2_shp, [from_field, to_field, 'F_AREA'])
        for polygon in polygons:
            areas.loc[polygon[1], polygon[0]] = polygon[2]
        del polygons
        #Divide each column of the matrix by its sum
        total = areas.sum(0)
        out_data = areas.copy()
        for row in out_data.index:
            out_data.loc[row] /= total
        #Write to csv, and delete the temporary directory
        out_data.to_csv(outfile)
        if remove_temp_if_successful:
            clear_temp()
    except Exception as e:
        if remove_temp_if_error:
            clear_temp()
        # Report the line number where the failure occurred before re-raising.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        print (exc_tb.tb_lineno)
        raise e
    #Open the file if instructed to do so
    if show_matrix:
        Popen(outfile, shell = True)
if __name__ == '__main__':
    # Collect tool parameters from the ArcGIS toolbox dialog and delegate
    # to main().
    from_shp_file = arcpy.GetParameterAsText(0)
    from_field = arcpy.GetParameterAsText(1)
    to_shp_file = arcpy.GetParameterAsText(2)
    to_field = arcpy.GetParameterAsText(3)
    outfile = arcpy.GetParameter(4)
    show_matrix = arcpy.GetParameter(5)
    remove_temp_if_successful = arcpy.GetParameter(6)
    remove_temp_if_error = arcpy.GetParameter(7)
    main(from_shp_file, from_field, to_shp_file, to_field, outfile, show_matrix, remove_temp_if_successful, remove_temp_if_error)
| apache-2.0 |
SDSG-Invenio/invenio | invenio/celery/tasks.py | 17 | 1196 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
from invenio.celery import celery
@celery.task
def invenio_version():
    """ Task that will return the current running Invenio version """
    # Imported lazily so the application configuration is resolved in the
    # worker process at call time, not at module import time.
    from invenio.base.globals import cfg
    return cfg['CFG_VERSION']
@celery.task
def invenio_db_test(num):
    """ Task will execute a simple query in the database"""
    from invenio.ext.sqlalchemy import db
    # int() both validates the argument and prevents SQL injection via num.
    return db.engine.execute("select %s" % int(num)).scalar()
| gpl-2.0 |
goliveirab/odoo | addons/base_gengo/__openerp__.py | 312 | 2117 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest: pure metadata read by the addons loader.
{
    'name': 'Automated Translations through Gengo API',
    'version': '0.1',
    'category': 'Tools',
    'description': """
Automated Translations through Gengo API
========================================
This module will install passive scheduler job for automated translations
using the Gengo API. To activate it, you must
1) Configure your Gengo authentication parameters under `Settings > Companies > Gengo Parameters`
2) Launch the wizard under `Settings > Application Terms > Gengo: Manual Request of Translation` and follow the wizard.
This wizard will activate the CRON job and the Scheduler and will start the automatic translation via Gengo Services for all the terms where you requested it.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com',
    'depends': ['base'],
    'data': [
        'gengo_sync_schedular_data.xml',
        'ir_translation.xml',
        'res_company_view.xml',
        'wizard/base_gengo_translations_view.xml',
    ],
    'demo': [],
    'test': [],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shinglyu/moztrap | moztrap/model/core/migrations/0003_auto__add_field_productversion_cc_version__add_field_product.py | 5 | 13293 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add integer column ``cc_version`` (default 0) to the
        core_productversion and core_product tables."""
        # Adding field 'ProductVersion.cc_version'
        db.add_column('core_productversion', 'cc_version', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
        # Adding field 'Product.cc_version'
        db.add_column('core_product', 'cc_version', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
    def backwards(self, orm):
        """Revert: drop the ``cc_version`` column from both tables."""
        # Deleting field 'ProductVersion.cc_version'
        db.delete_column('core_productversion', 'cc_version')
        # Deleting field 'Product.cc_version'
        db.delete_column('core_product', 'cc_version')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.product': {
'Meta': {'ordering': "['name']", 'object_name': 'Product'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 558711)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'has_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 558895)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'own_team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'core.productversion': {
'Meta': {'ordering': "['product', 'order']", 'object_name': 'ProductVersion'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 559819)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'productversion'", 'symmetrical': 'False', 'to': "orm['environments.Environment']"}),
'has_team': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latest': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 560004)'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'own_team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['core.Product']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
},
'environments.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 562776)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 562967)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'environments.element': {
'Meta': {'ordering': "['name']", 'object_name': 'Element'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'elements'", 'to': "orm['environments.Category']"}),
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 561818)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 562003)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'environments.environment': {
'Meta': {'object_name': 'Environment'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 555711)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'elements': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'environments'", 'symmetrical': 'False', 'to': "orm['environments.Element']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 555910)'}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'environments'", 'null': 'True', 'to': "orm['environments.Profile']"})
},
'environments.profile': {
'Meta': {'object_name': 'Profile'},
'cc_version': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 557817)'}),
'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'deleted_on': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 25, 0, 0, 59, 558002)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['core']
| bsd-2-clause |
pducks32/intergrala | python/sympy/sympy/physics/quantum/tests/test_operator.py | 7 | 6165 | from sympy import (Derivative, diff, Function, Integer, Mul, pi, sin, Symbol,
symbols)
from sympy.physics.quantum.qexpr import QExpr
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.hilbert import HilbertSpace
from sympy.physics.quantum.operator import (Operator, UnitaryOperator,
HermitianOperator, OuterProduct,
DifferentialOperator,
IdentityOperator)
from sympy.physics.quantum.state import Ket, Bra, Wavefunction
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.represent import represent
from sympy.core.trace import Tr
from sympy.physics.quantum.spin import JzKet, JzBra
from sympy.matrices import eye
class CustomKet(Ket):
    """Ket subclass used as a test fixture; its default label is "t"."""

    @classmethod
    def default_args(cls):
        # Idiom fix: the first argument of a classmethod is conventionally
        # named ``cls`` (the original used ``self``). Binding is positional,
        # so callers are unaffected.
        return ("t",)
class CustomOp(HermitianOperator):
    """Hermitian operator used as a test fixture; its default label is "T"."""

    @classmethod
    def default_args(cls):
        # Idiom fix: classmethods take ``cls``, not ``self``; binding is
        # positional, so callers are unaffected.
        return ("T",)
# Module-level fixtures shared by the tests below.
t_ket = CustomKet()  # ket built from the default label "t"
t_op = CustomOp()  # Hermitian operator built from the default label "T"
def test_operator():
    """Basic Operator behaviour: labels, non-commutativity, expansion,
    default arguments."""
    A = Operator('A')
    B = Operator('B')
    C = Operator('C')
    assert isinstance(A, Operator)
    assert isinstance(A, QExpr)
    assert A.label == (Symbol('A'),)
    # Quantum operators do not commute in general.
    assert A.is_commutative is False
    assert A.hilbert_space == HilbertSpace()
    assert A*B != B*A
    assert (A*(B + C)).expand() == A*B + A*C
    # Expansion must keep operator ordering: A*B and B*A are distinct terms.
    assert ((A + B)**2).expand() == A**2 + A*B + B*A + B**2
    # A fixture built without arguments picks up its class's default label.
    assert t_op.label[0] == Symbol(t_op.default_args()[0])
    assert Operator() == Operator("O")
def test_operator_inv():
    """An operator multiplied by its inverse reduces to 1 on either side."""
    op = Operator('A')
    inverse = op.inv()
    assert op * inverse == 1
    assert inverse * op == 1
def test_hermitian():
    """A HermitianOperator is its own dagger but not (symbolically) its own
    inverse, and stays non-commutative."""
    H = HermitianOperator('H')
    assert isinstance(H, HermitianOperator)
    assert isinstance(H, Operator)
    # Defining property: H† == H.
    assert Dagger(H) == H
    # Hermitian does not imply unitary, so inv(H) is not simplified to H.
    assert H.inv() != H
    assert H.is_commutative is False
    assert Dagger(H).is_commutative is False
def test_unitary():
    """A UnitaryOperator satisfies U.inv() == U† and U*U† == U†*U == 1."""
    U = UnitaryOperator('U')
    assert isinstance(U, UnitaryOperator)
    assert isinstance(U, Operator)
    # Defining property: the inverse is the dagger.
    assert U.inv() == Dagger(U)
    assert U*Dagger(U) == 1
    assert Dagger(U)*U == 1
    assert U.is_commutative is False
    assert Dagger(U).is_commutative is False
def test_identity():
    """IdentityOperator absorbs into operator products but stays symbolic
    next to scalars/symbols, and is self-inverse and self-adjoint."""
    I = IdentityOperator()
    O = Operator('O')
    x = Symbol("x")
    assert isinstance(I, IdentityOperator)
    assert isinstance(I, Operator)
    # Identity is absorbed when multiplied with another operator.
    assert I * O == O
    assert O * I == O
    assert isinstance(I * I, IdentityOperator)
    # With plain scalars/symbols the product stays an unevaluated Mul.
    assert isinstance(3 * I, Mul)
    assert isinstance(I * x, Mul)
    assert I.inv() == I
    assert Dagger(I) == I
    assert qapply(I * O) == O
    assert qapply(O * I) == O
    # Matrix representation of an N-dimensional identity is the N x N
    # identity matrix.
    for n in [2, 3, 5]:
        assert represent(IdentityOperator(n)) == eye(n)
def test_outer_product():
    """|k><b| construction: explicit OuterProduct, implicit Ket*Bra,
    scalar factors, dagger, and trace."""
    k = Ket('k')
    b = Bra('b')
    op = OuterProduct(k, b)
    assert isinstance(op, OuterProduct)
    assert isinstance(op, Operator)
    assert op.ket == k
    assert op.bra == b
    assert op.label == (k, b)
    assert op.is_commutative is False
    # Ket*Bra multiplication auto-builds an OuterProduct.
    op = k*b
    assert isinstance(op, OuterProduct)
    assert isinstance(op, Operator)
    assert op.ket == k
    assert op.bra == b
    assert op.label == (k, b)
    assert op.is_commutative is False
    # Note the asymmetry: 2*k*b stays a flat Mul of three factors, while
    # 2*(k*b) wraps the explicit OuterProduct.
    op = 2*k*b
    assert op == Mul(Integer(2), k, b)
    op = 2*(k*b)
    assert op == Mul(Integer(2), OuterProduct(k, b))
    # Dagger reverses and conjugates: (|k><b|)† == |b><k|.
    assert Dagger(k*b) == OuterProduct(Dagger(b), Dagger(k))
    assert Dagger(k*b).is_commutative is False
    #test the _eval_trace
    assert Tr(OuterProduct(JzKet(1, 1), JzBra(1, 1))).doit() == 1
def test_operator_dagger():
    """Dagger reverses products, distributes over sums, and commutes
    with integer powers."""
    first = Operator('A')
    second = Operator('B')
    assert Dagger(first * second) == Dagger(second) * Dagger(first)
    assert Dagger(first + second) == Dagger(first) + Dagger(second)
    assert Dagger(first ** 2) == Dagger(first) ** 2
def test_differential_operator():
    """DifferentialOperator applied via qapply to Wavefunctions:
    1D derivatives, then 2D Cartesian and 2D polar Laplacians.

    Note: ``d`` and ``g``/``w`` are rebound several times; each group of
    asserts refers to the most recent binding.
    """
    x = Symbol('x')
    f = Function('f')
    # d/dx applied to x**2 gives 2*x.
    d = DifferentialOperator(Derivative(f(x), x), f(x))
    g = Wavefunction(x**2, x)
    assert qapply(d*g) == Wavefunction(2*x, x)
    assert d.expr == Derivative(f(x), x)
    assert d.function == f(x)
    assert d.variables == (x,)
    # Differentiating the operator itself raises the derivative order.
    assert diff(d, x) == DifferentialOperator(Derivative(f(x), x, 2), f(x))
    # Second derivative: d2/dx2 applied to x**3 gives 6*x.
    d = DifferentialOperator(Derivative(f(x), x, 2), f(x))
    g = Wavefunction(x**3, x)
    assert qapply(d*g) == Wavefunction(6*x, x)
    assert d.expr == Derivative(f(x), x, 2)
    assert d.function == f(x)
    assert d.variables == (x,)
    assert diff(d, x) == DifferentialOperator(Derivative(f(x), x, 3), f(x))
    # Operator with a coefficient: (1/x) d/dx; applied to g == x**3 above.
    d = DifferentialOperator(1/x*Derivative(f(x), x), f(x))
    assert d.expr == 1/x*Derivative(f(x), x)
    assert d.function == f(x)
    assert d.variables == (x,)
    assert diff(d, x) == \
        DifferentialOperator(Derivative(1/x*Derivative(f(x), x), x), f(x))
    assert qapply(d*g) == Wavefunction(3*x, x)
    # 2D cartesian Laplacian
    y = Symbol('y')
    d = DifferentialOperator(Derivative(f(x, y), x, 2) +
                             Derivative(f(x, y), y, 2), f(x, y))
    w = Wavefunction(x**3*y**2 + y**3*x**2, x, y)
    assert d.expr == Derivative(f(x, y), x, 2) + Derivative(f(x, y), y, 2)
    assert d.function == f(x, y)
    assert d.variables == (x, y)
    assert diff(d, x) == \
        DifferentialOperator(Derivative(d.expr, x), f(x, y))
    assert diff(d, y) == \
        DifferentialOperator(Derivative(d.expr, y), f(x, y))
    assert qapply(d*w) == Wavefunction(2*x**3 + 6*x*y**2 + 6*x**2*y + 2*y**3,
                                       x, y)
    # 2D polar Laplacian (th = theta)
    r, th = symbols('r th')
    d = DifferentialOperator(1/r*Derivative(r*Derivative(f(r, th), r), r) +
                             1/(r**2)*Derivative(f(r, th), th, 2), f(r, th))
    w = Wavefunction(r**2*sin(th), r, (th, 0, pi))
    assert d.expr == \
        1/r*Derivative(r*Derivative(f(r, th), r), r) + \
        1/(r**2)*Derivative(f(r, th), th, 2)
    assert d.function == f(r, th)
    assert d.variables == (r, th)
    assert diff(d, r) == \
        DifferentialOperator(Derivative(d.expr, r), f(r, th))
    assert diff(d, th) == \
        DifferentialOperator(Derivative(d.expr, th), f(r, th))
    assert qapply(d*w) == Wavefunction(3*sin(th), r, (th, 0, pi))
| mit |
neteler/QGIS | python/plugins/processing/algs/gdal/rgb2pct.py | 3 | 2612 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
rgb2pct.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.core.parameters import ParameterRaster
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputRaster
from processing.tools.system import isWindows
from processing.algs.gdal.GdalUtils import GdalUtils
class rgb2pct(GdalAlgorithm):
    """QGIS Processing wrapper around GDAL's rgb2pct utility, which
    converts a 24-bit RGB raster to an 8-bit paletted (PCT) raster."""

    # Keys used to address this algorithm's parameters/outputs.
    INPUT = 'INPUT'
    OUTPUT = 'OUTPUT'
    NCOLORS = 'NCOLORS'
    def defineCharacteristics(self):
        """Declare the algorithm's name, group, parameters and output."""
        self.name = 'RGB to PCT'
        self.group = '[GDAL] Conversion'
        self.addParameter(ParameterRaster(rgb2pct.INPUT,
            self.tr('Input layer'), False))
        # Palette size: minimum 1 color, no maximum, default 2.
        self.addParameter(ParameterNumber(rgb2pct.NCOLORS,
            self.tr('Number of colors'), 1, None, 2))
        self.addOutput(OutputRaster(rgb2pct.OUTPUT, self.tr('Output layer')))
    def processAlgorithm(self, progress):
        """Build the rgb2pct command line and run it through GdalUtils."""
        arguments = []
        arguments.append('-n')
        arguments.append(str(self.getParameterValue(rgb2pct.NCOLORS)))
        # Output format short name is derived from the output file extension.
        arguments.append('-of')
        out = self.getOutputValue(rgb2pct.OUTPUT)
        arguments.append(GdalUtils.getFormatShortNameFromFilename(out))
        arguments.append(self.getParameterValue(rgb2pct.INPUT))
        arguments.append(out)
        if isWindows():
            # NOTE(review): '/C ' carries a trailing space as its own argv
            # entry — presumably relied upon by GdalUtils.runGdal; confirm
            # before changing.
            commands = ['cmd.exe', '/C ', 'rgb2pct.bat',
                        GdalUtils.escapeAndJoin(arguments)]
        else:
            commands = ['rgb2pct.py', GdalUtils.escapeAndJoin(arguments)]
        GdalUtils.runGdal(commands, progress)
| gpl-2.0 |
D3f0/AutobahnPython | examples/websocket/echo/client.py | 19 | 1977 | ###############################################################################
##
## Copyright 2011,2012 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.internet import reactor
from twisted.python import log
from autobahn.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
class EchoClientProtocol(WebSocketClientProtocol):
    """Python 2 WebSocket client that sends "Hello, world!" on connect and
    re-sends it one second after each echo received."""

    def sendHello(self):
        # Text message; the server is expected to echo it back.
        self.sendMessage("Hello, world!")
    def onOpen(self):
        # Kick off the echo loop as soon as the connection opens.
        self.sendHello()
    def onMessage(self, msg, binary):
        print "Got echo: " + msg
        # Schedule the next hello via the reactor instead of blocking.
        reactor.callLater(1, self.sendHello)
if __name__ == '__main__':
    # Usage: client.py ws://host:port [debug]
    if len(sys.argv) < 2:
        print "Need the WebSocket server address, i.e. ws://localhost:9000"
        sys.exit(1)
    # Optional second argument enables Twisted logging and factory debug.
    if len(sys.argv) > 2 and sys.argv[2] == 'debug':
        log.startLogging(sys.stdout)
        debug = True
    else:
        debug = False
    factory = WebSocketClientFactory(sys.argv[1],
                                     debug = debug,
                                     debugCodePaths = debug)
    # uncomment to use Hixie-76 protocol
    #factory.setProtocolOptions(allowHixie76 = True, version = 0)
    factory.protocol = EchoClientProtocol
    connectWS(factory)
    # Blocks until the reactor is stopped.
    reactor.run()
| apache-2.0 |
mission-peace/interview | python/dynamic/weighted_job_scheduling_max_profit.py | 1 | 1192 | """
Problem Statement
=================
Given set of jobs with start and end interval and profit, how to maximize profit such that jobs in subset do not
overlap.
Video
-----
* https://youtu.be/cr6Ip0J9izc
Complexity
----------
* Runtime Complexity: O(n^2)
* Space Complexity: O(n)
Reference Link
--------------
* http://www.cs.princeton.edu/courses/archive/spr05/cos423/lectures/06dynamic-programming.pdf
"""
def can_sequence(job1, job2):
    """Return True when job2 can be scheduled after job1, i.e. job1
    finishes no later than job2 starts. Jobs are (start, finish) tuples."""
    finish_of_first = job1[1]
    start_of_second = job2[0]
    return finish_of_first <= start_of_second
def find_max_profit(jobs):
    """Return the maximum total profit from a subset of non-overlapping jobs.

    :param jobs: dict mapping (start_time, finish_time) -> profit
    :return: best achievable profit; 0 when ``jobs`` is empty

    Runtime O(n^2), space O(n).
    """
    if not jobs:
        # Fix: max() below raises ValueError on an empty sequence;
        # no jobs means no profit.
        return 0
    # Sort by finish time so every compatible predecessor of job j
    # appears at some index i < j.
    sequenced_jobs = sorted(jobs.keys(), key=lambda interval: interval[1])
    # T[j] = best profit of a schedule that ends with sequenced_jobs[j],
    # seeded with each job's own profit (the single-job schedule).
    T = [jobs[job_key] for job_key in sequenced_jobs]
    for j in range(1, len(sequenced_jobs)):
        start_j = sequenced_jobs[j][0]
        profit_j = jobs[sequenced_jobs[j]]  # hoisted: invariant in inner loop
        for i in range(j):
            # Compatible when job i finishes no later than job j starts
            # (the same predicate as the can_sequence helper above, inlined
            # so this function is self-contained).
            if sequenced_jobs[i][1] <= start_j:
                T[j] = max(T[j], T[i] + profit_j)
    return max(T)
if __name__ == '__main__':
    # Keys are (start_time, end_time) intervals; values are profits.
    jobs = {
        (1, 3): 5, # (start_time, end_time): profit
        (2, 5): 6,
        (4, 6): 5,
        (6, 7): 4,
        (5, 8): 11,
        (7, 9): 2
    }
    # Best subset is (1,3)+(4,6)+(6,7)? No — (2,5)+(5,8) = 17 wins; see video link above.
    assert 17 == find_max_profit(jobs)
markeTIC/server-tools | base_suspend_security/base_suspend_security.py | 29 | 1308 | # -*- coding: utf-8 -*-
##############################################################################
#
# This module copyright (C) 2015 Therp BV <http://therp.nl>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
class BaseSuspendSecurityUid(int):
    """int subclass used as a marker uid to suspend security checks
    (Python 2 code: note the ``long`` reference below).

    NOTE(review): ``__eq__`` deliberately returns False for any plain
    int/long — presumably so a suspended uid never compares equal to a
    regular user id; confirm against the callers before changing.
    """

    def __int__(self):
        # Return the instance itself, preserving the marker subclass
        # through int() conversions.
        return self
    def __eq__(self, other):
        if isinstance(other, (int, long)):
            # Never equal to a plain integer uid (see class note).
            return False
        return super(BaseSuspendSecurityUid, self).__int__() == other
    def __iter__(self):
        # Iterating yields the underlying integer value once, so the uid
        # can be used where an iterable of ids is expected.
        yield super(BaseSuspendSecurityUid, self).__int__()
# Name of the method/flag used to mark security suspension.
SUSPEND_METHOD = 'suspend_security'
| agpl-3.0 |
nthorne/xmppmote | configuration/test/test_commands.py | 1 | 7152 | #!/usr/bin/env python
#Copyright (C) 2012 Niklas Thorne.
#This file is part of XMPPMote.
#
#XMPPMote is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#XMPPMote is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with XMPPMote. If not, see <http://www.gnu.org/licenses/>.
""" This module provides unit tests for the commands module. """
import sys
import os
sys.path.append(os.path.abspath("../.."))
import mox
import unittest
from ConfigParser import SafeConfigParser
from ConfigParser import NoSectionError
from ConfigParser import NoOptionError
from configuration.commands import get_command_handler
from configuration.commands import UnknownHandler
from configuration.commands import restricted_set
from configuration.commands import MalformedCommand
from configuration.configurationparser import ConfigurationParser
from bot import commandhandlers
class GetCommandHandlerTest(mox.MoxTestBase):
    """ Provides test cases for the get_command_handler function.

    Each test follows the mox record/replay pattern: expectations are
    recorded on the stubbed SafeConfigParser.get, then ReplayAll() switches
    to replay mode before get_command_handler is exercised.
    """
    def test_getting_existing_commandhandlers(self):
        """ If any of the two known command handlers are configured, an instance
        of the named command handler should be returned by get_command_handler
        """
        # Minimal stand-in for an open config file.
        mock_file = self.mox.CreateMockAnything()
        mock_file.closed = False
        mock_file.name = "foobar"
        self.mox.StubOutWithMock(SafeConfigParser, "get")
        config = ConfigurationParser()
        config.parse(mock_file)
        # case this one weirdly just to make sure that character casing is
        # taken into consideration when parsing the string..
        config.get("general", "handler").AndReturn("rEstrIctEd")
        config.get("general", "handler").AndReturn("pAssthrU")
        self.mox.ReplayAll()
        expected_type = commandhandlers.RestrictedCommandHandler()
        self.assertEquals(type(get_command_handler()),
                          type(expected_type))
        expected_type = commandhandlers.UnsafeCommandHandler()
        self.assertEquals(type(get_command_handler()),
                          type(expected_type))
    def test_getting_nonexisting_commandhandler(self):
        """ If the command handler returned by the configuration is unknown to
        get_command_handler, an UnknownHandler exception should be raised. """
        mock_file = self.mox.CreateMockAnything()
        mock_file.closed = False
        mock_file.name = "foobar"
        self.mox.StubOutWithMock(SafeConfigParser, "get")
        config = ConfigurationParser()
        config.parse(mock_file)
        # An unrecognised handler name is recorded as the config value.
        config.get("general", "handler").AndReturn("foobar")
        self.mox.ReplayAll()
        self.assertRaises(UnknownHandler, get_command_handler)
    def test_getting_commandhandler_undefined_in_config(self):
        """ If either the section or the option that details the command handler
        is missing, an UnknownHandler exception should be raised. """
        mock_file = self.mox.CreateMockAnything()
        mock_file.closed = False
        mock_file.name = "foobar"
        self.mox.StubOutWithMock(SafeConfigParser, "get")
        config = ConfigurationParser()
        config.parse(mock_file)
        # First call simulates a missing section, second a missing option.
        config.get("general", "handler").AndRaise(NoSectionError("general"))
        config.get("general", "handler").AndRaise(NoOptionError("general",
                                                                "handler"))
        self.mox.ReplayAll()
        self.assertRaises(UnknownHandler, get_command_handler)
        self.assertRaises(UnknownHandler, get_command_handler)
class GetRestrictedSetTest(mox.MoxTestBase):
    """ Provides test cases for the restricted_set function.

    Uses the mox record/replay pattern: expectations are recorded on the
    stubbed SafeConfigParser methods, then ReplayAll() switches to replay
    mode before restricted_set is exercised.
    """
    def test_getting_defined_restricted_set(self):
        """ Make sure that properly formed commands are parsed into a list of
        command tuples. """
        mock_file = self.mox.CreateMockAnything()
        mock_file.closed = False
        mock_file.name = "foobar"
        self.mox.StubOutWithMock(SafeConfigParser, "has_section")
        self.mox.StubOutWithMock(SafeConfigParser, "items")
        config = ConfigurationParser()
        config.parse(mock_file)
        config.has_section("commands").AndReturn(True)
        # Config values use the "command:arguments:description" format;
        # trailing fields may be omitted.
        config.items("commands").AndReturn([
            ("foo", "ls::List files"),
            ("bar", "df:-h:Disk space usage (human readable)"),
            ("baz", "du:-sh .:"),
            ("foz", "pwd")
        ])
        self.mox.ReplayAll()
        # Expected tuples: (command, argument list or None, description).
        self.assertEquals(restricted_set(), [
            ("ls", None, "List files"),
            ("df", ["-h"], "Disk space usage (human readable)"),
            ("du", ["-sh ."], ""),
            ("pwd", None, "")
        ])
    def test_restricted_set_missing_section(self):
        """ If there is no commands section in the configuration file, an empty
        list should be returned. """
        mock_file = self.mox.CreateMockAnything()
        mock_file.closed = False
        mock_file.name = "foobar"
        self.mox.StubOutWithMock(SafeConfigParser, "has_section")
        self.mox.StubOutWithMock(SafeConfigParser, "items")
        config = ConfigurationParser()
        config.parse(mock_file)
        config.has_section("commands").AndReturn(False)
        self.mox.ReplayAll()
        self.assertEquals(restricted_set(), [])
    def test_restricted_set_undefined_set(self):
        """ If there is a command section defined, but no commands in it, an
        empty list should be returned. """
        mock_file = self.mox.CreateMockAnything()
        mock_file.closed = False
        mock_file.name = "foobar"
        self.mox.StubOutWithMock(SafeConfigParser, "has_section")
        self.mox.StubOutWithMock(SafeConfigParser, "items")
        config = ConfigurationParser()
        config.parse(mock_file)
        config.has_section("commands").AndReturn(True)
        config.items("commands").AndReturn([])
        self.mox.ReplayAll()
        self.assertEquals(restricted_set(), [])
    def test_getting_malformed_restricted_set(self):
        """ If there is a malformed command defined in the commands section, a
        MalformedCommand should be raised. """
        mock_file = self.mox.CreateMockAnything()
        mock_file.closed = False
        mock_file.name = "foobar"
        self.mox.StubOutWithMock(SafeConfigParser, "has_section")
        self.mox.StubOutWithMock(SafeConfigParser, "items")
        config = ConfigurationParser()
        config.parse(mock_file)
        config.has_section("commands").AndReturn(True)
        # An empty value (no command at all) is malformed.
        config.items("commands").AndReturn([("foo", "")])
        self.mox.ReplayAll()
        self.assertRaises(MalformedCommand, restricted_set)
if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
YYWen0o0/python-frame-django | django/forms/fields.py | 1 | 47569 | """
Field classes.
"""
from __future__ import unicode_literals
import copy
import datetime
import os
import re
import sys
import warnings
from decimal import Decimal, DecimalException
from io import BytesIO
from django.core import validators
from django.core.exceptions import ValidationError
from django.forms.utils import from_current_timezone, to_current_timezone
from django.forms.widgets import (
TextInput, NumberInput, EmailInput, URLInput, HiddenInput,
MultipleHiddenInput, ClearableFileInput, CheckboxInput, Select,
NullBooleanSelect, SelectMultiple, DateInput, DateTimeInput, TimeInput,
SplitDateTimeWidget, SplitHiddenDateTimeWidget, FILE_INPUT_CONTRADICTION
)
from django.utils import formats
from django.utils.encoding import smart_text, force_str, force_text
from django.utils.ipv6 import clean_ipv6_address
from django.utils.deprecation import RemovedInDjango19Warning, RemovedInDjango20Warning
from django.utils import six
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES # NOQA
__all__ = (
'Field', 'CharField', 'IntegerField',
'DateField', 'TimeField', 'DateTimeField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'IPAddressField', 'GenericIPAddressField', 'FilePathField',
'SlugField', 'TypedChoiceField', 'TypedMultipleChoiceField'
)
class Field(object):
    """Base class for all form fields.

    A field owns its widget, its validators, and the cleaning pipeline:
    ``clean()`` runs ``to_python()`` -> ``validate()`` -> ``run_validators()``.
    Subclasses typically override ``to_python`` (coercion) and add
    default validators / error messages.
    """
    widget = TextInput # Default widget to use when rendering this type of Field.
    hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
    default_validators = [] # Default set of validators
    # Add an 'invalid' entry to default_error_message if you want a specific
    # field error message not raised by the field validators.
    default_error_messages = {
        'required': _('This field is required.'),
    }
    empty_values = list(validators.EMPTY_VALUES)
    # Tracks each time a Field instance is created. Used to retain order.
    creation_counter = 0
    # NOTE: the mutable default ``validators=[]`` is safe here because it is
    # only read (concatenated below), never mutated.
    def __init__(self, required=True, widget=None, label=None, initial=None,
                 help_text='', error_messages=None, show_hidden_initial=False,
                 validators=[], localize=False, label_suffix=None):
        # required -- Boolean that specifies whether the field is required.
        #             True by default.
        # widget -- A Widget class, or instance of a Widget class, that should
        #           be used for this Field when displaying it. Each Field has a
        #           default Widget that it'll use if you don't specify this. In
        #           most cases, the default widget is TextInput.
        # label -- A verbose name for this field, for use in displaying this
        #          field in a form. By default, Django will use a "pretty"
        #          version of the form field name, if the Field is part of a
        #          Form.
        # initial -- A value to use in this Field's initial display. This value
        #            is *not* used as a fallback if data isn't given.
        # help_text -- An optional string to use as "help text" for this Field.
        # error_messages -- An optional dictionary to override the default
        #                   messages that the field will raise.
        # show_hidden_initial -- Boolean that specifies if it is needed to render a
        #                        hidden widget with initial value after widget.
        # validators -- List of additional validators to use
        # localize -- Boolean that specifies if the field should be localized.
        # label_suffix -- Suffix to be added to the label. Overrides
        #                 form's label_suffix.
        self.required, self.label, self.initial = required, label, initial
        self.show_hidden_initial = show_hidden_initial
        self.help_text = help_text
        self.label_suffix = label_suffix
        # Accept either a Widget class or a ready instance.
        widget = widget or self.widget
        if isinstance(widget, type):
            widget = widget()
        # Trigger the localization machinery if needed.
        self.localize = localize
        if self.localize:
            widget.is_localized = True
        # Let the widget know whether it should display as required.
        widget.is_required = self.required
        # Hook into self.widget_attrs() for any Field-specific HTML attributes.
        extra_attrs = self.widget_attrs(widget)
        if extra_attrs:
            widget.attrs.update(extra_attrs)
        self.widget = widget
        # Increase the creation counter, and save our local copy.
        self.creation_counter = Field.creation_counter
        Field.creation_counter += 1
        # Merge default_error_messages along the MRO (base classes first)
        # so subclasses and per-instance overrides win over base messages.
        messages = {}
        for c in reversed(self.__class__.__mro__):
            messages.update(getattr(c, 'default_error_messages', {}))
        messages.update(error_messages or {})
        self.error_messages = messages
        # Copy-concatenation: neither default_validators nor the caller's
        # list is mutated.
        self.validators = self.default_validators + validators
        super(Field, self).__init__()
    def prepare_value(self, value):
        """Hook for transforming the value before display; identity here."""
        return value
    def to_python(self, value):
        """Coerce the raw widget value to a Python object; identity here.
        Subclasses override this to implement type conversion."""
        return value
    def validate(self, value):
        """Raise ValidationError if the field is required but empty."""
        if value in self.empty_values and self.required:
            raise ValidationError(self.error_messages['required'], code='required')
    def run_validators(self, value):
        """Run every validator against the (already coerced) value and
        raise a single ValidationError aggregating all failures."""
        if value in self.empty_values:
            return
        errors = []
        for v in self.validators:
            try:
                v(value)
            except ValidationError as e:
                # Allow field-level error_messages to override a validator's
                # message when the error code matches.
                if hasattr(e, 'code') and e.code in self.error_messages:
                    e.message = self.error_messages[e.code]
                errors.extend(e.error_list)
        if errors:
            raise ValidationError(errors)
    def clean(self, value):
        """
        Validates the given value and returns its "cleaned" value as an
        appropriate Python object.
        Raises ValidationError for any errors.
        """
        value = self.to_python(value)
        self.validate(value)
        self.run_validators(value)
        return value
    def bound_data(self, data, initial):
        """
        Return the value that should be shown for this field on render of a
        bound form, given the submitted POST data for the field and the initial
        data, if any.
        For most fields, this will simply be data; FileFields need to handle it
        a bit differently.
        """
        return data
    def widget_attrs(self, widget):
        """
        Given a Widget instance (*not* a Widget class), returns a dictionary of
        any HTML attributes that should be added to the Widget, based on this
        Field.
        """
        return {}
    def get_limit_choices_to(self):
        """
        Returns ``limit_choices_to`` for this form field.
        If it is a callable, it will be invoked and the result will be
        returned.
        """
        # NOTE(review): ``limit_choices_to`` is not set by this class's
        # __init__ — presumably assigned by choice-field subclasses; confirm
        # before calling on a plain Field.
        if callable(self.limit_choices_to):
            return self.limit_choices_to()
        return self.limit_choices_to
    def _has_changed(self, initial, data):
        """
        Return True if data differs from initial.
        """
        # For purposes of seeing whether something has changed, None is
        # the same as an empty string, if the data or initial value we get
        # is None, replace it w/ ''.
        initial_value = initial if initial is not None else ''
        try:
            data = self.to_python(data)
            if hasattr(self, '_coerce'):
                data = self._coerce(data)
        except ValidationError:
            # Uncoercible data is by definition different from the initial.
            return True
        data_value = data if data is not None else ''
        return initial_value != data_value
    def __deepcopy__(self, memo):
        """Shallow-copy the field but deep-copy the widget (so copies do not
        share widget attrs) and clone the validators list."""
        result = copy.copy(self)
        memo[id(self)] = result
        result.widget = copy.deepcopy(self.widget, memo)
        result.validators = self.validators[:]
        return result
class CharField(Field):
    """A text field with optional minimum/maximum length enforcement."""

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        self.max_length = max_length
        self.min_length = min_length
        super(CharField, self).__init__(*args, **kwargs)
        if min_length is not None:
            length_check = validators.MinLengthValidator(int(min_length))
            self.validators.append(length_check)
        if max_length is not None:
            length_check = validators.MaxLengthValidator(int(max_length))
            self.validators.append(length_check)
    def to_python(self, value):
        """Return the value coerced to a Unicode string ('' when empty)."""
        if value in self.empty_values:
            return ''
        return smart_text(value)
    def widget_attrs(self, widget):
        attrs = super(CharField, self).widget_attrs(widget)
        if self.max_length is not None:
            # The HTML attribute is spelled maxlength, not max_length.
            attrs['maxlength'] = str(self.max_length)
        return attrs
class IntegerField(Field):
    """A field that cleans its value to an int, with optional bounds."""

    widget = NumberInput
    default_error_messages = {
        'invalid': _('Enter a whole number.'),
    }
    def __init__(self, max_value=None, min_value=None, *args, **kwargs):
        self.max_value = max_value
        self.min_value = min_value
        if kwargs.get('localize') and self.widget == NumberInput:
            # Localized number input is not well supported on most browsers,
            # so fall back to the base Field widget (TextInput).
            kwargs.setdefault('widget', super(IntegerField, self).widget)
        super(IntegerField, self).__init__(*args, **kwargs)
        if max_value is not None:
            self.validators.append(validators.MaxValueValidator(max_value))
        if min_value is not None:
            self.validators.append(validators.MinValueValidator(min_value))
    def to_python(self, value):
        """
        Validates that int() can be called on the input. Returns the result
        of int(). Returns None for empty values.
        """
        value = super(IntegerField, self).to_python(value)
        if value in self.empty_values:
            return None
        if self.localize:
            value = formats.sanitize_separators(value)
        try:
            # Going through str() on purpose: it rejects non-integral
            # floats such as 1.5 instead of silently truncating them.
            return int(str(value))
        except (ValueError, TypeError):
            raise ValidationError(self.error_messages['invalid'], code='invalid')
    def widget_attrs(self, widget):
        attrs = super(IntegerField, self).widget_attrs(widget)
        if isinstance(widget, NumberInput):
            # Mirror the configured bounds onto the HTML number input.
            for bound, attr_name in ((self.min_value, 'min'),
                                     (self.max_value, 'max')):
                if bound is not None:
                    attrs[attr_name] = bound
        return attrs
class FloatField(IntegerField):
    """A field that cleans its value to a float, rejecting NaN/infinity."""

    default_error_messages = {
        'invalid': _('Enter a number.'),
    }
    def to_python(self, value):
        """
        Validates that float() can be called on the input. Returns the result
        of float(). Returns None for empty values.
        """
        # Deliberately skips IntegerField.to_python (which would force an
        # int) by calling super(IntegerField, ...), i.e. Field.to_python.
        value = super(IntegerField, self).to_python(value)
        if value in self.empty_values:
            return None
        if self.localize:
            value = formats.sanitize_separators(value)
        try:
            value = float(value)
        except (ValueError, TypeError):
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        return value
    def validate(self, value):
        """Reject NaN and +/- infinity after the base required-check."""
        super(FloatField, self).validate(value)
        # Check for NaN (which is the only thing not equal to itself) and +/- infinity
        if value != value or value in (Decimal('Inf'), Decimal('-Inf')):
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        return value
    def widget_attrs(self, widget):
        attrs = super(FloatField, self).widget_attrs(widget)
        if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
            # Let the browser accept any decimal step unless configured.
            attrs.setdefault('step', 'any')
        return attrs
class DecimalField(IntegerField):
    """Form field whose cleaned value is a ``decimal.Decimal`` (or ``None``)."""
    default_error_messages = {
        'invalid': _('Enter a number.'),
        'max_digits': ungettext_lazy(
            'Ensure that there are no more than %(max)s digit in total.',
            'Ensure that there are no more than %(max)s digits in total.',
            'max'),
        'max_decimal_places': ungettext_lazy(
            'Ensure that there are no more than %(max)s decimal place.',
            'Ensure that there are no more than %(max)s decimal places.',
            'max'),
        'max_whole_digits': ungettext_lazy(
            'Ensure that there are no more than %(max)s digit before the decimal point.',
            'Ensure that there are no more than %(max)s digits before the decimal point.',
            'max'),
    }
    def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
        # max_value/min_value are handled by IntegerField.__init__ (it appends
        # the corresponding validators); the digit limits are checked here in
        # validate().
        self.max_digits, self.decimal_places = max_digits, decimal_places
        super(DecimalField, self).__init__(max_value, min_value, *args, **kwargs)
    def to_python(self, value):
        """
        Validates that the input is a decimal number. Returns a Decimal
        instance. Returns None for empty values. Ensures that there are no more
        than max_digits in the number, and no more than decimal_places digits
        after the decimal point.
        """
        if value in self.empty_values:
            return None
        if self.localize:
            value = formats.sanitize_separators(value)
        value = smart_text(value).strip()
        try:
            value = Decimal(value)
        except DecimalException:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        return value
    def validate(self, value):
        """Reject NaN/Inf and enforce max_digits / decimal_places limits."""
        super(DecimalField, self).validate(value)
        if value in self.empty_values:
            return
        # Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
        # since it is never equal to itself. However, NaN is the only value that
        # isn't equal to itself, so we can use this to identify NaN
        if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        # Decimal.as_tuple() gives (sign, digits-tuple, exponent); a negative
        # exponent is the number of digits after the decimal point.
        sign, digittuple, exponent = value.as_tuple()
        decimals = abs(exponent)
        # digittuple doesn't include any leading zeros.
        digits = len(digittuple)
        if decimals > digits:
            # We have leading zeros up to or past the decimal point. Count
            # everything past the decimal point as a digit. We do not count
            # 0 before the decimal point as a digit since that would mean
            # we would not allow max_digits = decimal_places.
            digits = decimals
        whole_digits = digits - decimals
        if self.max_digits is not None and digits > self.max_digits:
            raise ValidationError(
                self.error_messages['max_digits'],
                code='max_digits',
                params={'max': self.max_digits},
            )
        if self.decimal_places is not None and decimals > self.decimal_places:
            raise ValidationError(
                self.error_messages['max_decimal_places'],
                code='max_decimal_places',
                params={'max': self.decimal_places},
            )
        if (self.max_digits is not None and self.decimal_places is not None
                and whole_digits > (self.max_digits - self.decimal_places)):
            raise ValidationError(
                self.error_messages['max_whole_digits'],
                code='max_whole_digits',
                params={'max': (self.max_digits - self.decimal_places)},
            )
        return value
    def widget_attrs(self, widget):
        """Derive the HTML5 ``step`` attribute from ``decimal_places``."""
        attrs = super(DecimalField, self).widget_attrs(widget)
        if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
            if self.decimal_places is not None:
                # Use exponential notation for small values since they might
                # be parsed as 0 otherwise. ref #20765
                step = str(Decimal('1') / 10 ** self.decimal_places).lower()
            else:
                step = 'any'
            attrs.setdefault('step', step)
        return attrs
class BaseTemporalField(Field):
    """Shared machinery for Date/Time/DateTime fields: try each configured
    input format in order until one parses."""
    def __init__(self, input_formats=None, *args, **kwargs):
        super(BaseTemporalField, self).__init__(*args, **kwargs)
        if input_formats is not None:
            self.input_formats = input_formats
    def to_python(self, value):
        """Parse a text value against each input format; raise 'invalid' if
        none of them match.  Non-text values fall through (returning None)."""
        # strings_only leaves non-string objects untouched.
        coerced = force_text(value, strings_only=True)
        if isinstance(coerced, six.text_type):
            value = coerced.strip()
        if isinstance(value, six.text_type):
            for fmt in self.input_formats:
                try:
                    return self.strptime(value, fmt)
                except (ValueError, TypeError):
                    pass
            raise ValidationError(self.error_messages['invalid'], code='invalid')
    def strptime(self, value, format):
        """Subclass hook: parse *value* with the given *format*."""
        raise NotImplementedError('Subclasses must define this method.')
class DateField(BaseTemporalField):
    """Form field whose cleaned value is a ``datetime.date`` (or ``None``)."""
    widget = DateInput
    input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS')
    default_error_messages = {
        'invalid': _('Enter a valid date.'),
    }
    def to_python(self, value):
        """
        Validates that the input can be converted to a date. Returns a Python
        datetime.date object.
        """
        if value in self.empty_values:
            return None
        # datetime is a subclass of date, so this check must come first:
        # a datetime instance is truncated to its date part.
        if isinstance(value, datetime.datetime):
            return value.date()
        if isinstance(value, datetime.date):
            return value
        # Fall back to parsing text against self.input_formats.
        return super(DateField, self).to_python(value)
    def strptime(self, value, format):
        # Parse as a full datetime, then keep only the date part.
        return datetime.datetime.strptime(force_str(value), format).date()
class TimeField(BaseTemporalField):
    """Form field whose cleaned value is a ``datetime.time`` (or ``None``)."""
    widget = TimeInput
    input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS')
    default_error_messages = {
        'invalid': _('Enter a valid time.')
    }
    def to_python(self, value):
        """
        Validates that the input can be converted to a time. Returns a Python
        datetime.time object.
        """
        if value in self.empty_values:
            return None
        if isinstance(value, datetime.time):
            return value
        # Fall back to parsing text against self.input_formats.
        return super(TimeField, self).to_python(value)
    def strptime(self, value, format):
        # Parse as a full datetime, then keep only the time part.
        return datetime.datetime.strptime(force_str(value), format).time()
class DateTimeField(BaseTemporalField):
    """Form field whose cleaned value is an aware/naive ``datetime.datetime``
    converted from the current timezone (or ``None``)."""
    widget = DateTimeInput
    input_formats = formats.get_format_lazy('DATETIME_INPUT_FORMATS')
    default_error_messages = {
        'invalid': _('Enter a valid date/time.'),
    }
    def prepare_value(self, value):
        # Display datetimes in the current timezone, not UTC.
        if isinstance(value, datetime.datetime):
            value = to_current_timezone(value)
        return value
    def to_python(self, value):
        """
        Validates that the input can be converted to a datetime. Returns a
        Python datetime.datetime object.
        """
        if value in self.empty_values:
            return None
        # datetime is a subclass of date, so this check must come first.
        if isinstance(value, datetime.datetime):
            return from_current_timezone(value)
        if isinstance(value, datetime.date):
            # A bare date gets midnight as its time component.
            result = datetime.datetime(value.year, value.month, value.day)
            return from_current_timezone(result)
        if isinstance(value, list):
            # Input comes from a SplitDateTimeWidget, for example. So, it's two
            # components: date and time.
            warnings.warn(
                'Using SplitDateTimeWidget with DateTimeField is deprecated. '
                'Use SplitDateTimeField instead.',
                RemovedInDjango19Warning, stacklevel=2)
            if len(value) != 2:
                raise ValidationError(self.error_messages['invalid'], code='invalid')
            if value[0] in self.empty_values and value[1] in self.empty_values:
                return None
            # Join date and time parts and parse as a single string below.
            value = '%s %s' % tuple(value)
        result = super(DateTimeField, self).to_python(value)
        return from_current_timezone(result)
    def strptime(self, value, format):
        return datetime.datetime.strptime(force_str(value), format)
class RegexField(CharField):
    """CharField whose input must match a regular expression."""
    def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
        """
        regex can be either a string or a compiled regular expression object.
        error_message is an optional error message to use, if
        'Enter a valid value' is too generic for you.
        """
        # error_message is just kept for backwards compatibility:
        if error_message is not None:
            warnings.warn(
                "The 'error_message' argument is deprecated. Use "
                "Field.error_messages['invalid'] instead.",
                RemovedInDjango20Warning, stacklevel=2
            )
            # Fold the legacy argument into the modern error_messages dict.
            error_messages = kwargs.get('error_messages') or {}
            error_messages['invalid'] = error_message
            kwargs['error_messages'] = error_messages
        super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
        self._set_regex(regex)
    def _get_regex(self):
        return self._regex
    def _set_regex(self, regex):
        # Accept either a pattern string or a pre-compiled regex object.
        if isinstance(regex, six.string_types):
            regex = re.compile(regex, re.UNICODE)
        self._regex = regex
        # Re-assigning the regex must replace the old RegexValidator, not
        # stack a second one.
        if hasattr(self, '_regex_validator') and self._regex_validator in self.validators:
            self.validators.remove(self._regex_validator)
        self._regex_validator = validators.RegexValidator(regex=regex)
        self.validators.append(self._regex_validator)
    regex = property(_get_regex, _set_regex)
class EmailField(CharField):
    """CharField that validates its input as an e-mail address."""
    widget = EmailInput
    default_validators = [validators.validate_email]
    def clean(self, value):
        # Trim surrounding whitespace before the base class validates.
        stripped = self.to_python(value).strip()
        return super(EmailField, self).clean(stripped)
class FileField(Field):
    """Form field for uploaded files, rendered with a ClearableFileInput."""
    widget = ClearableFileInput
    default_error_messages = {
        'invalid': _("No file was submitted. Check the encoding type on the form."),
        'missing': _("No file was submitted."),
        'empty': _("The submitted file is empty."),
        'max_length': ungettext_lazy(
            'Ensure this filename has at most %(max)d character (it has %(length)d).',
            'Ensure this filename has at most %(max)d characters (it has %(length)d).',
            'max'),
        'contradiction': _('Please either submit a file or check the clear checkbox, not both.')
    }
    def __init__(self, *args, **kwargs):
        # max_length limits the *filename* length, not the file size.
        self.max_length = kwargs.pop('max_length', None)
        self.allow_empty_file = kwargs.pop('allow_empty_file', False)
        super(FileField, self).__init__(*args, **kwargs)
    def to_python(self, data):
        """Validate the uploaded file object and return it unchanged."""
        if data in self.empty_values:
            return None
        # UploadedFile objects should have name and size attributes.
        try:
            file_name = data.name
            file_size = data.size
        except AttributeError:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        if self.max_length is not None and len(file_name) > self.max_length:
            params = {'max': self.max_length, 'length': len(file_name)}
            raise ValidationError(self.error_messages['max_length'], code='max_length', params=params)
        if not file_name:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        if not self.allow_empty_file and not file_size:
            raise ValidationError(self.error_messages['empty'], code='empty')
        return data
    def clean(self, data, initial=None):
        """Clean the upload, honouring the widget's clear-checkbox protocol:
        FILE_INPUT_CONTRADICTION means both a file and "clear" were submitted;
        False means the existing file should be cleared."""
        # If the widget got contradictory inputs, we raise a validation error
        if data is FILE_INPUT_CONTRADICTION:
            raise ValidationError(self.error_messages['contradiction'], code='contradiction')
        # False means the field value should be cleared; further validation is
        # not needed.
        if data is False:
            if not self.required:
                return False
            # If the field is required, clearing is not possible (the widget
            # shouldn't return False data in that case anyway). False is not
            # in self.empty_value; if a False value makes it this far
            # it should be validated from here on out as None (so it will be
            # caught by the required check).
            data = None
        # No new upload: keep the initial (previously stored) file.
        if not data and initial:
            return initial
        return super(FileField, self).clean(data)
    def bound_data(self, data, initial):
        # For redisplay: a missing/contradictory upload shows the old value.
        if data in (None, FILE_INPUT_CONTRADICTION):
            return initial
        return data
    def _has_changed(self, initial, data):
        # Any new upload counts as a change; no upload means unchanged.
        if data is None:
            return False
        return True
class ImageField(FileField):
    """FileField that additionally verifies the upload is a valid image
    (requires Pillow)."""
    default_error_messages = {
        'invalid_image': _("Upload a valid image. The file you uploaded was either not an image or a corrupted image."),
    }
    def to_python(self, data):
        """
        Checks that the file-upload field data contains a valid image (GIF, JPG,
        PNG, possibly others -- whatever the Python Imaging Library supports).
        """
        f = super(ImageField, self).to_python(data)
        if f is None:
            return None
        from PIL import Image
        # We need to get a file object for Pillow. We might have a path or we might
        # have to read the data into memory.
        if hasattr(data, 'temporary_file_path'):
            file = data.temporary_file_path()
        else:
            if hasattr(data, 'read'):
                file = BytesIO(data.read())
            else:
                file = BytesIO(data['content'])
        try:
            # load() could spot a truncated JPEG, but it loads the entire
            # image in memory, which is a DoS vector. See #3848 and #18520.
            image = Image.open(file)
            # verify() must be called immediately after the constructor.
            image.verify()
            # Annotating so subclasses can reuse it for their own validation
            f.image = image
            f.content_type = Image.MIME[image.format]
        except Exception:
            # Pillow doesn't recognize it as an image.
            # six.reraise preserves the original traceback behind the
            # ValidationError so the real cause is debuggable.
            six.reraise(ValidationError, ValidationError(
                self.error_messages['invalid_image'],
                code='invalid_image',
            ), sys.exc_info()[2])
        # Rewind so later consumers (e.g. storage backends) can re-read it.
        if hasattr(f, 'seek') and callable(f.seek):
            f.seek(0)
        return f
class URLField(CharField):
    """CharField that validates and normalizes a URL (adds a default scheme,
    promotes a bare path to the network location)."""
    widget = URLInput
    default_error_messages = {
        'invalid': _('Enter a valid URL.'),
    }
    default_validators = [validators.URLValidator()]
    def to_python(self, value):
        def split_url(url):
            """
            Returns a list of url parts via ``urlparse.urlsplit`` (or raises a
            ``ValidationError`` exception for certain).
            """
            try:
                return list(urlsplit(url))
            except ValueError:
                # urlparse.urlsplit can raise a ValueError with some
                # misformatted URLs.
                raise ValidationError(self.error_messages['invalid'], code='invalid')
        value = super(URLField, self).to_python(value)
        if value:
            # url_fields is [scheme, netloc, path, query, fragment].
            url_fields = split_url(value)
            if not url_fields[0]:
                # If no URL scheme given, assume http://
                url_fields[0] = 'http'
            if not url_fields[1]:
                # Assume that if no domain is provided, that the path segment
                # contains the domain.
                url_fields[1] = url_fields[2]
                url_fields[2] = ''
                # Rebuild the url_fields list, since the domain segment may now
                # contain the path too.
                url_fields = split_url(urlunsplit(url_fields))
            value = urlunsplit(url_fields)
        return value
    def clean(self, value):
        # Trim surrounding whitespace before the base class validates.
        value = self.to_python(value).strip()
        return super(URLField, self).clean(value)
class BooleanField(Field):
    """Form field cleaning its input to ``True``/``False`` via a checkbox."""
    widget = CheckboxInput
    def to_python(self, value):
        """Return a Python bool; 'false'/'0' strings (any case) become False."""
        # Hidden inputs submit the string 'False' and RadioSelect submits '0';
        # bool() would treat both as truthy, so they are special-cased.
        # bool("True") == bool('1') == True already, so no True special case.
        is_false_token = (isinstance(value, six.string_types)
                          and value.lower() in ('false', '0'))
        coerced = False if is_false_token else bool(value)
        return super(BooleanField, self).to_python(coerced)
    def validate(self, value):
        """A required BooleanField must be True (the box must be checked)."""
        if self.required and not value:
            raise ValidationError(self.error_messages['required'], code='required')
    def _has_changed(self, initial, data):
        # show_hidden_initial may have serialized False to the string 'False'.
        if initial == 'False':
            initial = False
        # None/'' and False all normalize to falsy on both sides.
        return bool(initial) != bool(data)
class NullBooleanField(BooleanField):
    """
    A field whose valid values are None, True and False. Invalid values are
    cleaned to None.
    """
    widget = NullBooleanSelect
    def to_python(self, value):
        """Map the widget ('True'/'False'), JavaScript ('true'/'false') and
        RadioField ('1'/'0') serializations explicitly; unlike BooleanField
        we cannot use bool(), because anything unrecognized must become None."""
        if value in (True, 'True', 'true', '1'):
            return True
        if value in (False, 'False', 'false', '0'):
            return False
        return None
    def validate(self, value):
        # None is a valid value here, so the 'required' check never applies.
        pass
    def _has_changed(self, initial, data):
        """None (unknown) and False (No) are distinct answers; normalize known
        values to bool before comparing."""
        if initial is not None:
            initial = bool(initial)
        if data is not None:
            data = bool(data)
        return initial != data
class ChoiceField(Field):
    """Form field whose value must be one of a fixed set of choices."""
    widget = Select
    default_error_messages = {
        'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
    }
    def __init__(self, choices=(), required=True, widget=None, label=None,
                 initial=None, help_text='', *args, **kwargs):
        super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
                                          initial=initial, help_text=help_text, *args, **kwargs)
        # Assigned via the property below, which also updates the widget.
        self.choices = choices
    def __deepcopy__(self, memo):
        # Each copied field needs its own choices list, otherwise forms
        # sharing a field definition would mutate each other's choices.
        result = super(ChoiceField, self).__deepcopy__(memo)
        result._choices = copy.deepcopy(self._choices, memo)
        return result
    def _get_choices(self):
        return self._choices
    def _set_choices(self, value):
        # Setting choices also sets the choices on the widget.
        # choices can be any iterable, but we call list() on it because
        # it will be consumed more than once.
        self._choices = self.widget.choices = list(value)
    choices = property(_get_choices, _set_choices)
    def to_python(self, value):
        "Returns a Unicode object."
        if value in self.empty_values:
            return ''
        return smart_text(value)
    def validate(self, value):
        """
        Validates that the input is in self.choices.
        """
        super(ChoiceField, self).validate(value)
        if value and not self.valid_value(value):
            raise ValidationError(
                self.error_messages['invalid_choice'],
                code='invalid_choice',
                params={'value': value},
            )
    def valid_value(self, value):
        "Check to see if the provided value is a valid choice"
        # Compare both raw and text forms, since submitted data is text but
        # choice keys may be e.g. integers.
        text_value = force_text(value)
        for k, v in self.choices:
            if isinstance(v, (list, tuple)):
                # This is an optgroup, so look inside the group for options
                for k2, v2 in v:
                    if value == k2 or text_value == force_text(k2):
                        return True
            else:
                if value == k or text_value == force_text(k):
                    return True
        return False
class TypedChoiceField(ChoiceField):
    """ChoiceField that coerces the selected value with a callable."""
    def __init__(self, *args, **kwargs):
        # coerce defaults to identity; empty_value is what empties clean to.
        self.coerce = kwargs.pop('coerce', lambda val: val)
        self.empty_value = kwargs.pop('empty_value', '')
        super(TypedChoiceField, self).__init__(*args, **kwargs)
    def _coerce(self, value):
        """Coerce *value* with self.coerce, mapping empties to empty_value
        and coercion failures to an 'invalid_choice' error."""
        if value == self.empty_value or value in self.empty_values:
            return self.empty_value
        try:
            return self.coerce(value)
        except (ValueError, TypeError, ValidationError):
            raise ValidationError(
                self.error_messages['invalid_choice'],
                code='invalid_choice',
                params={'value': value},
            )
    def clean(self, value):
        return self._coerce(super(TypedChoiceField, self).clean(value))
class MultipleChoiceField(ChoiceField):
    """ChoiceField accepting a list/tuple of selected values."""
    hidden_widget = MultipleHiddenInput
    widget = SelectMultiple
    default_error_messages = {
        'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
        'invalid_list': _('Enter a list of values.'),
    }
    def to_python(self, value):
        """Return the submitted values as a list of text strings."""
        if not value:
            return []
        if not isinstance(value, (list, tuple)):
            raise ValidationError(self.error_messages['invalid_list'], code='invalid_list')
        return [smart_text(item) for item in value]
    def validate(self, value):
        """Check requiredness, then that every value is one of the choices."""
        if self.required and not value:
            raise ValidationError(self.error_messages['required'], code='required')
        for item in value:
            if not self.valid_value(item):
                raise ValidationError(
                    self.error_messages['invalid_choice'],
                    code='invalid_choice',
                    params={'value': item},
                )
    def _has_changed(self, initial, data):
        """Compare as sets of text values; ordering differences don't count."""
        if initial is None:
            initial = []
        if data is None:
            data = []
        if len(initial) != len(data):
            return True
        before = set(force_text(item) for item in initial)
        after = set(force_text(item) for item in data)
        return after != before
class TypedMultipleChoiceField(MultipleChoiceField):
    """MultipleChoiceField that coerces every selected value with a callable."""
    def __init__(self, *args, **kwargs):
        self.coerce = kwargs.pop('coerce', lambda val: val)
        self.empty_value = kwargs.pop('empty_value', [])
        super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)
    def _coerce(self, value):
        """Coerce each chosen value; raise 'invalid_choice' for the first one
        that fails."""
        if value == self.empty_value or value in self.empty_values:
            return self.empty_value
        coerced = []
        for choice in value:
            try:
                coerced.append(self.coerce(choice))
            except (ValueError, TypeError, ValidationError):
                raise ValidationError(
                    self.error_messages['invalid_choice'],
                    code='invalid_choice',
                    params={'value': choice},
                )
        return coerced
    def clean(self, value):
        return self._coerce(super(TypedMultipleChoiceField, self).clean(value))
    def validate(self, value):
        """Run the base validation only for non-empty values; empties are
        only an error when the field is required."""
        if value != self.empty_value:
            super(TypedMultipleChoiceField, self).validate(value)
        elif self.required:
            raise ValidationError(self.error_messages['required'], code='required')
class ComboField(Field):
    """
    A Field whose clean() method calls multiple Field clean() methods.
    """
    def __init__(self, fields=(), *args, **kwargs):
        super(ComboField, self).__init__(*args, **kwargs)
        # ComboField itself performs the required check, so silence it on
        # each of the subfields.
        for subfield in fields:
            subfield.required = False
        self.fields = fields
    def clean(self, value):
        """Run the value through our own clean(), then pipe it through each
        subfield's clean() in order."""
        super(ComboField, self).clean(value)
        for subfield in self.fields:
            value = subfield.clean(value)
        return value
class MultiValueField(Field):
    """
    A Field that aggregates the logic of multiple Fields.
    Its clean() method takes a "decompressed" list of values, which are then
    cleaned into a single value according to self.fields. Each value in
    this list is cleaned by the corresponding field -- the first value is
    cleaned by the first field, the second value is cleaned by the second
    field, etc. Once all fields are cleaned, the list of clean values is
    "compressed" into a single value.
    Subclasses should not have to implement clean(). Instead, they must
    implement compress(), which takes a list of valid values and returns a
    "compressed" version of those values -- a single value.
    You'll probably want to use this with MultiWidget.
    """
    default_error_messages = {
        'invalid': _('Enter a list of values.'),
        'incomplete': _('Enter a complete value.'),
    }
    def __init__(self, fields=(), *args, **kwargs):
        # require_all_fields=True: any empty subfield fails the whole field.
        # require_all_fields=False: each subfield's own .required applies.
        self.require_all_fields = kwargs.pop('require_all_fields', True)
        super(MultiValueField, self).__init__(*args, **kwargs)
        for f in fields:
            # Give each subfield an 'incomplete' message to use when it is
            # individually required but empty.
            f.error_messages.setdefault('incomplete',
                                        self.error_messages['incomplete'])
            if self.require_all_fields:
                # Set 'required' to False on the individual fields, because the
                # required validation will be handled by MultiValueField, not
                # by those individual fields.
                f.required = False
        self.fields = fields
    def __deepcopy__(self, memo):
        # Subfields must be copied too, or copies would share mutable state.
        result = super(MultiValueField, self).__deepcopy__(memo)
        result.fields = tuple([x.__deepcopy__(memo) for x in self.fields])
        return result
    def validate(self, value):
        # Validation is performed per-subfield inside clean(); the compressed
        # value needs no further checks here.
        pass
    def clean(self, value):
        """
        Validates every value in the given list. A value is validated against
        the corresponding Field in self.fields.
        For example, if this MultiValueField was instantiated with
        fields=(DateField(), TimeField()), clean() would call
        DateField.clean(value[0]) and TimeField.clean(value[1]).
        """
        clean_data = []
        errors = []
        if not value or isinstance(value, (list, tuple)):
            # Treat a list of all-empty values the same as no value at all.
            if not value or not [v for v in value if v not in self.empty_values]:
                if self.required:
                    raise ValidationError(self.error_messages['required'], code='required')
                else:
                    return self.compress([])
        else:
            raise ValidationError(self.error_messages['invalid'], code='invalid')
        for i, field in enumerate(self.fields):
            try:
                field_value = value[i]
            except IndexError:
                # Shorter-than-expected input: missing positions count as empty.
                field_value = None
            if field_value in self.empty_values:
                if self.require_all_fields:
                    # Raise a 'required' error if the MultiValueField is
                    # required and any field is empty.
                    if self.required:
                        raise ValidationError(self.error_messages['required'], code='required')
                elif field.required:
                    # Otherwise, add an 'incomplete' error to the list of
                    # collected errors and skip field cleaning, if a required
                    # field is empty.
                    if field.error_messages['incomplete'] not in errors:
                        errors.append(field.error_messages['incomplete'])
                    continue
            try:
                clean_data.append(field.clean(field_value))
            except ValidationError as e:
                # Collect all validation errors in a single list, which we'll
                # raise at the end of clean(), rather than raising a single
                # exception for the first error we encounter. Skip duplicates.
                errors.extend(m for m in e.error_list if m not in errors)
        if errors:
            raise ValidationError(errors)
        # All subvalues are clean: compress to a single value and run the
        # field-level validators on the result.
        out = self.compress(clean_data)
        self.validate(out)
        self.run_validators(out)
        return out
    def compress(self, data_list):
        """
        Returns a single value for the given list of values. The values can be
        assumed to be valid.
        For example, if this MultiValueField was instantiated with
        fields=(DateField(), TimeField()), this might return a datetime
        object created by combining the date and time in data_list.
        """
        raise NotImplementedError('Subclasses must implement this method.')
    def _has_changed(self, initial, data):
        # A compressed initial value must be decompressed before the per-field
        # comparison below.
        if initial is None:
            initial = ['' for x in range(0, len(data))]
        else:
            if not isinstance(initial, list):
                initial = self.widget.decompress(initial)
        for field, initial, data in zip(self.fields, initial, data):
            if field._has_changed(field.to_python(initial), data):
                return True
        return False
class FilePathField(ChoiceField):
    """ChoiceField whose choices are files and/or directories found under
    ``path``, optionally filtered by the ``match`` regex and recursing when
    ``recursive`` is True."""
    def __init__(self, path, match=None, recursive=False, allow_files=True,
                 allow_folders=False, required=True, widget=None, label=None,
                 initial=None, help_text='', *args, **kwargs):
        self.path, self.match, self.recursive = path, match, recursive
        self.allow_files, self.allow_folders = allow_files, allow_folders
        super(FilePathField, self).__init__(choices=(), required=required,
                                            widget=widget, label=label, initial=initial, help_text=help_text,
                                            *args, **kwargs)
        if self.required:
            self.choices = []
        else:
            # Optional fields get an explicit empty choice.
            self.choices = [("", "---------")]
        if self.match is not None:
            self.match_re = re.compile(self.match)
        if recursive:
            # sorted() keeps the walk order deterministic across filesystems.
            for root, dirs, files in sorted(os.walk(self.path)):
                if self.allow_files:
                    for f in files:
                        if self.match is None or self.match_re.search(f):
                            f = os.path.join(root, f)
                            # Display the path relative to the base directory.
                            self.choices.append((f, f.replace(path, "", 1)))
                if self.allow_folders:
                    for f in dirs:
                        if f == '__pycache__':
                            continue
                        if self.match is None or self.match_re.search(f):
                            f = os.path.join(root, f)
                            self.choices.append((f, f.replace(path, "", 1)))
        else:
            try:
                for f in sorted(os.listdir(self.path)):
                    if f == '__pycache__':
                        continue
                    full_file = os.path.join(self.path, f)
                    if (((self.allow_files and os.path.isfile(full_file)) or
                            (self.allow_folders and os.path.isdir(full_file))) and
                            (self.match is None or self.match_re.search(f))):
                        self.choices.append((full_file, f))
            except OSError:
                # Unreadable/missing directory simply yields no extra choices.
                pass
        # The widget must see the final choices list (bypass the property,
        # which was already invoked with the initial value above).
        self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
    """MultiValueField combining a DateField and a TimeField into a single
    ``datetime.datetime`` value."""
    widget = SplitDateTimeWidget
    hidden_widget = SplitHiddenDateTimeWidget
    default_error_messages = {
        'invalid_date': _('Enter a valid date.'),
        'invalid_time': _('Enter a valid time.'),
    }
    def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
        # Route any caller-supplied 'invalid_date'/'invalid_time' messages
        # into the subfields' own 'invalid' messages.
        errors = self.default_error_messages.copy()
        if 'error_messages' in kwargs:
            errors.update(kwargs['error_messages'])
        localize = kwargs.get('localize', False)
        fields = (
            DateField(input_formats=input_date_formats,
                      error_messages={'invalid': errors['invalid_date']},
                      localize=localize),
            TimeField(input_formats=input_time_formats,
                      error_messages={'invalid': errors['invalid_time']},
                      localize=localize),
        )
        super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
    def compress(self, data_list):
        """Combine the cleaned [date, time] pair into one aware datetime."""
        if data_list:
            # Raise a validation error if time or date is empty
            # (possible if SplitDateTimeField has required=False).
            if data_list[0] in self.empty_values:
                raise ValidationError(self.error_messages['invalid_date'], code='invalid_date')
            if data_list[1] in self.empty_values:
                raise ValidationError(self.error_messages['invalid_time'], code='invalid_time')
            result = datetime.datetime.combine(*data_list)
            return from_current_timezone(result)
        return None
class IPAddressField(CharField):
    """Deprecated IPv4-only address field; use GenericIPAddressField instead."""
    default_validators = [validators.validate_ipv4_address]
    def __init__(self, *args, **kwargs):
        warnings.warn("IPAddressField has been deprecated. Use GenericIPAddressField instead.",
                      RemovedInDjango19Warning)
        super(IPAddressField, self).__init__(*args, **kwargs)
    def to_python(self, value):
        """Return the stripped address text, or '' for empty values."""
        return '' if value in self.empty_values else value.strip()
class GenericIPAddressField(CharField):
    """Address field accepting IPv4 and/or IPv6, per the ``protocol`` option;
    can unpack IPv4-mapped IPv6 addresses."""
    def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs):
        self.unpack_ipv4 = unpack_ipv4
        # ip_address_validators returns (validators, error message); we only
        # need the validator list here.
        self.default_validators = validators.ip_address_validators(protocol, unpack_ipv4)[0]
        super(GenericIPAddressField, self).__init__(*args, **kwargs)
    def to_python(self, value):
        """Strip the input; anything containing ':' is normalized as IPv6."""
        if value in self.empty_values:
            return ''
        stripped = value.strip()
        if stripped and ':' in stripped:
            return clean_ipv6_address(stripped, self.unpack_ipv4)
        return stripped
class SlugField(CharField):
    """CharField that validates its input as a slug (letters, numbers,
    underscores and hyphens)."""
    default_validators = [validators.validate_slug]
    def clean(self, value):
        # Trim surrounding whitespace before the base class validates.
        return super(SlugField, self).clean(self.to_python(value).strip())
| bsd-3-clause |
offbyone/Flexget | flexget/plugins/filter/exists_series.py | 6 | 5509 | from __future__ import unicode_literals, division, absolute_import
import logging
from path import Path
from flexget import plugin
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.utils.log import log_once
from flexget.utils.template import RenderError
from flexget.plugins.parsers import ParseWarning
from flexget.plugin import get_plugin_by_name
log = logging.getLogger('exists_series')
class FilterExistsSeries(object):
    """
    Intelligent series aware exists rejecting.
    Example::
        exists_series: /storage/series/
    """
    # Config is either one-or-more path strings, or a dict with 'path' and
    # an optional 'allow_different_qualities' ('better' | True | False).
    schema = {
        'anyOf': [
            one_or_more({'type': 'string', 'format': 'path'}),
            {
                'type': 'object',
                'properties': {
                    'path': one_or_more({'type': 'string', 'format': 'path'}),
                    'allow_different_qualities': {'enum': ['better', True, False], 'default': False}
                },
                'required': ['path'],
                'additionalProperties': False
            }
        ]
    }
    def prepare_config(self, config):
        """Normalize config into ``{'path': [list of paths], ...}`` form."""
        # if config is not a dict, assign value to 'path' key
        if not isinstance(config, dict):
            config = {'path': config}
        # if only a single path is passed turn it into a 1 element list
        if isinstance(config['path'], basestring):
            config['path'] = [config['path']]
        return config
    @plugin.priority(-1)
    def on_task_filter(self, task, config):
        """Reject accepted entries whose episode already exists on disk.

        Runs at priority -1 so every other filter has accepted/rejected first;
        only accepted entries are compared against the filesystem.
        """
        if not task.accepted:
            log.debug('Scanning not needed')
            return
        config = self.prepare_config(config)
        # Map series name -> list of accepted entries for that series, and
        # collect the set of rendered directories to scan.
        accepted_series = {}
        paths = set()
        for entry in task.accepted:
            if 'series_parser' in entry:
                if entry['series_parser'].valid:
                    accepted_series.setdefault(entry['series_parser'].name, []).append(entry)
                    # Paths may contain entry templates (e.g. {{series_name}}).
                    for folder in config['path']:
                        try:
                            paths.add(entry.render(folder))
                        except RenderError as e:
                            log.error('Error rendering path `%s`: %s', folder, e)
                else:
                    log.debug('entry %s series_parser invalid', entry['title'])
        if not accepted_series:
            log.warning('No accepted entries have series information. exists_series cannot filter them')
            return
        # scan through
        # For speed, only test accepted entries since our priority should be after everything is accepted.
        for series in accepted_series:
            # make new parser from parser in entry
            series_parser = accepted_series[series][0]['series_parser']
            for folder in paths:
                folder = Path(folder).expanduser()
                if not folder.isdir():
                    log.warning('Directory %s does not exist', folder)
                    continue
                for filename in folder.walk(errors='ignore'):
                    # run parser on filename data
                    try:
                        disk_parser = get_plugin_by_name('parsing').instance.parse_series(data=filename.name,
                                                                                         name=series_parser.name)
                    except ParseWarning as pw:
                        # A ParseWarning still carries a usable parse result.
                        disk_parser = pw.parsed
                        log_once(pw.value, logger=log)
                    if disk_parser.valid:
                        log.debug('name %s is same series as %s', filename.name, series)
                        log.debug('disk_parser.identifier = %s', disk_parser.identifier)
                        log.debug('disk_parser.quality = %s', disk_parser.quality)
                        log.debug('disk_parser.proper_count = %s', disk_parser.proper_count)
                        for entry in accepted_series[series]:
                            log.debug('series_parser.identifier = %s', entry['series_parser'].identifier)
                            # Must be the same episode/identifier to conflict.
                            if disk_parser.identifier != entry['series_parser'].identifier:
                                log.trace('wrong identifier')
                                continue
                            log.debug('series_parser.quality = %s', entry['series_parser'].quality)
                            if config.get('allow_different_qualities') == 'better':
                                # 'better': keep the entry only if it upgrades
                                # the quality already on disk.
                                if entry['series_parser'].quality > disk_parser.quality:
                                    log.trace('better quality')
                                    continue
                            elif config.get('allow_different_qualities'):
                                # True: any differing quality is allowed through.
                                if disk_parser.quality != entry['series_parser'].quality:
                                    log.trace('wrong quality')
                                    continue
                            log.debug('entry parser.proper_count = %s', entry['series_parser'].proper_count)
                            # Same identifier and quality: reject unless the new
                            # entry is a newer proper than the file on disk.
                            if disk_parser.proper_count >= entry['series_parser'].proper_count:
                                entry.reject('proper already exists')
                                continue
                            else:
                                log.trace('new one is better proper, allowing')
                                continue
@event('plugin.register')
def register_plugin():
    # Register this filter under the 'exists_series' task keyword; api_ver=2
    # selects the current FlexGet plugin API. The 'exists' group lets it share
    # configuration schema with the other exists_* plugins.
    plugin.register(FilterExistsSeries, 'exists_series', groups=['exists'], api_ver=2)
| mit |
raven47git/flask | tests/test_ext.py | 149 | 5861 | # -*- coding: utf-8 -*-
"""
tests.ext
~~~~~~~~~~~~~~~~~~~
Tests the extension import thing.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
import pytest
try:
from imp import reload as reload_module
except ImportError:
reload_module = reload
from flask._compat import PY2
@pytest.fixture(autouse=True)
def importhook_setup(monkeypatch, request):
    """Reset the flask.ext import-hook machinery around every test.

    Removes any pre-imported flask extensions (real or fake), reloads
    flask.ext, and asserts the ExtensionImporter meta_path hook is present
    exactly once after the reload.
    """
    # we clear this out for various reasons.  The most important one is
    # that a real flaskext could be in there which would disable our
    # fake package.  Secondly we want to make sure that the flaskext
    # import hook does not break on reloading.
    for entry, value in list(sys.modules.items()):
        if (
            entry.startswith('flask.ext.') or
            entry.startswith('flask_') or
            entry.startswith('flaskext.') or
            entry == 'flaskext'
        ) and value is not None:
            monkeypatch.delitem(sys.modules, entry)
    from flask import ext
    reload_module(ext)
    # reloading must not add more hooks
    import_hooks = 0
    for item in sys.meta_path:
        cls = type(item)
        if cls.__module__ == 'flask.exthook' and \
           cls.__name__ == 'ExtensionImporter':
            import_hooks += 1
    assert import_hooks == 1
    def teardown():
        # after a test, flask.ext must only contain flat attribute names —
        # a dotted key would mean the importer leaked a submodule entry
        from flask import ext
        for key in ext.__dict__:
            assert '.' not in key
    request.addfinalizer(teardown)
@pytest.fixture
def newext_simple(modules_tmpdir):
    """Create a single-module new-style extension named flask_newext_simple."""
    module_file = modules_tmpdir.join('flask_newext_simple.py')
    module_file.write('ext_id = "newext_simple"')
@pytest.fixture
def oldext_simple(modules_tmpdir):
    """Create a single-module old-style extension inside a flaskext package."""
    pkg_dir = modules_tmpdir.mkdir('flaskext')
    pkg_dir.join('__init__.py').write('\n')
    pkg_dir.join('oldext_simple.py').write('ext_id = "oldext_simple"')
@pytest.fixture
def newext_package(modules_tmpdir):
    """Create a package-style new extension with one importable submodule."""
    pkg_dir = modules_tmpdir.mkdir('flask_newext_package')
    pkg_dir.join('__init__.py').write('ext_id = "newext_package"')
    pkg_dir.join('submodule.py').write('def test_function():\n return 42\n')
@pytest.fixture
def oldext_package(modules_tmpdir):
    """Create a package-style old extension (flaskext.oldext_package) with a submodule."""
    flaskext_dir = modules_tmpdir.mkdir('flaskext')
    flaskext_dir.join('__init__.py').write('\n')
    ext_dir = flaskext_dir.mkdir('oldext_package')
    ext_dir.join('__init__.py').write('ext_id = "oldext_package"')
    ext_dir.join('submodule.py').write('def test_function():\n return 42')
@pytest.fixture
def flaskext_broken(modules_tmpdir):
    """Create an extension whose import fails on a missing dependency."""
    broken_dir = modules_tmpdir.mkdir('flask_broken')
    broken_dir.join('b.py').write('\n')
    broken_dir.join('__init__.py').write('import flask.ext.broken.b\n'
                                         'import missing_module')
def test_flaskext_new_simple_import_normal(newext_simple):
    """`from flask.ext.<name> import attr` resolves a new-style module."""
    from flask.ext.newext_simple import ext_id
    assert ext_id == 'newext_simple'
def test_flaskext_new_simple_import_module(newext_simple):
    """`from flask.ext import <name>` yields the real flask_* module."""
    from flask.ext import newext_simple
    assert newext_simple.ext_id == 'newext_simple'
    assert newext_simple.__name__ == 'flask_newext_simple'
def test_flaskext_new_package_import_normal(newext_package):
    """Attribute import from a new-style package works via flask.ext."""
    from flask.ext.newext_package import ext_id
    assert ext_id == 'newext_package'
def test_flaskext_new_package_import_module(newext_package):
    """Module import of a new-style package keeps its real __name__."""
    from flask.ext import newext_package
    assert newext_package.ext_id == 'newext_package'
    assert newext_package.__name__ == 'flask_newext_package'
def test_flaskext_new_package_import_submodule_function(newext_package):
    """A function can be imported from a new-style package submodule."""
    from flask.ext.newext_package.submodule import test_function
    assert test_function() == 42
def test_flaskext_new_package_import_submodule(newext_package):
    """A submodule imported via flask.ext keeps the flask_* dotted name."""
    from flask.ext.newext_package import submodule
    assert submodule.__name__ == 'flask_newext_package.submodule'
    assert submodule.test_function() == 42
def test_flaskext_old_simple_import_normal(oldext_simple):
    """Attribute import of an old flaskext.* module works via flask.ext."""
    from flask.ext.oldext_simple import ext_id
    assert ext_id == 'oldext_simple'
def test_flaskext_old_simple_import_module(oldext_simple):
    """Module import of an old-style extension keeps the flaskext.* name."""
    from flask.ext import oldext_simple
    assert oldext_simple.ext_id == 'oldext_simple'
    assert oldext_simple.__name__ == 'flaskext.oldext_simple'
def test_flaskext_old_package_import_normal(oldext_package):
    """Attribute import from an old-style package works via flask.ext."""
    from flask.ext.oldext_package import ext_id
    assert ext_id == 'oldext_package'
def test_flaskext_old_package_import_module(oldext_package):
    """Module import of an old-style package keeps the flaskext.* name."""
    from flask.ext import oldext_package
    assert oldext_package.ext_id == 'oldext_package'
    assert oldext_package.__name__ == 'flaskext.oldext_package'
def test_flaskext_old_package_import_submodule(oldext_package):
    """An old-style package submodule keeps the flaskext.* dotted name."""
    from flask.ext.oldext_package import submodule
    assert submodule.__name__ == 'flaskext.oldext_package.submodule'
    assert submodule.test_function() == 42
def test_flaskext_old_package_import_submodule_function(oldext_package):
    """A function can be imported from an old-style package submodule."""
    from flask.ext.oldext_package.submodule import test_function
    assert test_function() == 42
def test_flaskext_broken_package_no_module_caching(flaskext_broken):
    """A failing extension import fails every time — the broken module must
    not be left cached in sys.modules after the first attempt."""
    for x in range(2):
        with pytest.raises(ImportError):
            import flask.ext.broken
def test_no_error_swallowing(flaskext_broken):
    """The import hook must re-raise the *original* ImportError (here a
    missing transitive dependency), not replace it with its own."""
    with pytest.raises(ImportError) as excinfo:
        import flask.ext.broken
    assert excinfo.type is ImportError
    # the message quotes the module name only on Python 3
    if PY2:
        message = 'No module named missing_module'
    else:
        message = 'No module named \'missing_module\''
    assert str(excinfo.value) == message
    # top traceback frame is this test module, i.e. the hook did not
    # insert itself as the apparent raiser
    assert excinfo.tb.tb_frame.f_globals is globals()
    # reraise() adds a second frame so we need to skip that one too.
    # On PY3 we even have another one :(
    next = excinfo.tb.tb_next.tb_next
    if not PY2:
        next = next.tb_next
    import os.path
    assert os.path.join('flask_broken', '__init__.py') in \
        next.tb_frame.f_code.co_filename
| bsd-3-clause |
lyn1337/LinuxDSc2 | linux-2.6.x/arch/ia64/scripts/unwcheck.py | 916 | 1718 | #!/usr/bin/env python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Python 2 script (print statements, `long` below). Bail out unless exactly
# one object-file argument was given.
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)
# allow cross-toolchain builds to point at a prefixed readelf
readelf = os.getenv("READELF", "readelf")
# "<func>: [0xstart-0xend]" header emitted by `readelf -u` per function
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# region line carrying the region length in instruction slots
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    # Report a mismatch between a function's slot count (ia64 packs 3
    # instruction slots per 16-byte bundle) and the sum of its unwind-region
    # lengths, and bump the global error tally.
    # NOTE(review): when `func` is empty this falls back to the module-level
    # globals `start`/`end` set by the main loop below — confirm callers
    # always run after at least one header match in that case.
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
# Main pass: stream `readelf -u` output, accumulating region lengths per
# function and validating each function when the next header appears.
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # new function header: validate the previous one first
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# validate the final function (no trailing header to trigger it)
check_func(func, slots, rlen_sum)
if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
finklabs/inquirer | examples/when.py | 2 | 1284 | # -*- coding: utf-8 -*-
"""
When example
"""
from __future__ import print_function, unicode_literals
from whaaaaat import style_from_dict, Token, prompt, print_json, default_style
# Custom prompt colour scheme. NOTE(review): this dict is built but never
# used — the prompt() call at the bottom passes default_style instead.
style = style_from_dict({
    Token.QuestionMark: '#FF9D00 bold',
    Token.Selected: '#5F819D bold',
    Token.Instruction: '', # default
    Token.Answer: '#5F819D bold',
    Token.Question: '',
})
def dislikes_bacon(answers):
    """Guard for the follow-up questions: True when bacon was declined."""
    # Demonstrates a named function as a `when` predicate (a lambda would
    # also do); inverts the truthiness of the 'bacon' confirm answer.
    if answers['bacon']:
        return False
    return True
# Question list demonstrating conditional prompts: each 'when' callable
# receives the answers gathered so far and returns whether to ask.
questions = [
    {
        'type': 'confirm',
        'name': 'bacon',
        'message': 'Do you like bacon?'
    },
    {
        'type': 'input',
        'name': 'favorite',
        'message': 'Bacon lover, what is your favorite type of bacon?',
        'when': lambda answers: answers['bacon']
    },
    {
        'type': 'confirm',
        'name': 'pizza',
        'message': 'Ok... Do you like pizza?',
        'default': False, # only for demo :)
        'when': dislikes_bacon
    },
    {
        'type': 'input',
        'name': 'favorite',
        'message': 'Whew! What is your favorite type of pizza?',
        # .get(): 'pizza' is absent when the bacon branch was taken
        'when': lambda answers: answers.get('pizza', False)
    }
]
# Run the interactive prompt and pretty-print the collected answers.
answers = prompt(questions, style=default_style)
print_json(answers)
| mit |
michelts/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/i18n/commands/tests.py | 55 | 1477 | import os
import re
from subprocess import Popen, PIPE
def find_command(cmd, path=None, pathext=None):
    """Locate *cmd* on the search path, honouring Windows PATHEXT suffixes.

    Returns the full path of the executable, or None when it is not found.
    (Python 2 module: uses `basestring`.)
    """
    if path is None:
        path = os.environ.get('PATH', []).split(os.pathsep)
    # a single directory may be passed as a plain string
    if isinstance(path, basestring):
        path = [path]
    # check if there are funny path extensions for executables, e.g. Windows
    if pathext is None:
        pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD').split(os.pathsep)
    # a command that already carries one of the extensions is looked up as-is
    if any(cmd.endswith(ext) for ext in pathext):
        pathext = ['']
    # probe each directory: the bare name first, then each extension
    for directory in path:
        candidate = os.path.join(directory, cmd)
        for suffix in [''] + pathext:
            full_path = candidate + suffix
            if os.path.isfile(full_path):
                return full_path
    return None
# checks if it can find xgettext on the PATH and
# imports the extraction tests if yes
xgettext_cmd = find_command('xgettext')
if xgettext_cmd:
    p = Popen('%s --version' % xgettext_cmd, shell=True, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt', universal_newlines=True)
    output = p.communicate()[0]
    # parse "major.minor" out of the --version banner
    match = re.search(r'(?P<major>\d+)\.(?P<minor>\d+)', output)
    if match:
        xversion = (int(match.group('major')), int(match.group('minor')))
        # extraction tests need xgettext >= 0.15
        if xversion >= (0, 15):
            from extraction import *
    del p
# compilation tests only need msgfmt to exist
if find_command('msgfmt'):
    from compilation import *
| gpl-3.0 |
evidation-health/ContinuousTimeMarkovModel | examples/small_sample_example_main.py | 1 | 6795 | import numpy as np
from theano.tensor import as_tensor_variable
from ContinuousTimeMarkovModel.distributions import *
from pymc3 import Model, sample, Metropolis, Dirichlet, Potential, Binomial, Beta, Slice, NUTS
import theano.tensor as TT
from ContinuousTimeMarkovModel.samplers.forwardS import *
from ContinuousTimeMarkovModel.samplers.forwardX import *
#import sys; sys.setrecursionlimit(50000)
#theano.config.compute_test_value = 'off'
# Load pre-generated data
from pickle import load
datadir = '../data/small_sample/'

def _load_pickle(name):
    """Unpickle `<datadir><name>.pkl`, closing the file even on error."""
    with open(datadir + name + '.pkl', 'rb') as infile:
        return load(infile)

# Same module-level names as before, now loaded through one helper instead
# of thirteen copies of the open/load/close boilerplate (which also never
# used context managers).
pi_start = _load_pickle('pi')        # initial state distribution
Q_start = _load_pickle('Q')          # rate matrix
S_start = _load_pickle('S')          # hidden state sequence
B_start = _load_pickle('B')
B0_start = _load_pickle('B0')
X_start = _load_pickle('X')          # comorbidity indicators
Z_start = _load_pickle('Z')
L_start = _load_pickle('L')
obs_jumps = _load_pickle('obs_jumps')
T = _load_pickle('T')                # observations per patient
O = _load_pickle('O')                # claims observations
#Cut down to 100 people
# (keeps the example fast; every per-observation array is truncated so it
# stays aligned with the first sum(T[:newN]) observations)
newN = 100
T = T[:newN]
nObs = T.sum()
S_start = S_start[0:nObs]
obs_jumps = obs_jumps[0:nObs]
X_start = X_start[0:nObs]
O = O[0:nObs]
nObs = S_start.shape[0]
N = T.shape[0] # Number of patients
M = pi_start.shape[0] # Number of hidden states
K = Z_start.shape[0] # Number of comorbidities
D = Z_start.shape[1] # Number of claims
Dd = 16 # Maximum number of claims that can occur at once
#import pdb; pdb.set_trace()
# PyMC3 model: continuous-time Markov chain over hidden disease states (S),
# binary comorbidities (X) emitted from S, and observed claims (O) emitted
# from X. Commented-out alternatives record testval experiments.
model = Model()
with model:
    #Fails: #pi = Dirichlet('pi', a = as_tensor_variable([0.147026,0.102571,0.239819,0.188710,0.267137,0.054738]), shape=M, testval = np.ones(M)/float(M))
    pi = Dirichlet('pi', a = as_tensor_variable(pi_start.copy()), shape=M)
    # keep every component of pi away from 0 to avoid degenerate states
    pi_min_potential = Potential('pi_min_potential', TT.switch(TT.min(pi) < .001, -np.inf, 0))
    Q = DiscreteObsMJP_unif_prior('Q', M=M, lower=0.0, upper=1.0, shape=(M,M))
    #S = DiscreteObsMJP('S', pi=pi, Q=Q, M=M, nObs=nObs, observed_jumps=obs_jumps, T=T, shape=(nObs), testval=np.ones(nObs,dtype='int32'))
    S = DiscreteObsMJP('S', pi=pi, Q=Q, M=M, nObs=nObs, observed_jumps=obs_jumps, T=T, shape=(nObs))
    #B0 = Beta('B0', alpha = 1., beta = 1., shape=(K,M), testval=0.2*np.ones((K,M)))
    #B = Beta('B', alpha = 1., beta = 1., shape=(K,M), testval=0.2*np.ones((K,M)))
    B0 = Beta('B0', alpha = 1., beta = 1., shape=(K,M))
    B = Beta('B', alpha = 1., beta = 1., shape=(K,M))
    #X = Comorbidities('X', S=S, B0=B0,B=B, T=T, shape=(nObs, K), testval=np.ones((nObs,K),dtype='int8'))
    X = Comorbidities('X', S=S, B0=B0,B=B, T=T, shape=(nObs, K))
    #Z = Beta('Z', alpha = 0.1, beta = 1., shape=(K,D), testval=0.5*np.ones((K,D)))
    #L = Beta('L', alpha = 1., beta = 1., shape=D, testval=0.5*np.ones(D))
    Z = Beta('Z', alpha = 0.1, beta = 1., shape=(K,D))
    L = Beta('L', alpha = 1., beta = 1., shape=D)
    O_obs = Claims('O_obs', X=X, Z=Z, L=L, T=T, D=D, O_input=O, shape=(nObs,Dd), observed=O)
    #O_obs = Claims('O_obs', X=X, Z=Z, L=L, T=T, D=D, max_obs=max_obs, O_input=O, shape=(Dd,max_obs,N), observed=O)
#import pdb; pdb.set_trace()
# Build the sampler's start dict: the pre-generated values are mapped to the
# samplers' unconstrained spaces (log-odds / logit) for the transformed RVs.
from scipy.special import logit
Q_raw = []
# only the superdiagonal of Q is free under the one-way rate parameterization
for i in range(Q_start.shape[0]-1):
    Q_raw.append(Q_start[i,i+1])
Q_raw_log = logit(np.asarray(Q_raw))
B_lo = logit(B_start)
B0_lo = logit(B0_start)
Z_lo = logit(Z_start)
L_lo = logit(L_start)
start = {'Q_ratematrixoneway': Q_raw_log, 'B_logodds':B_lo, 'B0_logodds':B0_lo, 'S':S_start, 'X':X_start, 'Z_logodds':Z_lo, 'L_logodds':L_lo}
#teststart = {'Q_ratematrixoneway': Q_raw_log, 'B_logodds':B_lo, 'B0_logodds':B0_lo, 'S':S_start, 'X':X_start, 'Z_logodds':Z_lo, 'L_logodds':L_lo, 'pi_stickbreaking':np.ones(M)/float(M)}
#start = {'Q_ratematrixoneway': Q_raw_log, 'B_logodds':B_lo, 'B0_logodds':B0_lo, 'S':S_start, 'X':X_start, 'Z_logodds':Z_lo, 'L_logodds':L_start}
# Assemble the compound step scheme (NUTS for continuous RVs, custom
# forward-filtering samplers for the discrete S and X) and sample.
# Commented-out lines preserve Metropolis/scaling experiments.
with model:
    #import pdb; pdb.set_trace()
    steps = []
    steps.append(NUTS(vars=[pi]))
    #steps.append(NUTS(vars=[pi], scaling=np.ones(M-1)*0.058))
    #steps.append(Metropolis(vars=[pi], scaling=0.058, tune=False))
    steps.append(NUTS(vars=[Q],scaling=np.ones(M-1,dtype=float)*10.))
    #steps.append(Metropolis(vars=[Q], scaling=0.2, tune=False))
    steps.append(ForwardS(vars=[S], nObs=nObs, T=T, N=N, observed_jumps=obs_jumps))
    steps.append(NUTS(vars=[B0,B]))
    #steps.append(Metropolis(vars=[B0], scaling=0.2, tune=False))
    #steps.append(NUTS(vars=[B]))
    #steps.append(Metropolis(vars=[B], scaling=0.198, tune=False))
    steps.append(ForwardX(vars=[X], N=N, T=T, K=K, D=D,Dd=Dd, O=O, nObs=nObs))
    steps.append(NUTS(vars=[Z], scaling=np.ones(K*D)))
    #steps.append(Metropolis(vars=[Z], scaling=0.0132, tune=False))
    steps.append(NUTS(vars=[L],scaling=np.ones(D)))
    #steps.append(Metropolis(vars=[L],scaling=0.02, tune=False, ))
## 22 minutes per step with all NUTS set
#import pdb; pdb.set_trace()
#model.dlogp()
    trace = sample(1001, steps, start=start, random_seed=111,progressbar=True)
    #trace = sample(11, steps, start=start, random_seed=111,progressbar=True)
    #trace = sample(11, steps, start=start, random_seed=[111,112,113],progressbar=False,njobs=3)
pi = trace[pi]
Q = trace[Q]
S = trace[S]
#S0 = S[:,0] #now pibar
B0 = trace[B0]
B = trace[B]
X = trace[X]
Z = trace[Z]
L = trace[L]
Sbin = np.vstack([np.bincount(S[i],minlength=6)/float(len(S[i])) for i in range(len(S))])
zeroIndices = np.roll(T.cumsum(),1)
zeroIndices[0] = 0
pibar = np.vstack([np.bincount(S[i][zeroIndices],minlength=M)/float(zeroIndices.shape[0]) for i in range(len(S))])
pibar = np.vstack([np.bincount(S_start[zeroIndices],minlength=M)/float(zeroIndices.shape[0]),pibar])
SEnd = np.vstack([np.bincount(S[i][zeroIndices-1],minlength=M)/float(zeroIndices.shape[0]) for i in range(len(S))])
SEnd = np.vstack([np.bincount(S_start[zeroIndices-1],minlength=M)/float(zeroIndices.shape[0]),SEnd])
logp = steps[2].logp
Xlogp = steps[4].logp
XChanges = np.insert(1-(1-(X[:,1:]-X[:,:-1])).prod(axis=2),0,0,axis=1)
XChanges.T[zeroIndices] = 0
XChanges[XChanges.nonzero()] = XChanges[XChanges.nonzero()]/XChanges[XChanges.nonzero()]
XChanges = XChanges.sum(axis=1)/float(N)
logpTotal = [model.logp(trace[i]) for i in range(len(trace))]
#np.set_printoptions(2);np.set_printoptions(linewidth=160)
'''
for i in range(1001):
print "~~~",i ,"~~~"
print pi[i,:]
print "Bincount S0:", np.bincount(S0[i,:],minlength=6)
print "\n"
'''
#from pickle import dump
#with open('file.pkl','wb') as file:
# dump(trace,file)
| mit |
Multi01/Arducopter-clone | Tools/LogAnalyzer/DataflashLog.py | 83 | 27803 | #
# Code to abstract the parsing of APM Dataflash log files, currently only used by the LogAnalyzer
#
# Initial code by Andrew Chapman (amchapman@gmail.com), 16th Jan 2014
#
from __future__ import print_function
import collections
import os
import numpy
import bisect
import sys
import ctypes
class Format(object):
    '''Data channel format as specified by the FMT lines in a text log file.

    Holds one line type's id, length, name and column labels, and can
    synthesize a row class for that line type via to_class().
    '''
    def __init__(self, msgType, msgLen, name, types, labels):
        self.NAME = 'FMT'
        self.msgType = msgType          # numeric message id from the FMT line
        self.msgLen = msgLen            # message length in bytes
        self.name = name                # line label, e.g. "GPS"
        self.types = types              # one format character per field
        self.labels = labels.split(',') # comma-separated field names
    def __str__(self):
        # repr() instead of the Python-2-only backtick syntax (a syntax error
        # on Python 3); the output is identical on Python 2
        return "%8s %s" % (self.name, repr(self.labels))
    @staticmethod
    def trycastToFormatType(value, valueType):
        '''using format characters from libraries/DataFlash/DataFlash.h to cast strings to basic python int/float/string types
        tries a cast, if it does not work, well, acceptable as the text logs do not match the format, e.g. MODE is expected to be int'''
        try:
            if valueType in "fcCeEL":
                return float(value)
            elif valueType in "bBhHiIM":
                return int(value)
            elif valueType in "nNZ":
                return str(value)
        except (ValueError, TypeError):
            # narrowed from a bare except: only conversion failures are
            # expected here, and we deliberately keep the raw value then
            pass
        return value
    def to_class(self):
        '''build a class whose instances hold one parsed log line of this format'''
        members = dict(
            NAME = self.name,
            labels = self.labels[:],
        )
        fieldtypes = [i for i in self.types]
        fieldlabels = self.labels[:]
        # field access: one property per label, casting on assignment
        for (label, _type) in zip(fieldlabels, fieldtypes):
            def createproperty(name, format):
                # extra scope for variable sanity
                # scaling via _NAME and def NAME(self): return self._NAME / SCALE
                propertyname = name
                attributename = '_' + name
                p = property(lambda x:getattr(x, attributename),
                             lambda x, v:setattr(x,attributename, Format.trycastToFormatType(v,format)))
                members[propertyname] = p
                members[attributename] = None
            createproperty(label, _type)
        # repr shows all values but the header
        members['__repr__'] = lambda x: "<{cls} {data}>".format(cls=x.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(x,'_'+k)) for k in x.labels]))
        def init(a, *x):
            # positional values must match the label list exactly
            if len(x) != len(a.labels):
                raise ValueError("Invalid Length")
            for (l,v) in zip(a.labels, x):
                try:
                    setattr(a, l, v)
                except Exception as e:
                    print("{} {} {} failed".format(a,l,v))
                    print(e)
        members['__init__'] = init
        # finally, create the class
        cls = type(\
            'Log__{:s}'.format(self.name),
            (object,),
            members
        )
        return cls
class logheader(ctypes.LittleEndianStructure):
    '''Three-byte header that precedes every record in a binary dataflash log:
    two fixed magic bytes (0xA3 0x95) and the message-format id.'''
    _fields_ = [ \
        ('head1', ctypes.c_uint8),
        ('head2', ctypes.c_uint8),
        ('msgid', ctypes.c_uint8),
    ]
    def __repr__(self):
        return "<logheader head1=0x{self.head1:x} head2=0x{self.head2:x} msgid=0x{self.msgid:x} ({self.msgid})>".format(self=self)
class BinaryFormat(ctypes.LittleEndianStructure):
    '''ctypes overlay for an FMT record in a *binary* dataflash log; decodes
    the record and can synthesize a ctypes structure for the described
    message type via to_class().'''
    NAME = 'FMT'
    MSG = 128     # message id of FMT records themselves
    SIZE = 0      # replaced with ctypes.sizeof(BinaryFormat) after the class body
    # format character -> ctypes field type (see DataFlash.h)
    FIELD_FORMAT = {
        'b': ctypes.c_int8,
        'B': ctypes.c_uint8,
        'h': ctypes.c_int16,
        'H': ctypes.c_uint16,
        'i': ctypes.c_int32,
        'I': ctypes.c_uint32,
        'f': ctypes.c_float,
        'd': ctypes.c_double,
        'n': ctypes.c_char * 4,
        'N': ctypes.c_char * 16,
        'Z': ctypes.c_char * 64,
        'c': ctypes.c_int16,# * 100,
        'C': ctypes.c_uint16,# * 100,
        'e': ctypes.c_int32,# * 100,
        'E': ctypes.c_uint32,# * 100,
        'L': ctypes.c_int32,
        'M': ctypes.c_uint8,
        'q': ctypes.c_int64,
        'Q': ctypes.c_uint64,
    }
    # format characters whose raw value is stored scaled by 100
    FIELD_SCALE = {
        'c': 100,
        'C': 100,
        'e': 100,
        'E': 100,
    }
    # the original wrote `_packed_`, which ctypes silently ignores — the real
    # attribute is `_pack_`. All members here are 1-byte aligned, so the
    # fix cannot change the layout (still 89 bytes).
    _pack_ = True
    _fields_ = [ \
        ('head', logheader),
        ('type', ctypes.c_uint8),
        ('length', ctypes.c_uint8),
        ('name', ctypes.c_char * 4),
        ('types', ctypes.c_char * 16),
        ('labels', ctypes.c_char * 64),
    ]
    def __repr__(self):
        return "<{cls} {data}>".format(cls=self.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(self,k)) for (k,_) in self._fields_[1:]]))
    def to_class(self):
        '''build a packed ctypes structure class for this record's message type,
        or None when the FMT record is inconsistent'''
        members = dict(
            NAME = self.name,
            MSG = self.type,
            SIZE = self.length,
            labels = self.labels.split(",") if self.labels else [],
            _pack_ = True)
        fieldtypes = [i for i in self.types]
        fieldlabels = self.labels.split(",")
        if self.labels and (len(fieldtypes) != len(fieldlabels)):
            print("Broken FMT message for {} .. ignoring".format(self.name), file=sys.stderr)
            return None
        fields = [('head',logheader)]
        # field access: raw ctypes field behind '_<label>', scaled property in front
        for (label, _type) in zip(fieldlabels, fieldtypes):
            def createproperty(name, format):
                # extra scope for variable sanity
                # scaling via _NAME and def NAME(self): return self._NAME / SCALE
                propertyname = name
                attributename = '_' + name
                scale = BinaryFormat.FIELD_SCALE.get(format, None)
                p = property(lambda x:getattr(x, attributename))
                if scale is not None:
                    p = property(lambda x:getattr(x, attributename) / scale)
                members[propertyname] = p
                try:
                    fields.append((attributename, BinaryFormat.FIELD_FORMAT[format]))
                except KeyError:
                    print('ERROR: Failed to add FMT type: {}, with format: {}'.format(attributename, format))
                    raise
            createproperty(label, _type)
        members['_fields_'] = fields
        # repr shows all values but the header
        members['__repr__'] = lambda x: "<{cls} {data}>".format(cls=x.__class__.__name__, data = ' '.join(["{}:{}".format(k,getattr(x,k)) for k in x.labels]))
        # finally, create the class
        cls = type(\
            'Log__{:s}'.format(self.name),
            (ctypes.LittleEndianStructure,),
            members
        )
        # sanity-check the synthesized layout against the declared length
        if ctypes.sizeof(cls) != cls.SIZE:
            print("size mismatch for {} expected {} got {}".format(cls, ctypes.sizeof(cls), cls.SIZE), file=sys.stderr)
            return None
        return cls
BinaryFormat.SIZE = ctypes.sizeof(BinaryFormat)
class Channel(object):
    '''storage for a single stream of data, i.e. all GPS.RelAlt values'''
    # TODO: rethink data storage, but do more thorough regression testing before refactoring it
    # TODO: store data as a scipy spline curve so we can more easily interpolate and sample the slope?
    def __init__(self):
        self.dictData = {} # dict of linenum->value # store dupe data in dict and list for now, until we decide which is the better way to go
        self.listData = [] # list of (linenum,value), in line order # store dupe data in dict and list for now, until we decide which is the better way to go
    def getSegment(self, startLine, endLine):
        '''returns a segment of this data (from startLine to endLine, inclusive) as a new Channel instance'''
        # items() rather than the Python-2-only iteritems() (the file already
        # imports print_function); note only dictData is populated on the
        # returned segment, as before
        segment = Channel()
        segment.dictData = {k: v for k, v in self.dictData.items() if startLine <= k <= endLine}
        return segment
    def min(self):
        return min(self.dictData.values())
    def max(self):
        return max(self.dictData.values())
    def avg(self):
        # list() so numpy.mean also accepts the Python 3 dict view
        return numpy.mean(list(self.dictData.values()))
    def getNearestValueFwd(self, lineNumber):
        '''Returns (value,lineNumber) of the first sample at or after lineNumber'''
        # -99999 makes the probe sort before any real (line, value) pair
        index = bisect.bisect_left(self.listData, (lineNumber,-99999))
        while index<len(self.listData):
            line = self.listData[index][0]
            if line >= lineNumber:
                return (self.listData[index][1],line)
            index += 1
        raise Exception("Error finding nearest value for line %d" % lineNumber)
    def getNearestValueBack(self, lineNumber):
        '''Returns (value,lineNumber) of the last sample at or before lineNumber'''
        index = bisect.bisect_left(self.listData, (lineNumber,-99999)) - 1
        while index>=0:
            line = self.listData[index][0]
            if line <= lineNumber:
                return (self.listData[index][1],line)
            index -= 1
        raise Exception("Error finding nearest value for line %d" % lineNumber)
    def getNearestValue(self, lineNumber, lookForwards=True):
        '''find the nearest data value to the given lineNumber, defaults to first looking forwards. Returns (value,lineNumber)'''
        # fall back to the other direction when the preferred one runs off the end
        if lookForwards:
            try:
                return self.getNearestValueFwd(lineNumber)
            except Exception:
                return self.getNearestValueBack(lineNumber)
        else:
            try:
                return self.getNearestValueBack(lineNumber)
            except Exception:
                return self.getNearestValueFwd(lineNumber)
    def getInterpolatedValue(self, lineNumber):
        '''linearly interpolate a value at lineNumber from the surrounding samples'''
        (prevValue,prevValueLine) = self.getNearestValue(lineNumber, lookForwards=False)
        (nextValue,nextValueLine) = self.getNearestValue(lineNumber, lookForwards=True)
        if prevValueLine == nextValueLine:
            return prevValue
        weight = (lineNumber-prevValueLine) / float(nextValueLine-prevValueLine)
        # weight measures closeness to the *next* sample, so it must scale
        # nextValue — the original had the two weights swapped, which pulled
        # the result toward the farther sample instead of interpolating
        return ((1-weight)*prevValue) + (weight*nextValue)
    def getIndexOf(self, lineNumber):
        '''returns the index within this channel's listData of the given lineNumber, or raises an Exception if not found'''
        index = bisect.bisect_left(self.listData, (lineNumber,-99999))
        if (self.listData[index][0] == lineNumber):
            return index
        else:
            raise Exception("Error finding index for line %d" % lineNumber)
class LogIterator:
    '''Smart iterator that can move through a log by line number and maintain an index into the nearest values of all data channels'''
    # TODO: LogIterator currently indexes the next available value rather than the nearest value, we should make it configurable between next/nearest
    class LogIteratorSubValue:
        '''syntactic sugar to allow access by LogIterator[lineLabel][dataLabel]'''
        def __init__(self, logdata, iterators, lineLabel):
            self.logdata = logdata
            self.lineLabel = lineLabel
            self.iterators = iterators
        def __getitem__(self, dataLabel):
            index = self.iterators[self.lineLabel][0]
            return self.logdata.channels[self.lineLabel][dataLabel].listData[index][1]
    def __init__(self, logdata, lineNumber=0):
        # per-instance state. The original declared `iterators = {}` as a
        # *class* attribute and mutated it in __init__, so every LogIterator
        # silently shared (and clobbered) the same dict.
        self.iterators = {} # lineLabel -> (listIndex,lineNumber)
        self.logdata = logdata
        self.currentLine = lineNumber
        for lineLabel in self.logdata.formats:
            if lineLabel in self.logdata.channels:
                self.iterators[lineLabel] = ()
        self.jump(lineNumber)
    def __iter__(self):
        return self
    def __getitem__(self, lineLabel):
        return LogIterator.LogIteratorSubValue(self.logdata, self.iterators, lineLabel)
    def next(self):
        '''increment iterator to next log line'''
        self.currentLine += 1
        if self.currentLine > self.logdata.lineCount:
            return self
        for lineLabel in self.iterators.keys():
            # check if the currentLine has gone past the line we're pointing to for this type of data
            dataLabel = self.logdata.formats[lineLabel].labels[0]
            (index, lineNumber) = self.iterators[lineLabel]
            # if so, and it is not the last entry in the log, then increment the indices for all dataLabels under that lineLabel
            if (self.currentLine > lineNumber) and (index < len(self.logdata.channels[lineLabel][dataLabel].listData)-1):
                index += 1
                lineNumber = self.logdata.channels[lineLabel][dataLabel].listData[index][0]
                self.iterators[lineLabel] = (index,lineNumber)
        return self
    def jump(self, lineNumber):
        '''jump iterator to specified log line'''
        self.currentLine = lineNumber
        for lineLabel in self.iterators.keys():
            dataLabel = self.logdata.formats[lineLabel].labels[0]
            (value,lineNumber) = self.logdata.channels[lineLabel][dataLabel].getNearestValue(self.currentLine)
            self.iterators[lineLabel] = (self.logdata.channels[lineLabel][dataLabel].getIndexOf(lineNumber), lineNumber)
class DataflashLogHelper:
    '''helper functions for dealing with log data, put here to keep DataflashLog class as a simple parser and data store'''
    # NOTE(review): several Python-2-only idioms below (cmp-style list.sort,
    # indexing OrderedDict .keys()/.values()) — this module targets Python 2.
    @staticmethod
    def getTimeAtLine(logdata, lineNumber):
        '''returns the nearest GPS timestamp in milliseconds after the given line number'''
        if not "GPS" in logdata.channels:
            raise Exception("no GPS log data found")
        # older logs use 'TIme', newer logs use 'TimeMS'
        timeLabel = "TimeMS"
        if "Time" in logdata.channels["GPS"]:
            timeLabel = "Time"
        # scan forward until a line with GPS time data is found
        while lineNumber <= logdata.lineCount:
            if lineNumber in logdata.channels["GPS"][timeLabel].dictData:
                return logdata.channels["GPS"][timeLabel].dictData[lineNumber]
            lineNumber = lineNumber + 1
        sys.stderr.write("didn't find GPS data for " + str(lineNumber) + " - using maxtime\n")
        return logdata.channels["GPS"][timeLabel].max()
    @staticmethod
    def findLoiterChunks(logdata, minLengthSeconds=0, noRCInputs=True):
        '''returns a list of (to,from) pairs defining sections of the log which are in loiter mode. Ordered from longest to shortest in time. If noRCInputs == True it only returns chunks with no control inputs'''
        # TODO: implement noRCInputs handling when identifying stable loiter chunks, for now we're ignoring it
        def chunkSizeCompare(chunk1, chunk2):
            # cmp-style comparator (Python 2 list.sort); longest chunk first
            chunk1Len = chunk1[1]-chunk1[0]
            chunk2Len = chunk2[1]-chunk2[0]
            if chunk1Len == chunk2Len:
                return 0
            elif chunk1Len > chunk2Len:
                return -1
            else:
                return 1
        od = collections.OrderedDict(sorted(logdata.modeChanges.items(), key=lambda t: t[0]))
        chunks = []
        for i in range(len(od.keys())):
            if od.values()[i][0] == "LOITER":
                # chunk runs from this mode change to the next one (or EOF)
                startLine = od.keys()[i]
                endLine = None
                if i == len(od.keys())-1:
                    endLine = logdata.lineCount
                else:
                    endLine = od.keys()[i+1]-1
                chunkTimeSeconds = (DataflashLogHelper.getTimeAtLine(logdata,endLine)-DataflashLogHelper.getTimeAtLine(logdata,startLine)+1) / 1000.0
                if chunkTimeSeconds > minLengthSeconds:
                    chunks.append((startLine,endLine))
                    #print "LOITER chunk: %d to %d, %d lines" % (startLine,endLine,endLine-startLine+1)
                    #print "  (time %d to %d, %d seconds)" % (DataflashLogHelper.getTimeAtLine(logdata,startLine), DataflashLogHelper.getTimeAtLine(logdata,endLine), chunkTimeSeconds)
        chunks.sort(chunkSizeCompare)
        return chunks
    @staticmethod
    def isLogEmpty(logdata):
        '''returns an human readable error string if the log is essentially empty, otherwise returns None'''
        # naive check for now, see if the throttle output was ever above 20%
        throttleThreshold = 20
        if logdata.vehicleType == "ArduCopter":
            throttleThreshold = 200 # copter uses 0-1000, plane+rover use 0-100
        if "CTUN" in logdata.channels:
            maxThrottle = logdata.channels["CTUN"]["ThrOut"].max()
            if maxThrottle < throttleThreshold:
                return "Throttle never above 20%"
        return None
class DataflashLog(object):
'''APM Dataflash log file reader and container class. Keep this simple, add more advanced or specific functions to DataflashLogHelper class'''
knownHardwareTypes = ["APM", "PX4", "MPNG"]
intTypes = "bBhHiIM"
floatTypes = "fcCeEL"
charTypes = "nNZ"
    def __init__(self, logfile=None, format="auto", ignoreBadlines=False):
        '''initialise an empty log container; parses logfile immediately when given'''
        self.filename = None
        self.vehicleType = ""     # ArduCopter, ArduPlane, ArduRover, etc, verbatim as given by header
        self.firmwareVersion = ""
        self.firmwareHash = ""
        self.freeRAM = 0
        self.hardwareType = ""    # APM 1, APM 2, PX4, MPNG, etc What is VRBrain? BeagleBone, etc? Needs more testing
        self.formats = {}         # name -> Format
        self.parameters = {}      # token -> value
        self.messages = {}        # lineNum -> message
        self.modeChanges = {}     # lineNum -> (mode,value)
        self.channels = {}        # lineLabel -> {dataLabel:Channel}
        self.filesizeKB = 0
        self.durationSecs = 0
        self.lineCount = 0
        self.skippedLines = 0
        if logfile:
            self.read(logfile, format, ignoreBadlines)
    def getCopterType(self):
        '''returns quad/hex/octo/tradheli if this is a copter log, None for other vehicles, "" when undeterminable'''
        if self.vehicleType != "ArduCopter":
            return None
        motLabels = []
        if "MOT" in self.formats: # not listed in PX4 log header for some reason?
            motLabels = self.formats["MOT"].labels
        # infer the frame from the motor channel count (GGain only on tradheli)
        if "GGain" in motLabels:
            return "tradheli"
        elif len(motLabels) == 4:
            return "quad"
        elif len(motLabels) == 6:
            return "hex"
        elif len(motLabels) == 8:
            return "octo"
        else:
            return ""
def read(self, logfile, format="auto", ignoreBadlines=False):
'''returns on successful log read (including bad lines if ignoreBadlines==True), will throw an Exception otherwise'''
# TODO: dataflash log parsing code is pretty hacky, should re-write more methodically
self.filename = logfile
if self.filename == '<stdin>':
f = sys.stdin
else:
f = open(self.filename, 'r')
if format == 'bin':
head = '\xa3\x95\x80\x80'
elif format == 'log':
head = ""
elif format == 'auto':
if self.filename == '<stdin>':
# assuming TXT format
# raise ValueError("Invalid log format for stdin: {}".format(format))
head = ""
else:
head = f.read(4)
f.seek(0)
else:
raise ValueError("Unknown log format for {}: {}".format(self.filename, format))
if head == '\xa3\x95\x80\x80':
numBytes, lineNumber = self.read_binary(f, ignoreBadlines)
pass
else:
numBytes, lineNumber = self.read_text(f, ignoreBadlines)
# gather some general stats about the log
self.lineCount = lineNumber
self.filesizeKB = numBytes / 1024.0
# TODO: switch duration calculation to use TimeMS values rather than GPS timestemp
if "GPS" in self.channels:
# the GPS time label changed at some point, need to handle both
timeLabel = None
for i in 'TimeMS','TimeUS','Time':
if i in self.channels["GPS"]:
timeLabel = i
break
firstTimeGPS = self.channels["GPS"][timeLabel].listData[0][1]
lastTimeGPS = self.channels["GPS"][timeLabel].listData[-1][1]
if timeLabel == 'TimeUS':
firstTimeGPS /= 1000
lastTimeGPS /= 1000
self.durationSecs = (lastTimeGPS-firstTimeGPS) / 1000
# TODO: calculate logging rate based on timestamps
# ...
def process(self, lineNumber, e):
    """Dispatch one parsed log entry into the appropriate store.

    FMT entries register new message formats, PARM entries record
    parameters, MSG entries capture vehicle/firmware info (first MSG) or
    free-text messages, MODE entries record flight-mode changes, and any
    other entry is appended to the per-channel data series.

    :param lineNumber: 1-based line/record index used as the data key
    :param e: a parsed log entry (instance of a Format-derived class)
    """
    if e.NAME == 'FMT':
        cls = e.to_class()
        if cls is not None:  # FMT messages can be broken ...
            if hasattr(e, 'type') and e.type not in self._formats:  # binary log specific
                self._formats[e.type] = cls
            if cls.NAME not in self.formats:
                self.formats[cls.NAME] = cls
    elif e.NAME == "PARM":
        self.parameters[e.Name] = e.Value
    elif e.NAME == "MSG":
        if not self.vehicleType:
            # First MSG line looks like "<vehicle> <version> (<hash>)".
            # (A previously unused local list of known vehicle types was
            # removed here — it was never read.)
            tokens = e.Message.split(' ')
            self.vehicleType = tokens[0]
            self.firmwareVersion = tokens[1]
            if len(tokens) == 3:
                self.firmwareHash = tokens[2][1:-1]
        else:
            self.messages[lineNumber] = e.Message
    elif e.NAME == "MODE":
        if self.vehicleType in ["ArduCopter"]:
            try:
                # Map raw copter mode numbers to their human-readable names.
                modes = {0:'STABILIZE',
                        1:'ACRO',
                        2:'ALT_HOLD',
                        3:'AUTO',
                        4:'GUIDED',
                        5:'LOITER',
                        6:'RTL',
                        7:'CIRCLE',
                        9:'LAND',
                        10:'OF_LOITER',
                        11:'DRIFT',
                        13:'SPORT',
                        14:'FLIP',
                        15:'AUTOTUNE',
                        16:'HYBRID',}
                self.modeChanges[lineNumber] = (modes[int(e.Mode)], e.ThrCrs)
            except Exception:
                # Unknown or non-numeric mode value: store it verbatim.
                # (Narrowed from a bare except so Ctrl-C still propagates.)
                self.modeChanges[lineNumber] = (e.Mode, e.ThrCrs)
        elif self.vehicleType in ["ArduPlane", "APM:Plane", "ArduRover", "APM:Rover", "APM:Copter"]:
            self.modeChanges[lineNumber] = (e.Mode, e.ModeNum)
        else:
            raise Exception("Unknown log type for MODE line {} {}".format(self.vehicleType, repr(e)))
    # anything else must be the log data
    else:
        groupName = e.NAME
        # first time seeing this type of log line, create the channel storage
        if not groupName in self.channels:
            self.channels[groupName] = {}
            for label in e.labels:
                self.channels[groupName][label] = Channel()
        # store each token in its relevant channel
        for label in e.labels:
            value = getattr(e, label)
            channel = self.channels[groupName][label]
            channel.dictData[lineNumber] = value
            channel.listData.append((lineNumber, value))
def read_text(self, f, ignoreBadlines):
    """Parse a textual dataflash log line by line.

    Returns a (numBytes, lineNumber) tuple. Header/noise lines are either
    skipped or used to fill hardware/vehicle metadata; everything else is
    parsed with the registered Format classes and fed to self.process().
    Raises on the first unparseable line unless ignoreBadlines is True.
    """
    self.formats = {'FMT':Format}
    lineNumber = 0
    numBytes = 0
    knownHardwareTypes = ["APM", "PX4", "MPNG"]
    for line in f:
        lineNumber = lineNumber + 1
        # +1 accounts for the stripped newline character.
        numBytes += len(line) + 1
        try:
            #print "Reading line: %d" % lineNumber
            line = line.strip('\n\r')
            tokens = line.split(', ')
            # first handle the log header lines
            if line == " Ready to drive." or line == " Ready to FLY.":
                continue
            if line == "----------------------------------------": # present in pre-3.0 logs
                raise Exception("Log file seems to be in the older format (prior to self-describing logs), which isn't supported")
            # A single token means this is a header/metadata line, not data.
            if len(tokens) == 1:
                tokens2 = line.split(' ')
                if line == "":
                    pass
                elif len(tokens2) == 1 and tokens2[0].isdigit(): # log index
                    pass
                elif len(tokens2) == 3 and tokens2[0] == "Free" and tokens2[1] == "RAM:":
                    self.freeRAM = int(tokens2[2])
                elif tokens2[0] in knownHardwareTypes:
                    self.hardwareType = line # not sure if we can parse this more usefully, for now only need to report it back verbatim
                elif (len(tokens2) == 2 or len(tokens2) == 3) and tokens2[1][0].lower() == "v": # e.g. ArduCopter V3.1 (5c6503e2)
                    self.vehicleType = tokens2[0]
                    self.firmwareVersion = tokens2[1]
                    if len(tokens2) == 3:
                        self.firmwareHash = tokens2[2][1:-1]
                else:
                    errorMsg = "Error parsing line %d of log file: %s" % (lineNumber, self.filename)
                    if ignoreBadlines:
                        print(errorMsg + " (skipping line)", file=sys.stderr)
                        self.skippedLines += 1
                    else:
                        # NOTE(review): errorMsg is discarded here — the outer
                        # handler re-raises with e.args[0], which is "".
                        raise Exception("")
            else:
                # Data line: first token names the format, the rest are values.
                if not tokens[0] in self.formats:
                    raise ValueError("Unknown Format {}".format(tokens[0]))
                e = self.formats[tokens[0]](*tokens[1:])
                self.process(lineNumber, e)
        except Exception as e:
            print("BAD LINE: " + line, file=sys.stderr)
            if not ignoreBadlines:
                raise Exception("Error parsing line %d of log file %s - %s" % (lineNumber,self.filename,e.args[0]))
    return (numBytes,lineNumber)
def read_binary(self, f, ignoreBadlines):
    """Consume every record from the binary log stream.

    Each yielded record is forwarded to self.process(); None entries
    (skipped records) still advance the line counter but contribute no
    bytes. Returns a (numBytes, lineNumber) tuple.
    """
    line_count = 0
    byte_count = 0
    for record in self._read_binary(f, ignoreBadlines):
        line_count = line_count + 1
        if record is None:
            # A bad/skipped record: counts toward the line number only.
            continue
        byte_count += record.SIZE
        self.process(line_count, record)
    return (byte_count, line_count)
def _read_binary(self, f, ignoreBadlines):
    """Generator yielding parsed records from a binary dataflash log.

    Reads the whole file into a bytearray and walks it record by record:
    each record starts with a logheader (0xA3 0x95 magic + msgid) and the
    msgid selects the ctypes structure registered in self._formats.
    """
    # msgid 128 is always the FMT message, which registers further formats.
    self._formats = {128:BinaryFormat}
    data = bytearray(f.read())
    offset = 0
    while len(data) > offset + ctypes.sizeof(logheader):
        h = logheader.from_buffer(data, offset)
        if not (h.head1 == 0xa3 and h.head2 == 0x95):
            if ignoreBadlines == False:
                raise ValueError(h)
            else:
                # An all-0xFF header means the dataflash block tail padding.
                if h.head1 == 0xff and h.head2 == 0xff and h.msgid == 0xff:
                    print("Assuming EOF due to dataflash block tail filled with \\xff... (offset={off})".format(off=offset), file=sys.stderr)
                    break
                # NOTE(review): for a bad header that is not 0xFF padding,
                # offset is never advanced here — looks like it could loop
                # forever or re-yield the previous record; confirm upstream.
        if h.msgid in self._formats:
            typ = self._formats[h.msgid]
            # Stop when the final record is truncated.
            if len(data) <= offset + typ.SIZE:
                break
            try:
                e = typ.from_buffer(data, offset)
            except:
                print("data:{} offset:{} size:{} sizeof:{} sum:{}".format(len(data),offset,typ.SIZE,ctypes.sizeof(typ),offset+typ.SIZE))
                raise
            offset += typ.SIZE
        else:
            raise ValueError(str(h) + "unknown type")
        yield e
| gpl-3.0 |
dkubiak789/OpenUpgrade | addons/crm_profiling/crm_profiling.py | 40 | 10377 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.osv import orm
from openerp.tools.translate import _
def _get_answers(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs """
query = """
select distinct(answer)
from profile_question_yes_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_yes = [x[0] for x in cr.fetchall()]
query = """
select distinct(answer)
from profile_question_no_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_no = [x[0] for x in cr.fetchall()]
return [ans_yes, ans_no]
def _get_parents(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs
@return: Get parents's Id """
ids_to_check = ids
cr.execute("""
select distinct(parent_id)
from crm_segmentation
where parent_id is not null
and id IN %s""",(tuple(ids),))
parent_ids = [x[0] for x in cr.fetchall()]
trigger = False
for x in parent_ids:
if x not in ids_to_check:
ids_to_check.append(x)
trigger = True
if trigger:
ids_to_check = _get_parents(cr, uid, ids_to_check)
return ids_to_check
def test_prof(cr, uid, seg_id, pid, answers_ids=None):
    """Return True if partner `pid` matches segmentation rule `seg_id`.

    @param cr: the current row, from the database cursor
    @param uid: the current user's ID for security checks
    @param seg_id: segmentation ID
    @param pid: partner ID
    @param answers_ids: IDs of the answers the partner has given
    """
    profile_ids = _get_parents(cr, uid, [seg_id])
    yes_answers, no_answers = _get_answers(cr, uid, profile_ids)
    # Every required ("yes") answer must have been given by the partner...
    for required in yes_answers:
        if required not in answers_ids:
            return False
    # ...and none of the partner's answers may be in the excluded set.
    for given in answers_ids:
        if given in no_answers:
            return False
    return True
def _recompute_categ(self, cr, uid, pid, answers_ids):
    """ Recompute the partner's category list from the profiling rules.
    @param self: The object pointer
    @param cr: the current row, from the database cursor,
    @param uid: the current user's ID for security checks,
    @param pid: partner's ID
    @param answers_ids: Answers's IDs
    @return: list of category IDs the partner should belong to
    """
    ok = []
    # Keep categories the partner already has whose segmentation is not
    # exclusive (or has no segmentation at all).
    cr.execute('''
        select r.category_id
        from res_partner_res_partner_category_rel r left join crm_segmentation s on (r.category_id = s.categ_id)
        where r.partner_id = %s and (s.exclusif = false or s.exclusif is null)
        ''', (pid,))
    for x in cr.fetchall():
        ok.append(x[0])
    query = '''
        select id, categ_id
        from crm_segmentation
        where profiling_active = true'''
    # String interpolation is tolerable here: `ok` only contains integer
    # IDs read back from the database, never user input.
    if ok != []:
        query = query +''' and categ_id not in(%s)'''% ','.join([str(i) for i in ok ])
    query = query + ''' order by id '''
    cr.execute(query)
    segm_cat_ids = cr.fetchall()
    # Add the category of every active profiling rule the partner matches.
    for (segm_id, cat_id) in segm_cat_ids:
        if test_prof(cr, uid, segm_id, pid, answers_ids):
            ok.append(cat_id)
    return ok
class question(osv.osv):
    """ A profiling question offered to partners; each question owns a
    set of available answers (crm_profiling.answer rows). """
    _name="crm_profiling.question"
    _description= "Question"
    _columns={
        'name': fields.char("Question",size=128, required=True),
        'answers_ids': fields.one2many("crm_profiling.answer","question_id","Avalaible Answers",),
        }
class questionnaire(osv.osv):
    """ A named bundle of profiling questions that can be run against a
    partner via the questionnaire wizard. """
    _name="crm_profiling.questionnaire"
    _description= "Questionnaire"
    _columns = {
        'name': fields.char("Questionnaire",size=128, required=True),
        'description':fields.text("Description", required=True),
        'questions_ids': fields.many2many('crm_profiling.question','profile_questionnaire_quest_rel',\
                            'questionnaire', 'question', "Questions"),
        }
class answer(osv.osv):
    """ One possible answer to a profiling question. """
    _name="crm_profiling.answer"
    _description="Answer"
    _columns={
        "name": fields.char("Answer",size=128, required=True),
        "question_id": fields.many2one('crm_profiling.question',"Question"),
        }
class partner(osv.osv):
    """ Extend res.partner with the profiling answers given by the partner
    and recompute its categories whenever those answers change. """
    _inherit="res.partner"
    _columns={
        "answers_ids": fields.many2many("crm_profiling.answer","partner_question_rel",\
                            "partner","answer","Answers"),
        }

    def _questionnaire_compute(self, cr, uid, answers, context=None):
        """ Merge freshly given answers with the partner's stored ones.
        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param answers: list of answer IDs just collected by the wizard
        @param context: A standard dictionary for contextual values;
            context['active_id'] is the partner being profiled """
        partner_id = context.get('active_id')
        query = "select answer from partner_question_rel where partner=%s"
        cr.execute(query, (partner_id,))
        # NOTE: mutates the caller-supplied `answers` list in place before
        # writing the merged set back.
        for x in cr.fetchall():
            answers.append(x[0])
        self.write(cr, uid, [partner_id], {'answers_ids': [[6, 0, answers]]}, context=context)
        return {}

    def write(self, cr, uid, ids, vals, context=None):
        """ Recompute category_id whenever the answers change.
        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of partner IDs
        @param context: A standard dictionary for contextual values """
        if 'answers_ids' in vals:
            # NOTE(review): assumes vals['answers_ids'] is a single
            # [(6, 0, ids)] replace-all command and recomputes using only
            # ids[0] — confirm callers never pass other command forms.
            vals['category_id']=[[6, 0, _recompute_categ(self, cr, uid, ids[0], vals['answers_ids'][0][2])]]
        return super(partner, self).write(cr, uid, ids, vals, context=context)
class crm_segmentation(osv.osv):
    """ Extend CRM segmentation with profiling rules: included/excluded
    answers plus a parent/child hierarchy of profiles. """
    _inherit="crm.segmentation"
    _columns={
        "answer_yes": fields.many2many("crm_profiling.answer","profile_question_yes_rel",\
                            "profile","answer","Included Answers"),
        "answer_no": fields.many2many("crm_profiling.answer","profile_question_no_rel",\
                            "profile","answer","Excluded Answers"),
        'parent_id': fields.many2one('crm.segmentation', 'Parent Profile'),
        'child_ids': fields.one2many('crm.segmentation', 'parent_id', 'Child Profiles'),
        'profiling_active': fields.boolean('Use The Profiling Rules', help='Check\
                             this box if you want to use this tab as part of the \
                             segmentation rule. If not checked, the criteria beneath will be ignored')
        }

    _constraints = [
        (osv.osv._check_recursion, 'Error ! You cannot create recursive profiles.', ['parent_id'])
    ]

    def process_continue(self, cr, uid, ids, start=False):
        """ Run (or resume) the segmentation: assign matching partners to
        each segmentation's category.
        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of crm segmentation IDs
        @param start: when True, first clear exclusive categories """
        partner_obj = self.pool.get('res.partner')
        categs = self.read(cr,uid,ids,['categ_id','exclusif','partner_id', \
                                'sales_purchase_active', 'profiling_active'])
        for categ in categs:
            if start:
                # Exclusive segmentation: drop every existing member first.
                if categ['exclusif']:
                    cr.execute('delete from res_partner_res_partner_category_rel where \
                            category_id=%s', (categ['categ_id'][0],))

            id = categ['id']

            # Start from all partners, then filter down.
            cr.execute('select id from res_partner order by id ')
            partners = [x[0] for x in cr.fetchall()]

            if categ['sales_purchase_active']:
                # Filter by the classic sales/purchase segmentation lines.
                to_remove_list=[]
                cr.execute('select id from crm_segmentation_line where segmentation_id=%s', (id,))
                line_ids = [x[0] for x in cr.fetchall()]

                for pid in partners:
                    if (not self.pool.get('crm.segmentation.line').test(cr, uid, line_ids, pid)):
                        to_remove_list.append(pid)
                for pid in to_remove_list:
                    partners.remove(pid)

            if categ['profiling_active']:
                # Filter by the profiling (questionnaire answers) rules.
                to_remove_list = []

                for pid in partners:
                    cr.execute('select distinct(answer) from partner_question_rel where partner=%s',(pid,))
                    answers_ids = [x[0] for x in cr.fetchall()]

                    if (not test_prof(cr, uid, id, pid, answers_ids)):
                        to_remove_list.append(pid)
                for pid in to_remove_list:
                    partners.remove(pid)

            # Attach the category to every remaining partner that does not
            # already carry it.
            for partner in partner_obj.browse(cr, uid, partners):
                category_ids = [categ_id.id for categ_id in partner.category_id]
                if categ['categ_id'][0] not in category_ids:
                    cr.execute('insert into res_partner_res_partner_category_rel (category_id,partner_id) values (%s,%s)', (categ['categ_id'][0],partner.id))

            self.write(cr, uid, [id], {'state':'not running', 'partner_id':0})
        return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
GoogleCloudPlatform/training-data-analyst | courses/ak8s/v1.0/16_Logging/query-custom-favicon-metric.py | 4 | 2473 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Derived from Monitoring example from snippets.py
#
# Package dependency: pip install google-cloud-monitoring==0.31.1
#
# https://github.com/GoogleCloudPlatform/python-docs-samples
import argparse
import os
import pprint
import time
from google.cloud import monitoring_v3
def list_time_series(project_id):
    """Query the custom favicon metric and return a simple total.

    Looks at the last 15000 seconds of the
    ``logging.googleapis.com/user/favicons_served`` metric and returns
    the number of time series found plus the sum of all point values.
    Returns 0 when the query fails or yields nothing.

    Fix: the two bare ``except:`` clauses were narrowed to
    ``except Exception:`` so SystemExit/KeyboardInterrupt are no longer
    swallowed; the best-effort return of 0 is preserved.
    """
    client = monitoring_v3.MetricServiceClient()
    project_name = client.project_path(project_id)
    interval = monitoring_v3.types.TimeInterval()
    now = time.time()
    interval.end_time.seconds = int(now)
    interval.end_time.nanos = int(
        (now - interval.end_time.seconds) * 10**9)
    interval.start_time.seconds = int(now - 15000)
    interval.start_time.nanos = interval.end_time.nanos
    try:
        results = client.list_time_series(
            project_name,
            'metric.type = "logging.googleapis.com/user/favicons_served"',
            interval,
            monitoring_v3.enums.ListTimeSeriesRequest.TimeSeriesView.FULL)
    except Exception:
        # Metric may not exist yet; treat any API failure as "no data".
        return 0
    total = 0
    try:
        for result in results:
            # NOTE(review): each series contributes +1 in addition to its
            # point values — confirm this offset is intended by the lab.
            total += 1
            for point in result.points:
                total += point.value.int64_value
        return total
    except Exception:
        return 0
def project_id():
    """Retrieve the project id from the environment.

    Checks GOOGLE_CLOUD_PROJECT first, then falls back to GCLOUD_PROJECT.

    Raises:
        MissingProjectIdError -- When neither variable is set.
    Returns:
        str -- the project name
    """
    # Fix: os.environ[...] raised KeyError when GOOGLE_CLOUD_PROJECT was
    # unset, which made both the fallback and the error message below
    # unreachable; .get() returns None/"" and lets the `or` chain work.
    project_id = (os.environ.get('GOOGLE_CLOUD_PROJECT') or
                  os.environ.get('GCLOUD_PROJECT'))
    if not project_id:
        # NOTE(review): MissingProjectIdError is not defined in this file —
        # confirm it exists in the surrounding module.
        raise MissingProjectIdError(
            'Set the environment variable ' +
            'GCLOUD_PROJECT to your Google Cloud Project Id.')
    return project_id
if __name__ == '__main__':
    # Print the favicon total only when something was actually counted.
    # NOTE(review): the > 1 threshold silently drops a total of exactly 1 —
    # confirm that is intended.
    result=list_time_series(project_id())
    if result>1:
        print ("Favicon count: "+str(result))
| apache-2.0 |
40223119/w16b_test | static/Brython3.1.3-20150514-095342/Lib/_abcoll.py | 688 | 5155 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues. Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
"""
### collection related types which are not exposed through builtin ###
## iterators ##
#fixme brython
#bytes_iterator = type(iter(b''))
bytes_iterator = type(iter(''))
#fixme brython
#bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
dict_proxy = type(type.__dict__)
"""
# Brython workaround: deliberately shadow abc.abstractmethod (imported
# above) with a no-op decorator that returns the function unchanged.
def abstractmethod(self):
    return self
### ONE-TRICK PONIES ###
#class Iterable(metaclass=ABCMeta):
class Iterable:
    """ABC stand-in for objects that support iteration (define __iter__)."""

    @abstractmethod
    def __iter__(self):
        # Degenerate default: an empty generator.
        while False:
            yield None

    @classmethod
    def __subclasshook__(cls, C):
        # Duck-typed check: any class carrying __iter__ anywhere in its
        # MRO is accepted as an Iterable.
        if cls is Iterable:
            for base in C.__mro__:
                if "__iter__" in base.__dict__:
                    return True
        return NotImplemented
#class Sized(metaclass=ABCMeta):
class Sized:
    """ABC stand-in for objects with a length (define __len__)."""

    @abstractmethod
    def __len__(self):
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        # Accept any class that defines __len__ somewhere in its MRO.
        if cls is Sized:
            for base in C.__mro__:
                if "__len__" in base.__dict__:
                    return True
        return NotImplemented
#class Container(metaclass=ABCMeta):
class Container:
    """ABC stand-in for objects supporting `in` tests (define __contains__)."""

    @abstractmethod
    def __contains__(self, x):
        return False

    @classmethod
    def __subclasshook__(cls, C):
        # Accept any class that defines __contains__ somewhere in its MRO.
        if cls is Container:
            for base in C.__mro__:
                if "__contains__" in base.__dict__:
                    return True
        return NotImplemented
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
    """Read-only mapping ABC: concrete helpers built on __getitem__."""

    @abstractmethod
    def __getitem__(self, key):
        raise KeyError

    def get(self, key, default=None):
        """Return self[key], or `default` when the key is absent."""
        try:
            return self[key]
        except KeyError:
            return default

    def __contains__(self, key):
        # Membership is defined by whether indexing raises KeyError.
        try:
            self[key]
        except KeyError:
            return False
        return True

    def keys(self):
        """Return a new view of the mapping's keys."""
        return KeysView(self)

    def items(self):
        """Return a new view of the mapping's (key, value) pairs."""
        return ItemsView(self)

    def values(self):
        """Return a new view of the mapping's values."""
        return ValuesView(self)

    def __eq__(self, other):
        # Only mappings compare equal; order is irrelevant.
        if not isinstance(other, Mapping):
            return NotImplemented
        return dict(self.items()) == dict(other.items())

    def __ne__(self, other):
        return not (self == other)
class MutableMapping(Mapping):
    """Mutable mapping ABC: adds pop/popitem/clear/update/setdefault built
    on the abstract __setitem__/__delitem__/__getitem__."""

    @abstractmethod
    def __setitem__(self, key, value):
        raise KeyError

    @abstractmethod
    def __delitem__(self, key):
        raise KeyError

    # Private sentinel so pop() can distinguish "no default supplied"
    # from an explicit default of None.
    __marker = object()

    def pop(self, key, default=__marker):
        """Remove and return self[key]; return `default` if missing,
        or re-raise KeyError when no default was given."""
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value

    def popitem(self):
        """Remove and return an arbitrary (key, value) pair."""
        try:
            key = next(iter(self))
        except StopIteration:
            # Empty mapping: mirror dict.popitem's KeyError.
            raise KeyError
        value = self[key]
        del self[key]
        return key, value

    def clear(self):
        """Remove every item, one popitem() at a time."""
        try:
            while True:
                self.popitem()
        except KeyError:
            pass

    def update(*args, **kwds):
        # Declared with *args (not an explicit self) so that a keyword
        # argument literally named "self" or "other" still works.
        if len(args) > 2:
            raise TypeError("update() takes at most 2 positional "
                            "arguments ({} given)".format(len(args)))
        elif not args:
            raise TypeError("update() takes at least 1 argument (0 given)")
        self = args[0]
        other = args[1] if len(args) >= 2 else ()
        # Prefer mapping iteration, then a keys() protocol, then pairs.
        if isinstance(other, Mapping):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, "keys"):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    def setdefault(self, key, default=None):
        """Return self[key], inserting `default` first when missing."""
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
#MutableMapping.register(dict)
| gpl-3.0 |
nbeck90/data_structures_2 | test_insert_sort.py | 1 | 1280 | # -*- coding: utf-8 -*-
import pytest
from insert_sort import insert_sort
def test_sorted():
    # An already-ordered input must come out unchanged.
    data = list(range(100))
    insert_sort(data)
    assert data == list(range(100))
def test_reverse():
    # Worst case: a strictly descending input ends up fully sorted.
    data = list(range(99, -1, -1))
    insert_sort(data)
    assert data == list(range(100))
def test_empty():
    # Sorting an empty list is a no-op.
    data = []
    insert_sort(data)
    assert data == []
def test_abc():
    # Letters sort identically whether supplied in order or reversed.
    letters = list('abcde')
    insert_sort(letters)
    assert letters == list('abcde')
    letters = list('edcba')
    insert_sort(letters)
    assert letters == list('abcde')
def test_unicode():
    # Fix: the expected value used to be the raw UTF-8 byte string
    # '\xcf\x80', which only compares equal to 'π' on Python 2 (where the
    # source literal is bytes).  Comparing against the same literal keeps
    # the assertion true on Python 2 and meaningful on Python 3.
    my_list = ['π']
    insert_sort(my_list)
    assert my_list == ['π']
def test_duplicate():
    # Duplicate values must be kept and placed adjacently.
    values = [1, 2, 2, 5, 3]
    insert_sort(values)
    assert values == [1, 2, 2, 3, 5]
def test_combo():
    # NOTE(review): mixed int/str comparison only works on Python 2,
    # where numbers sort before strings; on Python 3 this raises
    # TypeError — confirm this suite is Python-2-only.
    my_list = [42, 1, 'a', 500]
    insert_sort(my_list)
    assert my_list == [1, 42, 500, 'a']
    my_list = [42, '1', 'a', '500']
    insert_sort(my_list)
    assert my_list == [42, '1', '500', 'a']
def test_function():
    # insert_sort works in place, so its return value is always None.
    result = insert_sort([])
    assert [result] == [None]
def test_non_iterable():
    # A bare integer is not a sequence, so sorting it must raise.
    with pytest.raises(TypeError):
        insert_sort(42)
| mit |
awkspace/ansible | lib/ansible/modules/files/archive.py | 6 | 18021 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ben Doherty <bendohmv@gmail.com>
# Sponsored by Oomph, Inc. http://www.oomphinc.com
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: archive
version_added: '2.3'
short_description: Creates a compressed archive of one or more files or trees
extends_documentation_fragment: files
description:
- Packs an archive.
- It is the opposite of M(unarchive).
- By default, it assumes the compression source exists on the target.
- It will not copy the source file from the local system to the target before archiving.
- Source files can be deleted after archival by specifying I(remove=True).
options:
path:
description:
- Remote absolute path, glob, or list of paths or globs for the file or files to compress or archive.
type: list
required: true
format:
description:
- The type of compression to use.
- Support for xz was added in Ansible 2.5.
type: str
choices: [ bz2, gz, tar, xz, zip ]
default: gz
dest:
description:
- The file name of the destination archive.
- This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list.
type: path
exclude_path:
description:
- Remote absolute path, glob, or list of paths or globs for the file or files to exclude from the archive.
type: list
version_added: '2.4'
remove:
description:
- Remove any added source files and trees after adding to archive.
type: bool
default: no
notes:
- Requires tarfile, zipfile, gzip and bzip2 packages on target host.
- Requires lzma or backports.lzma if using xz format.
- Can produce I(gzip), I(bzip2), I(lzma) and I(zip) compressed files or archives.
seealso:
- module: unarchive
author:
- Ben Doherty (@bendoh)
'''
EXAMPLES = r'''
- name: Compress directory /path/to/foo/ into /path/to/foo.tgz
archive:
path: /path/to/foo
dest: /path/to/foo.tgz
- name: Compress regular file /path/to/foo into /path/to/foo.gz and remove it
archive:
path: /path/to/foo
remove: yes
- name: Create a zip archive of /path/to/foo
archive:
path: /path/to/foo
format: zip
- name: Create a bz2 archive of multiple files, rooted at /path
archive:
path:
- /path/to/foo
- /path/wong/foo
dest: /path/file.tar.bz2
format: bz2
- name: Create a bz2 archive of a globbed path, while excluding specific dirnames
archive:
path:
- /path/to/foo/*
dest: /path/file.tar.bz2
exclude_path:
- /path/to/foo/bar
- /path/to/foo/baz
format: bz2
- name: Create a bz2 archive of a globbed path, while excluding a glob of dirnames
archive:
path:
- /path/to/foo/*
dest: /path/file.tar.bz2
exclude_path:
- /path/to/foo/ba*
format: bz2
'''
RETURN = r'''
state:
description:
The current state of the archived file.
If 'absent', then no source files were found and the archive does not exist.
If 'compress', then the file source file is in the compressed state.
If 'archive', then the source file or paths are currently archived.
If 'incomplete', then an archive was created, but not all source paths were found.
type: str
returned: always
missing:
description: Any files that were missing from the source.
type: list
returned: success
archived:
description: Any files that were compressed or added to the archive.
type: list
returned: success
arcroot:
description: The archive root.
type: str
returned: always
expanded_paths:
description: The list of matching paths from paths argument.
type: list
returned: always
expanded_exclude_paths:
description: The list of matching exclude paths from the exclude_path argument.
type: list
returned: always
'''
import bz2
import filecmp
import glob
import gzip
import io
import os
import re
import shutil
import tarfile
import zipfile
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.six import PY3
# lzma is in the standard library on Python 3; Python 2 needs the
# third-party backports.lzma package.  Record the import traceback so a
# later fail_json can show why xz support is unavailable.
LZMA_IMP_ERR = None
if PY3:
    try:
        import lzma
        HAS_LZMA = True
    except ImportError:
        LZMA_IMP_ERR = format_exc()
        HAS_LZMA = False
else:
    try:
        from backports import lzma
        HAS_LZMA = True
    except ImportError:
        LZMA_IMP_ERR = format_exc()
        HAS_LZMA = False
def main():
    """Entry point for the archive module.

    Expands the requested source paths (including globs), decides whether
    the result is a multi-file archive or a single-file compression,
    writes the destination in the requested format, optionally removes
    the sources, and reports state/changed back to Ansible.

    Fixes:
    - removed an unreachable duplicate ``module.fail_json`` call after the
      missing-lzma failure;
    - the source-removal error report referenced the ``except``-bound name
      ``e`` after its block (a NameError on Python 3); the last error is
      now captured explicitly.
    """
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='list', required=True),
            format=dict(type='str', default='gz', choices=['bz2', 'gz', 'tar', 'xz', 'zip']),
            dest=dict(type='path'),
            exclude_path=dict(type='list'),
            remove=dict(type='bool', default=False),
        ),
        add_file_common_args=True,
        supports_check_mode=True,
    )

    params = module.params
    check_mode = module.check_mode
    paths = params['path']
    dest = params['dest']
    exclude_paths = params['exclude_path']
    remove = params['remove']

    expanded_paths = []
    expanded_exclude_paths = []
    format = params['format']
    globby = False
    changed = False
    state = 'absent'

    # Simple or archive file compression (inapplicable with 'zip' since it's always an archive)
    archive = False
    successes = []

    # Fail early
    if not HAS_LZMA and format == 'xz':
        module.fail_json(msg=missing_required_lib("lzma or backports.lzma", reason="when using xz format"),
                         exception=LZMA_IMP_ERR)

    for path in paths:
        path = os.path.expanduser(os.path.expandvars(path))

        # Expand any glob characters. If found, add the expanded glob to the
        # list of expanded_paths, which might be empty.
        if ('*' in path or '?' in path):
            expanded_paths = expanded_paths + glob.glob(path)
            globby = True

        # If there are no glob characters the path is added to the expanded paths
        # whether the path exists or not
        else:
            expanded_paths.append(path)

    # Only attempt to expand the exclude paths if it exists
    if exclude_paths:
        for exclude_path in exclude_paths:
            exclude_path = os.path.expanduser(os.path.expandvars(exclude_path))

            # Expand any glob characters. If found, add the expanded glob to the
            # list of expanded_paths, which might be empty.
            if ('*' in exclude_path or '?' in exclude_path):
                expanded_exclude_paths = expanded_exclude_paths + glob.glob(exclude_path)

            # If there are no glob character the exclude path is added to the expanded
            # exclude paths whether the path exists or not.
            else:
                expanded_exclude_paths.append(exclude_path)

    if not expanded_paths:
        return module.fail_json(path=', '.join(paths), expanded_paths=', '.join(expanded_paths), msg='Error, no source paths were found')

    # If we actually matched multiple files or TRIED to, then
    # treat this as a multi-file archive
    archive = globby or os.path.isdir(expanded_paths[0]) or len(expanded_paths) > 1

    # Default created file name (for single-file archives) to
    # <file>.<format>
    if not dest and not archive:
        dest = '%s.%s' % (expanded_paths[0], format)

    # Force archives to specify 'dest'
    if archive and not dest:
        module.fail_json(dest=dest, path=', '.join(paths), msg='Error, must specify "dest" when archiving multiple files or trees')

    archive_paths = []
    missing = []
    arcroot = ''

    for path in expanded_paths:
        # Use the longest common directory name among all the files
        # as the archive root path
        if arcroot == '':
            arcroot = os.path.dirname(path) + os.sep
        else:
            for i in range(len(arcroot)):
                if path[i] != arcroot[i]:
                    break

            if i < len(arcroot):
                arcroot = os.path.dirname(arcroot[0:i + 1])

            arcroot += os.sep

        # Don't allow archives to be created anywhere within paths to be removed
        if remove and os.path.isdir(path):
            path_dir = path
            if path[-1] != '/':
                path_dir += '/'

            if dest.startswith(path_dir):
                module.fail_json(path=', '.join(paths), msg='Error, created archive can not be contained in source paths when remove=True')

        if os.path.lexists(path) and path not in expanded_exclude_paths:
            archive_paths.append(path)
        else:
            missing.append(path)

    # No source files were found but the named archive exists: are we 'compress' or 'archive' now?
    if len(missing) == len(expanded_paths) and dest and os.path.exists(dest):
        # Just check the filename to know if it's an archive or simple compressed file
        if re.search(r'(\.tar|\.tar\.gz|\.tgz|\.tbz2|\.tar\.bz2|\.tar\.xz|\.zip)$', os.path.basename(dest), re.IGNORECASE):
            state = 'archive'
        else:
            state = 'compress'

    # Multiple files, or globbiness
    elif archive:
        if not archive_paths:
            # No source files were found, but the archive is there.
            if os.path.lexists(dest):
                state = 'archive'
        elif missing:
            # SOME source files were found, but not all of them
            state = 'incomplete'

        archive = None
        size = 0
        errors = []

        if os.path.lexists(dest):
            size = os.path.getsize(dest)

        if state != 'archive':
            if check_mode:
                changed = True

            else:
                try:
                    # Slightly more difficult (and less efficient!) compression using zipfile module
                    if format == 'zip':
                        arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED, True)

                    # Easier compression using tarfile module
                    elif format == 'gz' or format == 'bz2':
                        arcfile = tarfile.open(dest, 'w|' + format)

                    # python3 tarfile module allows xz format but for python2 we have to create the tarfile
                    # in memory and then compress it with lzma.
                    elif format == 'xz':
                        arcfileIO = io.BytesIO()
                        arcfile = tarfile.open(fileobj=arcfileIO, mode='w')

                    # Or plain tar archiving
                    elif format == 'tar':
                        arcfile = tarfile.open(dest, 'w')

                    match_root = re.compile('^%s' % re.escape(arcroot))
                    for path in archive_paths:
                        if os.path.isdir(path):
                            # Recurse into directories
                            for dirpath, dirnames, filenames in os.walk(path, topdown=True):
                                if not dirpath.endswith(os.sep):
                                    dirpath += os.sep

                                for dirname in dirnames:
                                    fullpath = dirpath + dirname
                                    arcname = match_root.sub('', fullpath)

                                    try:
                                        if format == 'zip':
                                            arcfile.write(fullpath, arcname)
                                        else:
                                            arcfile.add(fullpath, arcname, recursive=False)

                                    except Exception as e:
                                        errors.append('%s: %s' % (fullpath, to_native(e)))

                                for filename in filenames:
                                    fullpath = dirpath + filename
                                    arcname = match_root.sub('', fullpath)

                                    # Never try to add the archive to itself.
                                    if not filecmp.cmp(fullpath, dest):
                                        try:
                                            if format == 'zip':
                                                arcfile.write(fullpath, arcname)
                                            else:
                                                arcfile.add(fullpath, arcname, recursive=False)

                                            successes.append(fullpath)
                                        except Exception as e:
                                            errors.append('Adding %s: %s' % (path, to_native(e)))
                        else:
                            if format == 'zip':
                                arcfile.write(path, match_root.sub('', path))
                            else:
                                arcfile.add(path, match_root.sub('', path), recursive=False)

                            successes.append(path)

                except Exception as e:
                    module.fail_json(msg='Error when writing %s archive at %s: %s' % (format == 'zip' and 'zip' or ('tar.' + format), dest, to_native(e)),
                                     exception=format_exc())

                if arcfile:
                    arcfile.close()
                    state = 'archive'

                if format == 'xz':
                    with lzma.open(dest, 'wb') as f:
                        f.write(arcfileIO.getvalue())
                    arcfileIO.close()

                if errors:
                    module.fail_json(msg='Errors when writing archive at %s: %s' % (dest, '; '.join(errors)))

        if state in ['archive', 'incomplete'] and remove:
            last_error = None
            for path in successes:
                try:
                    if os.path.isdir(path):
                        shutil.rmtree(path)
                    elif not check_mode:
                        os.remove(path)
                except OSError as e:
                    # Capture the failure: in Python 3 `e` is unbound as soon
                    # as this except block exits, so it must be saved here.
                    errors.append(path)
                    last_error = e

            if errors:
                module.fail_json(dest=dest, msg='Error deleting some source files: ' + str(last_error), files=errors)

        # Rudimentary check: If size changed then file changed. Not perfect, but easy.
        if not check_mode and os.path.getsize(dest) != size:
            changed = True

        if successes and state != 'incomplete':
            state = 'archive'

    # Simple, single-file compression
    else:
        path = expanded_paths[0]

        # No source or compressed file
        if not (os.path.exists(path) or os.path.lexists(dest)):
            state = 'absent'

        # if it already exists and the source file isn't there, consider this done
        elif not os.path.lexists(path) and os.path.lexists(dest):
            state = 'compress'

        else:
            if module.check_mode:
                if not os.path.exists(dest):
                    changed = True
            else:
                size = 0
                f_in = f_out = arcfile = None

                if os.path.lexists(dest):
                    size = os.path.getsize(dest)

                try:
                    if format == 'zip':
                        arcfile = zipfile.ZipFile(dest, 'w', zipfile.ZIP_DEFLATED, True)
                        arcfile.write(path, path[len(arcroot):])
                        arcfile.close()
                        state = 'archive'  # because all zip files are archives
                    elif format == 'tar':
                        arcfile = tarfile.open(dest, 'w')
                        arcfile.add(path)
                        arcfile.close()
                    else:
                        f_in = open(path, 'rb')

                        if format == 'gz':
                            f_out = gzip.open(dest, 'wb')
                        elif format == 'bz2':
                            f_out = bz2.BZ2File(dest, 'wb')
                        elif format == 'xz':
                            f_out = lzma.LZMAFile(dest, 'wb')
                        else:
                            raise OSError("Invalid format")

                        shutil.copyfileobj(f_in, f_out)

                    successes.append(path)
                except OSError as e:
                    module.fail_json(path=path, dest=dest, msg='Unable to write to compressed file: %s' % to_native(e), exception=format_exc())

                if arcfile:
                    arcfile.close()
                if f_in:
                    f_in.close()
                if f_out:
                    f_out.close()

                # Rudimentary check: If size changed then file changed. Not perfect, but easy.
                if os.path.getsize(dest) != size:
                    changed = True

            state = 'compress'

            if remove and not check_mode:
                try:
                    os.remove(path)
                except OSError as e:
                    module.fail_json(path=path, msg='Unable to remove source file: %s' % to_native(e), exception=format_exc())

    params['path'] = dest
    file_args = module.load_file_common_arguments(params)

    if not check_mode:
        changed = module.set_fs_attributes_if_different(file_args, changed)

    module.exit_json(archived=successes,
                     dest=dest,
                     changed=changed,
                     state=state,
                     arcroot=arcroot,
                     missing=missing,
                     expanded_paths=expanded_paths,
                     expanded_exclude_paths=expanded_exclude_paths)


if __name__ == '__main__':
    main()
| gpl-3.0 |
hoosteeno/mozillians | vendor-local/lib/python/tablib/packages/odf/attrconverters.py | 64 | 69451 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2010 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import *
import re, types
# Matches a 6-digit hex RGB color such as '#1a2b3c'.
pattern_color = re.compile(r'#[0-9a-fA-F]{6}')
# Matches a parenthesized 3-D vector of three decimal numbers, e.g. '(1.0 -2 .5)'.
# NOTE(review): neither pattern appears to be used by the converters visible in
# this module chunk -- confirm before removing.
pattern_vector3D = re.compile(r'\([ ]*-?([0-9]+(\.[0-9]*)?|\.[0-9]+)([ ]+-?([0-9]+(\.[0-9]*)?|\.[0-9]+)){2}[ ]*\)')
def make_NCName(arg):
    """ Escape characters that are illegal in an XML NCName.

    ':' becomes '_3a_' and ' ' becomes '_20_' -- each offending character is
    replaced by its hex code wrapped in underscores.
    """
    return arg.replace(':', "_%x_" % ord(':')).replace(' ', "_%x_" % ord(' '))
def cnv_anyURI(attribute, arg, element):
    """ Pass an anyURI value through as a unicode string.
        Relies on the Python 2 ``unicode`` builtin (this module predates Python 3).
    """
    return unicode(arg)
def cnv_boolean(attribute, arg, element):
    """ Convert an attribute value to the ODF boolean strings "true"/"false".

    The strings "false" and "no" (any case) map to "false"; otherwise any
    truthy value maps to "true" and any falsy value ("", 0, None, False)
    to "false".
    """
    # Guard with hasattr so non-string values (e.g. Python booleans) don't
    # crash on .lower(); the original only handled string input.
    if hasattr(arg, 'lower') and arg.lower() in ("false", "no"):
        return "false"
    if arg:
        return "true"
    return "false"
# Potentially accept color values
def cnv_color(attribute, arg, element):
    """ A RGB color in conformance with §5.9.11 of [XSL], that is a RGB color in notation “#rrggbb”, where
        rr, gg and bb are 8-bit hexadecimal digits.
    """
    # NOTE(review): despite the docstring, no validation happens here
    # (pattern_color above appears unused); the value is just coerced to str.
    return str(arg)
def cnv_configtype(attribute, arg, element):
    """ Validate a config:type value against the allowed ODF config value types
        and return it as a str. Raises ValueError otherwise.
    """
    if str(arg) not in ("boolean", "short", "int", "long",
            "double", "string", "datetime", "base64Binary"):
        # Parenthesized raise form works on both Python 2 and 3
        # (the old `raise ValueError, msg` is a SyntaxError on Python 3).
        raise ValueError("'%s' not allowed" % str(arg))
    return str(arg)
def cnv_data_source_has_labels(attribute, arg, element):
    """ Validate a chart:data-source-has-labels value ('none', 'row',
        'column' or 'both') and return it as a str. Raises ValueError otherwise.
    """
    if str(arg) not in ("none","row","column","both"):
        # Py2/3-compatible raise (was py2-only `raise ValueError, msg`).
        raise ValueError("'%s' not allowed" % str(arg))
    return str(arg)
# Understand different date formats
def cnv_date(attribute, arg, element):
    """ A dateOrDateTime value is either an [xmlschema-2] date value or an [xmlschema-2] dateTime
        value.
        No validation is performed; the value is simply coerced to str.
    """
    return str(arg)
def cnv_dateTime(attribute, arg, element):
    """ A dateOrDateTime value is either an [xmlschema-2] date value or an [xmlschema-2] dateTime
        value.
        No validation is performed; the value is simply coerced to str.
    """
    return str(arg)
def cnv_double(attribute, arg, element):
    # Coerce to str; no numeric validation is performed.
    return str(arg)
def cnv_duration(attribute, arg, element):
    # Coerce to str; no ISO-8601 duration validation is performed.
    return str(arg)
def cnv_family(attribute, arg, element):
    """ A style family: validate against the ODF style-family names and
        return as str. Raises ValueError otherwise.
    """
    if str(arg) not in ("text", "paragraph", "section", "ruby", "table", "table-column", "table-row", "table-cell",
            "graphic", "presentation", "drawing-page", "chart"):
        # Py2/3-compatible raise (was py2-only `raise ValueError, msg`).
        raise ValueError("'%s' not allowed" % str(arg))
    return str(arg)
def __save_prefix(attribute, arg, element):
    """ Return *arg* unchanged (as unicode) after checking whether its
        namespace prefix (the part before the first ':') is known to *element*.
    """
    prefix = arg.split(':',1)[0]
    if prefix == arg:
        # No ':' present, so there is no prefix to resolve.
        return unicode(arg)
    namespace = element.get_knownns(prefix)
    if namespace is None:
        #raise ValueError, "'%s' is an unknown prefix" % str(prefix)
        return unicode(arg)
    p = element.get_nsprefix(namespace)
    # NOTE(review): `p` is unused and the function always returns arg;
    # get_nsprefix() is presumably called for its side effect of registering
    # the namespace on the document -- confirm before removing.
    return unicode(arg)
def cnv_formula(attribute, arg, element):
    """ A string containing a formula. Formulas do not have a predefined syntax, but the string should
        begin with a namespace prefix, followed by a “:” (COLON, U+003A) separator, followed by the text
        of the formula. The namespace bound to the prefix determines the syntax and semantics of the
        formula.
    """
    # Delegate to __save_prefix, which resolves the leading namespace prefix
    # (if any) and returns the value as a unicode string.
    return __save_prefix(attribute, arg, element)
def cnv_ID(attribute, arg, element):
    # XML ID values are passed through as plain strings.
    return str(arg)
def cnv_IDREF(attribute, arg, element):
    # References to XML IDs are passed through as plain strings.
    return str(arg)
def cnv_integer(attribute, arg, element):
    # Coerce to str; no integer validation is performed.
    return str(arg)
def cnv_legend_position(attribute, arg, element):
    """ Validate a chart:legend-position value and return it as str.
        Raises ValueError otherwise.
    """
    if str(arg) not in ("start", "end", "top", "bottom", "top-start", "bottom-start", "top-end", "bottom-end"):
        # Py2/3-compatible raise (was py2-only `raise ValueError, msg`).
        raise ValueError("'%s' not allowed" % str(arg))
    return str(arg)
# A signed decimal magnitude followed by one of the XSL length units.
pattern_length = re.compile(r'-?([0-9]+(\.[0-9]*)?|\.[0-9]+)((cm)|(mm)|(in)|(pt)|(pc)|(px))')
def cnv_length(attribute, arg, element):
    """ A (positive or negative) physical length, consisting of magnitude and unit, in conformance with the
        Units of Measure defined in §5.9.13 of [XSL].
        Raises ValueError if *arg* does not look like a length.
    """
    global pattern_length
    # NOTE: match() only anchors at the start, so trailing garbage ('2cmx')
    # is still accepted -- preserved for backward compatibility.
    if not pattern_length.match(arg):
        # Py2/3-compatible raise (was py2-only `raise ValueError, msg`).
        raise ValueError("'%s' is not a valid length" % arg)
    return arg
def cnv_lengthorpercent(attribute, arg, element):
    """ Accept either a valid length (e.g. '2cm') or a percentage (e.g. '50%').
        Raises ValueError if *arg* is neither.
    """
    failed = False
    # The broad excepts are deliberate: any failure of one converter just
    # means we try the other.
    try: return cnv_length(attribute, arg, element)
    except: failed = True
    try: return cnv_percent(attribute, arg, element)
    except: failed = True
    if failed:
        # Py2/3-compatible raise (was py2-only `raise ValueError, msg`).
        raise ValueError("'%s' is not a valid length or percent" % arg)
    return arg
def cnv_metavaluetype(attribute, arg, element):
    """ Validate a meta:value-type value and return it as str.
        Raises ValueError otherwise.
    """
    if str(arg) not in ("float", "date", "time", "boolean", "string"):
        # Py2/3-compatible raise (was py2-only `raise ValueError, msg`).
        raise ValueError("'%s' not allowed" % str(arg))
    return str(arg)
def cnv_major_minor(attribute, arg, element):
    """ Validate that *arg* is 'major' or 'minor' and return it.
        Used for the chart:class attribute on <chart:grid>.
    """
    if arg not in ('major','minor'):
        # Py2/3-compatible raise (was py2-only `raise ValueError, msg`).
        raise ValueError("'%s' is not either 'minor' or 'major'" % arg)
    # Bug fix: the original fell off the end and returned None for valid
    # input; every other converter returns the validated value.
    return arg
# A 'prefix:localname' token.
pattern_namespacedToken = re.compile(r'[0-9a-zA-Z_]+:[0-9a-zA-Z._\-]+')
def cnv_namespacedToken(attribute, arg, element):
    """ Validate a 'prefix:localname' token; the prefix is then resolved via
        __save_prefix. Raises ValueError on malformed tokens.
    """
    global pattern_namespacedToken
    if not pattern_namespacedToken.match(arg):
        # Py2/3-compatible raise (was py2-only `raise ValueError, msg`).
        raise ValueError("'%s' is not a valid namespaced token" % arg)
    return __save_prefix(attribute, arg, element)
def cnv_NCName(attribute, arg, element):
    """ NCName is defined in http://www.w3.org/TR/REC-xml-names/#NT-NCName
        Essentially an XML name minus ':'
    """
    # types.StringTypes is (str, unicode) on Python 2.
    if type(arg) in types.StringTypes:
        return make_NCName(arg)
    else:
        # Non-string arguments are assumed to be style elements; use their
        # style:name attribute. TODO confirm all callers pass styles here.
        return arg.getAttrNS(STYLENS, 'name')
# This function takes either an instance of a style (preferred)
# or a text string naming the style. If it is a text string, then it must
# already have been converted to an NCName
# The text-string argument is mainly for when we build a structure from XML
def cnv_StyleNameRef(attribute, arg, element):
    """ Return the style:name of a style object, or *arg* itself when it is
        already a (converted) name string.
    """
    try:
        return arg.getAttrNS(STYLENS, 'name')
    except:
        # arg is not a style element; assume it is already an NCName string.
        return arg
# This function takes either an instance of a style (preferred)
# or a text string naming the style. If it is a text string, then it must
# already have been converted to an NCName
# The text-string argument is mainly for when we build a structure from XML
def cnv_DrawNameRef(attribute, arg, element):
    """ Return the draw:name of a draw object, or *arg* itself when it is
        already a (converted) name string.
    """
    try:
        return arg.getAttrNS(DRAWNS, 'name')
    except:
        # arg is not a draw element; assume it is already an NCName string.
        return arg
# Must accept list of Style objects
def cnv_NCNames(attribute, arg, element):
    # Join a list of (already converted) NCName strings with spaces.
    return ' '.join(arg)
def cnv_nonNegativeInteger(attribute, arg, element):
    # Coerce to str; no range validation is performed.
    return str(arg)
# A signed decimal number immediately followed by '%'.
pattern_percent = re.compile(r'-?([0-9]+(\.[0-9]*)?|\.[0-9]+)%')
def cnv_percent(attribute, arg, element):
    """ A percentage value such as '50%' or '-12.5%'.
        Raises ValueError if *arg* does not look like a percentage.
    """
    global pattern_percent
    if not pattern_percent.match(arg):
        # Py2/3-compatible raise; the original message also wrongly said
        # "length" for a percent value.
        raise ValueError("'%s' is not a valid percent" % arg)
    return arg
# Real one doesn't allow floating point values
pattern_points = re.compile(r'-?[0-9]+,-?[0-9]+([ ]+-?[0-9]+,-?[0-9]+)*')
#pattern_points = re.compile(r'-?[0-9.]+,-?[0-9.]+([ ]+-?[0-9.]+,-?[0-9.]+)*')
def cnv_points(attribute, arg, element):
    """ Convert a point list: either an already formatted string
        'x1,y1 x2,y2 ...' (validated against pattern_points) or a sequence
        of (x, y) integer pairs such as [(0,0), (1,1)].
    """
    global pattern_points
    # types.StringTypes is (str, unicode) on Python 2.
    if type(arg) in types.StringTypes:
        if not pattern_points.match(arg):
            raise ValueError, "x,y are separated by a comma and the points are separated by white spaces"
        return arg
    else:
        try:
            # %d truncates: non-integer coordinates are formatted as ints.
            strarg = ' '.join([ "%d,%d" % p for p in arg])
        except:
            raise ValueError, "Points must be string or [(0,0),(1,1)] - not %s" % arg
        return strarg
def cnv_positiveInteger(attribute, arg, element):
    # Coerce to str; no range validation is performed.
    return str(arg)
def cnv_string(attribute, arg, element):
    # Return the value as a unicode string (Python 2 builtin).
    return unicode(arg)
def cnv_textnoteclass(attribute, arg, element):
    """ Validate a text:note-class value ('footnote' or 'endnote') and return
        it as str. Raises ValueError otherwise.
    """
    if str(arg) not in ("footnote", "endnote"):
        # Py2/3-compatible raise (was py2-only `raise ValueError, msg`).
        raise ValueError("'%s' not allowed" % str(arg))
    return str(arg)
# Understand different time formats
def cnv_time(attribute, arg, element):
    # Coerce to str; no time-format validation is performed.
    return str(arg)
def cnv_token(attribute, arg, element):
    # Coerce to str.
    return str(arg)
# Exactly four whitespace-separated (possibly negative) integers.
pattern_viewbox = re.compile(r'-?[0-9]+([ ]+-?[0-9]+){3}$')
def cnv_viewbox(attribute, arg, element):
    """ A svg:viewBox value: four whitespace-separated integers, e.g.
        '0 0 1000 1000'. Raises ValueError otherwise.
    """
    global pattern_viewbox
    if not pattern_viewbox.match(arg):
        # Py2/3-compatible raise (was py2-only `raise ValueError, msg`).
        raise ValueError("viewBox must be four integers separated by whitespaces")
    return arg
def cnv_xlinkshow(attribute, arg, element):
    """ Validate an xlink:show value ('new', 'replace' or 'embed') and return
        it as str. Raises ValueError otherwise.
    """
    if str(arg) not in ("new", "replace", "embed"):
        # Py2/3-compatible raise (was py2-only `raise ValueError, msg`).
        raise ValueError("'%s' not allowed" % str(arg))
    return str(arg)
attrconverters = {
((ANIMNS,u'audio-level'), None): cnv_double,
((ANIMNS,u'color-interpolation'), None): cnv_string,
((ANIMNS,u'color-interpolation-direction'), None): cnv_string,
((ANIMNS,u'command'), None): cnv_string,
((ANIMNS,u'formula'), None): cnv_string,
((ANIMNS,u'id'), None): cnv_ID,
((ANIMNS,u'iterate-interval'), None): cnv_duration,
((ANIMNS,u'iterate-type'), None): cnv_string,
((ANIMNS,u'name'), None): cnv_string,
((ANIMNS,u'sub-item'), None): cnv_string,
((ANIMNS,u'value'), None): cnv_string,
# ((DBNS,u'type'), None): cnv_namespacedToken,
((CHARTNS,u'attached-axis'), None): cnv_string,
((CHARTNS,u'class'), (CHARTNS,u'grid')): cnv_major_minor,
((CHARTNS,u'class'), None): cnv_namespacedToken,
((CHARTNS,u'column-mapping'), None): cnv_string,
((CHARTNS,u'connect-bars'), None): cnv_boolean,
((CHARTNS,u'data-label-number'), None): cnv_string,
((CHARTNS,u'data-label-symbol'), None): cnv_boolean,
((CHARTNS,u'data-label-text'), None): cnv_boolean,
((CHARTNS,u'data-source-has-labels'), None): cnv_data_source_has_labels,
((CHARTNS,u'deep'), None): cnv_boolean,
((CHARTNS,u'dimension'), None): cnv_string,
((CHARTNS,u'display-label'), None): cnv_boolean,
((CHARTNS,u'error-category'), None): cnv_string,
((CHARTNS,u'error-lower-indicator'), None): cnv_boolean,
((CHARTNS,u'error-lower-limit'), None): cnv_string,
((CHARTNS,u'error-margin'), None): cnv_string,
((CHARTNS,u'error-percentage'), None): cnv_string,
((CHARTNS,u'error-upper-indicator'), None): cnv_boolean,
((CHARTNS,u'error-upper-limit'), None): cnv_string,
((CHARTNS,u'gap-width'), None): cnv_string,
((CHARTNS,u'interpolation'), None): cnv_string,
((CHARTNS,u'interval-major'), None): cnv_string,
((CHARTNS,u'interval-minor-divisor'), None): cnv_string,
((CHARTNS,u'japanese-candle-stick'), None): cnv_boolean,
((CHARTNS,u'label-arrangement'), None): cnv_string,
((CHARTNS,u'label-cell-address'), None): cnv_string,
((CHARTNS,u'legend-align'), None): cnv_string,
((CHARTNS,u'legend-position'), None): cnv_legend_position,
((CHARTNS,u'lines'), None): cnv_boolean,
((CHARTNS,u'link-data-style-to-source'), None): cnv_boolean,
((CHARTNS,u'logarithmic'), None): cnv_boolean,
((CHARTNS,u'maximum'), None): cnv_string,
((CHARTNS,u'mean-value'), None): cnv_boolean,
((CHARTNS,u'minimum'), None): cnv_string,
((CHARTNS,u'name'), None): cnv_string,
((CHARTNS,u'origin'), None): cnv_string,
((CHARTNS,u'overlap'), None): cnv_string,
((CHARTNS,u'percentage'), None): cnv_boolean,
((CHARTNS,u'pie-offset'), None): cnv_string,
((CHARTNS,u'regression-type'), None): cnv_string,
((CHARTNS,u'repeated'), None): cnv_nonNegativeInteger,
((CHARTNS,u'row-mapping'), None): cnv_string,
((CHARTNS,u'scale-text'), None): cnv_boolean,
((CHARTNS,u'series-source'), None): cnv_string,
((CHARTNS,u'solid-type'), None): cnv_string,
((CHARTNS,u'spline-order'), None): cnv_string,
((CHARTNS,u'spline-resolution'), None): cnv_string,
((CHARTNS,u'stacked'), None): cnv_boolean,
((CHARTNS,u'style-name'), None): cnv_StyleNameRef,
((CHARTNS,u'symbol-height'), None): cnv_string,
((CHARTNS,u'symbol-name'), None): cnv_string,
((CHARTNS,u'symbol-type'), None): cnv_string,
((CHARTNS,u'symbol-width'), None): cnv_string,
((CHARTNS,u'text-overlap'), None): cnv_boolean,
((CHARTNS,u'three-dimensional'), None): cnv_boolean,
((CHARTNS,u'tick-marks-major-inner'), None): cnv_boolean,
((CHARTNS,u'tick-marks-major-outer'), None): cnv_boolean,
((CHARTNS,u'tick-marks-minor-inner'), None): cnv_boolean,
((CHARTNS,u'tick-marks-minor-outer'), None): cnv_boolean,
((CHARTNS,u'values-cell-range-address'), None): cnv_string,
((CHARTNS,u'vertical'), None): cnv_boolean,
((CHARTNS,u'visible'), None): cnv_boolean,
((CONFIGNS,u'name'), None): cnv_formula,
((CONFIGNS,u'type'), None): cnv_configtype,
((DR3DNS,u'ambient-color'), None): cnv_string,
((DR3DNS,u'back-scale'), None): cnv_string,
((DR3DNS,u'backface-culling'), None): cnv_string,
((DR3DNS,u'center'), None): cnv_string,
((DR3DNS,u'close-back'), None): cnv_boolean,
((DR3DNS,u'close-front'), None): cnv_boolean,
((DR3DNS,u'depth'), None): cnv_length,
((DR3DNS,u'diffuse-color'), None): cnv_string,
((DR3DNS,u'direction'), None): cnv_string,
((DR3DNS,u'distance'), None): cnv_length,
((DR3DNS,u'edge-rounding'), None): cnv_string,
((DR3DNS,u'edge-rounding-mode'), None): cnv_string,
((DR3DNS,u'emissive-color'), None): cnv_string,
((DR3DNS,u'enabled'), None): cnv_boolean,
((DR3DNS,u'end-angle'), None): cnv_string,
((DR3DNS,u'focal-length'), None): cnv_length,
((DR3DNS,u'horizontal-segments'), None): cnv_string,
((DR3DNS,u'lighting-mode'), None): cnv_boolean,
((DR3DNS,u'max-edge'), None): cnv_string,
((DR3DNS,u'min-edge'), None): cnv_string,
((DR3DNS,u'normals-direction'), None): cnv_string,
((DR3DNS,u'normals-kind'), None): cnv_string,
((DR3DNS,u'projection'), None): cnv_string,
((DR3DNS,u'shade-mode'), None): cnv_string,
((DR3DNS,u'shadow'), None): cnv_string,
((DR3DNS,u'shadow-slant'), None): cnv_nonNegativeInteger,
((DR3DNS,u'shininess'), None): cnv_string,
((DR3DNS,u'size'), None): cnv_string,
((DR3DNS,u'specular'), None): cnv_boolean,
((DR3DNS,u'specular-color'), None): cnv_string,
((DR3DNS,u'texture-filter'), None): cnv_string,
((DR3DNS,u'texture-generation-mode-x'), None): cnv_string,
((DR3DNS,u'texture-generation-mode-y'), None): cnv_string,
((DR3DNS,u'texture-kind'), None): cnv_string,
((DR3DNS,u'texture-mode'), None): cnv_string,
((DR3DNS,u'transform'), None): cnv_string,
((DR3DNS,u'vertical-segments'), None): cnv_string,
((DR3DNS,u'vpn'), None): cnv_string,
((DR3DNS,u'vrp'), None): cnv_string,
((DR3DNS,u'vup'), None): cnv_string,
((DRAWNS,u'align'), None): cnv_string,
((DRAWNS,u'angle'), None): cnv_integer,
((DRAWNS,u'archive'), None): cnv_string,
((DRAWNS,u'auto-grow-height'), None): cnv_boolean,
((DRAWNS,u'auto-grow-width'), None): cnv_boolean,
((DRAWNS,u'background-size'), None): cnv_string,
((DRAWNS,u'blue'), None): cnv_string,
((DRAWNS,u'border'), None): cnv_string,
((DRAWNS,u'caption-angle'), None): cnv_string,
((DRAWNS,u'caption-angle-type'), None): cnv_string,
((DRAWNS,u'caption-escape'), None): cnv_string,
((DRAWNS,u'caption-escape-direction'), None): cnv_string,
((DRAWNS,u'caption-fit-line-length'), None): cnv_boolean,
((DRAWNS,u'caption-gap'), None): cnv_string,
((DRAWNS,u'caption-line-length'), None): cnv_length,
((DRAWNS,u'caption-point-x'), None): cnv_string,
((DRAWNS,u'caption-point-y'), None): cnv_string,
((DRAWNS,u'caption-id'), None): cnv_IDREF,
((DRAWNS,u'caption-type'), None): cnv_string,
((DRAWNS,u'chain-next-name'), None): cnv_string,
((DRAWNS,u'class-id'), None): cnv_string,
((DRAWNS,u'class-names'), None): cnv_NCNames,
((DRAWNS,u'code'), None): cnv_string,
((DRAWNS,u'color'), None): cnv_string,
((DRAWNS,u'color-inversion'), None): cnv_boolean,
((DRAWNS,u'color-mode'), None): cnv_string,
((DRAWNS,u'concave'), None): cnv_string,
((DRAWNS,u'concentric-gradient-fill-allowed'), None): cnv_boolean,
((DRAWNS,u'contrast'), None): cnv_string,
((DRAWNS,u'control'), None): cnv_IDREF,
((DRAWNS,u'copy-of'), None): cnv_string,
((DRAWNS,u'corner-radius'), None): cnv_length,
((DRAWNS,u'corners'), None): cnv_positiveInteger,
((DRAWNS,u'cx'), None): cnv_string,
((DRAWNS,u'cy'), None): cnv_string,
((DRAWNS,u'data'), None): cnv_string,
((DRAWNS,u'decimal-places'), None): cnv_string,
((DRAWNS,u'display'), None): cnv_string,
((DRAWNS,u'display-name'), None): cnv_string,
((DRAWNS,u'distance'), None): cnv_lengthorpercent,
((DRAWNS,u'dots1'), None): cnv_integer,
((DRAWNS,u'dots1-length'), None): cnv_lengthorpercent,
((DRAWNS,u'dots2'), None): cnv_integer,
((DRAWNS,u'dots2-length'), None): cnv_lengthorpercent,
((DRAWNS,u'end-angle'), None): cnv_double,
((DRAWNS,u'end'), None): cnv_string,
((DRAWNS,u'end-color'), None): cnv_string,
((DRAWNS,u'end-glue-point'), None): cnv_nonNegativeInteger,
((DRAWNS,u'end-guide'), None): cnv_length,
((DRAWNS,u'end-intensity'), None): cnv_string,
((DRAWNS,u'end-line-spacing-horizontal'), None): cnv_string,
((DRAWNS,u'end-line-spacing-vertical'), None): cnv_string,
((DRAWNS,u'end-shape'), None): cnv_IDREF,
((DRAWNS,u'engine'), None): cnv_namespacedToken,
((DRAWNS,u'enhanced-path'), None): cnv_string,
((DRAWNS,u'escape-direction'), None): cnv_string,
((DRAWNS,u'extrusion-allowed'), None): cnv_boolean,
((DRAWNS,u'extrusion-brightness'), None): cnv_string,
((DRAWNS,u'extrusion'), None): cnv_boolean,
((DRAWNS,u'extrusion-color'), None): cnv_boolean,
((DRAWNS,u'extrusion-depth'), None): cnv_double,
((DRAWNS,u'extrusion-diffusion'), None): cnv_string,
((DRAWNS,u'extrusion-first-light-direction'), None): cnv_string,
((DRAWNS,u'extrusion-first-light-harsh'), None): cnv_boolean,
((DRAWNS,u'extrusion-first-light-level'), None): cnv_string,
((DRAWNS,u'extrusion-light-face'), None): cnv_boolean,
((DRAWNS,u'extrusion-metal'), None): cnv_boolean,
((DRAWNS,u'extrusion-number-of-line-segments'), None): cnv_integer,
((DRAWNS,u'extrusion-origin'), None): cnv_double,
((DRAWNS,u'extrusion-rotation-angle'), None): cnv_double,
((DRAWNS,u'extrusion-rotation-center'), None): cnv_string,
((DRAWNS,u'extrusion-second-light-direction'), None): cnv_string,
((DRAWNS,u'extrusion-second-light-harsh'), None): cnv_boolean,
((DRAWNS,u'extrusion-second-light-level'), None): cnv_string,
((DRAWNS,u'extrusion-shininess'), None): cnv_string,
((DRAWNS,u'extrusion-skew'), None): cnv_double,
((DRAWNS,u'extrusion-specularity'), None): cnv_string,
((DRAWNS,u'extrusion-viewpoint'), None): cnv_string,
((DRAWNS,u'fill'), None): cnv_string,
((DRAWNS,u'fill-color'), None): cnv_string,
((DRAWNS,u'fill-gradient-name'), None): cnv_string,
((DRAWNS,u'fill-hatch-name'), None): cnv_string,
((DRAWNS,u'fill-hatch-solid'), None): cnv_boolean,
((DRAWNS,u'fill-image-height'), None): cnv_lengthorpercent,
((DRAWNS,u'fill-image-name'), None): cnv_DrawNameRef,
((DRAWNS,u'fill-image-ref-point'), None): cnv_string,
((DRAWNS,u'fill-image-ref-point-x'), None): cnv_string,
((DRAWNS,u'fill-image-ref-point-y'), None): cnv_string,
((DRAWNS,u'fill-image-width'), None): cnv_lengthorpercent,
((DRAWNS,u'filter-name'), None): cnv_string,
((DRAWNS,u'fit-to-contour'), None): cnv_boolean,
((DRAWNS,u'fit-to-size'), None): cnv_boolean,
((DRAWNS,u'formula'), None): cnv_string,
((DRAWNS,u'frame-display-border'), None): cnv_boolean,
((DRAWNS,u'frame-display-scrollbar'), None): cnv_boolean,
((DRAWNS,u'frame-margin-horizontal'), None): cnv_string,
((DRAWNS,u'frame-margin-vertical'), None): cnv_string,
((DRAWNS,u'frame-name'), None): cnv_string,
((DRAWNS,u'gamma'), None): cnv_string,
((DRAWNS,u'glue-point-leaving-directions'), None): cnv_string,
((DRAWNS,u'glue-point-type'), None): cnv_string,
((DRAWNS,u'glue-points'), None): cnv_string,
((DRAWNS,u'gradient-step-count'), None): cnv_string,
((DRAWNS,u'green'), None): cnv_string,
((DRAWNS,u'guide-distance'), None): cnv_string,
((DRAWNS,u'guide-overhang'), None): cnv_length,
((DRAWNS,u'handle-mirror-horizontal'), None): cnv_boolean,
((DRAWNS,u'handle-mirror-vertical'), None): cnv_boolean,
((DRAWNS,u'handle-polar'), None): cnv_string,
((DRAWNS,u'handle-position'), None): cnv_string,
((DRAWNS,u'handle-radius-range-maximum'), None): cnv_string,
((DRAWNS,u'handle-radius-range-minimum'), None): cnv_string,
((DRAWNS,u'handle-range-x-maximum'), None): cnv_string,
((DRAWNS,u'handle-range-x-minimum'), None): cnv_string,
((DRAWNS,u'handle-range-y-maximum'), None): cnv_string,
((DRAWNS,u'handle-range-y-minimum'), None): cnv_string,
((DRAWNS,u'handle-switched'), None): cnv_boolean,
# ((DRAWNS,u'id'), None): cnv_ID,
# ((DRAWNS,u'id'), None): cnv_nonNegativeInteger, # ?? line 6581 in RNG
((DRAWNS,u'id'), None): cnv_string,
((DRAWNS,u'image-opacity'), None): cnv_string,
((DRAWNS,u'kind'), None): cnv_string,
((DRAWNS,u'layer'), None): cnv_string,
((DRAWNS,u'line-distance'), None): cnv_string,
((DRAWNS,u'line-skew'), None): cnv_string,
((DRAWNS,u'luminance'), None): cnv_string,
((DRAWNS,u'marker-end-center'), None): cnv_boolean,
((DRAWNS,u'marker-end'), None): cnv_string,
((DRAWNS,u'marker-end-width'), None): cnv_length,
((DRAWNS,u'marker-start-center'), None): cnv_boolean,
((DRAWNS,u'marker-start'), None): cnv_string,
((DRAWNS,u'marker-start-width'), None): cnv_length,
((DRAWNS,u'master-page-name'), None): cnv_StyleNameRef,
((DRAWNS,u'may-script'), None): cnv_boolean,
((DRAWNS,u'measure-align'), None): cnv_string,
((DRAWNS,u'measure-vertical-align'), None): cnv_string,
((DRAWNS,u'mime-type'), None): cnv_string,
((DRAWNS,u'mirror-horizontal'), None): cnv_boolean,
((DRAWNS,u'mirror-vertical'), None): cnv_boolean,
((DRAWNS,u'modifiers'), None): cnv_string,
((DRAWNS,u'name'), None): cnv_NCName,
# ((DRAWNS,u'name'), None): cnv_string,
((DRAWNS,u'nav-order'), None): cnv_IDREF,
((DRAWNS,u'nohref'), None): cnv_string,
((DRAWNS,u'notify-on-update-of-ranges'), None): cnv_string,
((DRAWNS,u'object'), None): cnv_string,
((DRAWNS,u'ole-draw-aspect'), None): cnv_string,
((DRAWNS,u'opacity'), None): cnv_string,
((DRAWNS,u'opacity-name'), None): cnv_string,
((DRAWNS,u'page-number'), None): cnv_positiveInteger,
((DRAWNS,u'parallel'), None): cnv_boolean,
((DRAWNS,u'path-stretchpoint-x'), None): cnv_double,
((DRAWNS,u'path-stretchpoint-y'), None): cnv_double,
((DRAWNS,u'placing'), None): cnv_string,
((DRAWNS,u'points'), None): cnv_points,
((DRAWNS,u'protected'), None): cnv_boolean,
((DRAWNS,u'recreate-on-edit'), None): cnv_boolean,
((DRAWNS,u'red'), None): cnv_string,
((DRAWNS,u'rotation'), None): cnv_integer,
((DRAWNS,u'secondary-fill-color'), None): cnv_string,
((DRAWNS,u'shadow'), None): cnv_string,
((DRAWNS,u'shadow-color'), None): cnv_string,
((DRAWNS,u'shadow-offset-x'), None): cnv_length,
((DRAWNS,u'shadow-offset-y'), None): cnv_length,
((DRAWNS,u'shadow-opacity'), None): cnv_string,
((DRAWNS,u'shape-id'), None): cnv_IDREF,
((DRAWNS,u'sharpness'), None): cnv_string,
((DRAWNS,u'show-unit'), None): cnv_boolean,
((DRAWNS,u'start-angle'), None): cnv_double,
((DRAWNS,u'start'), None): cnv_string,
((DRAWNS,u'start-color'), None): cnv_string,
((DRAWNS,u'start-glue-point'), None): cnv_nonNegativeInteger,
((DRAWNS,u'start-guide'), None): cnv_length,
((DRAWNS,u'start-intensity'), None): cnv_string,
((DRAWNS,u'start-line-spacing-horizontal'), None): cnv_string,
((DRAWNS,u'start-line-spacing-vertical'), None): cnv_string,
((DRAWNS,u'start-shape'), None): cnv_IDREF,
((DRAWNS,u'stroke'), None): cnv_string,
((DRAWNS,u'stroke-dash'), None): cnv_string,
((DRAWNS,u'stroke-dash-names'), None): cnv_string,
((DRAWNS,u'stroke-linejoin'), None): cnv_string,
((DRAWNS,u'style'), None): cnv_string,
((DRAWNS,u'style-name'), None): cnv_StyleNameRef,
((DRAWNS,u'symbol-color'), None): cnv_string,
((DRAWNS,u'text-areas'), None): cnv_string,
((DRAWNS,u'text-path-allowed'), None): cnv_boolean,
((DRAWNS,u'text-path'), None): cnv_boolean,
((DRAWNS,u'text-path-mode'), None): cnv_string,
((DRAWNS,u'text-path-same-letter-heights'), None): cnv_boolean,
((DRAWNS,u'text-path-scale'), None): cnv_string,
((DRAWNS,u'text-rotate-angle'), None): cnv_double,
((DRAWNS,u'text-style-name'), None): cnv_StyleNameRef,
((DRAWNS,u'textarea-horizontal-align'), None): cnv_string,
((DRAWNS,u'textarea-vertical-align'), None): cnv_string,
((DRAWNS,u'tile-repeat-offset'), None): cnv_string,
((DRAWNS,u'transform'), None): cnv_string,
((DRAWNS,u'type'), None): cnv_string,
((DRAWNS,u'unit'), None): cnv_string,
((DRAWNS,u'value'), None): cnv_string,
((DRAWNS,u'visible-area-height'), None): cnv_string,
((DRAWNS,u'visible-area-left'), None): cnv_string,
((DRAWNS,u'visible-area-top'), None): cnv_string,
((DRAWNS,u'visible-area-width'), None): cnv_string,
((DRAWNS,u'wrap-influence-on-position'), None): cnv_string,
((DRAWNS,u'z-index'), None): cnv_nonNegativeInteger,
((FONS,u'background-color'), None): cnv_string,
((FONS,u'border-bottom'), None): cnv_string,
((FONS,u'border'), None): cnv_string,
((FONS,u'border-left'), None): cnv_string,
((FONS,u'border-right'), None): cnv_string,
((FONS,u'border-top'), None): cnv_string,
((FONS,u'break-after'), None): cnv_string,
((FONS,u'break-before'), None): cnv_string,
((FONS,u'clip'), None): cnv_string,
((FONS,u'color'), None): cnv_string,
((FONS,u'column-count'), None): cnv_positiveInteger,
((FONS,u'column-gap'), None): cnv_length,
((FONS,u'country'), None): cnv_token,
((FONS,u'end-indent'), None): cnv_length,
((FONS,u'font-family'), None): cnv_string,
((FONS,u'font-size'), None): cnv_string,
((FONS,u'font-style'), None): cnv_string,
((FONS,u'font-variant'), None): cnv_string,
((FONS,u'font-weight'), None): cnv_string,
((FONS,u'height'), None): cnv_string,
((FONS,u'hyphenate'), None): cnv_boolean,
((FONS,u'hyphenation-keep'), None): cnv_string,
((FONS,u'hyphenation-ladder-count'), None): cnv_string,
((FONS,u'hyphenation-push-char-count'), None): cnv_string,
((FONS,u'hyphenation-remain-char-count'), None): cnv_string,
((FONS,u'keep-together'), None): cnv_string,
((FONS,u'keep-with-next'), None): cnv_string,
((FONS,u'language'), None): cnv_token,
((FONS,u'letter-spacing'), None): cnv_string,
((FONS,u'line-height'), None): cnv_string,
((FONS,u'margin-bottom'), None): cnv_string,
((FONS,u'margin'), None): cnv_string,
((FONS,u'margin-left'), None): cnv_string,
((FONS,u'margin-right'), None): cnv_string,
((FONS,u'margin-top'), None): cnv_string,
((FONS,u'max-height'), None): cnv_string,
((FONS,u'max-width'), None): cnv_string,
((FONS,u'min-height'), None): cnv_length,
((FONS,u'min-width'), None): cnv_string,
((FONS,u'orphans'), None): cnv_string,
((FONS,u'padding-bottom'), None): cnv_string,
((FONS,u'padding'), None): cnv_string,
((FONS,u'padding-left'), None): cnv_string,
((FONS,u'padding-right'), None): cnv_string,
((FONS,u'padding-top'), None): cnv_string,
((FONS,u'page-height'), None): cnv_length,
((FONS,u'page-width'), None): cnv_length,
((FONS,u'space-after'), None): cnv_length,
((FONS,u'space-before'), None): cnv_length,
((FONS,u'start-indent'), None): cnv_length,
((FONS,u'text-align'), None): cnv_string,
((FONS,u'text-align-last'), None): cnv_string,
((FONS,u'text-indent'), None): cnv_string,
((FONS,u'text-shadow'), None): cnv_string,
((FONS,u'text-transform'), None): cnv_string,
((FONS,u'widows'), None): cnv_string,
((FONS,u'width'), None): cnv_string,
((FONS,u'wrap-option'), None): cnv_string,
((FORMNS,u'allow-deletes'), None): cnv_boolean,
((FORMNS,u'allow-inserts'), None): cnv_boolean,
((FORMNS,u'allow-updates'), None): cnv_boolean,
((FORMNS,u'apply-design-mode'), None): cnv_boolean,
((FORMNS,u'apply-filter'), None): cnv_boolean,
((FORMNS,u'auto-complete'), None): cnv_boolean,
((FORMNS,u'automatic-focus'), None): cnv_boolean,
((FORMNS,u'bound-column'), None): cnv_string,
((FORMNS,u'button-type'), None): cnv_string,
((FORMNS,u'command'), None): cnv_string,
((FORMNS,u'command-type'), None): cnv_string,
((FORMNS,u'control-implementation'), None): cnv_namespacedToken,
((FORMNS,u'convert-empty-to-null'), None): cnv_boolean,
((FORMNS,u'current-selected'), None): cnv_boolean,
((FORMNS,u'current-state'), None): cnv_string,
# ((FORMNS,u'current-value'), None): cnv_date,
# ((FORMNS,u'current-value'), None): cnv_double,
((FORMNS,u'current-value'), None): cnv_string,
# ((FORMNS,u'current-value'), None): cnv_time,
((FORMNS,u'data-field'), None): cnv_string,
((FORMNS,u'datasource'), None): cnv_string,
((FORMNS,u'default-button'), None): cnv_boolean,
((FORMNS,u'delay-for-repeat'), None): cnv_duration,
((FORMNS,u'detail-fields'), None): cnv_string,
((FORMNS,u'disabled'), None): cnv_boolean,
((FORMNS,u'dropdown'), None): cnv_boolean,
((FORMNS,u'echo-char'), None): cnv_string,
((FORMNS,u'enctype'), None): cnv_string,
((FORMNS,u'escape-processing'), None): cnv_boolean,
((FORMNS,u'filter'), None): cnv_string,
((FORMNS,u'focus-on-click'), None): cnv_boolean,
((FORMNS,u'for'), None): cnv_string,
((FORMNS,u'id'), None): cnv_ID,
((FORMNS,u'ignore-result'), None): cnv_boolean,
((FORMNS,u'image-align'), None): cnv_string,
((FORMNS,u'image-data'), None): cnv_anyURI,
((FORMNS,u'image-position'), None): cnv_string,
((FORMNS,u'is-tristate'), None): cnv_boolean,
((FORMNS,u'label'), None): cnv_string,
((FORMNS,u'list-source'), None): cnv_string,
((FORMNS,u'list-source-type'), None): cnv_string,
((FORMNS,u'master-fields'), None): cnv_string,
((FORMNS,u'max-length'), None): cnv_nonNegativeInteger,
# ((FORMNS,u'max-value'), None): cnv_date,
# ((FORMNS,u'max-value'), None): cnv_double,
((FORMNS,u'max-value'), None): cnv_string,
# ((FORMNS,u'max-value'), None): cnv_time,
((FORMNS,u'method'), None): cnv_string,
# ((FORMNS,u'min-value'), None): cnv_date,
# ((FORMNS,u'min-value'), None): cnv_double,
((FORMNS,u'min-value'), None): cnv_string,
# ((FORMNS,u'min-value'), None): cnv_time,
((FORMNS,u'multi-line'), None): cnv_boolean,
((FORMNS,u'multiple'), None): cnv_boolean,
((FORMNS,u'name'), None): cnv_string,
((FORMNS,u'navigation-mode'), None): cnv_string,
((FORMNS,u'order'), None): cnv_string,
((FORMNS,u'orientation'), None): cnv_string,
((FORMNS,u'page-step-size'), None): cnv_positiveInteger,
((FORMNS,u'printable'), None): cnv_boolean,
((FORMNS,u'property-name'), None): cnv_string,
((FORMNS,u'readonly'), None): cnv_boolean,
((FORMNS,u'selected'), None): cnv_boolean,
((FORMNS,u'size'), None): cnv_nonNegativeInteger,
((FORMNS,u'state'), None): cnv_string,
((FORMNS,u'step-size'), None): cnv_positiveInteger,
((FORMNS,u'tab-cycle'), None): cnv_string,
((FORMNS,u'tab-index'), None): cnv_nonNegativeInteger,
((FORMNS,u'tab-stop'), None): cnv_boolean,
((FORMNS,u'text-style-name'), None): cnv_StyleNameRef,
((FORMNS,u'title'), None): cnv_string,
((FORMNS,u'toggle'), None): cnv_boolean,
((FORMNS,u'validation'), None): cnv_boolean,
# ((FORMNS,u'value'), None): cnv_date,
# ((FORMNS,u'value'), None): cnv_double,
((FORMNS,u'value'), None): cnv_string,
# ((FORMNS,u'value'), None): cnv_time,
((FORMNS,u'visual-effect'), None): cnv_string,
((FORMNS,u'xforms-list-source'), None): cnv_string,
((FORMNS,u'xforms-submission'), None): cnv_string,
((MANIFESTNS,'algorithm-name'), None): cnv_string,
((MANIFESTNS,'checksum'), None): cnv_string,
((MANIFESTNS,'checksum-type'), None): cnv_string,
((MANIFESTNS,'full-path'), None): cnv_string,
((MANIFESTNS,'initialisation-vector'), None): cnv_string,
((MANIFESTNS,'iteration-count'), None): cnv_nonNegativeInteger,
((MANIFESTNS,'key-derivation-name'), None): cnv_string,
((MANIFESTNS,'media-type'), None): cnv_string,
((MANIFESTNS,'salt'), None): cnv_string,
((MANIFESTNS,'size'), None): cnv_nonNegativeInteger,
((METANS,u'cell-count'), None): cnv_nonNegativeInteger,
((METANS,u'character-count'), None): cnv_nonNegativeInteger,
((METANS,u'date'), None): cnv_dateTime,
((METANS,u'delay'), None): cnv_duration,
((METANS,u'draw-count'), None): cnv_nonNegativeInteger,
((METANS,u'frame-count'), None): cnv_nonNegativeInteger,
((METANS,u'image-count'), None): cnv_nonNegativeInteger,
((METANS,u'name'), None): cnv_string,
((METANS,u'non-whitespace-character-count'), None): cnv_nonNegativeInteger,
((METANS,u'object-count'), None): cnv_nonNegativeInteger,
((METANS,u'ole-object-count'), None): cnv_nonNegativeInteger,
((METANS,u'page-count'), None): cnv_nonNegativeInteger,
((METANS,u'paragraph-count'), None): cnv_nonNegativeInteger,
((METANS,u'row-count'), None): cnv_nonNegativeInteger,
((METANS,u'sentence-count'), None): cnv_nonNegativeInteger,
((METANS,u'syllable-count'), None): cnv_nonNegativeInteger,
((METANS,u'table-count'), None): cnv_nonNegativeInteger,
((METANS,u'value-type'), None): cnv_metavaluetype,
((METANS,u'word-count'), None): cnv_nonNegativeInteger,
((NUMBERNS,u'automatic-order'), None): cnv_boolean,
((NUMBERNS,u'calendar'), None): cnv_string,
((NUMBERNS,u'country'), None): cnv_token,
((NUMBERNS,u'decimal-places'), None): cnv_integer,
((NUMBERNS,u'decimal-replacement'), None): cnv_string,
((NUMBERNS,u'denominator-value'), None): cnv_integer,
((NUMBERNS,u'display-factor'), None): cnv_double,
((NUMBERNS,u'format-source'), None): cnv_string,
((NUMBERNS,u'grouping'), None): cnv_boolean,
((NUMBERNS,u'language'), None): cnv_token,
((NUMBERNS,u'min-denominator-digits'), None): cnv_integer,
((NUMBERNS,u'min-exponent-digits'), None): cnv_integer,
((NUMBERNS,u'min-integer-digits'), None): cnv_integer,
((NUMBERNS,u'min-numerator-digits'), None): cnv_integer,
((NUMBERNS,u'position'), None): cnv_integer,
((NUMBERNS,u'possessive-form'), None): cnv_boolean,
((NUMBERNS,u'style'), None): cnv_string,
((NUMBERNS,u'textual'), None): cnv_boolean,
((NUMBERNS,u'title'), None): cnv_string,
((NUMBERNS,u'transliteration-country'), None): cnv_token,
((NUMBERNS,u'transliteration-format'), None): cnv_string,
((NUMBERNS,u'transliteration-language'), None): cnv_token,
((NUMBERNS,u'transliteration-style'), None): cnv_string,
((NUMBERNS,u'truncate-on-overflow'), None): cnv_boolean,
((OFFICENS,u'automatic-update'), None): cnv_boolean,
((OFFICENS,u'boolean-value'), None): cnv_boolean,
((OFFICENS,u'conversion-mode'), None): cnv_string,
((OFFICENS,u'currency'), None): cnv_string,
((OFFICENS,u'date-value'), None): cnv_dateTime,
((OFFICENS,u'dde-application'), None): cnv_string,
((OFFICENS,u'dde-item'), None): cnv_string,
((OFFICENS,u'dde-topic'), None): cnv_string,
((OFFICENS,u'display'), None): cnv_boolean,
((OFFICENS,u'mimetype'), None): cnv_string,
((OFFICENS,u'name'), None): cnv_string,
((OFFICENS,u'process-content'), None): cnv_boolean,
((OFFICENS,u'server-map'), None): cnv_boolean,
((OFFICENS,u'string-value'), None): cnv_string,
((OFFICENS,u'target-frame'), None): cnv_string,
((OFFICENS,u'target-frame-name'), None): cnv_string,
((OFFICENS,u'time-value'), None): cnv_duration,
((OFFICENS,u'title'), None): cnv_string,
((OFFICENS,u'value'), None): cnv_double,
((OFFICENS,u'value-type'), None): cnv_string,
((OFFICENS,u'version'), None): cnv_string,
((PRESENTATIONNS,u'action'), None): cnv_string,
((PRESENTATIONNS,u'animations'), None): cnv_string,
((PRESENTATIONNS,u'background-objects-visible'), None): cnv_boolean,
((PRESENTATIONNS,u'background-visible'), None): cnv_boolean,
((PRESENTATIONNS,u'class'), None): cnv_string,
((PRESENTATIONNS,u'class-names'), None): cnv_NCNames,
((PRESENTATIONNS,u'delay'), None): cnv_duration,
((PRESENTATIONNS,u'direction'), None): cnv_string,
((PRESENTATIONNS,u'display-date-time'), None): cnv_boolean,
((PRESENTATIONNS,u'display-footer'), None): cnv_boolean,
((PRESENTATIONNS,u'display-header'), None): cnv_boolean,
((PRESENTATIONNS,u'display-page-number'), None): cnv_boolean,
((PRESENTATIONNS,u'duration'), None): cnv_string,
((PRESENTATIONNS,u'effect'), None): cnv_string,
((PRESENTATIONNS,u'endless'), None): cnv_boolean,
((PRESENTATIONNS,u'force-manual'), None): cnv_boolean,
((PRESENTATIONNS,u'full-screen'), None): cnv_boolean,
((PRESENTATIONNS,u'group-id'), None): cnv_string,
((PRESENTATIONNS,u'master-element'), None): cnv_IDREF,
((PRESENTATIONNS,u'mouse-as-pen'), None): cnv_boolean,
((PRESENTATIONNS,u'mouse-visible'), None): cnv_boolean,
((PRESENTATIONNS,u'name'), None): cnv_string,
((PRESENTATIONNS,u'node-type'), None): cnv_string,
((PRESENTATIONNS,u'object'), None): cnv_string,
((PRESENTATIONNS,u'pages'), None): cnv_string,
((PRESENTATIONNS,u'path-id'), None): cnv_string,
((PRESENTATIONNS,u'pause'), None): cnv_duration,
((PRESENTATIONNS,u'placeholder'), None): cnv_boolean,
((PRESENTATIONNS,u'play-full'), None): cnv_boolean,
((PRESENTATIONNS,u'presentation-page-layout-name'), None): cnv_StyleNameRef,
((PRESENTATIONNS,u'preset-class'), None): cnv_string,
((PRESENTATIONNS,u'preset-id'), None): cnv_string,
((PRESENTATIONNS,u'preset-sub-type'), None): cnv_string,
((PRESENTATIONNS,u'show'), None): cnv_string,
((PRESENTATIONNS,u'show-end-of-presentation-slide'), None): cnv_boolean,
((PRESENTATIONNS,u'show-logo'), None): cnv_boolean,
((PRESENTATIONNS,u'source'), None): cnv_string,
((PRESENTATIONNS,u'speed'), None): cnv_string,
((PRESENTATIONNS,u'start-page'), None): cnv_string,
((PRESENTATIONNS,u'start-scale'), None): cnv_string,
((PRESENTATIONNS,u'start-with-navigator'), None): cnv_boolean,
((PRESENTATIONNS,u'stay-on-top'), None): cnv_boolean,
((PRESENTATIONNS,u'style-name'), None): cnv_StyleNameRef,
((PRESENTATIONNS,u'transition-on-click'), None): cnv_string,
((PRESENTATIONNS,u'transition-speed'), None): cnv_string,
((PRESENTATIONNS,u'transition-style'), None): cnv_string,
((PRESENTATIONNS,u'transition-type'), None): cnv_string,
((PRESENTATIONNS,u'use-date-time-name'), None): cnv_string,
((PRESENTATIONNS,u'use-footer-name'), None): cnv_string,
((PRESENTATIONNS,u'use-header-name'), None): cnv_string,
((PRESENTATIONNS,u'user-transformed'), None): cnv_boolean,
((PRESENTATIONNS,u'verb'), None): cnv_nonNegativeInteger,
((PRESENTATIONNS,u'visibility'), None): cnv_string,
((SCRIPTNS,u'event-name'), None): cnv_formula,
((SCRIPTNS,u'language'), None): cnv_formula,
((SCRIPTNS,u'macro-name'), None): cnv_string,
((SMILNS,u'accelerate'), None): cnv_double,
((SMILNS,u'accumulate'), None): cnv_string,
((SMILNS,u'additive'), None): cnv_string,
((SMILNS,u'attributeName'), None): cnv_string,
((SMILNS,u'autoReverse'), None): cnv_boolean,
((SMILNS,u'begin'), None): cnv_string,
((SMILNS,u'by'), None): cnv_string,
((SMILNS,u'calcMode'), None): cnv_string,
((SMILNS,u'decelerate'), None): cnv_double,
((SMILNS,u'direction'), None): cnv_string,
((SMILNS,u'dur'), None): cnv_string,
((SMILNS,u'end'), None): cnv_string,
((SMILNS,u'endsync'), None): cnv_string,
((SMILNS,u'fadeColor'), None): cnv_string,
((SMILNS,u'fill'), None): cnv_string,
((SMILNS,u'fillDefault'), None): cnv_string,
((SMILNS,u'from'), None): cnv_string,
((SMILNS,u'keySplines'), None): cnv_string,
((SMILNS,u'keyTimes'), None): cnv_string,
((SMILNS,u'mode'), None): cnv_string,
((SMILNS,u'repeatCount'), None): cnv_nonNegativeInteger,
((SMILNS,u'repeatDur'), None): cnv_string,
((SMILNS,u'restart'), None): cnv_string,
((SMILNS,u'restartDefault'), None): cnv_string,
((SMILNS,u'subtype'), None): cnv_string,
((SMILNS,u'targetElement'), None): cnv_IDREF,
((SMILNS,u'to'), None): cnv_string,
((SMILNS,u'type'), None): cnv_string,
((SMILNS,u'values'), None): cnv_string,
((STYLENS,u'adjustment'), None): cnv_string,
((STYLENS,u'apply-style-name'), None): cnv_StyleNameRef,
((STYLENS,u'auto-text-indent'), None): cnv_boolean,
((STYLENS,u'auto-update'), None): cnv_boolean,
((STYLENS,u'background-transparency'), None): cnv_string,
((STYLENS,u'base-cell-address'), None): cnv_string,
((STYLENS,u'border-line-width-bottom'), None): cnv_string,
((STYLENS,u'border-line-width'), None): cnv_string,
((STYLENS,u'border-line-width-left'), None): cnv_string,
((STYLENS,u'border-line-width-right'), None): cnv_string,
((STYLENS,u'border-line-width-top'), None): cnv_string,
((STYLENS,u'cell-protect'), None): cnv_string,
((STYLENS,u'char'), None): cnv_string,
((STYLENS,u'class'), None): cnv_string,
((STYLENS,u'color'), None): cnv_string,
((STYLENS,u'column-width'), None): cnv_string,
((STYLENS,u'condition'), None): cnv_string,
((STYLENS,u'country-asian'), None): cnv_string,
((STYLENS,u'country-complex'), None): cnv_string,
((STYLENS,u'data-style-name'), None): cnv_StyleNameRef,
((STYLENS,u'decimal-places'), None): cnv_string,
((STYLENS,u'default-outline-level'), None): cnv_positiveInteger,
((STYLENS,u'diagonal-bl-tr'), None): cnv_string,
((STYLENS,u'diagonal-bl-tr-widths'), None): cnv_string,
((STYLENS,u'diagonal-tl-br'), None): cnv_string,
((STYLENS,u'diagonal-tl-br-widths'), None): cnv_string,
((STYLENS,u'direction'), None): cnv_string,
((STYLENS,u'display'), None): cnv_boolean,
((STYLENS,u'display-name'), None): cnv_string,
((STYLENS,u'distance-after-sep'), None): cnv_length,
((STYLENS,u'distance-before-sep'), None): cnv_length,
((STYLENS,u'distance'), None): cnv_length,
((STYLENS,u'dynamic-spacing'), None): cnv_boolean,
((STYLENS,u'editable'), None): cnv_boolean,
((STYLENS,u'family'), None): cnv_family,
((STYLENS,u'filter-name'), None): cnv_string,
((STYLENS,u'first-page-number'), None): cnv_string,
((STYLENS,u'flow-with-text'), None): cnv_boolean,
((STYLENS,u'font-adornments'), None): cnv_string,
((STYLENS,u'font-charset'), None): cnv_string,
((STYLENS,u'font-charset-asian'), None): cnv_string,
((STYLENS,u'font-charset-complex'), None): cnv_string,
((STYLENS,u'font-family-asian'), None): cnv_string,
((STYLENS,u'font-family-complex'), None): cnv_string,
((STYLENS,u'font-family-generic-asian'), None): cnv_string,
((STYLENS,u'font-family-generic'), None): cnv_string,
((STYLENS,u'font-family-generic-complex'), None): cnv_string,
((STYLENS,u'font-independent-line-spacing'), None): cnv_boolean,
((STYLENS,u'font-name-asian'), None): cnv_string,
((STYLENS,u'font-name'), None): cnv_string,
((STYLENS,u'font-name-complex'), None): cnv_string,
((STYLENS,u'font-pitch-asian'), None): cnv_string,
((STYLENS,u'font-pitch'), None): cnv_string,
((STYLENS,u'font-pitch-complex'), None): cnv_string,
((STYLENS,u'font-relief'), None): cnv_string,
((STYLENS,u'font-size-asian'), None): cnv_string,
((STYLENS,u'font-size-complex'), None): cnv_string,
((STYLENS,u'font-size-rel-asian'), None): cnv_length,
((STYLENS,u'font-size-rel'), None): cnv_length,
((STYLENS,u'font-size-rel-complex'), None): cnv_length,
((STYLENS,u'font-style-asian'), None): cnv_string,
((STYLENS,u'font-style-complex'), None): cnv_string,
((STYLENS,u'font-style-name-asian'), None): cnv_string,
((STYLENS,u'font-style-name'), None): cnv_string,
((STYLENS,u'font-style-name-complex'), None): cnv_string,
((STYLENS,u'font-weight-asian'), None): cnv_string,
((STYLENS,u'font-weight-complex'), None): cnv_string,
((STYLENS,u'footnote-max-height'), None): cnv_length,
((STYLENS,u'glyph-orientation-vertical'), None): cnv_string,
((STYLENS,u'height'), None): cnv_string,
((STYLENS,u'horizontal-pos'), None): cnv_string,
((STYLENS,u'horizontal-rel'), None): cnv_string,
((STYLENS,u'justify-single-word'), None): cnv_boolean,
((STYLENS,u'language-asian'), None): cnv_string,
((STYLENS,u'language-complex'), None): cnv_string,
((STYLENS,u'layout-grid-base-height'), None): cnv_length,
((STYLENS,u'layout-grid-color'), None): cnv_string,
((STYLENS,u'layout-grid-display'), None): cnv_boolean,
((STYLENS,u'layout-grid-lines'), None): cnv_string,
((STYLENS,u'layout-grid-mode'), None): cnv_string,
((STYLENS,u'layout-grid-print'), None): cnv_boolean,
((STYLENS,u'layout-grid-ruby-below'), None): cnv_boolean,
((STYLENS,u'layout-grid-ruby-height'), None): cnv_length,
((STYLENS,u'leader-char'), None): cnv_string,
((STYLENS,u'leader-color'), None): cnv_string,
((STYLENS,u'leader-style'), None): cnv_string,
((STYLENS,u'leader-text'), None): cnv_string,
((STYLENS,u'leader-text-style'), None): cnv_StyleNameRef,
((STYLENS,u'leader-type'), None): cnv_string,
((STYLENS,u'leader-width'), None): cnv_string,
((STYLENS,u'legend-expansion-aspect-ratio'), None): cnv_double,
((STYLENS,u'legend-expansion'), None): cnv_string,
((STYLENS,u'length'), None): cnv_positiveInteger,
((STYLENS,u'letter-kerning'), None): cnv_boolean,
((STYLENS,u'line-break'), None): cnv_string,
((STYLENS,u'line-height-at-least'), None): cnv_string,
((STYLENS,u'line-spacing'), None): cnv_length,
((STYLENS,u'line-style'), None): cnv_string,
((STYLENS,u'lines'), None): cnv_positiveInteger,
((STYLENS,u'list-style-name'), None): cnv_StyleNameRef,
((STYLENS,u'master-page-name'), None): cnv_StyleNameRef,
((STYLENS,u'may-break-between-rows'), None): cnv_boolean,
((STYLENS,u'min-row-height'), None): cnv_string,
((STYLENS,u'mirror'), None): cnv_string,
((STYLENS,u'name'), None): cnv_NCName,
((STYLENS,u'name'), (STYLENS,u'font-face')): cnv_string,
((STYLENS,u'next-style-name'), None): cnv_StyleNameRef,
((STYLENS,u'num-format'), None): cnv_string,
((STYLENS,u'num-letter-sync'), None): cnv_boolean,
((STYLENS,u'num-prefix'), None): cnv_string,
((STYLENS,u'num-suffix'), None): cnv_string,
((STYLENS,u'number-wrapped-paragraphs'), None): cnv_string,
((STYLENS,u'overflow-behavior'), None): cnv_string,
((STYLENS,u'page-layout-name'), None): cnv_StyleNameRef,
((STYLENS,u'page-number'), None): cnv_string,
((STYLENS,u'page-usage'), None): cnv_string,
((STYLENS,u'paper-tray-name'), None): cnv_string,
((STYLENS,u'parent-style-name'), None): cnv_StyleNameRef,
((STYLENS,u'position'), (STYLENS,u'tab-stop')): cnv_length,
((STYLENS,u'position'), None): cnv_string,
((STYLENS,u'print'), None): cnv_string,
((STYLENS,u'print-content'), None): cnv_boolean,
((STYLENS,u'print-orientation'), None): cnv_string,
((STYLENS,u'print-page-order'), None): cnv_string,
((STYLENS,u'protect'), None): cnv_boolean,
((STYLENS,u'punctuation-wrap'), None): cnv_string,
((STYLENS,u'register-true'), None): cnv_boolean,
((STYLENS,u'register-truth-ref-style-name'), None): cnv_string,
((STYLENS,u'rel-column-width'), None): cnv_string,
((STYLENS,u'rel-height'), None): cnv_string,
((STYLENS,u'rel-width'), None): cnv_string,
((STYLENS,u'repeat'), None): cnv_string,
((STYLENS,u'repeat-content'), None): cnv_boolean,
((STYLENS,u'rotation-align'), None): cnv_string,
((STYLENS,u'rotation-angle'), None): cnv_string,
((STYLENS,u'row-height'), None): cnv_string,
((STYLENS,u'ruby-align'), None): cnv_string,
((STYLENS,u'ruby-position'), None): cnv_string,
((STYLENS,u'run-through'), None): cnv_string,
((STYLENS,u'scale-to'), None): cnv_string,
((STYLENS,u'scale-to-pages'), None): cnv_string,
((STYLENS,u'script-type'), None): cnv_string,
((STYLENS,u'shadow'), None): cnv_string,
((STYLENS,u'shrink-to-fit'), None): cnv_boolean,
((STYLENS,u'snap-to-layout-grid'), None): cnv_boolean,
((STYLENS,u'style'), None): cnv_string,
((STYLENS,u'style-name'), None): cnv_StyleNameRef,
((STYLENS,u'tab-stop-distance'), None): cnv_string,
((STYLENS,u'table-centering'), None): cnv_string,
((STYLENS,u'text-align-source'), None): cnv_string,
((STYLENS,u'text-autospace'), None): cnv_string,
((STYLENS,u'text-blinking'), None): cnv_boolean,
((STYLENS,u'text-combine'), None): cnv_string,
((STYLENS,u'text-combine-end-char'), None): cnv_string,
((STYLENS,u'text-combine-start-char'), None): cnv_string,
((STYLENS,u'text-emphasize'), None): cnv_string,
((STYLENS,u'text-line-through-color'), None): cnv_string,
((STYLENS,u'text-line-through-mode'), None): cnv_string,
((STYLENS,u'text-line-through-style'), None): cnv_string,
((STYLENS,u'text-line-through-text'), None): cnv_string,
((STYLENS,u'text-line-through-text-style'), None): cnv_string,
((STYLENS,u'text-line-through-type'), None): cnv_string,
((STYLENS,u'text-line-through-width'), None): cnv_string,
((STYLENS,u'text-outline'), None): cnv_boolean,
((STYLENS,u'text-position'), None): cnv_string,
((STYLENS,u'text-rotation-angle'), None): cnv_string,
((STYLENS,u'text-rotation-scale'), None): cnv_string,
((STYLENS,u'text-scale'), None): cnv_string,
((STYLENS,u'text-underline-color'), None): cnv_string,
((STYLENS,u'text-underline-mode'), None): cnv_string,
((STYLENS,u'text-underline-style'), None): cnv_string,
((STYLENS,u'text-underline-type'), None): cnv_string,
((STYLENS,u'text-underline-width'), None): cnv_string,
((STYLENS,u'type'), None): cnv_string,
((STYLENS,u'use-optimal-column-width'), None): cnv_boolean,
((STYLENS,u'use-optimal-row-height'), None): cnv_boolean,
((STYLENS,u'use-window-font-color'), None): cnv_boolean,
((STYLENS,u'vertical-align'), None): cnv_string,
((STYLENS,u'vertical-pos'), None): cnv_string,
((STYLENS,u'vertical-rel'), None): cnv_string,
((STYLENS,u'volatile'), None): cnv_boolean,
((STYLENS,u'width'), None): cnv_string,
((STYLENS,u'wrap'), None): cnv_string,
((STYLENS,u'wrap-contour'), None): cnv_boolean,
((STYLENS,u'wrap-contour-mode'), None): cnv_string,
((STYLENS,u'wrap-dynamic-threshold'), None): cnv_length,
((STYLENS,u'writing-mode-automatic'), None): cnv_boolean,
((STYLENS,u'writing-mode'), None): cnv_string,
((SVGNS,u'accent-height'), None): cnv_integer,
((SVGNS,u'alphabetic'), None): cnv_integer,
((SVGNS,u'ascent'), None): cnv_integer,
((SVGNS,u'bbox'), None): cnv_string,
((SVGNS,u'cap-height'), None): cnv_integer,
((SVGNS,u'cx'), None): cnv_string,
((SVGNS,u'cy'), None): cnv_string,
((SVGNS,u'd'), None): cnv_string,
((SVGNS,u'descent'), None): cnv_integer,
((SVGNS,u'fill-rule'), None): cnv_string,
((SVGNS,u'font-family'), None): cnv_string,
((SVGNS,u'font-size'), None): cnv_string,
((SVGNS,u'font-stretch'), None): cnv_string,
((SVGNS,u'font-style'), None): cnv_string,
((SVGNS,u'font-variant'), None): cnv_string,
((SVGNS,u'font-weight'), None): cnv_string,
((SVGNS,u'fx'), None): cnv_string,
((SVGNS,u'fy'), None): cnv_string,
((SVGNS,u'gradientTransform'), None): cnv_string,
((SVGNS,u'gradientUnits'), None): cnv_string,
((SVGNS,u'hanging'), None): cnv_integer,
((SVGNS,u'height'), None): cnv_length,
((SVGNS,u'ideographic'), None): cnv_integer,
((SVGNS,u'mathematical'), None): cnv_integer,
((SVGNS,u'name'), None): cnv_string,
((SVGNS,u'offset'), None): cnv_string,
((SVGNS,u'origin'), None): cnv_string,
((SVGNS,u'overline-position'), None): cnv_integer,
((SVGNS,u'overline-thickness'), None): cnv_integer,
((SVGNS,u'panose-1'), None): cnv_string,
((SVGNS,u'path'), None): cnv_string,
((SVGNS,u'r'), None): cnv_length,
((SVGNS,u'rx'), None): cnv_length,
((SVGNS,u'ry'), None): cnv_length,
((SVGNS,u'slope'), None): cnv_integer,
((SVGNS,u'spreadMethod'), None): cnv_string,
((SVGNS,u'stemh'), None): cnv_integer,
((SVGNS,u'stemv'), None): cnv_integer,
((SVGNS,u'stop-color'), None): cnv_string,
((SVGNS,u'stop-opacity'), None): cnv_double,
((SVGNS,u'strikethrough-position'), None): cnv_integer,
((SVGNS,u'strikethrough-thickness'), None): cnv_integer,
((SVGNS,u'string'), None): cnv_string,
((SVGNS,u'stroke-color'), None): cnv_string,
((SVGNS,u'stroke-opacity'), None): cnv_string,
((SVGNS,u'stroke-width'), None): cnv_length,
((SVGNS,u'type'), None): cnv_string,
((SVGNS,u'underline-position'), None): cnv_integer,
((SVGNS,u'underline-thickness'), None): cnv_integer,
((SVGNS,u'unicode-range'), None): cnv_string,
((SVGNS,u'units-per-em'), None): cnv_integer,
((SVGNS,u'v-alphabetic'), None): cnv_integer,
((SVGNS,u'v-hanging'), None): cnv_integer,
((SVGNS,u'v-ideographic'), None): cnv_integer,
((SVGNS,u'v-mathematical'), None): cnv_integer,
((SVGNS,u'viewBox'), None): cnv_viewbox,
((SVGNS,u'width'), None): cnv_length,
((SVGNS,u'widths'), None): cnv_string,
((SVGNS,u'x'), None): cnv_length,
((SVGNS,u'x-height'), None): cnv_integer,
((SVGNS,u'x1'), None): cnv_lengthorpercent,
((SVGNS,u'x2'), None): cnv_lengthorpercent,
((SVGNS,u'y'), None): cnv_length,
((SVGNS,u'y1'), None): cnv_lengthorpercent,
((SVGNS,u'y2'), None): cnv_lengthorpercent,
((TABLENS,u'acceptance-state'), None): cnv_string,
((TABLENS,u'add-empty-lines'), None): cnv_boolean,
((TABLENS,u'algorithm'), None): cnv_formula,
((TABLENS,u'align'), None): cnv_string,
((TABLENS,u'allow-empty-cell'), None): cnv_boolean,
((TABLENS,u'application-data'), None): cnv_string,
((TABLENS,u'automatic-find-labels'), None): cnv_boolean,
((TABLENS,u'base-cell-address'), None): cnv_string,
((TABLENS,u'bind-styles-to-content'), None): cnv_boolean,
((TABLENS,u'border-color'), None): cnv_string,
((TABLENS,u'border-model'), None): cnv_string,
((TABLENS,u'buttons'), None): cnv_string,
((TABLENS,u'buttons'), None): cnv_string,
((TABLENS,u'case-sensitive'), None): cnv_boolean,
((TABLENS,u'case-sensitive'), None): cnv_string,
((TABLENS,u'cell-address'), None): cnv_string,
((TABLENS,u'cell-range-address'), None): cnv_string,
((TABLENS,u'cell-range-address'), None): cnv_string,
((TABLENS,u'cell-range'), None): cnv_string,
((TABLENS,u'column'), None): cnv_integer,
((TABLENS,u'comment'), None): cnv_string,
((TABLENS,u'condition'), None): cnv_formula,
((TABLENS,u'condition-source'), None): cnv_string,
((TABLENS,u'condition-source-range-address'), None): cnv_string,
((TABLENS,u'contains-error'), None): cnv_boolean,
((TABLENS,u'contains-header'), None): cnv_boolean,
((TABLENS,u'content-validation-name'), None): cnv_string,
((TABLENS,u'copy-back'), None): cnv_boolean,
((TABLENS,u'copy-formulas'), None): cnv_boolean,
((TABLENS,u'copy-styles'), None): cnv_boolean,
((TABLENS,u'count'), None): cnv_positiveInteger,
((TABLENS,u'country'), None): cnv_token,
((TABLENS,u'data-cell-range-address'), None): cnv_string,
((TABLENS,u'data-field'), None): cnv_string,
((TABLENS,u'data-type'), None): cnv_string,
((TABLENS,u'database-name'), None): cnv_string,
((TABLENS,u'database-table-name'), None): cnv_string,
((TABLENS,u'date-end'), None): cnv_string,
((TABLENS,u'date-start'), None): cnv_string,
((TABLENS,u'date-value'), None): cnv_date,
((TABLENS,u'default-cell-style-name'), None): cnv_StyleNameRef,
((TABLENS,u'direction'), None): cnv_string,
((TABLENS,u'display-border'), None): cnv_boolean,
((TABLENS,u'display'), None): cnv_boolean,
((TABLENS,u'display-duplicates'), None): cnv_boolean,
((TABLENS,u'display-filter-buttons'), None): cnv_boolean,
((TABLENS,u'display-list'), None): cnv_string,
((TABLENS,u'display-member-mode'), None): cnv_string,
((TABLENS,u'drill-down-on-double-click'), None): cnv_boolean,
((TABLENS,u'enabled'), None): cnv_boolean,
((TABLENS,u'end-cell-address'), None): cnv_string,
((TABLENS,u'end'), None): cnv_string,
((TABLENS,u'end-column'), None): cnv_integer,
((TABLENS,u'end-position'), None): cnv_integer,
((TABLENS,u'end-row'), None): cnv_integer,
((TABLENS,u'end-table'), None): cnv_integer,
((TABLENS,u'end-x'), None): cnv_length,
((TABLENS,u'end-y'), None): cnv_length,
((TABLENS,u'execute'), None): cnv_boolean,
((TABLENS,u'expression'), None): cnv_formula,
((TABLENS,u'field-name'), None): cnv_string,
((TABLENS,u'field-number'), None): cnv_nonNegativeInteger,
((TABLENS,u'field-number'), None): cnv_string,
((TABLENS,u'filter-name'), None): cnv_string,
((TABLENS,u'filter-options'), None): cnv_string,
((TABLENS,u'formula'), None): cnv_formula,
((TABLENS,u'function'), None): cnv_string,
((TABLENS,u'function'), None): cnv_string,
((TABLENS,u'grand-total'), None): cnv_string,
((TABLENS,u'group-by-field-number'), None): cnv_nonNegativeInteger,
((TABLENS,u'grouped-by'), None): cnv_string,
((TABLENS,u'has-persistent-data'), None): cnv_boolean,
((TABLENS,u'id'), None): cnv_string,
((TABLENS,u'identify-categories'), None): cnv_boolean,
((TABLENS,u'ignore-empty-rows'), None): cnv_boolean,
((TABLENS,u'index'), None): cnv_nonNegativeInteger,
((TABLENS,u'is-active'), None): cnv_boolean,
((TABLENS,u'is-data-layout-field'), None): cnv_string,
((TABLENS,u'is-selection'), None): cnv_boolean,
((TABLENS,u'is-sub-table'), None): cnv_boolean,
((TABLENS,u'label-cell-range-address'), None): cnv_string,
((TABLENS,u'language'), None): cnv_token,
((TABLENS,u'language'), None): cnv_token,
((TABLENS,u'last-column-spanned'), None): cnv_positiveInteger,
((TABLENS,u'last-row-spanned'), None): cnv_positiveInteger,
((TABLENS,u'layout-mode'), None): cnv_string,
((TABLENS,u'link-to-source-data'), None): cnv_boolean,
((TABLENS,u'marked-invalid'), None): cnv_boolean,
((TABLENS,u'matrix-covered'), None): cnv_boolean,
((TABLENS,u'maximum-difference'), None): cnv_double,
((TABLENS,u'member-count'), None): cnv_nonNegativeInteger,
((TABLENS,u'member-name'), None): cnv_string,
((TABLENS,u'member-type'), None): cnv_string,
((TABLENS,u'message-type'), None): cnv_string,
((TABLENS,u'mode'), None): cnv_string,
((TABLENS,u'multi-deletion-spanned'), None): cnv_integer,
((TABLENS,u'name'), None): cnv_string,
((TABLENS,u'name'), None): cnv_string,
((TABLENS,u'null-year'), None): cnv_positiveInteger,
((TABLENS,u'number-columns-repeated'), None): cnv_positiveInteger,
((TABLENS,u'number-columns-spanned'), None): cnv_positiveInteger,
((TABLENS,u'number-matrix-columns-spanned'), None): cnv_positiveInteger,
((TABLENS,u'number-matrix-rows-spanned'), None): cnv_positiveInteger,
((TABLENS,u'number-rows-repeated'), None): cnv_positiveInteger,
((TABLENS,u'number-rows-spanned'), None): cnv_positiveInteger,
((TABLENS,u'object-name'), None): cnv_string,
((TABLENS,u'on-update-keep-size'), None): cnv_boolean,
((TABLENS,u'on-update-keep-styles'), None): cnv_boolean,
((TABLENS,u'operator'), None): cnv_string,
((TABLENS,u'operator'), None): cnv_string,
((TABLENS,u'order'), None): cnv_string,
((TABLENS,u'orientation'), None): cnv_string,
((TABLENS,u'orientation'), None): cnv_string,
((TABLENS,u'page-breaks-on-group-change'), None): cnv_boolean,
((TABLENS,u'parse-sql-statement'), None): cnv_boolean,
((TABLENS,u'password'), None): cnv_string,
((TABLENS,u'position'), None): cnv_integer,
((TABLENS,u'precision-as-shown'), None): cnv_boolean,
((TABLENS,u'print'), None): cnv_boolean,
((TABLENS,u'print-ranges'), None): cnv_string,
((TABLENS,u'protect'), None): cnv_boolean,
((TABLENS,u'protected'), None): cnv_boolean,
((TABLENS,u'protection-key'), None): cnv_string,
((TABLENS,u'query-name'), None): cnv_string,
((TABLENS,u'range-usable-as'), None): cnv_string,
((TABLENS,u'refresh-delay'), None): cnv_boolean,
((TABLENS,u'refresh-delay'), None): cnv_duration,
((TABLENS,u'rejecting-change-id'), None): cnv_string,
((TABLENS,u'row'), None): cnv_integer,
((TABLENS,u'scenario-ranges'), None): cnv_string,
((TABLENS,u'search-criteria-must-apply-to-whole-cell'), None): cnv_boolean,
((TABLENS,u'selected-page'), None): cnv_string,
((TABLENS,u'show-details'), None): cnv_boolean,
((TABLENS,u'show-empty'), None): cnv_boolean,
((TABLENS,u'show-empty'), None): cnv_string,
((TABLENS,u'show-filter-button'), None): cnv_boolean,
((TABLENS,u'sort-mode'), None): cnv_string,
((TABLENS,u'source-cell-range-addresses'), None): cnv_string,
((TABLENS,u'source-cell-range-addresses'), None): cnv_string,
((TABLENS,u'source-field-name'), None): cnv_string,
((TABLENS,u'source-field-name'), None): cnv_string,
((TABLENS,u'source-name'), None): cnv_string,
((TABLENS,u'sql-statement'), None): cnv_string,
((TABLENS,u'start'), None): cnv_string,
((TABLENS,u'start-column'), None): cnv_integer,
((TABLENS,u'start-position'), None): cnv_integer,
((TABLENS,u'start-row'), None): cnv_integer,
((TABLENS,u'start-table'), None): cnv_integer,
((TABLENS,u'status'), None): cnv_string,
((TABLENS,u'step'), None): cnv_double,
((TABLENS,u'steps'), None): cnv_positiveInteger,
((TABLENS,u'structure-protected'), None): cnv_boolean,
((TABLENS,u'style-name'), None): cnv_StyleNameRef,
((TABLENS,u'table-background'), None): cnv_boolean,
((TABLENS,u'table'), None): cnv_integer,
((TABLENS,u'table-name'), None): cnv_string,
((TABLENS,u'target-cell-address'), None): cnv_string,
((TABLENS,u'target-cell-address'), None): cnv_string,
((TABLENS,u'target-range-address'), None): cnv_string,
((TABLENS,u'target-range-address'), None): cnv_string,
((TABLENS,u'title'), None): cnv_string,
((TABLENS,u'track-changes'), None): cnv_boolean,
((TABLENS,u'type'), None): cnv_string,
((TABLENS,u'use-labels'), None): cnv_string,
((TABLENS,u'use-regular-expressions'), None): cnv_boolean,
((TABLENS,u'used-hierarchy'), None): cnv_integer,
((TABLENS,u'user-name'), None): cnv_string,
((TABLENS,u'value'), None): cnv_string,
((TABLENS,u'value'), None): cnv_string,
((TABLENS,u'value-type'), None): cnv_string,
((TABLENS,u'visibility'), None): cnv_string,
((TEXTNS,u'active'), None): cnv_boolean,
((TEXTNS,u'address'), None): cnv_string,
((TEXTNS,u'alphabetical-separators'), None): cnv_boolean,
((TEXTNS,u'anchor-page-number'), None): cnv_positiveInteger,
((TEXTNS,u'anchor-type'), None): cnv_string,
((TEXTNS,u'animation'), None): cnv_string,
((TEXTNS,u'animation-delay'), None): cnv_string,
((TEXTNS,u'animation-direction'), None): cnv_string,
((TEXTNS,u'animation-repeat'), None): cnv_string,
((TEXTNS,u'animation-start-inside'), None): cnv_boolean,
((TEXTNS,u'animation-steps'), None): cnv_length,
((TEXTNS,u'animation-stop-inside'), None): cnv_boolean,
((TEXTNS,u'annote'), None): cnv_string,
((TEXTNS,u'author'), None): cnv_string,
((TEXTNS,u'bibliography-data-field'), None): cnv_string,
((TEXTNS,u'bibliography-type'), None): cnv_string,
((TEXTNS,u'booktitle'), None): cnv_string,
((TEXTNS,u'bullet-char'), None): cnv_string,
((TEXTNS,u'bullet-relative-size'), None): cnv_string,
((TEXTNS,u'c'), None): cnv_nonNegativeInteger,
((TEXTNS,u'capitalize-entries'), None): cnv_boolean,
((TEXTNS,u'caption-sequence-format'), None): cnv_string,
((TEXTNS,u'caption-sequence-name'), None): cnv_string,
((TEXTNS,u'change-id'), None): cnv_IDREF,
((TEXTNS,u'chapter'), None): cnv_string,
((TEXTNS,u'citation-body-style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'citation-style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'class-names'), None): cnv_NCNames,
((TEXTNS,u'column-name'), None): cnv_string,
((TEXTNS,u'combine-entries'), None): cnv_boolean,
((TEXTNS,u'combine-entries-with-dash'), None): cnv_boolean,
((TEXTNS,u'combine-entries-with-pp'), None): cnv_boolean,
((TEXTNS,u'comma-separated'), None): cnv_boolean,
((TEXTNS,u'cond-style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'condition'), None): cnv_formula,
((TEXTNS,u'connection-name'), None): cnv_string,
((TEXTNS,u'consecutive-numbering'), None): cnv_boolean,
((TEXTNS,u'continue-numbering'), None): cnv_boolean,
((TEXTNS,u'copy-outline-levels'), None): cnv_boolean,
((TEXTNS,u'count-empty-lines'), None): cnv_boolean,
((TEXTNS,u'count-in-text-boxes'), None): cnv_boolean,
((TEXTNS,u'current-value'), None): cnv_boolean,
((TEXTNS,u'custom1'), None): cnv_string,
((TEXTNS,u'custom2'), None): cnv_string,
((TEXTNS,u'custom3'), None): cnv_string,
((TEXTNS,u'custom4'), None): cnv_string,
((TEXTNS,u'custom5'), None): cnv_string,
((TEXTNS,u'database-name'), None): cnv_string,
((TEXTNS,u'date-adjust'), None): cnv_duration,
((TEXTNS,u'date-value'), None): cnv_date,
# ((TEXTNS,u'date-value'), None): cnv_dateTime,
((TEXTNS,u'default-style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'description'), None): cnv_string,
((TEXTNS,u'display'), None): cnv_string,
((TEXTNS,u'display-levels'), None): cnv_positiveInteger,
((TEXTNS,u'display-outline-level'), None): cnv_nonNegativeInteger,
((TEXTNS,u'dont-balance-text-columns'), None): cnv_boolean,
((TEXTNS,u'duration'), None): cnv_duration,
((TEXTNS,u'edition'), None): cnv_string,
((TEXTNS,u'editor'), None): cnv_string,
((TEXTNS,u'filter-name'), None): cnv_string,
((TEXTNS,u'first-row-end-column'), None): cnv_string,
((TEXTNS,u'first-row-start-column'), None): cnv_string,
((TEXTNS,u'fixed'), None): cnv_boolean,
((TEXTNS,u'footnotes-position'), None): cnv_string,
((TEXTNS,u'formula'), None): cnv_formula,
((TEXTNS,u'global'), None): cnv_boolean,
((TEXTNS,u'howpublished'), None): cnv_string,
((TEXTNS,u'id'), None): cnv_ID,
# ((TEXTNS,u'id'), None): cnv_string,
((TEXTNS,u'identifier'), None): cnv_string,
((TEXTNS,u'ignore-case'), None): cnv_boolean,
((TEXTNS,u'increment'), None): cnv_nonNegativeInteger,
((TEXTNS,u'index-name'), None): cnv_string,
((TEXTNS,u'index-scope'), None): cnv_string,
((TEXTNS,u'institution'), None): cnv_string,
((TEXTNS,u'is-hidden'), None): cnv_boolean,
((TEXTNS,u'is-list-header'), None): cnv_boolean,
((TEXTNS,u'isbn'), None): cnv_string,
((TEXTNS,u'issn'), None): cnv_string,
((TEXTNS,u'issn'), None): cnv_string,
((TEXTNS,u'journal'), None): cnv_string,
((TEXTNS,u'key'), None): cnv_string,
((TEXTNS,u'key1'), None): cnv_string,
((TEXTNS,u'key1-phonetic'), None): cnv_string,
((TEXTNS,u'key2'), None): cnv_string,
((TEXTNS,u'key2-phonetic'), None): cnv_string,
((TEXTNS,u'kind'), None): cnv_string,
((TEXTNS,u'label'), None): cnv_string,
((TEXTNS,u'last-row-end-column'), None): cnv_string,
((TEXTNS,u'last-row-start-column'), None): cnv_string,
((TEXTNS,u'level'), None): cnv_positiveInteger,
((TEXTNS,u'line-break'), None): cnv_boolean,
((TEXTNS,u'line-number'), None): cnv_string,
((TEXTNS,u'main-entry'), None): cnv_boolean,
((TEXTNS,u'main-entry-style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'master-page-name'), None): cnv_StyleNameRef,
((TEXTNS,u'min-label-distance'), None): cnv_string,
((TEXTNS,u'min-label-width'), None): cnv_string,
((TEXTNS,u'month'), None): cnv_string,
((TEXTNS,u'name'), None): cnv_string,
((TEXTNS,u'note-class'), None): cnv_textnoteclass,
((TEXTNS,u'note'), None): cnv_string,
((TEXTNS,u'number'), None): cnv_string,
((TEXTNS,u'number-lines'), None): cnv_boolean,
((TEXTNS,u'number-position'), None): cnv_string,
((TEXTNS,u'numbered-entries'), None): cnv_boolean,
((TEXTNS,u'offset'), None): cnv_string,
((TEXTNS,u'organizations'), None): cnv_string,
((TEXTNS,u'outline-level'), None): cnv_string,
((TEXTNS,u'page-adjust'), None): cnv_integer,
((TEXTNS,u'pages'), None): cnv_string,
((TEXTNS,u'paragraph-style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'placeholder-type'), None): cnv_string,
((TEXTNS,u'prefix'), None): cnv_string,
((TEXTNS,u'protected'), None): cnv_boolean,
((TEXTNS,u'protection-key'), None): cnv_string,
((TEXTNS,u'publisher'), None): cnv_string,
((TEXTNS,u'ref-name'), None): cnv_string,
((TEXTNS,u'reference-format'), None): cnv_string,
((TEXTNS,u'relative-tab-stop-position'), None): cnv_boolean,
((TEXTNS,u'report-type'), None): cnv_string,
((TEXTNS,u'restart-numbering'), None): cnv_boolean,
((TEXTNS,u'restart-on-page'), None): cnv_boolean,
((TEXTNS,u'row-number'), None): cnv_nonNegativeInteger,
((TEXTNS,u'school'), None): cnv_string,
((TEXTNS,u'section-name'), None): cnv_string,
((TEXTNS,u'select-page'), None): cnv_string,
((TEXTNS,u'separation-character'), None): cnv_string,
((TEXTNS,u'series'), None): cnv_string,
((TEXTNS,u'sort-algorithm'), None): cnv_string,
((TEXTNS,u'sort-ascending'), None): cnv_boolean,
((TEXTNS,u'sort-by-position'), None): cnv_boolean,
((TEXTNS,u'space-before'), None): cnv_string,
((TEXTNS,u'start-numbering-at'), None): cnv_string,
((TEXTNS,u'start-value'), None): cnv_nonNegativeInteger,
((TEXTNS,u'start-value'), None): cnv_positiveInteger,
((TEXTNS,u'string-value'), None): cnv_string,
((TEXTNS,u'string-value-if-false'), None): cnv_string,
((TEXTNS,u'string-value-if-true'), None): cnv_string,
((TEXTNS,u'string-value-phonetic'), None): cnv_string,
((TEXTNS,u'style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'suffix'), None): cnv_string,
((TEXTNS,u'tab-ref'), None): cnv_nonNegativeInteger,
((TEXTNS,u'table-name'), None): cnv_string,
((TEXTNS,u'table-type'), None): cnv_string,
((TEXTNS,u'time-adjust'), None): cnv_duration,
((TEXTNS,u'time-value'), None): cnv_dateTime,
((TEXTNS,u'time-value'), None): cnv_time,
((TEXTNS,u'title'), None): cnv_string,
((TEXTNS,u'track-changes'), None): cnv_boolean,
((TEXTNS,u'url'), None): cnv_string,
((TEXTNS,u'use-caption'), None): cnv_boolean,
((TEXTNS,u'use-chart-objects'), None): cnv_boolean,
((TEXTNS,u'use-draw-objects'), None): cnv_boolean,
((TEXTNS,u'use-floating-frames'), None): cnv_boolean,
((TEXTNS,u'use-graphics'), None): cnv_boolean,
((TEXTNS,u'use-index-marks'), None): cnv_boolean,
((TEXTNS,u'use-index-source-styles'), None): cnv_boolean,
((TEXTNS,u'use-keys-as-entries'), None): cnv_boolean,
((TEXTNS,u'use-math-objects'), None): cnv_boolean,
((TEXTNS,u'use-objects'), None): cnv_boolean,
((TEXTNS,u'use-other-objects'), None): cnv_boolean,
((TEXTNS,u'use-outline-level'), None): cnv_boolean,
((TEXTNS,u'use-soft-page-breaks'), None): cnv_boolean,
((TEXTNS,u'use-spreadsheet-objects'), None): cnv_boolean,
((TEXTNS,u'use-tables'), None): cnv_boolean,
((TEXTNS,u'value'), None): cnv_nonNegativeInteger,
((TEXTNS,u'visited-style-name'), None): cnv_StyleNameRef,
((TEXTNS,u'volume'), None): cnv_string,
((TEXTNS,u'year'), None): cnv_string,
((XFORMSNS,u'bind'), None): cnv_string,
((XLINKNS,u'actuate'), None): cnv_string,
((XLINKNS,u'href'), None): cnv_anyURI,
((XLINKNS,u'show'), None): cnv_xlinkshow,
((XLINKNS,u'title'), None): cnv_string,
((XLINKNS,u'type'), None): cnv_string,
}
class AttrConverters:
    def convert(self, attribute, value, element):
        """Check/convert an attribute value based on its owning element.

        The most specific converter — keyed by (attribute, element qname) —
        wins; otherwise the element-independent converter for the attribute
        is tried. When neither is registered the value is returned as a
        plain unicode string.
        """
        for lookup_key in ((attribute, element.qname), (attribute, None)):
            conversion = attrconverters.get(lookup_key)
            if conversion is not None:
                return conversion(attribute, value, element)
        return unicode(value)
| bsd-3-clause |
dhanunjaya/neutron | neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py | 3 | 87886 | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import hashlib
import signal
import sys
import time
import uuid
import functools
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
import six
from six import moves
from neutron.agent.common import ovs_lib
from neutron.agent.common import polling
from neutron.agent.common import utils
from neutron.agent.l2.extensions import manager as ext_manager
from neutron.agent.linux import ip_lib
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.common import config
from neutron.common import constants as n_const
from neutron.common import exceptions
from neutron.common import ipv6_utils as ipv6
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants
from neutron.plugins.ml2.drivers.openvswitch.agent \
import ovs_dvr_neutron_agent
# Module-level logger.
LOG = logging.getLogger(__name__)
# Register the [AGENT] and [OVS] configuration option groups before any
# code below reads them.
cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
                      'agent.common.config')
cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
                      'common.config')
# A placeholder for dead vlans.
DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1
# Mask used to fold a 128-bit uuid integer down to 64 bits
# (see agent_uuid_stamp in OVSNeutronAgent.__init__).
UINT64_BITMASK = (1 << 64) - 1
class _mac_mydialect(netaddr.mac_unix):
    # Render each MAC octet as a two-digit, zero-padded lowercase hex word
    # (e.g. '00:1a:2b:3c:4d:5e'); separator comes from netaddr.mac_unix.
    word_fmt = '%.2x'
class DeviceListRetrievalError(exceptions.NeutronException):
    # Raised when the plugin cannot provide/confirm details for some
    # devices; %(devices)s is interpolated with the offending device list.
    message = _("Unable to retrieve port details for devices: %(devices)s ")
class LocalVLANMapping(object):
    """Record of how a single network is realized on this host.

    Ties the local VLAN chosen for a network to the network's type,
    physical network, segmentation id, the VIF ports plugged into it and
    the set of tunnel ofports used for flooding.
    """

    def __init__(self, vlan, network_type, physical_network, segmentation_id,
                 vif_ports=None):
        self.vlan = vlan
        self.network_type = network_type
        self.physical_network = physical_network
        self.segmentation_id = segmentation_id
        # Keep the caller-supplied mapping object (when given) so mutations
        # stay visible to the caller; otherwise start empty.
        self.vif_ports = {} if vif_ports is None else vif_ports
        # set of tunnel ports on which packets should be flooded
        self.tun_ofports = set()

    def __str__(self):
        return ("lv-id = %s type = %s phys-net = %s phys-id = %s" %
                (self.vlan, self.network_type, self.physical_network,
                 self.segmentation_id))
class OVSPluginApi(agent_rpc.PluginApi):
    # Agent-side RPC proxy to the plugin; no OVS-specific calls are added
    # beyond the base PluginApi.
    pass
def has_zero_prefixlen_address(ip_addresses):
    """Return True when any address in the iterable carries a /0 prefix."""
    for ip in ip_addresses:
        if netaddr.IPNetwork(ip).prefixlen == 0:
            return True
    return False
class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
                      l2population_rpc.L2populationRpcCallBackTunnelMixin,
                      dvr_rpc.DVRAgentRpcCallbackMixin):
    '''Implements OVS-based tunneling, VLANs and flat networks.

    Two local bridges are created: an integration bridge (defaults to
    'br-int') and a tunneling bridge (defaults to 'br-tun'). An
    additional bridge is created for each physical network interface
    used for VLANs and/or flat networks.

    All VM VIFs are plugged into the integration bridge. VM VIFs on a
    given virtual network share a common "local" VLAN (i.e. not
    propagated externally). The VLAN id of this local VLAN is mapped
    to the physical networking details realizing that virtual network.

    For virtual networks realized as GRE tunnels, a Logical Switch
    (LS) identifier is used to differentiate tenant traffic on
    inter-HV tunnels. A mesh of tunnels is created to other
    Hypervisors in the cloud. These tunnels originate and terminate on
    the tunneling bridge of each hypervisor. Port patching is done to
    connect local VLANs on the integration bridge to inter-hypervisor
    tunnels on the tunnel bridge.

    For each virtual network realized as a VLAN or flat network, a
    veth or a pair of patch ports is used to connect the local VLAN on
    the integration bridge with the physical network bridge, with flow
    rules adding, modifying, or stripping VLAN tags as necessary.
    '''

    # history
    # 1.0 Initial version
    # 1.1 Support Security Group RPC
    # 1.2 Support DVR (Distributed Virtual Router) RPC
    # 1.3 Added param devices_to_update to security_groups_provider_updated
    # 1.4 Added support for network_update
    # RPC API version exposed by this agent; bump it and extend the history
    # notes above whenever a new callback is added.
    target = oslo_messaging.Target(version='1.4')
    def __init__(self, bridge_classes, integ_br, tun_br, local_ip,
                 bridge_mappings, polling_interval, tunnel_types=None,
                 veth_mtu=None, l2_population=False,
                 enable_distributed_routing=False,
                 minimize_polling=False,
                 ovsdb_monitor_respawn_interval=(
                     constants.DEFAULT_OVSDBMON_RESPAWN),
                 arp_responder=False,
                 prevent_arp_spoofing=True,
                 use_veth_interconnection=False,
                 quitting_rpc_timeout=None,
                 conf=None):
        '''Constructor.

        :param bridge_classes: a dict for bridge classes.
        :param integ_br: name of the integration bridge.
        :param tun_br: name of the tunnel bridge.
        :param local_ip: local IP address of this hypervisor.
        :param bridge_mappings: mappings from physical network name to bridge.
        :param polling_interval: interval (secs) to poll DB.
        :param tunnel_types: A list of tunnel types to enable support for in
               the agent. If set, will automatically set enable_tunneling to
               True.
        :param veth_mtu: MTU size for veth interfaces.
        :param l2_population: Optional, whether L2 population is turned on
        :param minimize_polling: Optional, whether to minimize polling by
               monitoring ovsdb for interface changes.
        :param ovsdb_monitor_respawn_interval: Optional, when using polling
               minimization, the number of seconds to wait before respawning
               the ovsdb monitor.
        :param arp_responder: Optional, enable local ARP responder if it is
               supported.
        :param prevent_arp_spoofing: Optional, enable suppression of any ARP
               responses from ports that don't match an IP address that
               belongs to the ports. Spoofing rules will not be added to
               ports that have port security disabled.
        :param use_veth_interconnection: use veths instead of patch ports to
               interconnect the integration bridge to physical bridges.
        :param quitting_rpc_timeout: timeout in seconds for rpc calls after
               SIGTERM is received
        :param conf: an instance of ConfigOpts
        '''
        super(OVSNeutronAgent, self).__init__()
        self.conf = conf or cfg.CONF
        # init bridge classes with configured datapath type.
        self.br_int_cls, self.br_phys_cls, self.br_tun_cls = (
            functools.partial(bridge_classes[b],
                              datapath_type=self.conf.OVS.datapath_type)
            for b in ('br_int', 'br_phys', 'br_tun'))
        self.use_veth_interconnection = use_veth_interconnection
        self.veth_mtu = veth_mtu
        self.available_local_vlans = set(moves.range(p_const.MIN_VLAN_TAG,
                                                     p_const.MAX_VLAN_TAG))
        self.use_call = True
        self.tunnel_types = tunnel_types or []
        self.l2_pop = l2_population
        # TODO(ethuleau): Change ARP responder so it's not dependent on the
        # ML2 l2 population mechanism driver.
        self.enable_distributed_routing = enable_distributed_routing
        # ARP responder only works when l2 population is also enabled.
        self.arp_responder_enabled = arp_responder and self.l2_pop
        self.prevent_arp_spoofing = prevent_arp_spoofing
        # State blob periodically reported to the plugin by _report_state().
        self.agent_state = {
            'binary': 'neutron-openvswitch-agent',
            'host': self.conf.host,
            'topic': n_const.L2_AGENT_TOPIC,
            'configurations': {'bridge_mappings': bridge_mappings,
                               'tunnel_types': self.tunnel_types,
                               'tunneling_ip': local_ip,
                               'l2_population': self.l2_pop,
                               'arp_responder_enabled':
                               self.arp_responder_enabled,
                               'enable_distributed_routing':
                               self.enable_distributed_routing,
                               'log_agent_heartbeats':
                               self.conf.AGENT.log_agent_heartbeats},
            'agent_type': self.conf.AGENT.agent_type,
            'start_flag': True}
        if tunnel_types:
            self.enable_tunneling = True
        else:
            self.enable_tunneling = False
        # Validate agent configurations
        self._check_agent_configurations()
        # Keep track of int_br's device count for use by _report_state()
        self.int_br_device_count = 0
        # 64-bit stamp identifying flows installed by this agent run.
        self.agent_uuid_stamp = uuid.uuid4().int & UINT64_BITMASK
        self.int_br = self.br_int_cls(integ_br)
        self.setup_integration_br()
        # Stores port update notifications for processing in main rpc loop
        self.updated_ports = set()
        # Stores port delete notifications
        self.deleted_ports = set()
        self.network_ports = collections.defaultdict(set)
        # keeps association between ports and ofports to detect ofport change
        self.vifname_to_ofport_map = {}
        # NOTE: setup_rpc() must run first; it creates self.connection,
        # which init_extension_manager() reuses.
        self.setup_rpc()
        self.init_extension_manager(self.connection)
        self.bridge_mappings = bridge_mappings
        self.setup_physical_bridges(self.bridge_mappings)
        self.local_vlan_map = {}
        self.tun_br_ofports = {p_const.TYPE_GENEVE: {},
                               p_const.TYPE_GRE: {},
                               p_const.TYPE_VXLAN: {}}
        self.polling_interval = polling_interval
        self.minimize_polling = minimize_polling
        self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval
        self.local_ip = local_ip
        self.tunnel_count = 0
        self.vxlan_udp_port = self.conf.AGENT.vxlan_udp_port
        self.dont_fragment = self.conf.AGENT.dont_fragment
        self.tunnel_csum = cfg.CONF.AGENT.tunnel_csum
        self.tun_br = None
        self.patch_int_ofport = constants.OFPORT_INVALID
        self.patch_tun_ofport = constants.OFPORT_INVALID
        if self.enable_tunneling:
            # The patch_int_ofport and patch_tun_ofport are updated
            # here inside the call to setup_tunnel_br()
            self.setup_tunnel_br(tun_br)
        self.dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent(
            self.context,
            self.dvr_plugin_rpc,
            self.int_br,
            self.tun_br,
            self.bridge_mappings,
            self.phys_brs,
            self.int_ofports,
            self.phys_ofports,
            self.patch_int_ofport,
            self.patch_tun_ofport,
            self.conf.host,
            self.enable_tunneling,
            self.enable_distributed_routing)
        report_interval = self.conf.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)
        if self.enable_tunneling:
            self.setup_tunnel_br_flows()
        self.dvr_agent.setup_dvr_flows()
        # Collect additional bridges to monitor
        self.ancillary_brs = self.setup_ancillary_bridges(integ_br, tun_br)
        # In order to keep existed device's local vlan unchanged,
        # restore local vlan mapping at start
        self._restore_local_vlan_map()
        # Security group agent support
        self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
                self.sg_plugin_rpc, self.local_vlan_map,
                defer_refresh_firewall=True)
        # Initialize iteration counter
        self.iter_num = 0
        self.run_daemon_loop = True
        self.catch_sigterm = False
        self.catch_sighup = False
        # The initialization is complete; we can start receiving messages
        self.connection.consume_in_threads()
        self.quitting_rpc_timeout = quitting_rpc_timeout
def _report_state(self):
# How many devices are likely used by a VM
self.agent_state.get('configurations')['devices'] = (
self.int_br_device_count)
self.agent_state.get('configurations')['in_distributed_mode'] = (
self.dvr_agent.in_distributed_mode())
try:
self.state_rpc.report_state(self.context,
self.agent_state,
self.use_call)
self.use_call = False
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_LE("Failed reporting state!"))
    def _restore_local_vlan_map(self):
        """Rebuild self.local_vlan_map from tags already present in OVSDB.

        On restart the integration bridge still carries ports tagged by the
        previous agent run; re-provision the same local VLANs so existing
        devices keep their vlan unchanged.
        """
        cur_ports = self.int_br.get_vif_ports()
        port_names = [p.port_name for p in cur_ports]
        port_info = self.int_br.get_ports_attributes(
            "Port", columns=["name", "other_config", "tag"], ports=port_names)
        by_name = {x['name']: x for x in port_info}
        for port in cur_ports:
            # if a port was deleted between get_vif_ports and
            # get_ports_attributes, we will get a KeyError
            try:
                local_vlan_map = by_name[port.port_name]['other_config']
                local_vlan = by_name[port.port_name]['tag']
            except KeyError:
                continue
            if not local_vlan:
                continue
            net_uuid = local_vlan_map.get('net_uuid')
            if (net_uuid and net_uuid not in self.local_vlan_map
                    and local_vlan != DEAD_VLAN_TAG):
                # NOTE(review): assumes other_config carries a numeric
                # 'segmentation_id' -- presumably not the case for flat
                # networks (port_bound stores None there); verify restore
                # behavior for those network types.
                self.provision_local_vlan(local_vlan_map['net_uuid'],
                                          local_vlan_map['network_type'],
                                          local_vlan_map['physical_network'],
                                          int(local_vlan_map[
                                              'segmentation_id']),
                                          local_vlan)
def setup_rpc(self):
self.agent_id = 'ovs-agent-%s' % self.conf.host
self.topic = topics.AGENT
self.plugin_rpc = OVSPluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.dvr_plugin_rpc = dvr_rpc.DVRServerRpcApi(topics.PLUGIN)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
# RPC network init
self.context = context.get_admin_context_without_session()
# Handle updates from service
self.endpoints = [self]
# Define the listening consumers for the agent
consumers = [[topics.PORT, topics.UPDATE],
[topics.PORT, topics.DELETE],
[constants.TUNNEL, topics.UPDATE],
[constants.TUNNEL, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE],
[topics.DVR, topics.UPDATE],
[topics.NETWORK, topics.UPDATE]]
if self.l2_pop:
consumers.append([topics.L2POPULATION, topics.UPDATE])
self.connection = agent_rpc.create_consumers(self.endpoints,
self.topic,
consumers,
start_listening=False)
def init_extension_manager(self, connection):
ext_manager.register_opts(self.conf)
self.ext_manager = (
ext_manager.AgentExtensionsManager(self.conf))
self.ext_manager.initialize(
connection, constants.EXTENSION_DRIVER_TYPE)
def get_net_uuid(self, vif_id):
for network_id, vlan_mapping in six.iteritems(self.local_vlan_map):
if vif_id in vlan_mapping.vif_ports:
return network_id
def port_update(self, context, **kwargs):
port = kwargs.get('port')
# Put the port identifier in the updated_ports set.
# Even if full port details might be provided to this call,
# they are not used since there is no guarantee the notifications
# are processed in the same order as the relevant API requests
self.updated_ports.add(port['id'])
LOG.debug("port_update message processed for port %s", port['id'])
def port_delete(self, context, **kwargs):
port_id = kwargs.get('port_id')
self.deleted_ports.add(port_id)
self.updated_ports.discard(port_id)
LOG.debug("port_delete message processed for port %s", port_id)
def network_update(self, context, **kwargs):
network_id = kwargs['network']['id']
for port_id in self.network_ports[network_id]:
# notifications could arrive out of order, if the port is deleted
# we don't want to update it anymore
if port_id not in self.deleted_ports:
self.updated_ports.add(port_id)
LOG.debug("network_update message processed for network "
"%(network_id)s, with ports: %(ports)s",
{'network_id': network_id,
'ports': self.network_ports[network_id]})
def _clean_network_ports(self, port_id):
for port_set in self.network_ports.values():
if port_id in port_set:
port_set.remove(port_id)
break
    def process_deleted_ports(self, port_info):
        """Unwire every port whose delete notification has been queued.

        :param port_info: current scan results; its 'removed' set is used to
            skip ports that already disappeared from the bridge.
        """
        # don't try to process removed ports as deleted ports since
        # they are already gone
        if 'removed' in port_info:
            self.deleted_ports -= port_info['removed']
        # Snapshot now: the set is drained below, but the full list is still
        # needed for the firewall flush at the end.
        deleted_ports = list(self.deleted_ports)
        while self.deleted_ports:
            port_id = self.deleted_ports.pop()
            port = self.int_br.get_vif_port_by_id(port_id)
            self._clean_network_ports(port_id)
            self.ext_manager.delete_port(self.context,
                                         {"vif_port": port,
                                          "port_id": port_id})
            # move to dead VLAN so deleted ports no
            # longer have access to the network
            if port:
                # don't log errors since there is a chance someone will be
                # removing the port from the bridge at the same time
                self.port_dead(port, log_errors=False)
            self.port_unbound(port_id)
        # Flush firewall rules after ports are put on dead VLAN to be
        # more secure
        self.sg_agent.remove_devices_filter(deleted_ports)
    def tunnel_update(self, context, **kwargs):
        """RPC callback: a peer agent announced its tunnel endpoint.

        Creates a tunnel port to the peer unless l2population is enabled
        (tunnel ports are then driven by fdb updates).
        """
        LOG.debug("tunnel_update received")
        if not self.enable_tunneling:
            return
        tunnel_ip = kwargs.get('tunnel_ip')
        tunnel_ip_hex = self.get_ip_in_hex(tunnel_ip)
        if not tunnel_ip_hex:
            return
        tunnel_type = kwargs.get('tunnel_type')
        if not tunnel_type:
            LOG.error(_LE("No tunnel_type specified, cannot create tunnels"))
            return
        if tunnel_type not in self.tunnel_types:
            LOG.error(_LE("tunnel_type %s not supported by agent"),
                      tunnel_type)
            return
        # Ignore announcements about our own endpoint.
        if tunnel_ip == self.local_ip:
            return
        # Port name encodes type and peer IP, e.g. 'vxlan-0a000001'.
        tun_name = '%s-%s' % (tunnel_type, tunnel_ip_hex)
        if not self.l2_pop:
            self._setup_tunnel_port(self.tun_br, tun_name, tunnel_ip,
                                    tunnel_type)
    def tunnel_delete(self, context, **kwargs):
        """RPC callback: a peer agent's tunnel endpoint went away.

        Looks up the ofport of the tunnel toward the peer and asks for its
        cleanup.
        """
        LOG.debug("tunnel_delete received")
        if not self.enable_tunneling:
            return
        tunnel_ip = kwargs.get('tunnel_ip')
        if not tunnel_ip:
            LOG.error(_LE("No tunnel_ip specified, cannot delete tunnels"))
            return
        tunnel_type = kwargs.get('tunnel_type')
        if not tunnel_type:
            LOG.error(_LE("No tunnel_type specified, cannot delete tunnels"))
            return
        if tunnel_type not in self.tunnel_types:
            LOG.error(_LE("tunnel_type %s not supported by agent"),
                      tunnel_type)
            return
        ofport = self.tun_br_ofports[tunnel_type].get(tunnel_ip)
        self.cleanup_tunnel_port(self.tun_br, ofport, tunnel_type)
def _tunnel_port_lookup(self, network_type, remote_ip):
return self.tun_br_ofports[network_type].get(remote_ip)
def fdb_add(self, context, fdb_entries):
LOG.debug("fdb_add received")
for lvm, agent_ports in self.get_agent_ports(fdb_entries,
self.local_vlan_map):
agent_ports.pop(self.local_ip, None)
if len(agent_ports):
if not self.enable_distributed_routing:
with self.tun_br.deferred() as deferred_br:
self.fdb_add_tun(context, deferred_br, lvm,
agent_ports, self._tunnel_port_lookup)
else:
self.fdb_add_tun(context, self.tun_br, lvm,
agent_ports, self._tunnel_port_lookup)
def fdb_remove(self, context, fdb_entries):
LOG.debug("fdb_remove received")
for lvm, agent_ports in self.get_agent_ports(fdb_entries,
self.local_vlan_map):
agent_ports.pop(self.local_ip, None)
if len(agent_ports):
if not self.enable_distributed_routing:
with self.tun_br.deferred() as deferred_br:
self.fdb_remove_tun(context, deferred_br, lvm,
agent_ports,
self._tunnel_port_lookup)
else:
self.fdb_remove_tun(context, self.tun_br, lvm,
agent_ports, self._tunnel_port_lookup)
    def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
        """Install forwarding state for one fdb entry on the tunnel bridge.

        A flooding entry adds the tunnel port to the network's flood set;
        any other entry installs a unicast flow (plus an ARP responder
        entry) for the port's MAC.
        """
        if port_info == n_const.FLOODING_ENTRY:
            lvm.tun_ofports.add(ofport)
            br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id,
                                    lvm.tun_ofports)
        else:
            self.setup_entry_for_arp_reply(br, 'add', lvm.vlan,
                                           port_info.mac_address,
                                           port_info.ip_address)
            br.install_unicast_to_tun(lvm.vlan,
                                      lvm.segmentation_id,
                                      ofport,
                                      port_info.mac_address)
    def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
        """Remove forwarding state for one fdb entry from the tunnel bridge.

        Mirrors add_fdb_flow: flooding entries shrink (or delete) the flood
        set, other entries drop the unicast flow and ARP responder entry.
        """
        if port_info == n_const.FLOODING_ENTRY:
            if ofport not in lvm.tun_ofports:
                LOG.debug("attempt to remove a non-existent port %s", ofport)
                return
            lvm.tun_ofports.remove(ofport)
            if len(lvm.tun_ofports) > 0:
                br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id,
                                        lvm.tun_ofports)
            else:
                # This local vlan doesn't require any more tunnelling
                br.delete_flood_to_tun(lvm.vlan)
        else:
            self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan,
                                           port_info.mac_address,
                                           port_info.ip_address)
            br.delete_unicast_to_tun(lvm.vlan, port_info.mac_address)
def _fdb_chg_ip(self, context, fdb_entries):
LOG.debug("update chg_ip received")
with self.tun_br.deferred() as deferred_br:
self.fdb_chg_ip_tun(context, deferred_br, fdb_entries,
self.local_ip, self.local_vlan_map)
def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address,
ip_address):
'''Set the ARP respond entry.
When the l2 population mechanism driver and OVS supports to edit ARP
fields, a table (ARP_RESPONDER) to resolve ARP locally is added to the
tunnel bridge.
'''
if not self.arp_responder_enabled:
return
ip = netaddr.IPAddress(ip_address)
if ip.version == 6:
return
ip = str(ip)
mac = str(netaddr.EUI(mac_address, dialect=_mac_mydialect))
if action == 'add':
br.install_arp_responder(local_vid, ip, mac)
elif action == 'remove':
br.delete_arp_responder(local_vid, ip)
else:
LOG.warning(_LW('Action %s not supported'), action)
def _local_vlan_for_flat(self, lvid, physical_network):
phys_br = self.phys_brs[physical_network]
phys_port = self.phys_ofports[physical_network]
int_br = self.int_br
int_port = self.int_ofports[physical_network]
phys_br.provision_local_vlan(port=phys_port, lvid=lvid,
segmentation_id=None,
distributed=False)
int_br.provision_local_vlan(port=int_port, lvid=lvid,
segmentation_id=None)
def _local_vlan_for_vlan(self, lvid, physical_network, segmentation_id):
distributed = self.enable_distributed_routing
phys_br = self.phys_brs[physical_network]
phys_port = self.phys_ofports[physical_network]
int_br = self.int_br
int_port = self.int_ofports[physical_network]
phys_br.provision_local_vlan(port=phys_port, lvid=lvid,
segmentation_id=segmentation_id,
distributed=distributed)
int_br.provision_local_vlan(port=int_port, lvid=lvid,
segmentation_id=segmentation_id)
    def provision_local_vlan(self, net_uuid, network_type, physical_network,
                             segmentation_id, local_vlan=None):
        '''Provisions a local VLAN.

        :param net_uuid: the uuid of the network associated with this vlan.
        :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat',
                             'local', 'geneve')
        :param physical_network: the physical network for 'vlan' or 'flat'
        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
        :param local_vlan: preferred local vlan id; reused if still free
                           (e.g. when restoring mappings after a restart).
        '''
        # On a restart or crash of OVS, the network associated with this VLAN
        # will already be assigned, so check for that here before assigning a
        # new one.
        lvm = self.local_vlan_map.get(net_uuid)
        if lvm:
            lvid = lvm.vlan
        else:
            if local_vlan in self.available_local_vlans:
                lvid = local_vlan
                self.available_local_vlans.remove(local_vlan)
            else:
                if not self.available_local_vlans:
                    LOG.error(_LE("No local VLAN available for net-id=%s"),
                              net_uuid)
                    return
                lvid = self.available_local_vlans.pop()
            self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid,
                                                             network_type,
                                                             physical_network,
                                                             segmentation_id)
        LOG.info(_LI("Assigning %(vlan_id)s as local vlan for "
                     "net-id=%(net_uuid)s"),
                 {'vlan_id': lvid, 'net_uuid': net_uuid})
        if network_type in constants.TUNNEL_NETWORK_TYPES:
            if self.enable_tunneling:
                # outbound broadcast/multicast
                ofports = list(self.tun_br_ofports[network_type].values())
                if ofports:
                    self.tun_br.install_flood_to_tun(lvid,
                                                     segmentation_id,
                                                     ofports)
                # inbound from tunnels: set lvid in the right table
                # and resubmit to Table LEARN_FROM_TUN for mac learning
                if self.enable_distributed_routing:
                    self.dvr_agent.process_tunneled_network(
                        network_type, lvid, segmentation_id)
                else:
                    self.tun_br.provision_local_vlan(
                        network_type=network_type, lvid=lvid,
                        segmentation_id=segmentation_id)
            else:
                LOG.error(_LE("Cannot provision %(network_type)s network for "
                              "net-id=%(net_uuid)s - tunneling disabled"),
                          {'network_type': network_type,
                           'net_uuid': net_uuid})
        elif network_type == p_const.TYPE_FLAT:
            if physical_network in self.phys_brs:
                self._local_vlan_for_flat(lvid, physical_network)
            else:
                LOG.error(_LE("Cannot provision flat network for "
                              "net-id=%(net_uuid)s - no bridge for "
                              "physical_network %(physical_network)s"),
                          {'net_uuid': net_uuid,
                           'physical_network': physical_network})
        elif network_type == p_const.TYPE_VLAN:
            if physical_network in self.phys_brs:
                self._local_vlan_for_vlan(lvid, physical_network,
                                          segmentation_id)
            else:
                LOG.error(_LE("Cannot provision VLAN network for "
                              "net-id=%(net_uuid)s - no bridge for "
                              "physical_network %(physical_network)s"),
                          {'net_uuid': net_uuid,
                           'physical_network': physical_network})
        elif network_type == p_const.TYPE_LOCAL:
            # no flows needed for local networks
            pass
        else:
            LOG.error(_LE("Cannot provision unknown network type "
                          "%(network_type)s for net-id=%(net_uuid)s"),
                      {'network_type': network_type,
                       'net_uuid': net_uuid})
    def reclaim_local_vlan(self, net_uuid):
        '''Reclaim a local VLAN.

        Removes the network's mapping, tears down the flows that realized
        it (per network type) and returns the vlan id to the free pool.

        :param net_uuid: the network uuid associated with this vlan.
        '''
        lvm = self.local_vlan_map.pop(net_uuid, None)
        if lvm is None:
            LOG.debug("Network %s not used on agent.", net_uuid)
            return
        LOG.info(_LI("Reclaiming vlan = %(vlan_id)s from "
                     "net-id = %(net_uuid)s"),
                 {'vlan_id': lvm.vlan, 'net_uuid': net_uuid})
        if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
            if self.enable_tunneling:
                self.tun_br.reclaim_local_vlan(
                    network_type=lvm.network_type,
                    segmentation_id=lvm.segmentation_id)
                self.tun_br.delete_flood_to_tun(lvm.vlan)
                self.tun_br.delete_unicast_to_tun(lvm.vlan, None)
                self.tun_br.delete_arp_responder(lvm.vlan, None)
                if self.l2_pop:
                    # Try to remove tunnel ports if not used by other networks
                    for ofport in lvm.tun_ofports:
                        self.cleanup_tunnel_port(self.tun_br, ofport,
                                                 lvm.network_type)
        elif lvm.network_type == p_const.TYPE_FLAT:
            if lvm.physical_network in self.phys_brs:
                # outbound
                br = self.phys_brs[lvm.physical_network]
                br.reclaim_local_vlan(
                    port=self.phys_ofports[lvm.physical_network],
                    lvid=lvm.vlan)
                # inbound
                br = self.int_br
                br.reclaim_local_vlan(
                    port=self.int_ofports[lvm.physical_network],
                    segmentation_id=None)
        elif lvm.network_type == p_const.TYPE_VLAN:
            if lvm.physical_network in self.phys_brs:
                # outbound
                br = self.phys_brs[lvm.physical_network]
                br.reclaim_local_vlan(
                    port=self.phys_ofports[lvm.physical_network],
                    lvid=lvm.vlan)
                # inbound
                br = self.int_br
                br.reclaim_local_vlan(
                    port=self.int_ofports[lvm.physical_network],
                    segmentation_id=lvm.segmentation_id)
        elif lvm.network_type == p_const.TYPE_LOCAL:
            # no flows needed for local networks
            pass
        else:
            LOG.error(_LE("Cannot reclaim unknown network type "
                          "%(network_type)s for net-id=%(net_uuid)s"),
                      {'network_type': lvm.network_type,
                       'net_uuid': net_uuid})
        # The vlan id goes back to the pool regardless of network type.
        self.available_local_vlans.add(lvm.vlan)
    def port_bound(self, port, net_uuid,
                   network_type, physical_network,
                   segmentation_id, fixed_ips, device_owner,
                   ovs_restarted):
        '''Bind port to net_uuid/lsw_id and install flow for inbound traffic
        to vm.

        :param port: a ovs_lib.VifPort object.
        :param net_uuid: the net_uuid this port is to be associated with.
        :param network_type: the network type ('gre', 'vlan', 'flat', 'local')
        :param physical_network: the physical network for 'vlan' or 'flat'
        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
        :param fixed_ips: the ip addresses assigned to this port
        :param device_owner: the string indicative of owner of this port
        :param ovs_restarted: indicates if this is called for an OVS restart.
        '''
        if net_uuid not in self.local_vlan_map or ovs_restarted:
            self.provision_local_vlan(net_uuid, network_type,
                                      physical_network, segmentation_id)
        lvm = self.local_vlan_map[net_uuid]
        lvm.vif_ports[port.vif_id] = port
        self.dvr_agent.bind_port_to_dvr(port, lvm,
                                        fixed_ips,
                                        device_owner)
        # Persist the network details in the port's other_config column so
        # the mapping can be rebuilt after an agent restart
        # (see _restore_local_vlan_map).
        port_other_config = self.int_br.db_get_val("Port", port.port_name,
                                                   "other_config")
        vlan_mapping = {'net_uuid': net_uuid,
                        'network_type': network_type,
                        'physical_network': physical_network,
                        'segmentation_id': segmentation_id}
        port_other_config.update(vlan_mapping)
        self.int_br.set_db_attribute("Port", port.port_name, "other_config",
                                     port_other_config)
    def _bind_devices(self, need_binding_ports):
        """Tag each port with its network's local vlan and report status.

        :param need_binding_ports: list of dicts carrying at least
            'vif_port', 'device', 'network_id' and 'admin_state_up'.
        :raises DeviceListRetrievalError: when the plugin reports failures
            for some devices.
        """
        devices_up = []
        devices_down = []
        port_names = [p['vif_port'].port_name for p in need_binding_ports]
        port_info = self.int_br.get_ports_attributes(
            "Port", columns=["name", "tag"], ports=port_names, if_exists=True)
        tags_by_name = {x['name']: x['tag'] for x in port_info}
        for port_detail in need_binding_ports:
            lvm = self.local_vlan_map.get(port_detail['network_id'])
            if not lvm:
                # network for port was deleted. skip this port since it
                # will need to be handled as a DEAD port in the next scan
                continue
            port = port_detail['vif_port']
            device = port_detail['device']
            # Do not bind a port if it's already bound
            cur_tag = tags_by_name.get(port.port_name)
            if cur_tag is None:
                LOG.info(_LI("Port %s was deleted concurrently, skipping it"),
                         port.port_name)
                continue
            if cur_tag != lvm.vlan:
                # stale tag: drop the port's flows before retagging below
                self.int_br.delete_flows(in_port=port.ofport)
            if self.prevent_arp_spoofing:
                self.setup_arp_spoofing_protection(self.int_br,
                                                   port, port_detail)
            if cur_tag != lvm.vlan:
                self.int_br.set_db_attribute(
                    "Port", port.port_name, "tag", lvm.vlan)
            # update plugin about port status
            # FIXME(salv-orlando): Failures while updating device status
            # must be handled appropriately. Otherwise this might prevent
            # neutron server from sending network-vif-* events to the nova
            # API server, thus possibly preventing instance spawn.
            if port_detail.get('admin_state_up'):
                LOG.debug("Setting status for %s to UP", device)
                devices_up.append(device)
            else:
                LOG.debug("Setting status for %s to DOWN", device)
                devices_down.append(device)
        failed_devices = []
        if devices_up or devices_down:
            devices_set = self.plugin_rpc.update_device_list(
                self.context, devices_up, devices_down, self.agent_id,
                self.conf.host)
            failed_devices = (devices_set.get('failed_devices_up') +
                              devices_set.get('failed_devices_down'))
        if failed_devices:
            LOG.error(_LE("Configuration for devices %s failed!"),
                      failed_devices)
            # TODO(rossella_s) handle better the resync in next patches,
            # this is just to preserve the current behavior
            raise DeviceListRetrievalError(devices=failed_devices)
        LOG.info(_LI("Configuration for devices up %(up)s and devices "
                     "down %(down)s completed."),
                 {'up': devices_up, 'down': devices_down})
    @staticmethod
    def setup_arp_spoofing_protection(bridge, vif, port_details):
        """Install ARP/ICMPv6-NA anti-spoofing flows restricting vif to its
        addresses.

        Ports with port security disabled and 'network:'-owned ports are
        exempted (their old rules are still cleared first).
        """
        # clear any previous flows related to this port in our ARP table
        bridge.delete_arp_spoofing_protection(port=vif.ofport)
        if not port_details.get('port_security_enabled', True):
            LOG.info(_LI("Skipping ARP spoofing rules for port '%s' because "
                         "it has port security disabled"), vif.port_name)
            return
        if port_details['device_owner'].startswith('network:'):
            LOG.debug("Skipping ARP spoofing rules for network owned port "
                      "'%s'.", vif.port_name)
            return
        # collect all of the addresses and cidrs that belong to the port
        addresses = {f['ip_address'] for f in port_details['fixed_ips']}
        mac_addresses = {vif.vif_mac}
        if port_details.get('allowed_address_pairs'):
            addresses |= {p['ip_address']
                          for p in port_details['allowed_address_pairs']}
            mac_addresses |= {p['mac_address']
                              for p in port_details['allowed_address_pairs']
                              if p.get('mac_address')}
        ipv6_addresses = {ip for ip in addresses
                          if netaddr.IPNetwork(ip).version == 6}
        # Allow neighbor advertisements for LLA address.
        ipv6_addresses |= {str(ipv6.get_ipv6_addr_by_EUI64(
                               n_const.IPV6_LLA_PREFIX, mac))
                           for mac in mac_addresses}
        if not has_zero_prefixlen_address(ipv6_addresses):
            # Install protection only when prefix is not zero because a /0
            # prefix allows any address anyway and the nd_target can only
            # match on /1 or more.
            bridge.install_icmpv6_na_spoofing_protection(
                port=vif.ofport, ip_addresses=ipv6_addresses)
        ipv4_addresses = {ip for ip in addresses
                          if netaddr.IPNetwork(ip).version == 4}
        if not has_zero_prefixlen_address(ipv4_addresses):
            # Install protection only when prefix is not zero because a /0
            # prefix allows any address anyway and the ARP_SPA can only
            # match on /1 or more.
            bridge.install_arp_spoofing_protection(
                port=vif.ofport, ip_addresses=ipv4_addresses)
def port_unbound(self, vif_id, net_uuid=None):
'''Unbind port.
Removes corresponding local vlan mapping object if this is its last
VIF.
:param vif_id: the id of the vif
:param net_uuid: the net_uuid this port is associated with.
'''
if net_uuid is None:
net_uuid = self.get_net_uuid(vif_id)
if not self.local_vlan_map.get(net_uuid):
LOG.info(_LI('port_unbound(): net_uuid %s not in local_vlan_map'),
net_uuid)
return
lvm = self.local_vlan_map[net_uuid]
if vif_id in lvm.vif_ports:
vif_port = lvm.vif_ports[vif_id]
self.dvr_agent.unbind_port_from_dvr(vif_port, lvm)
lvm.vif_ports.pop(vif_id, None)
if not lvm.vif_ports:
self.reclaim_local_vlan(net_uuid)
def port_dead(self, port, log_errors=True):
'''Once a port has no binding, put it on the "dead vlan".
:param port: a ovs_lib.VifPort object.
'''
# Don't kill a port if it's already dead
cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag",
log_errors=log_errors)
if cur_tag != DEAD_VLAN_TAG:
self.int_br.set_db_attribute("Port", port.port_name, "tag",
DEAD_VLAN_TAG, log_errors=log_errors)
self.int_br.drop_port(in_port=port.ofport)
    def setup_integration_br(self):
        '''Setup the integration bridge.
        '''
        self.int_br.set_agent_uuid_stamp(self.agent_uuid_stamp)
        # Ensure the integration bridge is created.
        # ovs_lib.OVSBridge.create() will run
        # ovs-vsctl -- --may-exist add-br BRIDGE_NAME
        # which does nothing if bridge already exists.
        self.int_br.create()
        self.int_br.set_secure_mode()
        self.int_br.setup_controllers(self.conf)
        # Remove any stale patch port towards the tunnel bridge; it is
        # re-added by setup_tunnel_br() when tunneling is enabled.
        self.int_br.delete_port(self.conf.OVS.int_peer_patch_port)
        if self.conf.AGENT.drop_flows_on_start:
            self.int_br.delete_flows()
        self.int_br.setup_default_table()
    def setup_ancillary_bridges(self, integ_br, tun_br):
        '''Setup ancillary bridges - for example br-ex.

        :param integ_br: name of the integration bridge.
        :param tun_br: name of the tunnel bridge.
        :returns: list of ovs_lib.OVSBridge objects for the bridges on this
            host that are not managed by this agent but have an external
            bridge-id configured.
        '''
        ovs = ovs_lib.BaseOVS()
        ovs_bridges = set(ovs.get_bridges())
        # Remove all known bridges
        ovs_bridges.remove(integ_br)
        if self.enable_tunneling:
            ovs_bridges.remove(tun_br)
        br_names = [self.phys_brs[physical_network].br_name for
                    physical_network in self.phys_brs]
        ovs_bridges.difference_update(br_names)
        # Filter list of bridges to those that have external
        # bridge-id's configured
        br_names = []
        for bridge in ovs_bridges:
            bridge_id = ovs.get_bridge_external_bridge_id(bridge)
            if bridge_id != bridge:
                br_names.append(bridge)
        ovs_bridges.difference_update(br_names)
        ancillary_bridges = []
        for bridge in ovs_bridges:
            br = ovs_lib.OVSBridge(bridge)
            LOG.info(_LI('Adding %s to list of bridges.'), bridge)
            ancillary_bridges.append(br)
        return ancillary_bridges
def setup_tunnel_br(self, tun_br_name=None):
'''(re)initialize the tunnel bridge.
Creates tunnel bridge, and links it to the integration bridge
using a patch port.
:param tun_br_name: the name of the tunnel bridge.
'''
if not self.tun_br:
self.tun_br = self.br_tun_cls(tun_br_name)
self.tun_br.set_agent_uuid_stamp(self.agent_uuid_stamp)
if not self.tun_br.bridge_exists(self.tun_br.br_name):
self.tun_br.create(secure_mode=True)
self.tun_br.setup_controllers(self.conf)
if (not self.int_br.port_exists(self.conf.OVS.int_peer_patch_port) or
self.patch_tun_ofport == ovs_lib.INVALID_OFPORT):
self.patch_tun_ofport = self.int_br.add_patch_port(
self.conf.OVS.int_peer_patch_port,
self.conf.OVS.tun_peer_patch_port)
if (not self.tun_br.port_exists(self.conf.OVS.tun_peer_patch_port) or
self.patch_int_ofport == ovs_lib.INVALID_OFPORT):
self.patch_int_ofport = self.tun_br.add_patch_port(
self.conf.OVS.tun_peer_patch_port,
self.conf.OVS.int_peer_patch_port)
if ovs_lib.INVALID_OFPORT in (self.patch_tun_ofport,
self.patch_int_ofport):
LOG.error(_LE("Failed to create OVS patch port. Cannot have "
"tunneling enabled on this agent, since this "
"version of OVS does not support tunnels or patch "
"ports. Agent terminated!"))
exit(1)
if self.conf.AGENT.drop_flows_on_start:
self.tun_br.delete_flows()
    def setup_tunnel_br_flows(self):
        '''Setup the tunnel bridge.
        Add all flows to the tunnel bridge.
        '''
        # Requires the patch ports created by setup_tunnel_br().
        self.tun_br.setup_default_table(self.patch_int_ofport,
                                        self.arp_responder_enabled)
    def get_peer_name(self, prefix, name):
        """Construct a peer name based on the prefix and name.
        The peer name can not exceed the maximum length allowed for a linux
        device. Longer names are hashed to help ensure uniqueness.

        :param prefix: device-name prefix (e.g. a peer-patch prefix).
        :param name: bridge name to derive the peer name from.
        :returns: prefix + name, shortened with a sha1 suffix if too long.
        """
        if len(prefix + name) <= n_const.DEVICE_NAME_MAX_LEN:
            return prefix + name
        # We can't just truncate because bridges may be distinguished
        # by an ident at the end. A hash over the name should be unique.
        # Leave part of the bridge name on for easier identification
        hashlen = 6
        namelen = n_const.DEVICE_NAME_MAX_LEN - len(prefix) - hashlen
        # sha1 requires bytes; encode unicode names before hashing.
        if isinstance(name, six.text_type):
            hashed_name = hashlib.sha1(name.encode('utf-8'))
        else:
            hashed_name = hashlib.sha1(name)
        new_name = ('%(prefix)s%(truncated)s%(hash)s' %
                    {'prefix': prefix, 'truncated': name[0:namelen],
                     'hash': hashed_name.hexdigest()[0:hashlen]})
        LOG.warning(_LW("Creating an interface named %(name)s exceeds the "
                        "%(limit)d character limitation. It was shortened to "
                        "%(new_name)s to fit."),
                    {'name': name, 'limit': n_const.DEVICE_NAME_MAX_LEN,
                     'new_name': new_name})
        return new_name
    def setup_physical_bridges(self, bridge_mappings):
        '''Setup the physical network bridges.
        Creates physical network bridges and links them to the
        integration bridge using veths or patch ports.
        :param bridge_mappings: map physical network names to bridge names.
        :raises SystemExit: when a mapped bridge does not exist on the host.
        '''
        self.phys_brs = {}
        self.int_ofports = {}
        self.phys_ofports = {}
        ip_wrapper = ip_lib.IPWrapper()
        ovs = ovs_lib.BaseOVS()
        ovs_bridges = ovs.get_bridges()
        for physical_network, bridge in six.iteritems(bridge_mappings):
            LOG.info(_LI("Mapping physical network %(physical_network)s to "
                         "bridge %(bridge)s"),
                     {'physical_network': physical_network,
                      'bridge': bridge})
            # setup physical bridge
            if bridge not in ovs_bridges:
                LOG.error(_LE("Bridge %(bridge)s for physical network "
                              "%(physical_network)s does not exist. Agent "
                              "terminated!"),
                          {'physical_network': physical_network,
                           'bridge': bridge})
                sys.exit(1)
            br = self.br_phys_cls(bridge)
            br.setup_controllers(self.conf)
            br.setup_default_table()
            self.phys_brs[physical_network] = br
            # interconnect physical and integration bridges using veth/patchs
            int_if_name = self.get_peer_name(constants.PEER_INTEGRATION_PREFIX,
                                             bridge)
            phys_if_name = self.get_peer_name(constants.PEER_PHYSICAL_PREFIX,
                                              bridge)
            # Interface type of port for physical and integration bridges must
            # be same, so check only one of them.
            int_type = self.int_br.db_get_val("Interface", int_if_name, "type")
            if self.use_veth_interconnection:
                # Drop ports if the interface types doesn't match the
                # configuration value.
                if int_type == 'patch':
                    self.int_br.delete_port(int_if_name)
                    br.delete_port(phys_if_name)
                if ip_lib.device_exists(int_if_name):
                    ip_lib.IPDevice(int_if_name).link.delete()
                    # Give udev a chance to process its rules here, to avoid
                    # race conditions between commands launched by udev rules
                    # and the subsequent call to ip_wrapper.add_veth
                    utils.execute(['udevadm', 'settle', '--timeout=10'])
                int_veth, phys_veth = ip_wrapper.add_veth(int_if_name,
                                                          phys_if_name)
                int_ofport = self.int_br.add_port(int_veth)
                phys_ofport = br.add_port(phys_veth)
            else:
                # Drop ports if the interface type doesn't match the
                # configuration value
                if int_type == 'veth':
                    self.int_br.delete_port(int_if_name)
                    br.delete_port(phys_if_name)
                # Create patch ports without associating them in order to block
                # untranslated traffic before association
                int_ofport = self.int_br.add_patch_port(
                    int_if_name, constants.NONEXISTENT_PEER)
                phys_ofport = br.add_patch_port(
                    phys_if_name, constants.NONEXISTENT_PEER)
            self.int_ofports[physical_network] = int_ofport
            self.phys_ofports[physical_network] = phys_ofport
            # block all untranslated traffic between bridges
            self.int_br.drop_port(in_port=int_ofport)
            br.drop_port(in_port=phys_ofport)
            if self.use_veth_interconnection:
                # enable veth to pass traffic
                int_veth.link.set_up()
                phys_veth.link.set_up()
                if self.veth_mtu:
                    # set up mtu size for veth interfaces
                    int_veth.link.set_mtu(self.veth_mtu)
                    phys_veth.link.set_mtu(self.veth_mtu)
            else:
                # associate patch ports to pass traffic
                self.int_br.set_db_attribute('Interface', int_if_name,
                                             'options:peer', phys_if_name)
                br.set_db_attribute('Interface', phys_if_name,
                                    'options:peer', int_if_name)
    def update_stale_ofport_rules(self):
        """Refresh ofport-based (ARP spoofing) rules after ofport changes.

        :returns: list of port names whose ofport changed since the last
            iteration, so callers can re-process them as updated ports.
        """
        # right now the ARP spoofing rules are the only thing that utilizes
        # ofport-based rules, so make arp_spoofing protection a conditional
        # until something else uses ofport
        if not self.prevent_arp_spoofing:
            return []
        previous = self.vifname_to_ofport_map
        current = self.int_br.get_vif_port_to_ofport_map()
        # if any ofport numbers have changed, re-process the devices as
        # added ports so any rules based on ofport numbers are updated.
        moved_ports = self._get_ofport_moves(current, previous)
        # delete any stale rules based on removed ofports
        ofports_deleted = set(previous.values()) - set(current.values())
        for ofport in ofports_deleted:
            self.int_br.delete_arp_spoofing_protection(port=ofport)
        # store map for next iteration
        self.vifname_to_ofport_map = current
        return moved_ports
@staticmethod
def _get_ofport_moves(current, previous):
"""Returns a list of moved ports.
Takes two port->ofport maps and returns a list ports that moved to a
different ofport. Deleted ports are not included.
"""
port_moves = []
for name, ofport in previous.items():
if name not in current:
continue
current_ofport = current[name]
if ofport != current_ofport:
port_moves.append(name)
return port_moves
def _get_port_info(self, registered_ports, cur_ports,
readd_registered_ports):
port_info = {'current': cur_ports}
# FIXME(salv-orlando): It's not really necessary to return early
# if nothing has changed.
if not readd_registered_ports and cur_ports == registered_ports:
return port_info
if readd_registered_ports:
port_info['added'] = cur_ports
else:
port_info['added'] = cur_ports - registered_ports
# Update port_info with ports not found on the integration bridge
port_info['removed'] = registered_ports - cur_ports
return port_info
    def scan_ports(self, registered_ports, sync, updated_ports=None):
        """Diff the current VIF ports on br-int against the known set.

        :param registered_ports: port ids seen in the previous iteration.
        :param sync: when True, report every current port as 'added'.
        :param updated_ports: port ids flagged as updated via RPC, if any.
        :returns: dict with 'current' and optionally 'added'/'removed'/
            'updated' port-id sets.
        """
        cur_ports = self.int_br.get_vif_port_set()
        self.int_br_device_count = len(cur_ports)
        port_info = self._get_port_info(registered_ports, cur_ports, sync)
        if updated_ports is None:
            updated_ports = set()
        # Ports that silently lost their VLAN tag also count as updated.
        updated_ports.update(self.check_changed_vlans())
        if updated_ports:
            # Some updated ports might have been removed in the
            # meanwhile, and therefore should not be processed.
            # In this case the updated port won't be found among
            # current ports.
            updated_ports &= cur_ports
            if updated_ports:
                port_info['updated'] = updated_ports
        return port_info
    def scan_ancillary_ports(self, registered_ports, sync):
        """Diff VIF ports on the ancillary bridges against the known set.

        :returns: dict in the same format as _get_port_info().
        """
        cur_ports = set()
        for bridge in self.ancillary_brs:
            cur_ports |= bridge.get_vif_port_set()
        return self._get_port_info(registered_ports, cur_ports, sync)
    def check_changed_vlans(self):
        """Return ports which have lost their vlan tag.
        The returned value is a set of port ids of the ports concerned by a
        vlan tag loss.
        """
        # Compare each bound port's expected local VLAN with the tag
        # currently recorded in OVSDB.
        port_tags = self.int_br.get_port_tag_dict()
        changed_ports = set()
        for lvm in self.local_vlan_map.values():
            for port in lvm.vif_ports.values():
                if (
                    port.port_name in port_tags
                    and port_tags[port.port_name] != lvm.vlan
                ):
                    LOG.info(
                        _LI("Port '%(port_name)s' has lost "
                            "its vlan tag '%(vlan_tag)d'!"),
                        {'port_name': port.port_name,
                         'vlan_tag': lvm.vlan}
                    )
                    changed_ports.add(port.vif_id)
        return changed_ports
    def treat_vif_port(self, vif_port, port_id, network_id, network_type,
                       physical_network, segmentation_id, admin_state_up,
                       fixed_ips, device_owner, ovs_restarted):
        """Wire one VIF port (bind) or kill it (dead VLAN) per admin state.

        :returns: True when the caller still needs to report the port
            binding; False when the port was put on the dead VLAN instead.
        """
        # When this function is called for a port, the port should have
        # an OVS ofport configured, as only these ports were considered
        # for being treated. If that does not happen, it is a potential
        # error condition of which operators should be aware
        port_needs_binding = True
        if not vif_port.ofport:
            LOG.warn(_LW("VIF port: %s has no ofport configured, "
                         "and might not be able to transmit"), vif_port.vif_id)
        if vif_port:
            if admin_state_up:
                self.port_bound(vif_port, network_id, network_type,
                                physical_network, segmentation_id,
                                fixed_ips, device_owner, ovs_restarted)
            else:
                # Administratively down: isolate the port on the dead VLAN.
                self.port_dead(vif_port)
                port_needs_binding = False
        else:
            LOG.debug("No VIF port for port %s defined on agent.", port_id)
        return port_needs_binding
    def _setup_tunnel_port(self, br, port_name, remote_ip, tunnel_type):
        """Create one tunnel port on *br* and wire its flows.

        :param br: tunnel bridge to add the port on.
        :param port_name: name for the new tunnel port.
        :param remote_ip: remote tunnel endpoint address.
        :param tunnel_type: tunnel network type (e.g. vxlan/gre).
        :returns: the new ofport, or 0 when port creation failed.
        """
        ofport = br.add_tunnel_port(port_name,
                                    remote_ip,
                                    self.local_ip,
                                    tunnel_type,
                                    self.vxlan_udp_port,
                                    self.dont_fragment,
                                    self.tunnel_csum)
        if ofport == ovs_lib.INVALID_OFPORT:
            LOG.error(_LE("Failed to set-up %(type)s tunnel port to %(ip)s"),
                      {'type': tunnel_type, 'ip': remote_ip})
            return 0
        self.tun_br_ofports[tunnel_type][remote_ip] = ofport
        # Add flow in default table to resubmit to the right
        # tunnelling table (lvid will be set in the latter)
        br.setup_tunnel_port(tunnel_type, ofport)
        ofports = self.tun_br_ofports[tunnel_type].values()
        if ofports and not self.l2_pop:
            # Update flooding flows to include the new tunnel
            for vlan_mapping in list(self.local_vlan_map.values()):
                if vlan_mapping.network_type == tunnel_type:
                    br.install_flood_to_tun(vlan_mapping.vlan,
                                            vlan_mapping.segmentation_id,
                                            ofports)
        return ofport
def setup_tunnel_port(self, br, remote_ip, network_type):
remote_ip_hex = self.get_ip_in_hex(remote_ip)
if not remote_ip_hex:
return 0
port_name = '%s-%s' % (network_type, remote_ip_hex)
ofport = self._setup_tunnel_port(br,
port_name,
remote_ip,
network_type)
return ofport
    def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type):
        """Delete a tunnel port once no local VLAN references it anymore."""
        # Check if this tunnel port is still used
        for lvm in self.local_vlan_map.values():
            if tun_ofport in lvm.tun_ofports:
                break
        # If not, remove it
        else:
            # for/else: this branch runs only when no break fired above,
            # i.e. the ofport is unreferenced by every local VLAN mapping.
            items = list(self.tun_br_ofports[tunnel_type].items())
            for remote_ip, ofport in items:
                if ofport == tun_ofport:
                    port_name = '%s-%s' % (tunnel_type,
                                           self.get_ip_in_hex(remote_ip))
                    br.delete_port(port_name)
                    br.cleanup_tunnel_port(ofport)
                    self.tun_br_ofports[tunnel_type].pop(remote_ip, None)
    def treat_devices_added_or_updated(self, devices, ovs_restarted):
        """Process added/updated devices reported by the plugin.

        :returns: tuple of (skipped_devices, need_binding_devices,
            security_disabled_devices).
        :raises DeviceListRetrievalError: when the plugin reports failed
            devices, to force a resync.
        """
        skipped_devices = []
        need_binding_devices = []
        security_disabled_devices = []
        devices_details_list = (
            self.plugin_rpc.get_devices_details_list_and_failed_devices(
                self.context,
                devices,
                self.agent_id,
                self.conf.host))
        if devices_details_list.get('failed_devices'):
            #TODO(rossella_s) handle better the resync in next patches,
            # this is just to preserve the current behavior
            raise DeviceListRetrievalError(devices=devices)
        devices = devices_details_list.get('devices')
        vif_by_id = self.int_br.get_vifs_by_ids(
            [vif['device'] for vif in devices])
        for details in devices:
            device = details['device']
            LOG.debug("Processing port: %s", device)
            port = vif_by_id.get(device)
            if not port:
                # The port disappeared and cannot be processed
                LOG.info(_LI("Port %s was not found on the integration bridge "
                             "and will therefore not be processed"), device)
                skipped_devices.append(device)
                continue
            if 'port_id' in details:
                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
                         {'device': device, 'details': details})
                details['vif_port'] = port
                need_binding = self.treat_vif_port(port, details['port_id'],
                                                   details['network_id'],
                                                   details['network_type'],
                                                   details['physical_network'],
                                                   details['segmentation_id'],
                                                   details['admin_state_up'],
                                                   details['fixed_ips'],
                                                   details['device_owner'],
                                                   ovs_restarted)
                if need_binding:
                    need_binding_devices.append(details)
                # Ports without port security or security groups skip the
                # security-group filter setup later on.
                port_security = details['port_security_enabled']
                has_sgs = 'security_groups' in details
                if not port_security or not has_sgs:
                    security_disabled_devices.append(device)
                self._update_port_network(details['port_id'],
                                          details['network_id'])
                self.ext_manager.handle_port(self.context, details)
            else:
                LOG.warn(_LW("Device %s not defined on plugin"), device)
                if (port and port.ofport != -1):
                    self.port_dead(port)
        return skipped_devices, need_binding_devices, security_disabled_devices
    def _update_port_network(self, port_id, network_id):
        # Keep the per-network port index in sync: drop any stale entry
        # for this port, then record it under its (new) network.
        self._clean_network_ports(port_id)
        self.network_ports[network_id].add(port_id)
    def treat_ancillary_devices_added(self, devices):
        """Fetch details for new ancillary ports and mark them up.

        :raises DeviceListRetrievalError: when the plugin reports failed
            devices (either on retrieval or on the status update), to
            force a resync.
        """
        devices_details_list = (
            self.plugin_rpc.get_devices_details_list_and_failed_devices(
                self.context,
                devices,
                self.agent_id,
                self.conf.host))
        if devices_details_list.get('failed_devices'):
            #TODO(rossella_s) handle better the resync in next patches,
            # this is just to preserve the current behavior
            raise DeviceListRetrievalError(devices=devices)
        devices_added = [
            d['device'] for d in devices_details_list.get('devices')]
        LOG.info(_LI("Ancillary Ports %s added"), devices_added)
        # update plugin about port status
        devices_set_up = (
            self.plugin_rpc.update_device_list(self.context,
                                               devices_added,
                                               [],
                                               self.agent_id,
                                               self.conf.host))
        if devices_set_up.get('failed_devices_up'):
            #TODO(rossella_s) handle better the resync in next patches,
            # this is just to preserve the current behavior
            raise DeviceListRetrievalError()
    def treat_devices_removed(self, devices):
        """Unwire removed ports and report them down to the plugin.

        :returns: True when a resync is needed (some removals failed).
        """
        resync = False
        self.sg_agent.remove_devices_filter(devices)
        LOG.info(_LI("Ports %s removed"), devices)
        devices_down = self.plugin_rpc.update_device_list(self.context,
                                                          [],
                                                          devices,
                                                          self.agent_id,
                                                          self.conf.host)
        failed_devices = devices_down.get('failed_devices_down')
        if failed_devices:
            LOG.debug("Port removal failed for %(devices)s ", failed_devices)
            resync = True
        for device in devices:
            # Drop local VLAN bindings regardless of the RPC result.
            self.port_unbound(device)
        return resync
    def treat_ancillary_devices_removed(self, devices):
        """Report removed ancillary ports down to the plugin.

        :returns: True when a resync is needed (some removals failed).
        """
        resync = False
        LOG.info(_LI("Ancillary ports %s removed"), devices)
        devices_down = self.plugin_rpc.update_device_list(self.context,
                                                          [],
                                                          devices,
                                                          self.agent_id,
                                                          self.conf.host)
        failed_devices = devices_down.get('failed_devices_down')
        if failed_devices:
            LOG.debug("Port removal failed for %(devices)s ", failed_devices)
            resync = True
        for detail in devices_down.get('devices_down'):
            if detail['exists']:
                LOG.info(_LI("Port %s updated."), detail['device'])
                # Nothing to do regarding local networking
            else:
                LOG.debug("Device %s not defined on plugin", detail['device'])
        return resync
    def process_network_ports(self, port_info, ovs_restarted):
        """Secure, wire/unwire changed VIF ports and update their status.

        :param port_info: output of scan_ports().
        :param ovs_restarted: True when OVS restarted this iteration.
        :returns: True when a resync with the plugin is required.
        """
        resync_a = False
        resync_b = False
        # TODO(salv-orlando): consider a solution for ensuring notifications
        # are processed exactly in the same order in which they were
        # received. This is tricky because there are two notification
        # sources: the neutron server, and the ovs db monitor process
        # If there is an exception while processing security groups ports
        # will not be wired anyway, and a resync will be triggered
        # VIF wiring needs to be performed always for 'new' devices.
        # For updated ports, re-wiring is not needed in most cases, but needs
        # to be performed anyway when the admin state of a device is changed.
        # A device might be both in the 'added' and 'updated'
        # list at the same time; avoid processing it twice.
        devices_added_updated = (port_info.get('added', set()) |
                                 port_info.get('updated', set()))
        need_binding_devices = []
        security_disabled_ports = []
        if devices_added_updated:
            start = time.time()
            try:
                (skipped_devices, need_binding_devices,
                 security_disabled_ports) = (
                    self.treat_devices_added_or_updated(
                        devices_added_updated, ovs_restarted))
                LOG.debug("process_network_ports - iteration:%(iter_num)d - "
                          "treat_devices_added_or_updated completed. "
                          "Skipped %(num_skipped)d devices of "
                          "%(num_current)d devices currently available. "
                          "Time elapsed: %(elapsed).3f",
                          {'iter_num': self.iter_num,
                           'num_skipped': len(skipped_devices),
                           'num_current': len(port_info['current']),
                           'elapsed': time.time() - start})
                # Update the list of current ports storing only those which
                # have been actually processed.
                port_info['current'] = (port_info['current'] -
                                        set(skipped_devices))
            except DeviceListRetrievalError:
                # Need to resync as there was an error with server
                # communication.
                LOG.exception(_LE("process_network_ports - iteration:%d - "
                                  "failure while retrieving port details "
                                  "from server"), self.iter_num)
                resync_a = True
        # TODO(salv-orlando): Optimize avoiding applying filters
        # unnecessarily, (eg: when there are no IP address changes)
        added_ports = port_info.get('added', set())
        if security_disabled_ports:
            added_ports -= set(security_disabled_ports)
        self.sg_agent.setup_port_filters(added_ports,
                                         port_info.get('updated', set()))
        self._bind_devices(need_binding_devices)
        if 'removed' in port_info and port_info['removed']:
            start = time.time()
            resync_b = self.treat_devices_removed(port_info['removed'])
            LOG.debug("process_network_ports - iteration:%(iter_num)d - "
                      "treat_devices_removed completed in %(elapsed).3f",
                      {'iter_num': self.iter_num,
                       'elapsed': time.time() - start})
        # If one of the above operations fails => resync with plugin
        return (resync_a | resync_b)
    def process_ancillary_network_ports(self, port_info):
        """Process added/removed ports on the ancillary bridges.

        :param port_info: output of scan_ancillary_ports().
        :returns: True when a resync with the plugin is required.
        """
        resync_a = False
        resync_b = False
        if 'added' in port_info and port_info['added']:
            start = time.time()
            try:
                self.treat_ancillary_devices_added(port_info['added'])
                LOG.debug("process_ancillary_network_ports - iteration: "
                          "%(iter_num)d - treat_ancillary_devices_added "
                          "completed in %(elapsed).3f",
                          {'iter_num': self.iter_num,
                           'elapsed': time.time() - start})
            except DeviceListRetrievalError:
                # Need to resync as there was an error with server
                # communication.
                LOG.exception(_LE("process_ancillary_network_ports - "
                                  "iteration:%d - failure while retrieving "
                                  "port details from server"), self.iter_num)
                resync_a = True
        if 'removed' in port_info and port_info['removed']:
            start = time.time()
            resync_b = self.treat_ancillary_devices_removed(
                port_info['removed'])
            LOG.debug("process_ancillary_network_ports - iteration: "
                      "%(iter_num)d - treat_ancillary_devices_removed "
                      "completed in %(elapsed).3f",
                      {'iter_num': self.iter_num,
                       'elapsed': time.time() - start})
        # If one of the above operations fails => resync with plugin
        return (resync_a | resync_b)
    def get_ip_in_hex(self, ip_address):
        """Return an IPv4 address as 8 hex digits, or None when invalid."""
        try:
            return '%08x' % netaddr.IPAddress(ip_address, version=4)
        except Exception:
            LOG.warn(_LW("Invalid remote IP: %s"), ip_address)
            return
    def tunnel_sync(self):
        """Announce our tunnel endpoint to the plugin and plumb peer tunnels.

        :returns: True when the sync failed and should be retried;
            False on success.
        """
        try:
            for tunnel_type in self.tunnel_types:
                details = self.plugin_rpc.tunnel_sync(self.context,
                                                      self.local_ip,
                                                      tunnel_type,
                                                      self.conf.host)
                # With l2-pop, tunnel ports are created on demand from
                # fdb updates instead of here.
                if not self.l2_pop:
                    tunnels = details['tunnels']
                    for tunnel in tunnels:
                        if self.local_ip != tunnel['ip_address']:
                            remote_ip = tunnel['ip_address']
                            remote_ip_hex = self.get_ip_in_hex(remote_ip)
                            if not remote_ip_hex:
                                continue
                            tun_name = '%s-%s' % (tunnel_type, remote_ip_hex)
                            self._setup_tunnel_port(self.tun_br,
                                                    tun_name,
                                                    tunnel['ip_address'],
                                                    tunnel_type)
        except Exception as e:
            LOG.debug("Unable to sync tunnel IP %(local_ip)s: %(e)s",
                      {'local_ip': self.local_ip, 'e': e})
            return True
        return False
    def _agent_has_updates(self, polling_manager):
        """Return truthy when there is anything to process this iteration."""
        return (polling_manager.is_polling_required or
                self.updated_ports or
                self.deleted_ports or
                self.sg_agent.firewall_refresh_needed())
def _port_info_has_changes(self, port_info):
return (port_info.get('added') or
port_info.get('removed') or
port_info.get('updated'))
    def check_ovs_status(self):
        """Probe the canary flow to classify OVS as normal/restarted/dead.

        :returns: one of the constants.OVS_* status values.
        """
        # Check for the canary flow
        status = self.int_br.check_canary_table()
        if status == constants.OVS_RESTARTED:
            LOG.warn(_LW("OVS is restarted. OVSNeutronAgent will reset "
                         "bridges and recover ports."))
        elif status == constants.OVS_DEAD:
            LOG.warn(_LW("OVS is dead. OVSNeutronAgent will keep running "
                         "and checking OVS status periodically."))
        return status
    def loop_count_and_wait(self, start_time, port_stats):
        """Log this iteration's stats and sleep out the polling interval.

        :param start_time: time.time() captured when the iteration began.
        :param port_stats: output of get_port_stats(), for logging only.
        """
        # sleep till end of polling interval
        elapsed = time.time() - start_time
        LOG.debug("Agent rpc_loop - iteration:%(iter_num)d "
                  "completed. Processed ports statistics: "
                  "%(port_stats)s. Elapsed:%(elapsed).3f",
                  {'iter_num': self.iter_num,
                   'port_stats': port_stats,
                   'elapsed': elapsed})
        if elapsed < self.polling_interval:
            time.sleep(self.polling_interval - elapsed)
        else:
            LOG.debug("Loop iteration exceeded interval "
                      "(%(polling_interval)s vs. %(elapsed)s)!",
                      {'polling_interval': self.polling_interval,
                       'elapsed': elapsed})
        self.iter_num = self.iter_num + 1
def get_port_stats(self, port_info, ancillary_port_info):
port_stats = {
'regular': {
'added': len(port_info.get('added', [])),
'updated': len(port_info.get('updated', [])),
'removed': len(port_info.get('removed', []))}}
if self.ancillary_brs:
port_stats['ancillary'] = {
'added': len(ancillary_port_info.get('added', [])),
'removed': len(ancillary_port_info.get('removed', []))}
return port_stats
    def cleanup_stale_flows(self):
        """Ask the bridges to clean stale flows, on the first iteration only.

        NOTE(review): presumably removes flows left by a previous agent
        run — cleanup_flows() decides what is stale.
        """
        if self.iter_num == 0:
            bridges = [self.int_br]
            if self.enable_tunneling:
                bridges.append(self.tun_br)
            for bridge in bridges:
                LOG.info(_LI("Cleaning stale %s flows"), bridge.br_name)
                bridge.cleanup_flows()
    def rpc_loop(self, polling_manager=None):
        """Main loop: watch for port/OVS/tunnel changes and reconcile them.

        Iterates until _check_and_handle_signal() returns False (SIGTERM).
        :param polling_manager: optional polling manager; a non-minimizing
            one is created when not supplied.
        """
        if not polling_manager:
            polling_manager = polling.get_polling_manager(
                minimize_polling=False)
        sync = True
        ports = set()
        updated_ports_copy = set()
        ancillary_ports = set()
        tunnel_sync = True
        ovs_restarted = False
        consecutive_resyncs = 0
        while self._check_and_handle_signal():
            port_info = {}
            ancillary_port_info = {}
            start = time.time()
            LOG.debug("Agent rpc_loop - iteration:%d started",
                      self.iter_num)
            if sync:
                LOG.info(_LI("Agent out of sync with plugin!"))
                polling_manager.force_polling()
                consecutive_resyncs = consecutive_resyncs + 1
                # After too many consecutive failed resyncs, start over
                # from an empty port cache.
                if consecutive_resyncs >= constants.MAX_DEVICE_RETRIES:
                    LOG.warn(_LW("Clearing cache of registered ports, retrials"
                                 " to resync were > %s"),
                             constants.MAX_DEVICE_RETRIES)
                    ports.clear()
                    ancillary_ports.clear()
                    sync = False
                    consecutive_resyncs = 0
            else:
                consecutive_resyncs = 0
            ovs_status = self.check_ovs_status()
            if ovs_status == constants.OVS_RESTARTED:
                # Rebuild all bridges and flows from scratch.
                self.setup_integration_br()
                self.setup_physical_bridges(self.bridge_mappings)
                if self.enable_tunneling:
                    self.setup_tunnel_br()
                    self.setup_tunnel_br_flows()
                    tunnel_sync = True
                if self.enable_distributed_routing:
                    self.dvr_agent.reset_ovs_parameters(self.int_br,
                                                        self.tun_br,
                                                        self.patch_int_ofport,
                                                        self.patch_tun_ofport)
                    self.dvr_agent.reset_dvr_parameters()
                    self.dvr_agent.setup_dvr_flows()
            elif ovs_status == constants.OVS_DEAD:
                # Agent doesn't apply any operations when ovs is dead, to
                # prevent unexpected failure or crash. Sleep and continue
                # loop in which ovs status will be checked periodically.
                port_stats = self.get_port_stats({}, {})
                self.loop_count_and_wait(start, port_stats)
                continue
            # Notify the plugin of tunnel IP
            if self.enable_tunneling and tunnel_sync:
                LOG.info(_LI("Agent tunnel out of sync with plugin!"))
                try:
                    tunnel_sync = self.tunnel_sync()
                except Exception:
                    LOG.exception(_LE("Error while synchronizing tunnels"))
                    tunnel_sync = True
            ovs_restarted |= (ovs_status == constants.OVS_RESTARTED)
            if self._agent_has_updates(polling_manager) or ovs_restarted:
                try:
                    LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
                              "starting polling. Elapsed:%(elapsed).3f",
                              {'iter_num': self.iter_num,
                               'elapsed': time.time() - start})
                    # Save updated ports dict to perform rollback in
                    # case resync would be needed, and then clear
                    # self.updated_ports. As the greenthread should not yield
                    # between these two statements, this will be thread-safe
                    updated_ports_copy = self.updated_ports
                    self.updated_ports = set()
                    reg_ports = (set() if ovs_restarted else ports)
                    port_info = self.scan_ports(reg_ports, sync,
                                                updated_ports_copy)
                    self.process_deleted_ports(port_info)
                    ofport_changed_ports = self.update_stale_ofport_rules()
                    if ofport_changed_ports:
                        port_info.setdefault('updated', set()).update(
                            ofport_changed_ports)
                    LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
                              "port information retrieved. "
                              "Elapsed:%(elapsed).3f",
                              {'iter_num': self.iter_num,
                               'elapsed': time.time() - start})
                    # Treat ancillary devices if they exist
                    if self.ancillary_brs:
                        ancillary_port_info = self.scan_ancillary_ports(
                            ancillary_ports, sync)
                        LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
                                  "ancillary port info retrieved. "
                                  "Elapsed:%(elapsed).3f",
                                  {'iter_num': self.iter_num,
                                   'elapsed': time.time() - start})
                    sync = False
                    # Secure and wire/unwire VIFs and update their status
                    # on Neutron server
                    if (self._port_info_has_changes(port_info) or
                        self.sg_agent.firewall_refresh_needed() or
                        ovs_restarted):
                        LOG.debug("Starting to process devices in:%s",
                                  port_info)
                        # If treat devices fails - must resync with plugin
                        sync = self.process_network_ports(port_info,
                                                          ovs_restarted)
                        self.cleanup_stale_flows()
                        LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
                                  "ports processed. Elapsed:%(elapsed).3f",
                                  {'iter_num': self.iter_num,
                                   'elapsed': time.time() - start})
                    ports = port_info['current']
                    if self.ancillary_brs:
                        sync |= self.process_ancillary_network_ports(
                            ancillary_port_info)
                        LOG.debug("Agent rpc_loop - iteration: "
                                  "%(iter_num)d - ancillary ports "
                                  "processed. Elapsed:%(elapsed).3f",
                                  {'iter_num': self.iter_num,
                                   'elapsed': time.time() - start})
                        ancillary_ports = ancillary_port_info['current']
                    polling_manager.polling_completed()
                    # Keep this flag in the last line of "try" block,
                    # so we can sure that no other Exception occurred.
                    if not sync:
                        ovs_restarted = False
                except Exception:
                    LOG.exception(_LE("Error while processing VIF ports"))
                    # Put the ports back in self.updated_port
                    self.updated_ports |= updated_ports_copy
                    sync = True
            port_stats = self.get_port_stats(port_info, ancillary_port_info)
            self.loop_count_and_wait(start, port_stats)
    def daemon_loop(self):
        """Install signal handlers and run rpc_loop under a polling manager."""
        # Start everything.
        LOG.info(_LI("Agent initialized successfully, now running... "))
        signal.signal(signal.SIGTERM, self._handle_sigterm)
        if hasattr(signal, 'SIGHUP'):
            # SIGHUP does not exist on every platform.
            signal.signal(signal.SIGHUP, self._handle_sighup)
        with polling.get_polling_manager(
            self.minimize_polling,
            self.ovsdb_monitor_respawn_interval) as pm:
            self.rpc_loop(polling_manager=pm)
    def _handle_sigterm(self, signum, frame):
        # Only set a flag here; the actual shutdown is performed later by
        # _check_and_handle_signal() from the rpc_loop.
        self.catch_sigterm = True
        if self.quitting_rpc_timeout:
            # Shorten RPC timeouts so shutdown cannot hang on the server.
            self.set_rpc_timeout(self.quitting_rpc_timeout)
    def _handle_sighup(self, signum, frame):
        # Config reload is performed later by _check_and_handle_signal().
        self.catch_sighup = True
    def _check_and_handle_signal(self):
        """Act on the flags set by the signal handlers.

        :returns: False when the daemon loop should stop (after SIGTERM).
        """
        if self.catch_sigterm:
            LOG.info(_LI("Agent caught SIGTERM, quitting daemon loop."))
            self.run_daemon_loop = False
            self.catch_sigterm = False
        if self.catch_sighup:
            # SIGHUP means: reload configuration and logging in place.
            LOG.info(_LI("Agent caught SIGHUP, resetting."))
            self.conf.reload_config_files()
            config.setup_logging()
            LOG.debug('Full set of CONF:')
            self.conf.log_opt_values(LOG, logging.DEBUG)
            self.catch_sighup = False
        return self.run_daemon_loop
    def set_rpc_timeout(self, timeout):
        # Apply the new timeout to every RPC client this agent owns.
        for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc,
                        self.dvr_plugin_rpc, self.state_rpc):
            rpc_api.client.timeout = timeout
def _check_agent_configurations(self):
if (self.enable_distributed_routing and self.enable_tunneling
and not self.l2_pop):
raise ValueError(_("DVR deployments for VXLAN/GRE/Geneve "
"underlays require L2-pop to be enabled, "
"in both the Agent and Server side."))
def create_agent_config_map(config):
    """Create a map of agent config parameters.

    :param config: an instance of cfg.CONF
    :returns: a map of agent configuration parameters
    :raises ValueError: if bridge_mappings cannot be parsed or the
        tunneling options are inconsistent
    """
    try:
        bridge_mappings = n_utils.parse_mappings(config.OVS.bridge_mappings)
    except ValueError as e:
        raise ValueError(_("Parsing bridge_mappings failed: %s.") % e)
    kwargs = dict(
        integ_br=config.OVS.integration_bridge,
        tun_br=config.OVS.tunnel_bridge,
        local_ip=config.OVS.local_ip,
        bridge_mappings=bridge_mappings,
        polling_interval=config.AGENT.polling_interval,
        minimize_polling=config.AGENT.minimize_polling,
        tunnel_types=config.AGENT.tunnel_types,
        veth_mtu=config.AGENT.veth_mtu,
        enable_distributed_routing=config.AGENT.enable_distributed_routing,
        l2_population=config.AGENT.l2_population,
        arp_responder=config.AGENT.arp_responder,
        prevent_arp_spoofing=config.AGENT.prevent_arp_spoofing,
        use_veth_interconnection=config.OVS.use_veth_interconnection,
        quitting_rpc_timeout=config.AGENT.quitting_rpc_timeout
    )
    # Verify the tunnel_types specified are valid
    for tun in kwargs['tunnel_types']:
        if tun not in constants.TUNNEL_NETWORK_TYPES:
            # Interpolate with '%', not ',': the comma form built a
            # (message, tun) tuple instead of a formatted message.
            msg = _('Invalid tunnel type specified: %s') % tun
            raise ValueError(msg)
    # local_ip is only required when tunneling is actually enabled; this
    # mirrors validate_local_ip(), which also skips the check when no
    # tunnel_types are configured.
    if kwargs['tunnel_types'] and not kwargs['local_ip']:
        msg = _('Tunneling cannot be enabled without a valid local_ip.')
        raise ValueError(msg)
    return kwargs
def validate_local_ip(local_ip):
    """If tunneling is enabled, verify if the ip exists on the agent's host.

    :param local_ip: the configured local tunnel endpoint address.
    :raises SystemExit: when tunneling is enabled but the IP is not
        assigned to any local interface.
    """
    if not cfg.CONF.AGENT.tunnel_types:
        # Tunneling disabled; local_ip is unused.
        return
    if not ip_lib.IPWrapper().get_device_by_ip(local_ip):
        LOG.error(_LE("Tunneling can't be enabled with invalid local_ip '%s'."
                      " IP couldn't be found on this host's interfaces."),
                  local_ip)
        raise SystemExit(1)
def prepare_xen_compute():
    """Force ip commands through the root helper on XenServer compute hosts."""
    is_xen_compute_host = 'rootwrap-xen-dom0' in cfg.CONF.AGENT.root_helper
    if is_xen_compute_host:
        # Force ip_lib to always use the root helper to ensure that ip
        # commands target xen dom0 rather than domU.
        cfg.CONF.register_opts(ip_lib.OPTS)
        cfg.CONF.set_default('ip_lib_force_root', True)
def main(bridge_classes):
    """Entry point: build the agent from config and run its daemon loop.

    :param bridge_classes: bridge implementation classes handed through to
        OVSNeutronAgent (presumably br-int/br-phys/br-tun — confirm with
        the agent constructor).
    :raises SystemExit: on invalid configuration or agent setup failure.
    """
    try:
        agent_config = create_agent_config_map(cfg.CONF)
    except ValueError:
        LOG.exception(_LE("Agent failed to create agent config map"))
        raise SystemExit(1)
    prepare_xen_compute()
    validate_local_ip(agent_config['local_ip'])
    try:
        agent = OVSNeutronAgent(bridge_classes, **agent_config)
    except (RuntimeError, ValueError) as e:
        LOG.error(_LE("%s Agent terminated!"), e)
        sys.exit(1)
    agent.daemon_loop()
| apache-2.0 |
blaggacao/OpenUpgrade | addons/stock/partner.py | 375 | 1766 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_partner(osv.osv):
    """Extend ``res.partner`` with per-partner default stock locations."""
    _inherit = 'res.partner'
    _columns = {
        # Company-dependent (``fields.property``) many2one: destination
        # location for goods shipped to this partner.
        'property_stock_customer': fields.property(
          type='many2one',
          relation='stock.location',
          string="Customer Location",
          help="This stock location will be used, instead of the default one, as the destination location for goods you send to this partner"),
        # Company-dependent many2one: source location for goods received
        # from this partner.
        'property_stock_supplier': fields.property(
          type='many2one',
          relation='stock.location',
          string="Supplier Location",
          help="This stock location will be used, instead of the default one, as the source location for goods you receive from the current partner"),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nelsonsar/ansible | lib/ansible/plugins/action/fetch.py | 28 | 7549 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pwd
import random
import traceback
import tempfile
import base64
from ansible import constants as C
from ansible.errors import *
from ansible.plugins.action import ActionBase
from ansible.utils.boolean import boolean
from ansible.utils.hashing import checksum, checksum_s, md5, secure_hash
from ansible.utils.path import makedirs_safe
class ActionModule(ActionBase):
    """Action plugin implementing the ``fetch`` module: copy a file from
    the remote host to the controller, validating checksums."""

    def run(self, tmp=None, task_vars=None):
        ''' handler for fetch operations '''
        # The original default was the mutable ``task_vars=dict()``, which is
        # shared between every invocation; use the None-sentinel idiom instead
        # (backward compatible for all callers).
        if task_vars is None:
            task_vars = dict()

        if self._play_context.check_mode:
            return dict(skipped=True, msg='check mode not (yet) supported for this module')

        source = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        flat = boolean(self._task.args.get('flat'))
        fail_on_missing = boolean(self._task.args.get('fail_on_missing'))
        validate_checksum = boolean(self._task.args.get('validate_checksum', self._task.args.get('validate_md5')))
        if 'validate_md5' in self._task.args and 'validate_checksum' in self._task.args:
            return dict(failed=True, msg="validate_checksum and validate_md5 cannot both be specified")

        if source is None or dest is None:
            return dict(failed=True, msg="src and dest are required")

        source = self._connection._shell.join_path(source)
        source = self._remote_expand_user(source)

        # calculate checksum for the remote file; special values '0'-'4'
        # signal error conditions (see the handling further down)
        remote_checksum = self._remote_checksum(source, all_vars=task_vars)

        # use slurp if sudo and permissions are lacking
        remote_data = None
        if remote_checksum in ('1', '2') or self._play_context.become:
            slurpres = self._execute_module(module_name='slurp', module_args=dict(src=source), task_vars=task_vars, tmp=tmp)
            if slurpres.get('failed'):
                if remote_checksum == '1' and not fail_on_missing:
                    return dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False)
                return slurpres
            else:
                if slurpres['encoding'] == 'base64':
                    remote_data = base64.b64decode(slurpres['content'])
                if remote_data is not None:
                    remote_checksum = checksum_s(remote_data)
                # the source path may have been expanded on the
                # target system, so we compare it here and use the
                # expanded version if it's different
                remote_source = slurpres.get('source')
                if remote_source and remote_source != source:
                    source = remote_source

        # calculate the destination name
        if os.path.sep not in self._connection._shell.join_path('a', ''):
            # remote shell uses a different separator (e.g. Windows target)
            source = self._connection._shell._unquote(source)
            source_local = source.replace('\\', '/')
        else:
            source_local = source

        dest = os.path.expanduser(dest)
        if flat:
            if dest.endswith(os.sep):
                # if the path ends with "/", we'll use the source filename as the
                # destination filename
                base = os.path.basename(source_local)
                dest = os.path.join(dest, base)
            if not dest.startswith("/"):
                # if dest does not start with "/", we'll assume a relative path
                dest = self._loader.path_dwim(dest)
        else:
            # files are saved in dest dir, with a subdir for each host, then the filename
            if 'inventory_hostname' in task_vars:
                target_name = task_vars['inventory_hostname']
            else:
                target_name = self._play_context.remote_addr
            dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local)

        dest = dest.replace("//", "/")

        if remote_checksum in ('0', '1', '2', '3', '4'):
            # these don't fail because you may want to transfer a log file that possibly MAY exist
            # but keep going to fetch other log files
            if remote_checksum == '0':
                result = dict(msg="unable to calculate the checksum of the remote file", file=source, changed=False)
            elif remote_checksum == '1':
                if fail_on_missing:
                    result = dict(failed=True, msg="the remote file does not exist", file=source)
                else:
                    result = dict(msg="the remote file does not exist, not transferring, ignored", file=source, changed=False)
            elif remote_checksum == '2':
                result = dict(msg="no read permission on remote file, not transferring, ignored", file=source, changed=False)
            elif remote_checksum == '3':
                result = dict(msg="remote file is a directory, fetch cannot work on directories", file=source, changed=False)
            elif remote_checksum == '4':
                result = dict(msg="python isn't present on the system. Unable to compute checksum", file=source, changed=False)
            return result

        # calculate checksum for the local file
        local_checksum = checksum(dest)

        if remote_checksum != local_checksum:
            # create the containing directories, if needed
            makedirs_safe(os.path.dirname(dest))

            # fetch the file and check for changes
            if remote_data is None:
                self._connection.fetch_file(source, dest)
            else:
                try:
                    # context manager guarantees the fd is closed even if the
                    # write raises (the original leaked it in that case)
                    with open(dest, 'w') as f:
                        f.write(remote_data)
                except (IOError, OSError) as e:
                    raise AnsibleError("Failed to fetch the file: %s" % e)
            new_checksum = secure_hash(dest)
            # For backwards compatibility. We'll return None on FIPS enabled
            # systems
            try:
                new_md5 = md5(dest)
            except ValueError:
                new_md5 = None

            if validate_checksum and new_checksum != remote_checksum:
                return dict(failed=True, md5sum=new_md5, msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
            return dict(changed=True, md5sum=new_md5, dest=dest, remote_md5sum=None, checksum=new_checksum, remote_checksum=remote_checksum)
        else:
            # For backwards compatibility. We'll return None on FIPS enabled
            # systems
            try:
                local_md5 = md5(dest)
            except ValueError:
                local_md5 = None

            return dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)
| gpl-3.0 |
Wyliodrin/wyliodrin-server | tests/debugging/sim_board.py | 1 | 3409 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Board
"""
import getpass
import logging
import os
import signal
import sleekxmpp
import ssl
import sys
import threading
import time
from sleekxmpp import Message, Presence
from sleekxmpp.xmlstream import ElementBase
from sleekxmpp.xmlstream import register_stanza_plugin
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
# Python versions before 3.0 do not use UTF-8 encoding
# by default. To ensure that Unicode is handled properly
# throughout SleekXMPP, we will set the default encoding
# ourselves to UTF-8.
if sys.version_info < (3, 0):
from sleekxmpp.util.misc_ops import setdefaultencoding
setdefaultencoding('utf8')
else:
raw_input = input
# Hard-coded XMPP credentials of the simulated board account.
JID = "wyliodrin_board@wyliodrin.org"
PASS = "wyliodrin"
# Outgoing message stanza; created once in SimBoard.start() and then reused
# (and mutated) by the Listener thread.
MESSAGE = None
# Named pipes used to exchange gdb commands/results with the debugger process.
gdb_commands_pipe_name = "/tmp/gdb_commands"
gdb_results_pipe_name = "/tmp/gdb_results"
class W(ElementBase):
    """
    Custom stanza plugin carrying an opaque payload:

    <w xmlns="wyliodrin" d="<msgpack_data>"/>
    """
    # element name and namespace as they appear on the wire
    name = 'w'
    namespace = 'wyliodrin'
    # attribute under which the plugin is reachable on a Message stanza
    plugin_attrib = 'w'
    # single interface 'd' holding the payload
    interfaces = set(('d',))
class SimBoard(sleekxmpp.ClientXMPP):
    """XMPP client simulating a board: incoming <w/> payloads are written
    to *pipeout* (the gdb commands pipe)."""

    def __init__(self, jid, password, pipeout):
        sleekxmpp.ClientXMPP.__init__(self, jid, password)

        # writable pipe the gdb process reads commands from
        self.pipeout = pipeout

        self.add_event_handler("session_start", self.start, threaded=False)
        # route message stanzas that contain a <w/> child to our handler
        self.register_handler(
          Callback('Some custom message',
            StanzaPath('message/w'),
            self._handle_action))
        # actual processing happens on a worker thread
        self.add_event_handler('custom_action',
                               self._handle_action_event,
                               threaded=True)
        register_stanza_plugin(Message, W)

    def start(self, event):
        """On session start: advertise presence priority and prepare the
        reusable outgoing MESSAGE stanza."""
        global MESSAGE

        # Send priority
        prio = self.Presence()
        prio['lang'] = None
        prio['to'] = None
        prio['priority'] = '50'
        prio.send()

        # Save message (reused by the Listener thread for every reply)
        MESSAGE = self.Message()
        MESSAGE['lang'] = None
        MESSAGE['to'] = "wyliodrin_test@wyliodrin.org"

    def _handle_action(self, msg):
        # re-dispatch so the heavy work runs on the threaded handler
        self.event('custom_action', msg)

    def _handle_action_event(self, msg):
        # forward the payload to gdb and flush so it is seen immediately
        self.pipeout.write(msg['w']['d'])
        self.pipeout.flush()
class Listener(threading.Thread):
    """Background thread relaying everything read from the gdb results
    pipe as XMPP messages (via the module-level MESSAGE stanza)."""

    def __init__(self, pipein):
        threading.Thread.__init__(self)
        self.pipein = pipein

    def run(self):
        global MESSAGE
        fd = self.pipein.fileno()
        while True:
            # Block until gdb produces output, then relay it verbatim.
            payload = os.read(fd, 3 * 1024).decode("utf-8")
            MESSAGE['w']['d'] = payload
            MESSAGE.send()
if __name__ == '__main__':
    # Create the commands and results pipes
    if not os.path.exists(gdb_commands_pipe_name):
        os.mkfifo(gdb_commands_pipe_name)
    if not os.path.exists(gdb_results_pipe_name):
        os.mkfifo(gdb_results_pipe_name)

    # Open pipes.  NOTE(review): opening a FIFO for writing normally blocks
    # until a reader attaches — presumably the gdb side opens its ends
    # first; confirm startup ordering.
    gdb_commands_pipe_fd = open(gdb_commands_pipe_name, 'w')
    gdb_results_pipe_fd = open(gdb_results_pipe_name, 'r')

    listener = Listener(gdb_results_pipe_fd)
    listener.start()

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG,
                        format='%(levelname)-8s %(message)s')

    xmpp = SimBoard(JID, PASS, gdb_commands_pipe_fd)
    xmpp.register_plugin('xep_0030') # Service Discovery
    xmpp.register_plugin('xep_0199') # XMPP Ping
    # NOTE(review): SSLv3 is insecure (POODLE) and removed from modern
    # OpenSSL builds — consider negotiating TLS instead; confirm what the
    # server supports.
    xmpp.ssl_version = ssl.PROTOCOL_SSLv3
    xmpp.auto_authorize = True
    xmpp.auto_subscribe = True

    # Connect to the XMPP server and start processing XMPP stanzas.
    if xmpp.connect():
        xmpp.process(block=True)
        print("Done")
    else:
        print("Unable to connect.")
| lgpl-3.0 |
350dotorg/Django | django/contrib/gis/tests/geoapp/test_regress.py | 258 | 1500 | import os, unittest
from django.contrib.gis.tests.utils import no_mysql, no_oracle, no_postgis, no_spatialite
from django.contrib.gis.shortcuts import render_to_kmz
from models import City
class GeoRegressionTests(unittest.TestCase):
    """Regression tests for GeoDjango tickets; each method exercises a live
    GIS-enabled database through the ``City`` model (Python 2 era code)."""

    def test01_update(self):
        "Testing GeoQuerySet.update(), see #10411."
        pnt = City.objects.get(name='Pueblo').point
        bak = pnt.clone()
        # move the point slightly, persist it, and verify the round-trip
        pnt.y += 0.005
        pnt.x += 0.005

        City.objects.filter(name='Pueblo').update(point=pnt)
        self.assertEqual(pnt, City.objects.get(name='Pueblo').point)
        # restore the original geometry so other tests see unchanged data
        City.objects.filter(name='Pueblo').update(point=bak)
        self.assertEqual(bak, City.objects.get(name='Pueblo').point)

    def test02_kmz(self):
        "Testing `render_to_kmz` with non-ASCII data, see #11624."
        # Python 2: byte string decoded to unicode (u'Åland Islands')
        name = '\xc3\x85land Islands'.decode('iso-8859-1')
        places = [{'name' : name,
                   'description' : name,
                   'kml' : '<Point><coordinates>5.0,23.0</coordinates></Point>'
                   }]
        # only checks that rendering does not raise on non-ASCII input
        kmz = render_to_kmz('gis/kml/placemarks.kml', {'places' : places})

    @no_spatialite
    @no_mysql
    def test03_extent(self):
        "Testing `extent` on a table with a single point, see #11827."
        pnt = City.objects.get(name='Pueblo').point
        # extent of a single point degenerates to (x, y, x, y)
        ref_ext = (pnt.x, pnt.y, pnt.x, pnt.y)
        extent = City.objects.filter(name='Pueblo').extent()
        for ref_val, val in zip(ref_ext, extent):
            self.assertAlmostEqual(ref_val, val, 4)
| bsd-3-clause |
JimCircadian/ansible | lib/ansible/modules/storage/purestorage/purefa_pgsnap.py | 26 | 4976 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_pgsnap
version_added: '2.6'
short_description: Manage protection group snapshots on Pure Storage FlashArrays
description:
- Create or delete protection group snapshots on Pure Storage FlashArray.
author:
- Simon Dodsley (@sdodsley)
options:
name:
description:
- The name of the source protection group.
required: true
suffix:
description:
- Suffix of snapshot name.
state:
description:
- Define whether the protection group snapshot should exist or not.
choices: [ absent, present ]
default: present
eradicate:
description:
- Define whether to eradicate the snapshot on delete or leave in trash.
type: bool
default: 'no'
extends_documentation_fragment:
- purestorage.fa
'''
EXAMPLES = r'''
- name: Create protection group snapshot foo.ansible
purefa_pgsnap:
name: foo
suffix: ansible
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: present
- name: Delete and eradicate protection group snapshot named foo.snap
purefa_pgsnap:
name: foo
suffix: snap
eradicate: true
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: absent
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
from datetime import datetime
try:
from purestorage import purestorage
HAS_PURESTORAGE = True
except ImportError:
HAS_PURESTORAGE = False
def get_pgroup(module, array):
    """Return the protection group named in ``module.params['name']``,
    or None if the array reports an error (e.g. group does not exist)."""
    try:
        return array.get_pgroup(module.params['name'])
    except Exception:
        # narrowed from a bare ``except:`` which would also swallow
        # SystemExit/KeyboardInterrupt
        return None
def get_pgsnapshot(module, array):
    """Return the snapshot name ``<name>.<suffix>`` if such a snapshot
    exists on the array, else None."""
    try:
        snapname = module.params['name'] + "." + module.params['suffix']
        for snap in array.get_pgroup(module.params['name'], snap='true'):
            if snap['name'] == snapname:
                return snapname
    except Exception:
        # narrowed from a bare ``except:``; API errors mean "not found"
        return None
    # explicit (the original fell off the end of the function)
    return None
def create_pgsnapshot(module, array):
    """Create a protection-group snapshot and exit the module.

    Fixes two defects in the original:
    * success set ``changed = False``, so a created snapshot was reported
      as "no change";
    * in check mode ``changed`` was never assigned, so ``exit_json``
      raised UnboundLocalError.
    """
    changed = False
    if not module.check_mode:
        try:
            array.create_pgroup_snapshot(source=module.params['name'],
                                         suffix=module.params['suffix'],
                                         snap=True,
                                         apply_retention=True)
            changed = True
        except Exception:
            changed = False
    module.exit_json(changed=changed)
def update_pgsnapshot(module, array):
    """No-op update: an existing snapshot is never modified, so always
    report that nothing changed."""
    module.exit_json(changed=False)
def delete_pgsnapshot(module, array):
    """Destroy (and optionally eradicate) a protection-group snapshot,
    then exit the module.

    Fixes the original's ``chaned = True`` typo, which left ``changed``
    unbound and crashed ``exit_json`` with UnboundLocalError whenever
    ``eradicate`` was false; also initialises ``changed`` so check mode
    does not crash, and narrows the bare ``except:`` clauses.
    """
    changed = False
    if not module.check_mode:
        snapname = module.params['name'] + "." + module.params['suffix']
        try:
            array.destroy_pgroup(snapname)
            changed = True
            if module.params['eradicate']:
                try:
                    array.eradicate_pgroup(snapname)
                    changed = True
                except Exception:
                    changed = False
        except Exception:
            changed = False
    module.exit_json(changed=changed)
def main():
    """Module entry point: parse arguments, default the suffix to a
    timestamp-based name, and dispatch on state/pgroup/snapshot presence."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        suffix=dict(type='str'),
        eradicate=dict(type='bool', default=False),
        state=dict(type='str', default='present', choices=['absent', 'present']),
    ))

    module = AnsibleModule(argument_spec,
                           supports_check_mode=True)

    if not HAS_PURESTORAGE:
        module.fail_json(msg='purestorage sdk is required for this module in volume')

    if module.params['suffix'] is None:
        # default suffix: "snap-<unix epoch seconds>" with dots stripped
        suffix = "snap-" + str((datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds())
        module.params['suffix'] = suffix.replace(".", "")

    state = module.params['state']
    array = get_system(module)
    pgroup = get_pgroup(module, array)
    pgsnap = get_pgsnapshot(module, array)

    if state == 'present' and pgroup and not pgsnap:
        create_pgsnapshot(module, array)
    elif state == 'present' and pgroup and pgsnap:
        update_pgsnapshot(module, array)
    elif state == 'present' and not pgroup:
        # NOTE(review): state=present with a missing protection group calls
        # the no-op update (exits changed=False); presumably this should be
        # a failure instead — confirm intended behaviour.
        update_pgsnapshot(module, array)
    elif state == 'absent' and pgsnap:
        delete_pgsnapshot(module, array)
    elif state == 'absent' and not pgsnap:
        module.exit_json(changed=False)


if __name__ == '__main__':
    main()
| gpl-3.0 |
bratsche/Neutron-Drive | google_appengine/lib/django_1_2/django/contrib/localflavor/ro/forms.py | 273 | 6640 | # -*- coding: utf-8 -*-
"""
Romanian specific form helpers.
"""
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError, Field, RegexField, Select
from django.utils.translation import ugettext_lazy as _
class ROCIFField(RegexField):
    """
    A Romanian fiscal identity code (CIF) field

    For CIF validation algorithm see http://www.validari.ro/cui.html
    """
    default_error_messages = {
        'invalid': _("Enter a valid CIF."),
    }

    def __init__(self, *args, **kwargs):
        super(ROCIFField, self).__init__(r'^(RO)?[0-9]{2,10}', max_length=10,
            min_length=2, *args, **kwargs)

    def clean(self, value):
        """
        CIF validation: strips the optional 'RO' prefix and verifies the
        control digit (first digit of the reversed value).
        """
        value = super(ROCIFField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # strip RO part
        if value[0:2] == 'RO':
            value = value[2:]
        key = '753217532'[::-1]
        value = value[::-1]
        key_iter = iter(key)
        checksum = 0
        for digit in value[1:]:
            # next() builtin instead of iterator.next(): identical on
            # Python 2.6+ and also valid on Python 3
            checksum += int(digit) * int(next(key_iter))
        checksum = checksum * 10 % 11
        if checksum == 10:
            checksum = 0
        if checksum != int(value[0]):
            raise ValidationError(self.error_messages['invalid'])
        return value[::-1]
class ROCNPField(RegexField):
    """
    A Romanian personal identity code (CNP) field

    For CNP validation algorithm see http://www.validari.ro/cnp.html
    """
    default_error_messages = {
        'invalid': _("Enter a valid CNP."),
    }

    def __init__(self, *args, **kwargs):
        super(ROCNPField, self).__init__(r'^[1-9][0-9]{12}', max_length=13,
            min_length=13, *args, **kwargs)

    def clean(self, value):
        """
        CNP validations: embedded birthdate must be a real date and the
        control digit (position 13) must match the weighted checksum.
        """
        value = super(ROCNPField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # check birthdate digits
        import datetime
        try:
            datetime.date(int(value[1:3]), int(value[3:5]), int(value[5:7]))
        except ValueError:
            # narrowed from a bare ``except:``; an impossible date raises
            # ValueError
            raise ValidationError(self.error_messages['invalid'])
        # checksum
        key = '279146358279'
        checksum = 0
        value_iter = iter(value)
        for digit in key:
            # next() builtin: works on both Python 2.6+ and Python 3
            checksum += int(digit) * int(next(value_iter))
        checksum %= 11
        if checksum == 10:
            checksum = 1
        if checksum != int(value[12]):
            raise ValidationError(self.error_messages['invalid'])
        return value
class ROCountyField(Field):
    """
    A form field that validates its input is a Romanian county name or
    abbreviation. It normalizes the input to the standard vehicle registration
    abbreviation for the given county

    WARNING: This field will only accept names written with diacritics; consider
    using ROCountySelect if this behavior is unnaceptable for you

    Example:
        Argeş => valid
        Arges => invalid
    """
    default_error_messages = {
        'invalid': u'Enter a Romanian county code or name.',
    }

    def clean(self, value):
        from ro_counties import COUNTIES_CHOICES
        super(ROCountyField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        try:
            value = value.strip().upper()
        except AttributeError:
            pass
        # pass 1: the input already matches a choice entry (county code)
        for entry in COUNTIES_CHOICES:
            if value in entry:
                return value
        # pass 2: the input is a full county name; normalize it to its code
        for code, county_name in COUNTIES_CHOICES:
            if county_name.upper() == value:
                return code
        raise ValidationError(self.error_messages['invalid'])
class ROCountySelect(Select):
    """
    A Select widget that uses a list of Romanian counties (judete) as its
    choices.
    """
    def __init__(self, attrs=None):
        # imported lazily so the counties table is only loaded when the
        # widget is actually instantiated
        from ro_counties import COUNTIES_CHOICES
        super(ROCountySelect, self).__init__(attrs, choices=COUNTIES_CHOICES)
class ROIBANField(RegexField):
    """
    Romanian International Bank Account Number (IBAN) field

    For Romanian IBAN validation algorithm see http://validari.ro/iban.html
    """
    default_error_messages = {
        'invalid': _('Enter a valid IBAN in ROXX-XXXX-XXXX-XXXX-XXXX-XXXX format'),
    }

    def __init__(self, *args, **kwargs):
        super(ROIBANField, self).__init__(r'^[0-9A-Za-z\-\s]{24,40}$',
            max_length=40, min_length=24, *args, **kwargs)

    def clean(self, value):
        """
        Strips - and spaces, performs country code and checksum validation
        """
        value = super(ROIBANField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        value = value.replace('-', '').replace(' ', '').upper()
        if value[0:2] != 'RO':
            raise ValidationError(self.error_messages['invalid'])
        # move the country code + check digits to the end, then convert
        # letters to numbers (A=10 ... Z=35) per ISO 13616
        rearranged = value[4:] + value[0:4]
        numeric_format = ''.join(
            str(ord(char) - 55) if char.isalpha() else char
            for char in rearranged)
        # a valid IBAN leaves remainder 1 modulo 97
        if int(numeric_format) % 97 != 1:
            raise ValidationError(self.error_messages['invalid'])
        return value
class ROPhoneNumberField(RegexField):
    """Romanian phone number field"""
    default_error_messages = {
        'invalid': _('Phone numbers must be in XXXX-XXXXXX format.'),
    }

    def __init__(self, *args, **kwargs):
        super(ROPhoneNumberField, self).__init__(r'^[0-9\-\(\)\s]{10,20}$',
            max_length=20, min_length=10, *args, **kwargs)

    def clean(self, value):
        """
        Strips -, (, ) and spaces. Checks the final length.
        """
        value = super(ROPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # drop all formatting characters, leaving bare digits
        for separator in ('-', '(', ')', ' '):
            value = value.replace(separator, '')
        if len(value) != 10:
            raise ValidationError(self.error_messages['invalid'])
        return value
class ROPostalCodeField(RegexField):
    """Romanian postal code field."""
    default_error_messages = {
        'invalid': _('Enter a valid postal code in the format XXXXXX'),
    }

    def __init__(self, *args, **kwargs):
        # exactly six digits; the regex additionally restricts the second
        # digit to the range 0-8
        super(ROPostalCodeField, self).__init__(r'^[0-9][0-8][0-9]{4}$',
            max_length=6, min_length=6, *args, **kwargs)
| bsd-3-clause |
KirillMysnik/ArcJail | srcds/addons/source-python/plugins/arcjail/modules/games/base_classes/map_game_team_based.py | 1 | 16195 | # This file is part of ArcJail.
#
# ArcJail is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ArcJail is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ArcJail. If not, see <http://www.gnu.org/licenses/>.
from random import shuffle
from core import GAME_NAME
from ....internal_events import InternalEvent
from ....resource.strings import COLOR_SCHEME
from ...jail_map import teleport_player, get_games
from ...player_colors import cancel_color_request, make_color_request
from ...players import broadcast, player_manager, tell
from ...rebels import get_rebels
from ...skins import model_player_manager
from .. import (
config_manager, game_event_handler, game_internal_event_handler,
helper_set_winner, helper_set_loser, MIN_PLAYERS_IN_GAME, stage,
strings_module)
from .map_game import MapGame
COLOR_PRIORITY = 2
SKIN_PRIORITY = 2
TEAM_NUM_MIN = 2
TEAM_NUM_MAX = 4
class MapGameTeamBased(MapGame):
class PlayerTeam(list):
def __init__(self, team_num, *args, **kwargs):
super().__init__(*args, **kwargs)
self.team_num = team_num
@property
def team_num_str(self):
return 'team{}'.format(self.team_num)
_caption = '$games_base title_mapgame_teambased'
num_teams = 2
stage_groups = {
'mapgame-prepare': [
"mapgame-cancel-falldmg-protection",
"mapgame-equip-noblock",
"mapgame-teambased-split-teams",
"mapgame-teleport-players",
"mapgame-fire-mapdata-prepare-outputs",
"mapgame-prepare-entry",
],
'mapgame-teambased-split-teams': [
"mapgame-teambased-split-teams",
],
'mapgame-teambased-split-teams2': [
"mapgame-teambased-split-teams2",
],
'mapgame-teambased-split-teams3': [
"mapgame-teambased-split-teams3",
],
'mapgame-teambased-split-teams4': [
"mapgame-teambased-split-teams4",
],
'mapgame-teleport-players2': ["mapgame-teleport-players2", ],
'mapgame-teleport-players3': ["mapgame-teleport-players3", ],
'mapgame-teleport-players4': ["mapgame-teleport-players4", ],
'game-end-draw': ['game-end-draw', ],
'game-end-win-team1': ['game-end-win-team1', ],
'game-end-win-team2': ['game-end-win-team2', ],
'game-end-win-team3': ['game-end-win-team3', ],
'game-end-win-team4': ['game-end-win-team4', ],
}
def __init__(self, leader_player, players, **kwargs):
super().__init__(leader_player, players, **kwargs)
assert TEAM_NUM_MIN <= self.num_teams <= TEAM_NUM_MAX
self._starting_player_number = len(players)
self._team1 = self.PlayerTeam(1)
self._team2 = self.PlayerTeam(2)
self._team3 = self.PlayerTeam(3)
self._team4 = self.PlayerTeam(4)
@property
def team1(self):
return tuple(self._team1)
@property
def team2(self):
return tuple(self._team2)
@property
def team3(self):
return tuple(self._team3)
@property
def team4(self):
return tuple(self._team4)
def get_player_team(self, player):
if player in self._team1:
return self._team1
if player in self._team2:
return self._team2
if player in self._team3:
return self._team3
if player in self._team4:
return self._team4
raise IndexError("Player does not belong to this game")
@stage('mapgame-teleport-players')
def stage_mapgame_teleport_players(self):
self.insert_stage_group(
"mapgame-teleport-players{}".format(self.num_teams))
@stage('mapgame-teleport-players2')
def stage_mapgame_teleport_players2(self):
spawnpoints = list(self.map_data.get_spawnpoints('team1'))
shuffle(spawnpoints)
for player in self._team1:
teleport_player(player, spawnpoints.pop())
spawnpoints = list(self.map_data.get_spawnpoints('team2'))
shuffle(spawnpoints)
for player in self._team2:
teleport_player(player, spawnpoints.pop())
teleport_player(self.leader, self.map_data.get_spawnpoints('team0')[0])
@stage('mapgame-teleport-players3')
def stage_mapgame_teleport_players3(self):
spawnpoints = list(self.map_data.get_spawnpoints('team1'))
shuffle(spawnpoints)
for player in self._team1:
teleport_player(player, spawnpoints.pop())
spawnpoints = list(self.map_data.get_spawnpoints('team2'))
shuffle(spawnpoints)
for player in self._team2:
teleport_player(player, spawnpoints.pop())
spawnpoints = list(self.map_data.get_spawnpoints('team3'))
shuffle(spawnpoints)
for player in self._team3:
teleport_player(player, spawnpoints.pop())
teleport_player(self.leader, self.map_data.get_spawnpoints('team0')[0])
@stage('mapgame-teleport-players4')
def stage_mapgame_teleport_players4(self):
spawnpoints = list(self.map_data.get_spawnpoints('team1'))
shuffle(spawnpoints)
for player in self._team1:
teleport_player(player, spawnpoints.pop())
spawnpoints = list(self.map_data.get_spawnpoints('team2'))
shuffle(spawnpoints)
for player in self._team2:
teleport_player(player, spawnpoints.pop())
spawnpoints = list(self.map_data.get_spawnpoints('team3'))
shuffle(spawnpoints)
for player in self._team3:
teleport_player(player, spawnpoints.pop())
spawnpoints = list(self.map_data.get_spawnpoints('team4'))
shuffle(spawnpoints)
for player in self._team4:
teleport_player(player, spawnpoints.pop())
teleport_player(self.leader, self.map_data.get_spawnpoints('team0')[0])
@stage('mapgame-teambased-split-teams')
def stage_mapgame_teambased_split_teams(self):
self.insert_stage_group(
"mapgame-teambased-split-teams{}".format(self.num_teams))
@stage('undo-mapgame-teambased-split-teams')
def stage_undo_mapgame_teambased_split_teams(self):
for player in self._players:
model_player_manager[player.index].cancel_request(
'games-teambased')
cancel_color_request(player, 'games-teambased')
@stage('mapgame-teambased-split-teams2')
def stage_mapgame_teambased_split_teams_2(self):
self._team1 = self.PlayerTeam(1)
self._team2 = self.PlayerTeam(2)
players = self._players[:]
shuffle(players)
if GAME_NAME in ("csgo",):
broadcast(strings_module['players_two_teams'].tokenize(
color1=COLOR_SCHEME['color_highlight'],
color2=COLOR_SCHEME['color_highlight'],
team1=strings_module['team1'],
team2=strings_module['team2'],
))
else:
broadcast(strings_module['players_two_teams'].tokenize(
color1=config_manager['team1_color'],
color2=config_manager['team2_color'],
team1=strings_module['team1'],
team2=strings_module['team2'],
))
while players:
p1, p2 = players.pop(), players.pop()
if GAME_NAME in ("csgo",):
tell(p1, strings_module['your_team'].tokenize(
color=COLOR_SCHEME['color_highlight'],
team=strings_module['team1']
))
tell(p2, strings_module['your_team'].tokenize(
color=COLOR_SCHEME['color_highlight'],
team=strings_module['team2']
))
else:
tell(p1, strings_module['your_team'].tokenize(
color=config_manager['team1_color'],
team=strings_module['team1']
))
tell(p2, strings_module['your_team'].tokenize(
color=config_manager['team2_color'],
team=strings_module['team2']
))
if config_manager['prefer_model_over_color']:
model_player_manager[p1.index].make_request(
'games-teambased', SKIN_PRIORITY, "alpha")
model_player_manager[p2.index].make_request(
'games-teambased', SKIN_PRIORITY, "bravo")
else:
make_color_request(
p1, COLOR_PRIORITY, 'games-teambased',
config_manager['team1_color']
)
make_color_request(
p2, COLOR_PRIORITY, 'games-teambased',
config_manager['team2_color']
)
self._team1.append(p1)
self._team2.append(p2)
@stage('mapgame-teambased-split-teams3')
def stage_mapgame_teambased_split_teams_3(self):
raise NotImplementedError
@stage('mapgame-teambased-split-teams4')
def stage_mapgame_teambased_split_teams_4(self):
raise NotImplementedError
@stage('game-end-draw')
def stage_game_end_draw(self):
broadcast(strings_module['draw'])
self.set_stage_group('destroy')
@stage('game-end-win-team1')
def stage_game_end_win_team1(self):
InternalEvent.fire(
'jail_game_map_game_team_based_winners',
winners=self._team1,
num_teams=self.num_teams,
starting_player_number=self._starting_player_number,
team_num=1,
)
if GAME_NAME in ("csgo",):
broadcast(strings_module['win_team'].tokenize(
color=COLOR_SCHEME['color_highlight'],
team=strings_module['team1']
))
else:
broadcast(strings_module['win_team'].tokenize(
color=config_manager['team1_color'],
team=strings_module['team1']
))
for player in self._team1:
helper_set_winner(player)
for player in (self._team2 + self._team3 + self._team4):
helper_set_loser(player)
self.set_stage_group('destroy')
@stage('game-end-win-team2')
def stage_game_end_win_team2(self):
InternalEvent.fire(
'jail_game_map_game_team_based_winners',
winners=self._team2,
num_teams=self.num_teams,
starting_player_number=self._starting_player_number,
team_num=2,
)
if GAME_NAME in ("csgo",):
broadcast(strings_module['win_team'].tokenize(
color=COLOR_SCHEME['color_highlight'],
team=strings_module['team2']
))
else:
broadcast(strings_module['win_team'].tokenize(
color=config_manager['team2_color'],
team=strings_module['team2']
))
for player in self._team2:
helper_set_winner(player)
for player in (self._team1 + self._team3 + self._team4):
helper_set_loser(player)
self.set_stage_group('destroy')
@stage('game-end-win-team3')
def stage_game_end_win_team3(self):
InternalEvent.fire(
'jail_game_map_game_team_based_winners',
winners=self._team3,
num_teams=self.num_teams,
starting_player_number=self._starting_player_number,
team_num=3,
)
if GAME_NAME in ("csgo",):
broadcast(strings_module['win_team'].tokenize(
color=COLOR_SCHEME['color_highlight'],
team=strings_module['team3']
))
else:
broadcast(strings_module['win_team'].tokenize(
color=config_manager['team3_color'],
team=strings_module['team3']
))
for player in self._team3:
helper_set_winner(player)
for player in (self._team1 + self._team2 + self._team4):
helper_set_loser(player)
self.set_stage_group('destroy')
@stage('game-end-win-team4')
def stage_game_end_win_team4(self):
InternalEvent.fire(
'jail_game_map_game_team_based_winners',
winners=self._team4,
num_teams=self.num_teams,
starting_player_number=self._starting_player_number,
team_num=4,
)
if GAME_NAME in ("csgo",):
broadcast(strings_module['win_team'].tokenize(
color=COLOR_SCHEME['color_highlight'],
team=strings_module['team4']
))
else:
broadcast(strings_module['win_team'].tokenize(
color=config_manager['team4_color'],
team=strings_module['team4']
))
for player in self._team4:
helper_set_winner(player)
for player in (self._team1 + self._team2 + self._team3):
helper_set_loser(player)
self.set_stage_group('destroy')
@game_event_handler('jailgame-player-death', 'player_death')
def event_jailgame_player_death(self, game_event):
player = player_manager.get_by_userid(game_event['userid'])
if self.leader == player:
self.set_stage_group('abort-leader-dead')
elif player in self._players:
self._players.remove(player)
self.get_player_team(player).remove(player)
if not all((self._team1,
self._team2,
self._team3,
self._team4)[:self.num_teams]):
self.set_stage_group('abort-not-enough-players')
elif len(self._players) < MIN_PLAYERS_IN_GAME:
self.set_stage_group('abort-not-enough-players')
@game_internal_event_handler(
    'jailgame-main-player-deleted', 'player_deleted')
def event_jailgame_player_deleted(self, player):
    """Handle a player disconnecting mid-game.

    If the leader left, abort the whole game; otherwise remove the
    player from the game and from whichever team list holds them, and
    abort when any active team is left empty or the overall player
    count drops below the minimum.
    """
    if self.leader == player:
        self.set_stage_group('abort-leader-disconnect')
    elif player in self._players:
        self._players.remove(player)
        for team_list in (self._team1,
                          self._team2,
                          self._team3,
                          self._team4):
            if player in team_list:
                team_list.remove(player)
        # Check only the teams actually used by this game mode (as the
        # player-death handler does).  Checking all four unconditionally
        # would always abort 2- and 3-team games, since the unused team
        # lists are permanently empty there.
        if not all((self._team1,
                    self._team2,
                    self._team3,
                    self._team4)[:self.num_teams]):
            self.set_stage_group('abort-not-enough-players')
        elif len(self._players) < MIN_PLAYERS_IN_GAME:
            self.set_stage_group('abort-not-enough-players')
@classmethod
def get_available_launchers(cls, leader_player, players):
    """Return a launcher for every map that can host this game.

    Yields nothing when rebels are present, when there are too few
    players, or when the player count does not divide evenly into
    ``cls.num_teams``.  A map qualifies when it has 'team0' spawn
    points, enough spawn points for each active team, and its
    MIN/MAX player bounds admit the current player count
    (MAX_PLAYERS == -1 means unbounded).
    """
    if get_rebels():
        return ()

    player_count = len(players)
    if player_count < config_manager['min_players_number']:
        return ()
    if player_count % cls.num_teams:
        return ()

    per_team = player_count // cls.num_teams
    team_names = ['team1', 'team2', 'team3', 'team4'][:cls.num_teams]

    launchers = []
    for map_data in get_games(cls.module):
        # Maps without a 'team0' spawn area cannot host this mode at all.
        if not len(map_data.get_spawnpoints('team0')):
            continue

        # Every active team needs a spawn point per assigned player.
        if any(len(map_data.get_spawnpoints(team)) < per_team
               for team in team_names):
            continue

        min_players = map_data['MIN_PLAYERS']
        max_players = map_data['MAX_PLAYERS']
        if min_players <= player_count and (
                max_players == -1 or player_count <= max_players):
            launchers.append(cls.GameLauncher(cls, map_data))
    return launchers
| gpl-3.0 |
tlatzko/spmcluster | .tox/docs/lib/python2.7/site-packages/pygments/lexers/r.py | 72 | 23755 | # -*- coding: utf-8 -*-
"""
pygments.lexers.r
~~~~~~~~~~~~~~~~~
Lexers for the R/S languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, words, do_insertions
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic
__all__ = ['RConsoleLexer', 'SLexer', 'RdLexer']
line_re = re.compile('.*?\n')
class RConsoleLexer(Lexer):
    """
    For R console transcripts or R CMD BATCH output files.
    """

    name = 'RConsole'
    aliases = ['rconsole', 'rout']
    filenames = ['*.Rout']

    def get_tokens_unprocessed(self, text):
        # Highlighting of the R code itself is delegated to SLexer; this
        # lexer only separates prompt markers and program output from the
        # code lines, then weaves the pieces back together.
        slexer = SLexer(**self.options)

        current_code_block = ''
        insertions = []

        for match in line_re.finditer(text):
            line = match.group()
            if line.startswith('>') or line.startswith('+'):
                # Colorize the prompt as such,
                # then put rest of line into current_code_block
                insertions.append((len(current_code_block),
                                   [(0, Generic.Prompt, line[:2])]))
                current_code_block += line[2:]
            else:
                # We have reached a non-prompt line!
                # If we have stored prompt lines, need to process them first.
                if current_code_block:
                    # Weave together the prompts and highlight code.
                    for item in do_insertions(
                            insertions, slexer.get_tokens_unprocessed(current_code_block)):
                        yield item
                    # Reset vars for next code block.
                    current_code_block = ''
                    insertions = []
                # Now process the actual line itself, this is output from R.
                yield match.start(), Generic.Output, line

        # If we happen to end on a code block with nothing after it, need to
        # process the last code block. This is neither elegant nor DRY so
        # should be changed.
        if current_code_block:
            for item in do_insertions(
                    insertions, slexer.get_tokens_unprocessed(current_code_block)):
                yield item
class SLexer(RegexLexer):
"""
For S, S-plus, and R source code.
.. versionadded:: 0.10
"""
name = 'S'
aliases = ['splus', 's', 'r']
filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron']
mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r',
'text/x-R', 'text/x-r-history', 'text/x-r-profile']
builtins_base = (
'Arg', 'Conj', 'Cstack_info', 'Encoding', 'FALSE',
'Filter', 'Find', 'I', 'ISOdate', 'ISOdatetime', 'Im', 'Inf',
'La.svd', 'Map', 'Math.Date', 'Math.POSIXt', 'Math.data.frame',
'Math.difftime', 'Math.factor', 'Mod', 'NA_character_',
'NA_complex_', 'NA_real_', 'NCOL', 'NROW', 'NULLNA_integer_', 'NaN',
'Negate', 'NextMethod', 'Ops.Date', 'Ops.POSIXt', 'Ops.data.frame',
'Ops.difftime', 'Ops.factor', 'Ops.numeric_version', 'Ops.ordered',
'Position', 'R.Version', 'R.home', 'R.version', 'R.version.string',
'RNGkind', 'RNGversion', 'R_system_version', 'Re', 'Recall',
'Reduce', 'Summary.Date', 'Summary.POSIXct', 'Summary.POSIXlt',
'Summary.data.frame', 'Summary.difftime', 'Summary.factor',
'Summary.numeric_version', 'Summary.ordered', 'Sys.Date',
'Sys.chmod', 'Sys.getenv', 'Sys.getlocale', 'Sys.getpid',
'Sys.glob', 'Sys.info', 'Sys.localeconv', 'Sys.readlink',
'Sys.setFileTime', 'Sys.setenv', 'Sys.setlocale', 'Sys.sleep',
'Sys.time', 'Sys.timezone', 'Sys.umask', 'Sys.unsetenv',
'Sys.which', 'TRUE', 'UseMethod', 'Vectorize', 'abbreviate', 'abs',
'acos', 'acosh', 'addNA', 'addTaskCallback', 'agrep', 'alist',
'all', 'all.equal', 'all.equal.POSIXct', 'all.equal.character',
'all.equal.default', 'all.equal.factor', 'all.equal.formula',
'all.equal.language', 'all.equal.list', 'all.equal.numeric',
'all.equal.raw', 'all.names', 'all.vars', 'any', 'anyDuplicated',
'anyDuplicated.array', 'anyDuplicated.data.frame',
'anyDuplicated.default', 'anyDuplicated.matrix', 'aperm',
'aperm.default', 'aperm.table', 'append', 'apply', 'args',
'arrayInd', 'as.Date', 'as.Date.POSIXct', 'as.Date.POSIXlt',
'as.Date.character', 'as.Date.date', 'as.Date.dates',
'as.Date.default', 'as.Date.factor', 'as.Date.numeric',
'as.POSIXct', 'as.POSIXct.Date', 'as.POSIXct.POSIXlt',
'as.POSIXct.date', 'as.POSIXct.dates', 'as.POSIXct.default',
'as.POSIXct.numeric', 'as.POSIXlt', 'as.POSIXlt.Date',
'as.POSIXlt.POSIXct', 'as.POSIXlt.character', 'as.POSIXlt.date',
'as.POSIXlt.dates', 'as.POSIXlt.default', 'as.POSIXlt.factor',
'as.POSIXlt.numeric', 'as.array', 'as.array.default', 'as.call',
'as.character', 'as.character.Date', 'as.character.POSIXt',
'as.character.condition', 'as.character.default',
'as.character.error', 'as.character.factor', 'as.character.hexmode',
'as.character.numeric_version', 'as.character.octmode',
'as.character.srcref', 'as.complex', 'as.data.frame',
'as.data.frame.AsIs', 'as.data.frame.Date', 'as.data.frame.POSIXct',
'as.data.frame.POSIXlt', 'as.data.frame.array',
'as.data.frame.character', 'as.data.frame.complex',
'as.data.frame.data.frame', 'as.data.frame.default',
'as.data.frame.difftime', 'as.data.frame.factor',
'as.data.frame.integer', 'as.data.frame.list',
'as.data.frame.logical', 'as.data.frame.matrix',
'as.data.frame.model.matrix', 'as.data.frame.numeric',
'as.data.frame.numeric_version', 'as.data.frame.ordered',
'as.data.frame.raw', 'as.data.frame.table', 'as.data.frame.ts',
'as.data.frame.vector', 'as.difftime', 'as.double',
'as.double.POSIXlt', 'as.double.difftime', 'as.environment',
'as.expression', 'as.expression.default', 'as.factor',
'as.function', 'as.function.default', 'as.hexmode', 'as.integer',
'as.list', 'as.list.Date', 'as.list.POSIXct', 'as.list.data.frame',
'as.list.default', 'as.list.environment', 'as.list.factor',
'as.list.function', 'as.list.numeric_version', 'as.logical',
'as.logical.factor', 'as.matrix', 'as.matrix.POSIXlt',
'as.matrix.data.frame', 'as.matrix.default', 'as.matrix.noquote',
'as.name', 'as.null', 'as.null.default', 'as.numeric',
'as.numeric_version', 'as.octmode', 'as.ordered',
'as.package_version', 'as.pairlist', 'as.qr', 'as.raw', 'as.single',
'as.single.default', 'as.symbol', 'as.table', 'as.table.default',
'as.vector', 'as.vector.factor', 'asNamespace', 'asS3', 'asS4',
'asin', 'asinh', 'assign', 'atan', 'atan2', 'atanh',
'attachNamespace', 'attr', 'attr.all.equal', 'attributes',
'autoload', 'autoloader', 'backsolve', 'baseenv', 'basename',
'besselI', 'besselJ', 'besselK', 'besselY', 'beta',
'bindingIsActive', 'bindingIsLocked', 'bindtextdomain', 'bitwAnd',
'bitwNot', 'bitwOr', 'bitwShiftL', 'bitwShiftR', 'bitwXor', 'body',
'bquote', 'browser', 'browserCondition', 'browserSetDebug',
'browserText', 'builtins', 'by', 'by.data.frame', 'by.default',
'bzfile', 'c.Date', 'c.POSIXct', 'c.POSIXlt', 'c.noquote',
'c.numeric_version', 'call', 'callCC', 'capabilities', 'casefold',
'cat', 'category', 'cbind', 'cbind.data.frame', 'ceiling',
'char.expand', 'charToRaw', 'charmatch', 'chartr', 'check_tzones',
'chol', 'chol.default', 'chol2inv', 'choose', 'class',
'clearPushBack', 'close', 'close.connection', 'close.srcfile',
'close.srcfilealias', 'closeAllConnections', 'col', 'colMeans',
'colSums', 'colnames', 'commandArgs', 'comment', 'computeRestarts',
'conditionCall', 'conditionCall.condition', 'conditionMessage',
'conditionMessage.condition', 'conflicts', 'contributors', 'cos',
'cosh', 'crossprod', 'cummax', 'cummin', 'cumprod', 'cumsum', 'cut',
'cut.Date', 'cut.POSIXt', 'cut.default', 'dQuote', 'data.class',
'data.matrix', 'date', 'debug', 'debugonce',
'default.stringsAsFactors', 'delayedAssign', 'deparse', 'det',
'determinant', 'determinant.matrix', 'dget', 'diag', 'diff',
'diff.Date', 'diff.POSIXt', 'diff.default', 'difftime', 'digamma',
'dim', 'dim.data.frame', 'dimnames', 'dimnames.data.frame', 'dir',
'dir.create', 'dirname', 'do.call', 'dput', 'drop', 'droplevels',
'droplevels.data.frame', 'droplevels.factor', 'dump', 'duplicated',
'duplicated.POSIXlt', 'duplicated.array', 'duplicated.data.frame',
'duplicated.default', 'duplicated.matrix',
'duplicated.numeric_version', 'dyn.load', 'dyn.unload', 'eapply',
'eigen', 'else', 'emptyenv', 'enc2native', 'enc2utf8',
'encodeString', 'enquote', 'env.profile', 'environment',
'environmentIsLocked', 'environmentName', 'eval', 'eval.parent',
'evalq', 'exists', 'exp', 'expand.grid', 'expm1', 'expression',
'factor', 'factorial', 'fifo', 'file', 'file.access', 'file.append',
'file.choose', 'file.copy', 'file.create', 'file.exists',
'file.info', 'file.link', 'file.path', 'file.remove', 'file.rename',
'file.show', 'file.symlink', 'find.package', 'findInterval',
'findPackageEnv', 'findRestart', 'floor', 'flush',
'flush.connection', 'force', 'formals', 'format',
'format.AsIs', 'format.Date', 'format.POSIXct', 'format.POSIXlt',
'format.data.frame', 'format.default', 'format.difftime',
'format.factor', 'format.hexmode', 'format.info',
'format.libraryIQR', 'format.numeric_version', 'format.octmode',
'format.packageInfo', 'format.pval', 'format.summaryDefault',
'formatC', 'formatDL', 'forwardsolve', 'gamma', 'gc', 'gc.time',
'gcinfo', 'gctorture', 'gctorture2', 'get', 'getAllConnections',
'getCallingDLL', 'getCallingDLLe', 'getConnection',
'getDLLRegisteredRoutines', 'getDLLRegisteredRoutines.DLLInfo',
'getDLLRegisteredRoutines.character', 'getElement',
'getExportedValue', 'getHook', 'getLoadedDLLs', 'getNamespace',
'getNamespaceExports', 'getNamespaceImports', 'getNamespaceInfo',
'getNamespaceName', 'getNamespaceUsers', 'getNamespaceVersion',
'getNativeSymbolInfo', 'getOption', 'getRversion', 'getSrcLines',
'getTaskCallbackNames', 'geterrmessage', 'gettext', 'gettextf',
'getwd', 'gl', 'globalenv', 'gregexpr', 'grep', 'grepRaw', 'grepl',
'gsub', 'gzcon', 'gzfile', 'head', 'iconv', 'iconvlist',
'icuSetCollate', 'identical', 'identity', 'ifelse', 'importIntoEnv',
'in', 'inherits', 'intToBits', 'intToUtf8', 'interaction', 'interactive',
'intersect', 'inverse.rle', 'invisible', 'invokeRestart',
'invokeRestartInteractively', 'is.R', 'is.array', 'is.atomic',
'is.call', 'is.character', 'is.complex', 'is.data.frame',
'is.double', 'is.element', 'is.environment', 'is.expression',
'is.factor', 'is.finite', 'is.function', 'is.infinite',
'is.integer', 'is.language', 'is.list', 'is.loaded', 'is.logical',
'is.matrix', 'is.na', 'is.na.POSIXlt', 'is.na.data.frame',
'is.na.numeric_version', 'is.name', 'is.nan', 'is.null',
'is.numeric', 'is.numeric.Date', 'is.numeric.POSIXt',
'is.numeric.difftime', 'is.numeric_version', 'is.object',
'is.ordered', 'is.package_version', 'is.pairlist', 'is.primitive',
'is.qr', 'is.raw', 'is.recursive', 'is.single', 'is.symbol',
'is.table', 'is.unsorted', 'is.vector', 'isBaseNamespace',
'isIncomplete', 'isNamespace', 'isOpen', 'isRestart', 'isS4',
'isSeekable', 'isSymmetric', 'isSymmetric.matrix', 'isTRUE',
'isatty', 'isdebugged', 'jitter', 'julian', 'julian.Date',
'julian.POSIXt', 'kappa', 'kappa.default', 'kappa.lm', 'kappa.qr',
'kronecker', 'l10n_info', 'labels', 'labels.default', 'lapply',
'lazyLoad', 'lazyLoadDBexec', 'lazyLoadDBfetch', 'lbeta', 'lchoose',
'length', 'length.POSIXlt', 'letters', 'levels', 'levels.default',
'lfactorial', 'lgamma', 'library.dynam', 'library.dynam.unload',
'licence', 'license', 'list.dirs', 'list.files', 'list2env', 'load',
'loadNamespace', 'loadedNamespaces', 'loadingNamespaceInfo',
'local', 'lockBinding', 'lockEnvironment', 'log', 'log10', 'log1p',
'log2', 'logb', 'lower.tri', 'ls', 'make.names', 'make.unique',
'makeActiveBinding', 'mapply', 'margin.table', 'mat.or.vec',
'match', 'match.arg', 'match.call', 'match.fun', 'max', 'max.col',
'mean', 'mean.Date', 'mean.POSIXct', 'mean.POSIXlt', 'mean.default',
'mean.difftime', 'mem.limits', 'memCompress', 'memDecompress',
'memory.profile', 'merge', 'merge.data.frame', 'merge.default',
'message', 'mget', 'min', 'missing', 'mode', 'month.abb',
'month.name', 'months', 'months.Date', 'months.POSIXt',
'months.abb', 'months.nameletters', 'names', 'names.POSIXlt',
'namespaceExport', 'namespaceImport', 'namespaceImportClasses',
'namespaceImportFrom', 'namespaceImportMethods', 'nargs', 'nchar',
'ncol', 'new.env', 'ngettext', 'nlevels', 'noquote', 'norm',
'normalizePath', 'nrow', 'numeric_version', 'nzchar', 'objects',
'oldClass', 'on.exit', 'open', 'open.connection', 'open.srcfile',
'open.srcfilealias', 'open.srcfilecopy', 'options', 'order',
'ordered', 'outer', 'packBits', 'packageEvent',
'packageHasNamespace', 'packageStartupMessage', 'package_version',
'pairlist', 'parent.env', 'parent.frame', 'parse',
'parseNamespaceFile', 'paste', 'paste0', 'path.expand',
'path.package', 'pipe', 'pmatch', 'pmax', 'pmax.int', 'pmin',
'pmin.int', 'polyroot', 'pos.to.env', 'pretty', 'pretty.default',
'prettyNum', 'print', 'print.AsIs', 'print.DLLInfo',
'print.DLLInfoList', 'print.DLLRegisteredRoutines', 'print.Date',
'print.NativeRoutineList', 'print.POSIXct', 'print.POSIXlt',
'print.by', 'print.condition', 'print.connection',
'print.data.frame', 'print.default', 'print.difftime',
'print.factor', 'print.function', 'print.hexmode',
'print.libraryIQR', 'print.listof', 'print.noquote',
'print.numeric_version', 'print.octmode', 'print.packageInfo',
'print.proc_time', 'print.restart', 'print.rle',
'print.simple.list', 'print.srcfile', 'print.srcref',
'print.summary.table', 'print.summaryDefault', 'print.table',
'print.warnings', 'prmatrix', 'proc.time', 'prod', 'prop.table',
'provideDimnames', 'psigamma', 'pushBack', 'pushBackLength', 'q',
'qr', 'qr.Q', 'qr.R', 'qr.X', 'qr.coef', 'qr.default', 'qr.fitted',
'qr.qty', 'qr.qy', 'qr.resid', 'qr.solve', 'quarters',
'quarters.Date', 'quarters.POSIXt', 'quit', 'quote', 'range',
'range.default', 'rank', 'rapply', 'raw', 'rawConnection',
'rawConnectionValue', 'rawShift', 'rawToBits', 'rawToChar', 'rbind',
'rbind.data.frame', 'rcond', 'read.dcf', 'readBin', 'readChar',
'readLines', 'readRDS', 'readRenviron', 'readline', 'reg.finalizer',
'regexec', 'regexpr', 'registerS3method', 'registerS3methods',
'regmatches', 'remove', 'removeTaskCallback', 'rep', 'rep.Date',
'rep.POSIXct', 'rep.POSIXlt', 'rep.factor', 'rep.int',
'rep.numeric_version', 'rep_len', 'replace', 'replicate',
'requireNamespace', 'restartDescription', 'restartFormals',
'retracemem', 'rev', 'rev.default', 'rle', 'rm', 'round',
'round.Date', 'round.POSIXt', 'row', 'row.names',
'row.names.data.frame', 'row.names.default', 'rowMeans', 'rowSums',
'rownames', 'rowsum', 'rowsum.data.frame', 'rowsum.default',
'sQuote', 'sample', 'sample.int', 'sapply', 'save', 'save.image',
'saveRDS', 'scale', 'scale.default', 'scan', 'search',
'searchpaths', 'seek', 'seek.connection', 'seq', 'seq.Date',
'seq.POSIXt', 'seq.default', 'seq.int', 'seq_along', 'seq_len',
'sequence', 'serialize', 'set.seed', 'setHook', 'setNamespaceInfo',
'setSessionTimeLimit', 'setTimeLimit', 'setdiff', 'setequal',
'setwd', 'shQuote', 'showConnections', 'sign', 'signalCondition',
'signif', 'simpleCondition', 'simpleError', 'simpleMessage',
'simpleWarning', 'simplify2array', 'sin', 'single',
'sinh', 'sink', 'sink.number', 'slice.index', 'socketConnection',
'socketSelect', 'solve', 'solve.default', 'solve.qr', 'sort',
'sort.POSIXlt', 'sort.default', 'sort.int', 'sort.list', 'split',
'split.Date', 'split.POSIXct', 'split.data.frame', 'split.default',
'sprintf', 'sqrt', 'srcfile', 'srcfilealias', 'srcfilecopy',
'srcref', 'standardGeneric', 'stderr', 'stdin', 'stdout', 'stop',
'stopifnot', 'storage.mode', 'strftime', 'strptime', 'strsplit',
'strtoi', 'strtrim', 'structure', 'strwrap', 'sub', 'subset',
'subset.data.frame', 'subset.default', 'subset.matrix',
'substitute', 'substr', 'substring', 'sum', 'summary',
'summary.Date', 'summary.POSIXct', 'summary.POSIXlt',
'summary.connection', 'summary.data.frame', 'summary.default',
'summary.factor', 'summary.matrix', 'summary.proc_time',
'summary.srcfile', 'summary.srcref', 'summary.table',
'suppressMessages', 'suppressPackageStartupMessages',
'suppressWarnings', 'svd', 'sweep', 'sys.call', 'sys.calls',
'sys.frame', 'sys.frames', 'sys.function', 'sys.load.image',
'sys.nframe', 'sys.on.exit', 'sys.parent', 'sys.parents',
'sys.save.image', 'sys.source', 'sys.status', 'system',
'system.file', 'system.time', 'system2', 't', 't.data.frame',
't.default', 'table', 'tabulate', 'tail', 'tan', 'tanh', 'tapply',
'taskCallbackManager', 'tcrossprod', 'tempdir', 'tempfile',
'testPlatformEquivalence', 'textConnection', 'textConnectionValue',
'toString', 'toString.default', 'tolower', 'topenv', 'toupper',
'trace', 'traceback', 'tracemem', 'tracingState', 'transform',
'transform.data.frame', 'transform.default', 'trigamma', 'trunc',
'trunc.Date', 'trunc.POSIXt', 'truncate', 'truncate.connection',
'try', 'tryCatch', 'typeof', 'unclass', 'undebug', 'union',
'unique', 'unique.POSIXlt', 'unique.array', 'unique.data.frame',
'unique.default', 'unique.matrix', 'unique.numeric_version',
'units', 'units.difftime', 'unix.time', 'unlink', 'unlist',
'unloadNamespace', 'unlockBinding', 'unname', 'unserialize',
'unsplit', 'untrace', 'untracemem', 'unz', 'upper.tri', 'url',
'utf8ToInt', 'vapply', 'version', 'warning', 'warnings', 'weekdays',
'weekdays.Date', 'weekdays.POSIXt', 'which', 'which.max',
'which.min', 'with', 'with.default', 'withCallingHandlers',
'withRestarts', 'withVisible', 'within', 'within.data.frame',
'within.list', 'write', 'write.dcf', 'writeBin', 'writeChar',
'writeLines', 'xor', 'xor.hexmode', 'xor.octmode',
'xpdrows.data.frame', 'xtfrm', 'xtfrm.AsIs', 'xtfrm.Date',
'xtfrm.POSIXct', 'xtfrm.POSIXlt', 'xtfrm.Surv', 'xtfrm.default',
'xtfrm.difftime', 'xtfrm.factor', 'xtfrm.numeric_version', 'xzfile',
'zapsmall'
)
tokens = {
'comments': [
(r'#.*$', Comment.Single),
],
'valid_name': [
(r'[a-zA-Z][\w.]*', Text),
# can begin with ., but not if that is followed by a digit
(r'\.[a-zA-Z_][\w.]*', Text),
],
'punctuation': [
(r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation),
],
'keywords': [
(words(builtins_base, suffix=r'(?![\w. =])'),
Keyword.Pseudo),
(r'(if|else|for|while|repeat|in|next|break|return|switch|function)'
r'(?![\w.])',
Keyword.Reserved),
(r'(array|category|character|complex|double|function|integer|list|'
r'logical|matrix|numeric|vector|data.frame|c)'
r'(?![\w.])',
Keyword.Type),
(r'(library|require|attach|detach|source)'
r'(?![\w.])',
Keyword.Namespace)
],
'operators': [
(r'<<?-|->>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator),
(r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator)
],
'builtin_symbols': [
(r'(NULL|NA(_(integer|real|complex|character)_)?|'
r'letters|LETTERS|Inf|TRUE|FALSE|NaN|pi|\.\.(\.|[0-9]+))'
r'(?![\w.])',
Keyword.Constant),
(r'(T|F)\b', Name.Builtin.Pseudo),
],
'numbers': [
# hex number
(r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex),
# decimal number
(r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)([eE][+-]?[0-9]+)?[Li]?',
Number),
],
'statements': [
include('comments'),
# whitespaces
(r'\s+', Text),
(r'`.*?`', String.Backtick),
(r'\'', String, 'string_squote'),
(r'\"', String, 'string_dquote'),
include('builtin_symbols'),
include('numbers'),
include('keywords'),
include('punctuation'),
include('operators'),
include('valid_name'),
],
'root': [
include('statements'),
# blocks:
(r'\{|\}', Punctuation),
# (r'\{', Punctuation, 'block'),
(r'.', Text),
],
# 'block': [
# include('statements'),
# ('\{', Punctuation, '#push'),
# ('\}', Punctuation, '#pop')
# ],
'string_squote': [
(r'([^\'\\]|\\.)*\'', String, '#pop'),
],
'string_dquote': [
(r'([^"\\]|\\.)*"', String, '#pop'),
],
}
def analyse_text(text):
    """Content-sniffing heuristic for Pygments' lexer guessing.

    An ``<-`` assignment preceded by an identifier, closing bracket, or
    whitespace character (and not part of ``<--``) is a strong hint of
    R/S source; return a modest confidence score in that case,
    otherwise implicitly return None (no evidence).
    """
    if re.search(r'[a-z0-9_\])\s]<-(?!-)', text):
        return 0.11
class RdLexer(RegexLexer):
    """
    Pygments Lexer for R documentation (Rd) files

    This is a very minimal implementation, highlighting little more
    than the macros. A description of Rd syntax is found in `Writing R
    Extensions <http://cran.r-project.org/doc/manuals/R-exts.html>`_
    and `Parsing Rd files <developer.r-project.org/parseRd.pdf>`_.

    .. versionadded:: 1.6
    """
    name = 'Rd'
    aliases = ['rd']
    filenames = ['*.Rd']
    mimetypes = ['text/x-r-doc']

    # To account for verbatim / LaTeX-like / and R-like areas
    # would require parsing.
    tokens = {
        # Rules are tried in order; the two catch-alls at the end keep the
        # lexer total so no input character is left untokenized.
        'root': [
            # catch escaped brackets and percent sign
            (r'\\[\\{}%]', String.Escape),
            # comments
            (r'%.*$', Comment),
            # special macros with no arguments
            (r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant),
            # macros
            (r'\\[a-zA-Z]+\b', Keyword),
            # special preprocessor macros
            (r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc),
            # non-escaped brackets
            (r'[{}]', Name.Builtin),
            # everything else
            (r'[^\\%\n{}]+', Text),
            (r'.', Text),
        ]
    }
| bsd-2-clause |
atomicobject/kinetic-c | vendor/protobuf-2.5.0/python/ez_setup.py | 454 | 10334 | #!python
# This file was obtained from:
# http://peak.telecommunity.com/dist/ez_setup.py
# on 2011/1/21.
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c11"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
    # Verify a downloaded egg's bytes against the known-good checksum table
    # (md5_data above).  Egg names not listed there pass through unchecked;
    # a checksum mismatch aborts the process with exit code 2 instead of
    # returning a possibly-corrupt download.
    if egg_name in md5_data:
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
                "md5 validation of %s failed! (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    # Whether setuptools/pkg_resources were imported before we ran; if so,
    # a conflicting installed version cannot be swapped out in-process.
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        # Fetch the egg, put it first on sys.path, and tell setuptools to
        # bootstrap-install itself from that egg.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        # No setuptools at all: just download it.
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:
        # An older setuptools is already importable.
        if was_imported:
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
    except pkg_resources.DistributionNotFound:
        pass
    # Drop the stale pkg_resources module so the downloaded egg's copy is
    # picked up on the next import.
    del pkg_resources, sys.modules['pkg_resources'] # reload ok
    return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto):  # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                # Warn the user (with a grace period to Ctrl-C out) before
                # touching the network.
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            # Always release the network and file handles, even on failure.
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # setuptools is absent entirely: download a bootstrap egg and run
        # easy_install from it to perform a proper installation.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg]) # we're done here
        finally:
            # Remove the bootstrap egg; easy_install has installed a real copy.
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # 0.0.1 is the placeholder version of an obsolete prototype
            # package that must be removed by hand.
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed version is too old: upgrade it via easy_install,
        # driven from a freshly downloaded egg.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re

    # Hash every named egg file and record it in the in-memory table.
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()

    # Re-render the table as source text, sorted for a stable diff.
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)

    # Self-modifying step: locate this script's own md5_data literal and
    # splice the regenerated entries into the file on disk.
    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()

    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)

    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
if __name__=='__main__':
    # `ez_setup.py --md5update EGG...` maintains the built-in checksum
    # table; any other invocation is an install/upgrade request for main().
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
| lgpl-2.1 |
PhonologicalCorpusTools/PyAnnotationGraph | polyglotdb/io/exporters/csv.py | 3 | 1634 | import csv
def make_safe(value, delimiter):
    """Flatten *value* into a string suitable for a CSV cell.

    Lists (at any nesting depth) are joined with *delimiter*, ``None``
    becomes the empty string, and anything else is passed through
    ``str``.

    Parameters
    ----------
    value : list or None or object
        Object to render as a string
    delimiter : str
        Character to mark boundaries between list elements

    Returns
    -------
    str
        Safe string
    """
    if isinstance(value, list):
        parts = (make_safe(element, delimiter) for element in value)
        return delimiter.join(parts)
    return '' if value is None else str(value)
def save_results(results, path, header=None, mode='w'):
    """Write query results to *path* as CSV.

    Parameters
    ----------
    results : iterable
        The rows to write; each row is indexable by the header names.
        When *header* is None, ``results.columns`` supplies the header.
    path : str or csv-writer-like object
        A file name to open, or an object with a ``writerow`` method.
    header : list, optional
        Column names to emit; defaults to ``results.columns``.
    mode : str
        ``'w'`` to write (emits a header row), ``'a'`` to append
        (header suppressed).
    """
    if header is None:
        header = results.columns

    if isinstance(path, str):
        # File-name form: open the file ourselves and emit dict rows.
        with open(path, mode, encoding='utf8', newline='') as f:
            writer = csv.DictWriter(f, header)
            if mode != 'a':
                writer.writeheader()
            for row in results:
                try:
                    safe_row = {key: make_safe(row[key], '/') for key in header}
                except KeyError:
                    # Rows missing a requested column are skipped silently.
                    continue
                writer.writerow(safe_row)
    else:
        # Writer-object form: emit the header as a plain row, then lists.
        if mode != 'a':
            path.writerow(header)
        for row in results:
            try:
                safe_row = [make_safe(row[key], '/') for key in header]
            except KeyError:
                continue
            path.writerow(safe_row)
| mit |
mooseman/pdteco | test_parser.py | 1 | 1865 |
# test_parser.py
# Try a few things with creating tokens which know the
# kind of token that should follow them.
import string, itertools
class token(object):
def __init__(self):
self.type = self.next = self.stmttype = None
self.attrdict = vars(self)
# Set an attribute
# NOTE! This can als be used to store values to be passed
# to the next token.
def set(self, attr, val):
setattr(self, attr, val)
# Get an attribute from a token.
def get(self, attr):
return getattr(self, attr)
def display(self):
print self.attrdict
# Test the code
a = token()
a.set('type', 'foo')
a.set('next', 'bar')
# set() happily creates attributes that __init__ never declared.
a.set('moose', 'big')
print a.get('next')
a.display()
# Create a parser with two modes - character and word.
# Note - we could add a statement checker to this. It would look at the
# stmttype of tokens to determine which kind of statement they belong in.
# When a statement is complete, it can flag that and act accordingly.
# Also - attach actions to statements.
class parser(object):
def __init__(self):
self.toklist = []
self.mode = None
def setmode(self, mode):
self.mode = mode
# Clear the token list
def clear(self):
self.toklist = []
def parse(self, stuff, sep=" "):
if self.mode == 'char':
for ch in stuff:
self.toklist.append(ch)
elif self.mode == 'word':
for tok in stuff.split(sep):
self.toklist.append(tok)
def display(self):
print self.toklist
# Test the code
a = parser()
a.setmode('char')
a.parse('The quick brown fox')
a.display()
a.setmode('word')
# clear() empties the token list before reparsing in word mode.
a.clear()
a.parse('The quick brown fox')
a.display()
| unlicense |
jtoppins/beaker | Client/src/bkr/client/task_watcher.py | 1 | 5411 | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys
import time
# Public names exported by this module.
__all__ = (
    "TaskWatcher",
    "watch_tasks"
)
def display_tasklist_status(task_list):
    """Print one aggregated status line for *task_list*.

    Sums the per-state counters reported by each task's get_state_dict()
    and prints them as "--> state: n ... [total: m]".
    (iteritems/print statement: this module targets Python 2.)
    """
    state_dict = {}
    for task in task_list:
        # Merge this task's state counters into the running totals.
        for state, value in task.get_state_dict().iteritems():
            state_dict.setdefault(state, 0)
            state_dict[state] += value
    print "--> " + " ".join(( "%s: %s" % (key, state_dict[key]) for key in sorted(state_dict) )) + " [total: %s]" % sum(state_dict.values())
def watch_tasks(hub, task_id_list, indentation_level=0, sleep_time=30, task_url=None):
    """Watch the task statuses until they finish.

    Polls *hub* every *sleep_time* seconds and prints a line whenever any
    task changes state.  Returns True when a task failed (or when the
    watch was interrupted while tasks were still running), False when all
    tasks finished cleanly, and None immediately for an empty list.
    """
    if not task_id_list:
        return
    try:
        print "Watching tasks (this may be safely interrupted)..."
        watcher = TaskWatcher()
        for task_id in sorted(task_id_list):
            watcher.task_list.append(Task(hub, task_id, indentation_level))
            # print task url if task_url is set or TASK_URL exists in config file
            task_url = task_url or hub._conf.get("TASK_URL", None)
            if task_url is not None:
                print "Task url: %s" % (task_url % task_id)
        is_failed = False
        while True:
            all_done = True
            changed = False
            for task in watcher.task_list:
                # |= / &= accumulate without short-circuiting, so every
                # task is polled on every pass.
                changed |= watcher.update(task)
                is_failed |= watcher.is_failed(task)
                all_done &= watcher.is_finished(task)
            if changed:
                display_tasklist_status(watcher.task_list)
            if all_done:
                break
            time.sleep(sleep_time)
    except KeyboardInterrupt:
        # NOTE(review): if the interrupt arrives before `watcher`/`is_failed`
        # are assigned above, the code below raises NameError -- confirm
        # whether that window matters in practice.
        running_task_list = [ t.task_id for t in watcher.task_list if not watcher.is_finished(t) ]
        if running_task_list:
            print "Tasks still running: %s" % running_task_list
            # Don't report pass on jobs still running.
            is_failed = True
    return is_failed
class TaskWatcher(object):
    """Polls a list of Task objects and reports aggregate finish/fail state."""
    display_tasklist_status = staticmethod(display_tasklist_status)
    def __init__(self):
        # NOTE(review): subtask_dict is consulted below but never populated
        # in this class -- presumably filled elsewhere; confirm.
        self.subtask_dict = {}
        self.task_list = []
    def is_finished(self, task):
        """Is the task finished?"""
        if task.task_info is None:
            return False
        result = task.task_info.get("is_finished", False)
        # The task only counts as finished once every subtask is too.
        for subtask in self.subtask_dict.itervalues():
            result &= subtask.is_finished()
        return result
    def is_failed(self, task):
        """Did the task Fail?"""
        if task.task_info is None:
            return False
        result = task.task_info.get("is_failed", False)
        # Any failed subtask marks the whole task as failed.
        for subtask in self.subtask_dict.itervalues():
            result |= subtask.is_failed()
        return result
    def update(self, task):
        """Update info and log if needed. Returns True on state change."""
        if self.is_finished(task):
            return False
        last = task.task_info
        task.task_info = task.hub.taskactions.task_info(task.task_id, False)
        if task.task_info is None:
            # Unknown task id: abort the whole watch.
            print "No such task id: %s" % task.task_id
            sys.exit(1)
        changed = False
        state = task.task_info["state"]
        if last:
            # compare and note status changes
            laststate = last["state"]
            if laststate != state:
                print "%s: %s -> %s" % (task, task.display_state(last), task.display_state(task.task_info))
                changed = True
        else:
            # first time we're seeing this task, so just show the current state
            print "%s: %s" % (task, task.display_state(task.task_info))
            changed = True
        # update all subtasks
        for key in sorted(self.subtask_dict.keys()):
            changed |= self.subtask_dict[key].update()
        return changed
class Task(object):
    """One watched task: wraps the hub's task_info dict for display."""
    def __init__(self, hub, task_id, indentation_level=0):
        self.hub = hub
        self.task_id = task_id
        # Filled in lazily by TaskWatcher.update(); None until first poll.
        self.task_info = None
        self.indentation_level = int(indentation_level)
        self.subtask_dict = {}
    def __str__(self):
        # Indent nested tasks; append the task method once info is known.
        result = "%s%s" % (" " * self.indentation_level, self.task_id)
        if self.task_info:
            result += " %s" % self.task_info.get("method", "unknown")
        return result
    def is_failed(self):
        """Did the task fail?"""
        if self.task_info is None:
            return False
        return self.task_info.get("is_failed", False)
    def display_state(self, task_info):
        """Return the state label, with the worker name when one is assigned."""
        worker = task_info.get("worker")
        if worker is not None:
            return "%s (%s)" % (task_info["state_label"], worker["name"])
        return "%s" % task_info["state_label"]
    def get_state_dict(self):
        """Return {state_label: count} for this task plus all its subtasks."""
        state_dict = {}
        if self.task_info is not None:
            state = self.task_info.get("state_label", "unknown")
            state_dict.setdefault(state, 0)
            state_dict[state] += 1
        for subtask in self.subtask_dict.itervalues():
            for state, value in subtask.get_state_dict().iteritems():
                state_dict.setdefault(state, 0)
                state_dict[state] += value
        return state_dict
| gpl-2.0 |
yamila-moreno/django | tests/admin_checks/models.py | 98 | 1724 | """
Tests of ModelAdmin system checks logic.
"""
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Album(models.Model):
    """Target model for the ForeignKeys on Song and TwoAlbumFKAndAnE."""
    title = models.CharField(max_length=150)
@python_2_unicode_compatible
class Song(models.Model):
    """Song belonging to an Album; default ordering is by title."""
    title = models.CharField(max_length=150)
    album = models.ForeignKey(Album)
    # editable=False: excluded from forms/admin by default.
    original_release = models.DateField(editable=False)
    class Meta:
        ordering = ('title',)
    def __str__(self):
        return self.title
    def readonly_method_on_model(self):
        # does nothing
        pass
class TwoAlbumFKAndAnE(models.Model):
    """Model with two ForeignKeys to the same Album model plus a field 'e'."""
    album1 = models.ForeignKey(Album, related_name="album1_set")
    album2 = models.ForeignKey(Album, related_name="album2_set")
    e = models.CharField(max_length=1)
class Author(models.Model):
    """Linked to Book through the explicit AuthorsBooks through-model."""
    name = models.CharField(max_length=100)
class Book(models.Model):
    """Book with a many-to-many to Author via the AuthorsBooks through-model."""
    name = models.CharField(max_length=100)
    subtitle = models.CharField(max_length=100)
    price = models.FloatField()
    authors = models.ManyToManyField(Author, through='AuthorsBooks')
class AuthorsBooks(models.Model):
    """Through-model for Book.authors with one extra flag field."""
    author = models.ForeignKey(Author)
    book = models.ForeignKey(Book)
    featured = models.BooleanField()
class State(models.Model):
    """ForeignKey target for City."""
    name = models.CharField(max_length=15)
class City(models.Model):
    """City belonging to a State."""
    state = models.ForeignKey(State)
class Influence(models.Model):
    """Model with a generic relation (content_type + object_id pair)."""
    name = models.TextField()
    content_type = models.ForeignKey(ContentType)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
| bsd-3-clause |
chiotlune/ext | gnuradio-3.7.0.1/gr-blocks/python/blocks/qa_block_gateway.py | 10 | 8214 | #
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import numpy
import pmt
from gnuradio import gr, gr_unittest, blocks
class add_2_f32_1_f32(gr.sync_block):
    """Python sync block: element-wise sum of two float32 streams."""
    def __init__(self):
        gr.sync_block.__init__(
            self,
            name = "add 2 f32",
            in_sig = [numpy.float32, numpy.float32],
            out_sig = [numpy.float32],
        )
    def work(self, input_items, output_items):
        # numpy broadcasting adds the two input buffers in one step.
        output_items[0][:] = input_items[0] + input_items[1]
        return len(output_items[0])
class add_2_fc32_1_fc32(gr.sync_block):
    """Python sync block: element-wise sum of two complex64 streams."""
    def __init__(self):
        gr.sync_block.__init__(
            self,
            name = "add 2 fc32",
            in_sig = [numpy.complex64, numpy.complex64],
            out_sig = [numpy.complex64],
        )
    def work(self, input_items, output_items):
        output_items[0][:] = input_items[0] + input_items[1]
        return len(output_items[0])
class convolve(gr.sync_block):
    """
    A demonstration using block history to properly perform a convolution.
    """
    def __init__(self):
        gr.sync_block.__init__(
            self,
            name = "convolve",
            in_sig = [numpy.float32],
            out_sig = [numpy.float32]
        )
        self._taps = [1, 0, 0, 0]
        # History of len(taps) supplies the extra leading samples that a
        # mode='valid' convolution needs to produce one output per input.
        self.set_history(len(self._taps))
    def work(self, input_items, output_items):
        output_items[0][:] = numpy.convolve(input_items[0], self._taps, mode='valid')
        return len(output_items[0])
class decim2x(gr.decim_block):
    """Decimating block: keeps every second input sample (decimation 2)."""
    def __init__(self):
        gr.decim_block.__init__(
            self,
            name = "decim2x",
            in_sig = [numpy.float32],
            out_sig = [numpy.float32],
            decim = 2
        )
    def work(self, input_items, output_items):
        # Stride-2 slice drops every other sample.
        output_items[0][:] = input_items[0][::2]
        return len(output_items[0])
class interp2x(gr.interp_block):
    """Interpolating block: repeats every input sample twice (interp 2)."""
    def __init__(self):
        gr.interp_block.__init__(
            self,
            name = "interp2x",
            in_sig = [numpy.float32],
            out_sig = [numpy.float32],
            interp = 2
        )
    def work(self, input_items, output_items):
        # Write each input sample into both the even and odd output slots.
        output_items[0][1::2] = input_items[0]
        output_items[0][::2] = input_items[0]
        return len(output_items[0])
class tag_source(gr.sync_block):
    """Source block that tags the middle output item of every work() call."""
    def __init__(self):
        gr.sync_block.__init__(
            self,
            name = "tag source",
            in_sig = None,
            out_sig = [numpy.float32],
        )
    def work(self, input_items, output_items):
        num_output_items = len(output_items[0])
        #put code here to fill the output items...
        #make a new tag on the middle element every time work is called
        # NOTE(review): '/' here is integer division only under Python 2,
        # which this file targets (cf. xrange elsewhere in the module).
        count = self.nitems_written(0) + num_output_items/2
        key = pmt.string_to_symbol("example_key")
        value = pmt.string_to_symbol("example_value")
        self.add_item_tag(0, count, key, value)
        return num_output_items
class tag_sink(gr.sync_block):
    """Sink block that records the key of the last stream tag it receives."""
    def __init__(self):
        gr.sync_block.__init__(
            self,
            name = "tag sink",
            in_sig = [numpy.float32],
            out_sig = None,
        )
        # Last tag key seen, checked by the QA test below.
        self.key = None
    def work(self, input_items, output_items):
        num_input_items = len(input_items[0])
        #put code here to process the input items...
        #print all the tags received in this work call
        nread = self.nitems_read(0)
        tags = self.get_tags_in_range(0, nread, nread+num_input_items)
        for tag in tags:
            #print tag.offset
            #print pmt.symbol_to_string(tag.key)
            #print pmt.symbol_to_string(tag.value)
            self.key = pmt.symbol_to_string(tag.key)
        return num_input_items
class fc32_to_f32_2(gr.sync_block):
    """Split a complex64 stream into 2-element float32 vectors (real, imag)."""
    def __init__(self):
        gr.sync_block.__init__(
            self,
            name = "fc32_to_f32_2",
            in_sig = [numpy.complex64],
            out_sig = [(numpy.float32, 2)],
        )
    def work(self, input_items, output_items):
        # Column 0 gets the real part, column 1 the imaginary part.
        output_items[0][::,0] = numpy.real(input_items[0])
        output_items[0][::,1] = numpy.imag(input_items[0])
        return len(output_items[0])
class vector_to_stream(gr.interp_block):
    """Flatten a stream of fixed-size vectors into a stream of scalar items.

    Each input item is a vector of *nitems_per_block* elements of type
    *itemsize*; the block emits the elements one by one, hence the
    interpolation factor of nitems_per_block.
    """
    def __init__(self, itemsize, nitems_per_block):
        gr.interp_block.__init__(
            self,
            name = "vector_to_stream",
            in_sig = [(itemsize, nitems_per_block)],
            out_sig = [itemsize],
            interp = nitems_per_block
        )
        self.block_size = nitems_per_block
    def work(self, input_items, output_items):
        # Copy every element of every input vector to the output stream.
        # Use range() instead of the Python-2-only xrange(): it behaves
        # identically when iterated and also works under Python 3.
        n = 0
        for i in range(len(input_items[0])):
            for j in range(self.block_size):
                output_items[0][n] = input_items[0][i][j]
                n += 1
        return len(output_items[0])
class test_block_gateway(gr_unittest.TestCase):
    """QA: run each Python block defined above in a flowgraph and check its output."""
    def test_add_f32(self):
        tb = gr.top_block()
        src0 = blocks.vector_source_f([1, 3, 5, 7, 9], False)
        src1 = blocks.vector_source_f([0, 2, 4, 6, 8], False)
        adder = add_2_f32_1_f32()
        sink = blocks.vector_sink_f()
        tb.connect((src0, 0), (adder, 0))
        tb.connect((src1, 0), (adder, 1))
        tb.connect(adder, sink)
        tb.run()
        self.assertEqual(sink.data(), (1, 5, 9, 13, 17))
    def test_add_fc32(self):
        tb = gr.top_block()
        src0 = blocks.vector_source_c([1, 3j, 5, 7j, 9], False)
        src1 = blocks.vector_source_c([0, 2j, 4, 6j, 8], False)
        adder = add_2_fc32_1_fc32()
        sink = blocks.vector_sink_c()
        tb.connect((src0, 0), (adder, 0))
        tb.connect((src1, 0), (adder, 1))
        tb.connect(adder, sink)
        tb.run()
        self.assertEqual(sink.data(), (1, 5j, 9, 13j, 17))
    def test_convolve(self):
        # Taps are [1, 0, 0, 0], so the convolution is an identity.
        tb = gr.top_block()
        src = blocks.vector_source_f([1, 2, 3, 4, 5, 6, 7, 8], False)
        cv = convolve()
        sink = blocks.vector_sink_f()
        tb.connect(src, cv, sink)
        tb.run()
        self.assertEqual(sink.data(), (1, 2, 3, 4, 5, 6, 7, 8))
    def test_decim2x(self):
        tb = gr.top_block()
        src = blocks.vector_source_f([1, 2, 3, 4, 5, 6, 7, 8], False)
        d2x = decim2x()
        sink = blocks.vector_sink_f()
        tb.connect(src, d2x, sink)
        tb.run()
        self.assertEqual(sink.data(), (1, 3, 5, 7))
    def test_interp2x(self):
        tb = gr.top_block()
        src = blocks.vector_source_f([1, 3, 5, 7, 9], False)
        i2x = interp2x()
        sink = blocks.vector_sink_f()
        tb.connect(src, i2x, sink)
        tb.run()
        self.assertEqual(sink.data(), (1, 1, 3, 3, 5, 5, 7, 7, 9, 9))
    def test_tags(self):
        src = tag_source()
        sink = tag_sink()
        head = blocks.head(gr.sizeof_float, 50000) #should be enough items to get a tag through
        tb = gr.top_block()
        tb.connect(src, head, sink)
        tb.run()
        self.assertEqual(sink.key, "example_key")
    def test_fc32_to_f32_2(self):
        tb = gr.top_block()
        src = blocks.vector_source_c([1+2j, 3+4j, 5+6j, 7+8j, 9+10j], False)
        convert = fc32_to_f32_2()
        v2s = vector_to_stream(numpy.float32, 2)
        sink = blocks.vector_sink_f()
        tb.connect(src, convert, v2s, sink)
        tb.run()
        self.assertEqual(sink.data(), (1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
# Run the QA suite, writing results to test_block_gateway.xml.
if __name__ == '__main__':
    gr_unittest.run(test_block_gateway, "test_block_gateway.xml")
| gpl-2.0 |
MarekIgnaszak/econ-project-templates | .mywaflib/waflib/Tools/irixcc.py | 54 | 1350 | #! /usr/bin/env python
# encoding: utf-8
# imported from samba
"""
Compiler definition for irix/MIPSpro cc compiler
"""
from waflib import Errors
from waflib.Tools import ccroot, ar
from waflib.Configure import conf
@conf
def find_irixcc(conf):
    """Locate the MIPSpro ``cc`` compiler and record it as conf.env.CC.

    Resolution order: a pre-set conf.env.CC, then the CC environment
    variable, then ``cc`` on PATH.  The configuration fails if nothing is
    found or if ``cc -version`` cannot be executed.
    """
    v = conf.env
    cc = None
    if v.CC:
        cc = v.CC
    elif 'CC' in conf.environ:
        cc = conf.environ['CC']
    if not cc:
        cc = conf.find_program('cc', var='CC')
    if not cc:
        conf.fatal('irixcc was not found')
    try:
        # NOTE(review): cc + ['-version'] assumes cc is a list; when taken
        # from v.CC / the environment as a plain string this would raise
        # TypeError -- confirm waf normalises CC to a list.
        conf.cmd_and_log(cc + ['-version'])
    except Errors.WafError:
        conf.fatal('%r -version could not be executed' % cc)
    v.CC = cc
    v.CC_NAME = 'irix'
@conf
def irixcc_common_flags(conf):
    """Fill conf.env with the flag/pattern templates used by MIPSpro cc."""
    v = conf.env
    v.CC_SRC_F = ''
    v.CC_TGT_F = ['-c', '-o']
    v.CPPPATH_ST = '-I%s'
    v.DEFINES_ST = '-D%s'
    # Default the linker to the compiler when none was configured.
    if not v.LINK_CC:
        v.LINK_CC = v.CC
    v.CCLNK_SRC_F = ''
    v.CCLNK_TGT_F = ['-o']
    v.LIB_ST = '-l%s' # template for adding libs
    v.LIBPATH_ST = '-L%s' # template for adding libpaths
    v.STLIB_ST = '-l%s'
    v.STLIBPATH_ST = '-L%s'
    v.cprogram_PATTERN = '%s'
    v.cshlib_PATTERN = 'lib%s.so'
    v.cstlib_PATTERN = 'lib%s.a'
def configure(conf):
    """Standard waf configure entry point: set up the irix cc toolchain."""
    conf.find_irixcc()
    conf.find_cpp()
    conf.find_ar()
    conf.irixcc_common_flags()
    conf.cc_load_tools()
    conf.cc_add_flags()
    conf.link_add_flags()
| bsd-3-clause |
ct-23/home-assistant | homeassistant/components/switch/wake_on_lan.py | 8 | 3289 | """
Support for wake on lan.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.wake_on_lan/
"""
import logging
import platform
import subprocess as sp
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.script import Script
from homeassistant.const import (CONF_HOST, CONF_NAME)
REQUIREMENTS = ['wakeonlan==0.2.2']
_LOGGER = logging.getLogger(__name__)
# Configuration keys for this platform.
CONF_MAC_ADDRESS = 'mac_address'
CONF_OFF_ACTION = 'turn_off'
CONF_BROADCAST_ADDRESS = 'broadcast_address'
DEFAULT_NAME = 'Wake on LAN'
# Ping timeout (seconds) used by update() for state polling.
DEFAULT_PING_TIMEOUT = 1
# Only the MAC address is required; host (for ping state checks),
# broadcast address, display name and an off action are optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_MAC_ADDRESS): cv.string,
    vol.Optional(CONF_HOST): cv.string,
    vol.Optional(CONF_BROADCAST_ADDRESS): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_OFF_ACTION): cv.SCRIPT_SCHEMA,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up a wake on lan switch."""
    name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
    mac_address = config.get(CONF_MAC_ADDRESS)
    broadcast_address = config.get(CONF_BROADCAST_ADDRESS)
    off_action = config.get(CONF_OFF_ACTION)
    # Second argument True: call update() right after the entity is added.
    add_devices([WOLSwitch(hass, name, host, mac_address,
                           off_action, broadcast_address)], True)
class WOLSwitch(SwitchDevice):
    """Representation of a wake on lan switch."""
    def __init__(self, hass, name, host, mac_address,
                 off_action, broadcast_address):
        """Initialize the WOL switch."""
        from wakeonlan import wol
        self._hass = hass
        self._name = name
        self._host = host
        self._mac_address = mac_address
        self._broadcast_address = broadcast_address
        # Optional script executed by turn_off(); None disables turning off.
        self._off_script = Script(hass, off_action) if off_action else None
        self._state = False
        self._wol = wol
    @property
    def should_poll(self):
        """Return the polling state."""
        return True
    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._state
    @property
    def name(self):
        """Return the name of the switch."""
        return self._name
    def turn_on(self):
        """Turn the device on by sending a WOL magic packet."""
        if self._broadcast_address:
            self._wol.send_magic_packet(
                self._mac_address, ip_address=self._broadcast_address)
        else:
            self._wol.send_magic_packet(self._mac_address)
    def turn_off(self):
        """Turn the device off if an off action is present."""
        if self._off_script is not None:
            self._off_script.run()
    def update(self):
        """Check if device is on and update the state.

        State is derived from a single ping to the configured host;
        the ping flags differ between Windows and other platforms.
        """
        if platform.system().lower() == 'windows':
            ping_cmd = ['ping', '-n', '1', '-w',
                        str(DEFAULT_PING_TIMEOUT * 1000), str(self._host)]
        else:
            ping_cmd = ['ping', '-c', '1', '-W',
                        str(DEFAULT_PING_TIMEOUT), str(self._host)]
        status = sp.call(ping_cmd, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
        # Exit status 0 means the host answered, i.e. the device is on.
        self._state = not bool(status)
| apache-2.0 |
golismero/golismero-devel | thirdparty_libs/shodan/api.py | 8 | 7427 | try:
from json import dumps, loads
except:
from simplejson import dumps, loads
try:
# Python 2
from urllib2 import urlopen
from urllib import urlencode
except:
# Python 3
from urllib.request import urlopen
from urllib.parse import urlencode
# Public API of this module.
__all__ = ['WebAPI']
class WebAPIError(Exception):
    """Raised when the SHODAN API reports an error for a request."""
    def __init__(self, value):
        # Forward the message to Exception so that args, pickling and the
        # default repr() behave like a standard exception (the original
        # skipped the base initialiser, leaving args empty).
        Exception.__init__(self, value)
        # Keep the original public attribute for existing callers.
        self.value = value
    def __str__(self):
        # str() the value so non-string payloads don't break formatting.
        return str(self.value)
class WebAPI:
    """Wrapper around the SHODAN webservices API.

    NOTE(review): base_url is plain HTTP and the API key is sent as a
    query-string parameter -- both visible on the wire.
    """
    class Exploits:
        """Sub-API for the aggregated Shodan Exploits search."""
        def __init__(self, parent):
            self.parent = parent
        # NOTE(review): sources=[] is a mutable default, but it is only
        # read (joined), never mutated, so it is harmless here.
        def search(self, query, sources=[], cve=None, osvdb=None, msb=None, bid=None):
            """Search the entire Shodan Exploits archive using the same
            query syntax as the website.

            Arguments:
            query   -- exploit search query; same syntax as website

            Optional arguments:
            sources -- metasploit, cve, osvdb, exploitdb, or packetstorm
            cve     -- CVE identifier (ex. 2010-0432)
            osvdb   -- OSVDB identifier (ex. 11666)
            msb     -- Microsoft Security Bulletin ID (ex. MS05-030)
            bid     -- Bugtraq identifier (ex. 13951)
            """
            # Each filter is appended to the query string as a search token.
            if sources:
                query += ' source:' + ','.join(sources)
            if cve:
                query += ' cve:%s' % (str(cve).strip())
            if osvdb:
                query += ' osvdb:%s' % (str(osvdb).strip())
            if msb:
                query += ' msb:%s' % (str(msb).strip())
            if bid:
                query += ' bid:%s' % (str(bid).strip())
            return self.parent._request('search_exploits', {'q': query})
    class ExploitDb:
        """Sub-API for the ExploitDB archive."""
        def __init__(self, parent):
            self.parent = parent
        def download(self, id):
            """Download the exploit code from the ExploitDB archive.

            Arguments:
            id -- ID of the ExploitDB entry

            Returns:
            A dictionary with the following fields:
            filename     -- Name of the file
            content-type -- Mimetype
            data         -- Contents of the file
            """
            return self.parent._request('exploitdb/download', {'id': id})
        def search(self, query, **kwargs):
            """Search the ExploitDB archive.

            Arguments:
            query -- Search terms

            Optional arguments:
            author   -- Name of the exploit submitter
            platform -- Target platform (e.g. windows, linux, hardware etc.)
            port     -- Service port number
            type     -- Any, dos, local, papers, remote, shellcode and webapps

            Returns:
            A dictionary with 2 main items: matches (list) and total (int).
            Each item in 'matches' is a dictionary with the following elements:
            id, author, date, description, platform, port, type
            """
            return self.parent._request('exploitdb/search', dict(q=query, **kwargs))
    class Msf:
        """Sub-API for Metasploit modules."""
        def __init__(self, parent):
            self.parent = parent
        def download(self, id):
            """Download a metasploit module given the fullname (id) of it.

            Arguments:
            id -- fullname of the module (ex. auxiliary/admin/backupexec/dump)

            Returns:
            A dictionary with the following fields:
            filename     -- Name of the file
            content-type -- Mimetype
            data         -- File content
            """
            return self.parent._request('msf/download', {'id': id})
        def search(self, query, **kwargs):
            """Search for a Metasploit module.
            """
            return self.parent._request('msf/search', dict(q=query, **kwargs))
    def __init__(self, key):
        """Initializes the API object.

        Arguments:
        key -- your API key
        """
        self.api_key = key
        self.base_url = 'http://www.shodanhq.com/api/'
        self.exploits = self.Exploits(self)
        self.exploitdb = self.ExploitDb(self)
        self.msf = self.Msf(self)
    def _request(self, function, params):
        """General-purpose function to create web requests to SHODAN.

        Arguments:
        function -- name of the function you want to execute
        params   -- dictionary of parameters for the function

        Returns:
        A parsed JSON object with the function's results.
        """
        # Add the API key parameter automatically
        # NOTE(review): this mutates the caller's params dict in place.
        params['key'] = self.api_key
        # Send the request
        data = urlopen(self.base_url + function + '?' + urlencode(params)).read().decode('utf-8')
        # Parse the text into JSON
        data = loads(data)
        # Raise an exception if an error occurred
        if data.get('error', None):
            raise WebAPIError(data['error'])
        # Return the data
        return data
    def count(self, query):
        """Returns the total number of search results for the query.
        """
        return self._request('count', {'q': query})
    def locations(self, query):
        """Return a break-down of all the countries and cities that the results for
        the given search are located in.
        """
        return self._request('locations', {'q': query})
    def fingerprint(self, banner):
        """Determine the software based on the banner.

        Arguments:
        banner - HTTP banner

        Returns:
        A list of software that matched the given banner.
        """
        return self._request('fingerprint', {'banner': banner})
    def host(self, ip):
        """Get all available information on an IP.

        Arguments:
        ip -- IP of the computer

        Returns:
        All available information SHODAN has on the given IP,
        subject to API key restrictions.
        """
        return self._request('host', {'ip': ip})
    def info(self):
        """Returns information about the current API key, such as a list of add-ons
        and other features that are enabled for the current user's API plan.
        """
        return self._request('info', {})
    def search(self, query, page=1, limit=None, offset=None):
        """Search the SHODAN database.

        Arguments:
        query -- search query; identical syntax to the website

        Optional arguments:
        page   -- page number of the search results
        limit  -- number of results to return
        offset -- search offset to begin getting results from

        Returns:
        A dictionary with 3 main items: matches, countries and total.
        Visit the website for more detailed information.
        """
        args = {
            'q': query,
            'p': page,
        }
        if limit:
            args['l'] = limit
        if offset:
            args['o'] = offset
        return self._request('search', args)
| gpl-2.0 |
Lujeni/ansible | test/units/modules/network/fortios/test_fortios_router_prefix_list6.py | 21 | 7717 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_router_prefix_list6
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Patch the module's Connection class for every test in this file."""
    connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_router_prefix_list6.Connection')
    return connection_class_mock
# NOTE(review): at import time this passes the fixture *function* defined
# above (not a created mock instance) to FortiOSHandler -- confirm intended.
fos_instance = FortiOSHandler(connection_mock)
def test_router_prefix_list6_creation(mocker):
    """state=present with a successful set() reports changed and no error."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'router_prefix_list6': {
            'comments': 'test_value_3',
            'name': 'default_name_4',
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_router_prefix_list6.fortios_router(input_data, fos_instance)
    expected_data = {
        'comments': 'test_value_3',
        'name': 'default_name_4',
    }
    set_method_mock.assert_called_with('router', 'prefix-list6', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_router_prefix_list6_creation_fails(mocker):
    """state=present with a failing set() reports an error and no change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'router_prefix_list6': {
            'comments': 'test_value_3',
            'name': 'default_name_4',
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_router_prefix_list6.fortios_router(input_data, fos_instance)
    expected_data = {
        'comments': 'test_value_3',
        'name': 'default_name_4',
    }
    set_method_mock.assert_called_with('router', 'prefix-list6', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_router_prefix_list6_removal(mocker):
    """state=absent with a successful delete() reports changed and no error."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
    input_data = {
        'username': 'admin',
        'state': 'absent',
        'router_prefix_list6': {
            'comments': 'test_value_3',
            'name': 'default_name_4',
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_router_prefix_list6.fortios_router(input_data, fos_instance)
    delete_method_mock.assert_called_with('router', 'prefix-list6', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_router_prefix_list6_deletion_fails(mocker):
    """state=absent with a failing delete() reports an error and no change."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
    input_data = {
        'username': 'admin',
        'state': 'absent',
        'router_prefix_list6': {
            'comments': 'test_value_3',
            'name': 'default_name_4',
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_router_prefix_list6.fortios_router(input_data, fos_instance)
    delete_method_mock.assert_called_with('router', 'prefix-list6', mkey=ANY, vdom='root')
    schema_method_mock.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_router_prefix_list6_idempotent(mocker):
    """A 404 from set() means no change was needed: not changed, no error."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'router_prefix_list6': {
            'comments': 'test_value_3',
            'name': 'default_name_4',
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_router_prefix_list6.fortios_router(input_data, fos_instance)
    expected_data = {
        'comments': 'test_value_3',
        'name': 'default_name_4',
    }
    set_method_mock.assert_called_with('router', 'prefix-list6', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_router_prefix_list6_filter_foreign_attributes(mocker):
    """Attributes not in the module schema are stripped before set() is called."""
    schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
    input_data = {
        'username': 'admin',
        'state': 'present',
        'router_prefix_list6': {
            'random_attribute_not_valid': 'tag',
            'comments': 'test_value_3',
            'name': 'default_name_4',
        },
        'vdom': 'root'}
    is_error, changed, response = fortios_router_prefix_list6.fortios_router(input_data, fos_instance)
    expected_data = {
        'comments': 'test_value_3',
        'name': 'default_name_4',
    }
    set_method_mock.assert_called_with('router', 'prefix-list6', data=expected_data, vdom='root')
    schema_method_mock.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| gpl-3.0 |
FATruden/boto | tests/integration/ec2/autoscale/__init__.py | 21 | 1099 | # Copyright (c) 2011 Reza Lotun http://reza.lotun.name
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
| mit |
benjello/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/examples/transports/agregats_compte_transports/plot_transports_quantites.py | 4 | 2868 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 24 09:46:35 2015
@author: thomas.douenne
"""
# Ce script réalise des graphiques à partir des données des comptes des transports, i.e. nos agrégats de référence
# pour les transports : taille et composition du parc automobile, consommation des carburants, total des recettes
# issues de la TICPE
# Import de fonctions spécifiques à Openfisca indirect taxation et de bases de données des Comptes des Transports
from ipp_macro_series_parser.agregats_transports.transports_cleaner import a6_b, g2_1, g_3a
from openfisca_france_indirect_taxation.examples.utils_example import graph_builder_carburants, \
graph_builder_carburants_no_color
# Select the series of total TICPE revenue (the fuel excise tax, called TIPP before 2010).
recettes_ticpe = a6_b[a6_b['categorie'] == u'TICPE ( TIPP avant 2010) (1)']
recettes_ticpe = recettes_ticpe[recettes_ticpe['index'] == 'Total']
del recettes_ticpe['index']
# Select the series of fuel quantities consumed: the two row labels of interest
# are rewritten to a single marker so both series can be selected in one pass.
g_3a.loc[g_3a['index'] == u'Voitures particulières', 'index'] = 'to_be_used'
g_3a.loc[g_3a['index'] == u'Total VP françaises', 'index'] = 'to_be_used'
quantite_conso_vp = g_3a[g_3a['index'] == 'to_be_used']
del quantite_conso_vp['index']
# Select the series describing the size and composition of the private car fleet.
taille_parc_vp = g2_1[g2_1['categorie'] == u'Voitures particulières']
del taille_parc_vp['categorie']
# Re-index and transpose the dataframes so that years become the row axis.
recettes_ticpe.set_index(['categorie'], inplace = True)
recettes_ticpe = recettes_ticpe.transpose()
taille_parc_vp.set_index(['index'], inplace = True)
taille_parc_vp = taille_parc_vp.transpose()
quantite_conso_vp.set_index(['categorie'], inplace = True)
quantite_conso_vp = quantite_conso_vp.transpose()
# Rename columns to more explicit labels (kept in French: they end up as plot legends).
recettes_ticpe.rename(columns = {u'TICPE ( TIPP avant 2010) (1)': 'Total recettes TICPE'}, inplace = True)
taille_parc_vp.rename(columns = {'Total': 'parc vp total', 'dont Diesel': 'dont diesel'}, inplace = True)
quantite_conso_vp = quantite_conso_vp[['ESSENCE + GAZOLE'] + ['ESSENCE'] + ['GAZOLE']]
quantite_conso_vp.rename(columns = {'ESSENCE + GAZOLE': 'consommation totale', 'ESSENCE': 'consommation essence',
    'GAZOLE': 'consommation diesel'}, inplace = True)
# Build the plots (Python 2 print statements; user-facing titles are left in French).
print 'Evolution de la taille du parc automobile au cours du temps'
graph_builder_carburants(taille_parc_vp, 'taille parc vp', 1, 0.3, 'blue', 'green', 'red', None)
print 'Evolution des quantités de carburants consommées par les voitures particulières françaises'
graph_builder_carburants(quantite_conso_vp, 'quantite conso vp', 0.85, 0.3, 'blue', 'green', 'red', None)
print 'Evolution des recettes totales issues de la TICPE'
graph_builder_carburants_no_color(recettes_ticpe, 'recettes totales ticpe', 1, 0.17)
| agpl-3.0 |
hachreak/invenio-ext | invenio_ext/sqlalchemy/engines/mysql.py | 5 | 3866 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Mysql dialect."""
import base64
import sqlalchemy
from sqlalchemy import types as types
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import TypeDecorator
@compiles(types.Text, 'sqlite')
@compiles(sqlalchemy.dialects.mysql.TEXT, 'sqlite')
def compile_text(element, compiler, **kw):
    """Compile generic and MySQL-flavoured Text field types to TEXT on SQLite."""
    return 'TEXT'
@compiles(types.Binary, 'sqlite')
def compile_binary(element, compiler, **kw):
    """Compile the Binary field type to BLOB on SQLite."""
    return 'BLOB'
@compiles(types.LargeBinary, 'sqlite')
def compile_largebinary(element, compiler, **kw):
    """Compile the LargeBinary field type for SQLite.

    NOTE(review): 'LONGBLOB' is MySQL syntax; SQLite merely tolerates the
    unknown type name (falling back to BLOB affinity) -- confirm intent.
    """
    return 'LONGBLOB'
@compiles(types.Text, 'mysql')
@compiles(sqlalchemy.dialects.mysql.TEXT, 'mysql')
def compile_text(element, compiler, **kw):
    """Compile the Text field type to TEXT on MySQL.

    NOTE: this rebinds the module-level name ``compile_text`` (already defined
    above for SQLite).  That is harmless: ``@compiles`` registers the handler
    at definition time, so the earlier registration stays in effect.
    """
    return 'TEXT'
@compiles(types.Binary, 'mysql')
def compile_binary(element, compiler, **kw):
    """Compile the Binary field type to BLOB on MySQL.

    NOTE: rebinds the SQLite ``compile_binary`` defined above; both handlers
    remain registered because ``@compiles`` acts at definition time.
    """
    return 'BLOB'
@compiles(types.LargeBinary, 'mysql')
def compile_largebinary(element, compiler, **kw):
    """Compile the LargeBinary field type to LONGBLOB on MySQL.

    NOTE: rebinds the SQLite ``compile_largebinary`` defined above; both
    handlers remain registered because ``@compiles`` acts at definition time.
    """
    return 'LONGBLOB'
class iBinary(TypeDecorator):
    """Printable binary type.

    The Python-side value is the base64 text; it is decoded to raw bytes
    just before hitting the database and re-encoded on the way out.
    """

    impl = types.Binary

    def __init__(self, *arg, **kw):
        """Init iBinary type."""
        # Historical quirk kept for compatibility: re-assigns the class-level
        # ``impl`` from the instance on every instantiation (a no-op unless a
        # subclass overrode ``impl``).
        self.__class__.impl = self.impl
        TypeDecorator.__init__(self, *arg, **kw)

    def process_bind_param(self, value, dialect):
        """Decode the base64 string before saving to the database.

        Bug fix: the previous ``x and f(x) or None`` idiom returned ``None``
        whenever the decoded value was empty (falsy); only ``None`` input
        should map to ``None``.
        """
        return base64.decodestring(value) if value is not None else None

    def process_result_value(self, value, dialect):
        """Encode binary data read from the database to a base64 string."""
        return base64.encodestring(value) if value is not None else None
class iLargeBinary(TypeDecorator):
    """Printable large binary type.

    Same contract as :class:`iBinary` but backed by LargeBinary/LONGBLOB.
    """

    impl = types.LargeBinary

    def __init__(self, *arg, **kw):
        """Init iLargeBinary type."""
        # Historical quirk kept for compatibility (see iBinary.__init__).
        self.__class__.impl = self.impl
        TypeDecorator.__init__(self, *arg, **kw)

    def process_bind_param(self, value, dialect):
        """Decode the base64 string before saving to the database.

        Bug fix: a falsy (empty) decoded value is no longer collapsed to
        ``None`` by the old ``and/or`` idiom.
        """
        return base64.decodestring(value) if value is not None else None

    def process_result_value(self, value, dialect):
        """Encode binary data read from the database to a base64 string."""
        return base64.encodestring(value) if value is not None else None
class iMediumBinary(TypeDecorator):
    """Printable medium binary type.

    Same contract as :class:`iBinary` but backed by MySQL's MEDIUMBLOB.
    """

    impl = sqlalchemy.dialects.mysql.MEDIUMBLOB

    def __init__(self, *arg, **kw):
        """Init iMediumBinary type."""
        # Historical quirk kept for compatibility (see iBinary.__init__).
        self.__class__.impl = self.impl
        TypeDecorator.__init__(self, *arg, **kw)

    def process_bind_param(self, value, dialect):
        """Decode the base64 string before saving to the database.

        Bug fix: a falsy (empty) decoded value is no longer collapsed to
        ``None`` by the old ``and/or`` idiom.
        """
        return base64.decodestring(value) if value is not None else None

    def process_result_value(self, value, dialect):
        """Encode binary data read from the database to a base64 string."""
        return base64.encodestring(value) if value is not None else None
| gpl-2.0 |
Sbalbp/DIRAC | RequestManagementSystem/Agent/RequestOperations/test/ForwardDISETTests.py | 19 | 2467 | ########################################################################
# $HeadURL $
# File: ForwardDISETTests.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/04/18 09:23:05
########################################################################
""" :mod: ForwardDISETTests
=======================
.. module: ForwardDISETTests
:synopsis: unittest for ForwardDISET handler
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
unittest for ForwardDISET handler
"""
# CVS/SVN keyword-expansion revision marker (DIRAC boilerplate convention).
__RCSID__ = "$Id $"
# #
# @file ForwardDISETTests.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/04/18 09:23:18
# @brief Definition of ForwardDISETTests class.
# # imports
import unittest
# # from DIRAC
from DIRAC.Core.Utilities import DEncode
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
# # SUT
from DIRAC.RequestManagementSystem.Agent.RequestOperations.ForwardDISET import ForwardDISET
########################################################################
class ForwardDISETTests( unittest.TestCase ):
  """
  .. class:: ForwardDISETTests

  Unit tests for the ForwardDISET request-operation handler: builds a
  Request holding one DEncode'd 'ForwardDISET' Operation and checks
  construction, operation assignment and the (expected) execution failure.
  """

  def setUp( self ):
    """ test set up

    Prepares a tuple of DISET call arguments (service + kwargs, method name,
    method arguments), encodes it into an Operation of type 'ForwardDISET'
    and attaches that operation to a fresh Request.
    """
    self.hiArgs = ( ( "RequestManagement/RequestManager",
                      { "keepAliveLapse": 10, "timeout": 5 } ),
                    "foo",
                    ( 12345, { "Hi": "There!" } ) )
    self.req = Request( { "RequestName": "testRequest" } )
    self.op = Operation( { "Type": "ForwardDISET",
                           "Arguments": DEncode.encode( self.hiArgs ) } )
    self.req += self.op

  def tearDown( self ):
    """ tear down """
    del self.hiArgs
    del self.op
    del self.req

  def testCase( self ):
    """ ctor and functionality """
    forwardDISET = None
    try:
      forwardDISET = ForwardDISET()
    except Exception:
      # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt are
      # not swallowed; a failed construction is reported by the assert below.
      pass
    self.assertEqual( isinstance( forwardDISET, ForwardDISET ), True, "construction error" )
    forwardDISET.setOperation( self.op )
    self.assertEqual( isinstance( forwardDISET.operation, Operation ), True, "setOperation error" )
    call = forwardDISET()
    # # should be failing right now
    self.assertEqual( call["OK"], False, "call failed" )
if __name__ == "__main__":
testLoader = unittest.TestLoader()
forwardDISETTests = testLoader.loadTestsFromTestCase( ForwardDISETTests )
suite = unittest.TestSuite( [ forwardDISETTests ] )
unittest.TextTestRunner( verbosity = 3 ).run( suite )
| gpl-3.0 |
catb0t/input_constrain | test/tests.py | 2 | 4034 | #!/usr/bin/env python3
import unittest
import subprocess as sp
from sys import executable as py, maxsize as MAXSIZE, version_info as VERINFO
from os import getcwd, path
# (module name, absolute path) pair used below to load pmlr from source.
TESTMOD_INFO = ("pmlr", path.join(getcwd(), "..", "pmlr", "pmlr", "__main__.py"))
# Helper script spawned as a subprocess by the keypress tests.
TEST_HELP = path.join(getcwd(), "..", "pmlr", "test", "_tests_helper.py")
# Load pmlr from its source file with whichever loader API the running
# interpreter supports; the loaded module is bound to the global ``pmlr``.
if VERINFO.major == 2:
    # Python 2: the (deprecated in py3) ``imp`` module is the portable option.
    import imp
    pmlr = imp.load_source(*TESTMOD_INFO)
elif VERINFO.major == 3:
    if VERINFO.minor < 5:
        # Python 3.0-3.4: loader API predating the importlib.util helpers.
        from importlib.machinery import SourceFileLoader
        pmlr = SourceFileLoader(*TESTMOD_INFO).load_module()
    elif VERINFO.minor >= 5:
        # Python 3.5+: the modern spec-based loading protocol.
        import importlib.util
        spec = importlib.util.spec_from_file_location(*TESTMOD_INFO)
        pmlr = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(pmlr)
else:
    raise NotImplementedError("unsupported Python version: ", VERINFO.major)
class TestUtilsNoIO(unittest.TestCase):
    """Tests for pmlr.util helpers that need no console I/O."""

    def test_parsenum(self):
        """parsenum clamps negative values to MAXSIZE and passes the rest through."""
        for value in (-MAXSIZE, -122, -1, 0, 8, 88, 888880, MAXSIZE):
            expected = value if value >= 0 else MAXSIZE
            self.assertEqual(pmlr.util.parsenum(value), expected)

    def test_parsenum_complex(self):
        """parsenum rejects non-orderable numerics such as complex."""
        with self.assertRaises(TypeError):
            pmlr.util.parsenum(8j)
class TestKPressIO(unittest.TestCase):
    """Keypress tests: every case drives the helper script in a subprocess,
    feeding one byte on stdin and checking what the reader echoes back."""

    def setUp(self):
        # (Re)initialise pmlr's terminal state before each test.
        pmlr.init()

    def test_getch(self):
        """test getch (patience)"""
        # One subprocess per 7-bit code point -- slow, hence "(patience)".
        for ch in range(128):
            p = sp.Popen(
                [py, TEST_HELP, "getch"],
                stdin=sp.PIPE,
                stdout=sp.PIPE,
            )
            # The helper prints the ordinal of the byte it read.
            out, _ = p.communicate(input=bytes(chr(ch), "utf-8"))
            self.assertEqual(
                out, bytes('{}'.format(ch), "utf-8")  # ewww
            )

    def test_read_keypress(self):
        """test readkey (patience)"""
        # NOTE(review): values are deliberately mixed types -- exception names
        # (str, read from stderr) for 3/4, raw stdout bytes for 8/13/27, and an
        # empty str for 127; this mirrors helper behaviour, confirm if changed.
        specials = {  # special cases
            3: "KeyboardInterrupt",
            4: "EOFError",
            8: b'\x08\x088',
            13: b'10',
            27: b'\x08\x0827',
            127: '',
        }
        for ch in range(128):
            p = sp.Popen(
                [py, TEST_HELP, "readkey"],
                stdin=sp.PIPE,
                stdout=sp.PIPE,
                stderr=sp.PIPE
            )
            out, err = p.communicate(input=bytes(chr(ch), "utf-8"))
            if ch in specials.keys():
                # For the exception cases, take the last stderr line (the
                # exception name); for 8/13/27 compare raw stdout instead.
                res = (  # magic
                    err
                    .decode("utf-8")
                    .strip()
                    .split("\n")[-1]
                    if ch not in (8, 13, 27)
                    else out
                )
                self.assertEqual(
                    specials[ch], res
                )
            else:
                self.assertEqual(
                    out, bytes('{}'.format(ch), "utf-8")
                )

    def test_read_keypress_raw(self):
        """read raw keypress (patience)"""
        # Raw mode never raises: control bytes map straight to echo sequences.
        specials = {  # special cases
            8: b'\x08\x088',
            13: b'10',
            27: b'\x08\x0827',
            127: b'\x08\x08127',
        }
        for ch in range(128):
            p = sp.Popen(
                [py, TEST_HELP, "raw_readkey"],
                stdin=sp.PIPE,
                stdout=sp.PIPE,
                stderr=sp.PIPE
            )
            out, err = p.communicate(input=bytes(chr(ch), "utf-8"))
            if ch in specials.keys():
                self.assertEqual(
                    specials[ch], out
                )
            else:
                self.assertEqual(
                    out, bytes('{}'.format(ch), "utf-8")
                )
if __name__ == '__main__':
    from os import stat

    # Bail out early when the subprocess helper script is missing.
    try:
        stat(TEST_HELP)
    except FileNotFoundError as err:
        print(err)
        print("stat: cannot stat '{}': no such file or directory".format(TEST_HELP))
        exit(2)
    unittest.main(verbosity=3)
sysadminmatmoz/OCB | addons/account/tests/test_search.py | 47 | 3999 | from openerp.addons.account.tests.account_test_users import AccountTestUsers
class TestSearch(AccountTestUsers):
    """Tests for search on name_search (account.account)

    The name search on account.account is quite complex, make sure
    we have all the correct results
    """

    def _assert_name_search(self, name, operator, expected_ids, message):
        """Run name_search limited to the three fixture accounts and compare
        the set of returned record ids against ``expected_ids``."""
        found = self.account_model.name_search(
            name=name, operator=operator, args=[('id', 'in', self.all_ids)])
        self.assertEqual(set(expected_ids), set(rec[0] for rec in found), message)

    def test_name_search(self):
        """Check ilike / not ilike name_search over account name and code."""
        ac_ids = self.account_type_model.search([], limit=1)
        # Fixture accounts, kept as (id, "code name") pairs.
        self.atax = self.account_model.create(dict(
            name="Tax Received",
            code="X121",
            user_type_id=ac_ids.id,
            reconcile=True,
        )).id, "X121 Tax Received"

        self.apurchase = self.account_model.create(dict(
            name="Purchased Stocks",
            code="X1101",
            user_type_id=ac_ids.id,
            reconcile=True,
        )).id, "X1101 Purchased Stocks"

        self.asale = self.account_model.create(dict(
            name="Product Sales",
            code="XX200",
            user_type_id=ac_ids.id,
            reconcile=True,
        )).id, "XX200 Product Sales"

        self.all_ids = [self.atax[0], self.apurchase[0], self.asale[0]]

        self._assert_name_search(
            "Tax", 'ilike', [self.atax[0]],
            "name_search 'ilike Tax' should have returned Tax Received account only")
        self._assert_name_search(
            "Tax", 'not ilike', [self.apurchase[0], self.asale[0]],
            "name_search 'not ilike Tax' should have returned all but Tax Received account")

        self._assert_name_search(
            'Purchased Stocks', 'ilike', [self.apurchase[0]],
            "name_search 'ilike Purchased Stocks' should have returned Purchased Stocks account only")
        # Message fixed: the searched term is 'Purchased Stocks', not the code X1101.
        self._assert_name_search(
            'Purchased Stocks', 'not ilike', [self.atax[0], self.asale[0]],
            "name_search 'not ilike Purchased Stocks' should have returned all but Purchased Stocks account")

        self._assert_name_search(
            'Product Sales', 'ilike', [self.asale[0]],
            "name_search 'ilike 200 Sales' should have returned Product Sales account only")
        self._assert_name_search(
            'Product Sales', 'not ilike', [self.atax[0], self.apurchase[0]],
            "name_search 'not ilike 200 Sales' should have returned all but Product Sales account")

        self._assert_name_search(
            'XX200', 'ilike', [self.asale[0]],
            "name_search 'ilike XX200' should have returned Product Sales account only")

    def test_property_unset_search(self):
        """An unset property field must match '= False'; a set one must not."""
        res_partner_model = self.env['res.partner']
        account_payment_term_model = self.env['account.payment.term']
        a_partner = res_partner_model.create({'name': 'test partner'})
        a_payment_term = account_payment_term_model.create({'name': 'test payment term'})

        partners = res_partner_model.search([('property_payment_term_id', '=', False), ('id', '=', a_partner.id)])
        self.assertTrue(partners, "unset property field 'propety_payment_term' should have been found")

        a_partner.write({'property_payment_term_id': a_payment_term})
        partners = res_partner_model.search([('property_payment_term_id', '=', False), ('id', '=', a_partner.id)])
        self.assertFalse(partners, "set property field 'propety_payment_term' should not have been found")
| agpl-3.0 |
huobaowangxi/scikit-learn | sklearn/metrics/tests/test_ranking.py | 75 | 40883 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
    """Make some classification predictions on a toy dataset using a SVC

    If binary is True restrict to a binary classification problem instead of a
    multiclass classification problem

    Returns ``(y_true, y_pred, probas_pred)`` for the held-out second half of
    the shuffled samples; ``probas_pred`` is a 1d array of positive-class
    probabilities when ``binary`` is True, else the full probability matrix.
    """
    if dataset is None:
        # import some data to play with
        dataset = datasets.load_iris()

    X = dataset.data
    y = dataset.target

    if binary:
        # restrict to a binary classification task
        X, y = X[y < 2], y[y < 2]

    n_samples, n_features = X.shape
    p = np.arange(n_samples)

    # Fixed seed: the shuffle must stay deterministic so the fixture values
    # asserted by the tests below never change.
    rng = check_random_state(37)
    rng.shuffle(p)
    X, y = X[p], y[p]
    half = int(n_samples / 2)

    # add noisy features to make the problem harder and avoid perfect results
    rng = np.random.RandomState(0)
    X = np.c_[X, rng.randn(n_samples, 200 * n_features)]

    # run classifier, get class probabilities and label predictions
    clf = svm.SVC(kernel='linear', probability=True, random_state=0)
    probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])

    if binary:
        # only interested in probabilities of the positive case
        # XXX: do we really want a special API for the binary case?
        probas_pred = probas_pred[:, 1]

    y_pred = clf.predict(X[half:])
    y_true = y[half:]
    return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
def test_roc_curve():
    # Test Area under Receiver Operating Characteristic (ROC) curve
    y_true, _, probas_pred = make_prediction(binary=True)

    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
    roc_auc = auc(fpr, tpr)
    # Cross-check the trapezoidal AUC against the pairwise-ranking reference.
    expected_auc = _auc(y_true, probas_pred)
    assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
    # roc_auc_score must agree with auc(roc_curve(...)).
    assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve start at 0 and ending and
    # 1 even in corner cases
    rng = np.random.RandomState(0)
    y_true = np.array([0] * 50 + [1] * 50)
    y_pred = rng.randint(3, size=100)
    fpr, tpr, thr = roc_curve(y_true, y_pred)
    # End-point guarantee: the FPR axis spans the full [0, 1] range.
    assert_equal(fpr[0], 0)
    assert_equal(fpr[-1], 1)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
    # Test whether the returned thresholds match up with the returned tpr.
    # make small toy dataset
    y_true, _, probas_pred = make_prediction(binary=True)
    fpr, tpr, thresholds = roc_curve(y_true, probas_pred)

    # Recompute the true positive rate directly from each threshold
    # (total positive count hoisted out of the loop).
    n_pos = np.sum(y_true)
    tpr_correct = [np.sum((probas_pred >= t) & y_true) / (1.0 * n_pos)
                   for t in thresholds]

    # compare tpr and tpr_correct to see if the thresholds' order was correct
    assert_array_almost_equal(tpr, tpr_correct, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
    # Test to ensure that we don't return spurious repeating thresholds.
    # Duplicated thresholds can arise due to machine precision issues.
    dataset = datasets.load_digits()
    X = dataset['data']
    y = dataset['target']

    # This random forest classifier can only return probabilities
    # significant to two decimal places
    clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)

    # How well can the classifier predict whether a digit is less than 5?
    # This task contributes floating point roundoff errors to the probabilities
    train, test = slice(None, None, 2), slice(1, None, 2)
    probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
    y_score = probas_pred[:, :5].sum(axis=1)  # roundoff errors begin here
    y_true = [yy < 5 for yy in y[test]]

    # Check for repeating values in the thresholds
    # Rounding to 2 decimals collapses near-duplicates; the sizes must agree.
    fpr, tpr, thresholds = roc_curve(y_true, y_score)
    assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
    # roc_curve not applicable for multi-class problems
    y_true, _, probas_pred = make_prediction(binary=False)

    # A multiclass target must be rejected with a ValueError.
    assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
    # roc_curve for confidence scores
    y_true, _, probas_pred = make_prediction(binary=True)

    # Shifting probabilities by -0.5 mimics decision-function scores;
    # roc_curve depends only on the ordering, so the AUC is unchanged.
    fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
    roc_auc = auc(fpr, tpr)
    assert_array_almost_equal(roc_auc, 0.90, decimal=2)
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
    # roc_curve for hard decisions (constant predictions and label outputs).
    y_true, pred, probas_pred = make_prediction(binary=True)

    cases = [
        (np.ones(y_true.shape), 0.50),   # always predict one
        (np.zeros(y_true.shape), 0.50),  # always predict zero
        (pred, 0.78),                    # hard label decisions
    ]
    for scores, expected_auc in cases:
        fpr, tpr, thresholds = roc_curve(y_true, scores)
        roc_auc = auc(fpr, tpr)
        assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
        assert_equal(fpr.shape, tpr.shape)
        assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
    # Degenerate single-class targets: the undefined rate must be NaN and an
    # UndefinedMetricWarning must fire.
    y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
    # assert there are warnings
    w = UndefinedMetricWarning
    fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
    # all true labels, all fpr should be nan
    assert_array_equal(fpr,
                       np.nan * np.ones(len(thresholds)))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)

    # assert there are warnings
    fpr, tpr, thresholds = assert_warns(w, roc_curve,
                                        [1 - x for x in y_true],
                                        y_pred)
    # all negative labels, all tpr should be nan
    assert_array_equal(tpr,
                       np.nan * np.ones(len(thresholds)))
    assert_equal(fpr.shape, tpr.shape)
    assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
    # NOTE(review): throughout this test the first two returns are bound as
    # ``tpr, fpr`` although roc_curve returns (fpr, tpr, thresholds).  The
    # assertions match the actual returned values; only the local names
    # appear swapped -- confirm before reusing them.

    # Binary classification, perfect ranking.
    y_true = [0, 1]
    y_score = [0, 1]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(tpr, [0, 1])
    assert_array_almost_equal(fpr, [1, 1])
    assert_almost_equal(roc_auc, 1.)

    # Perfectly inverted ranking.
    y_true = [0, 1]
    y_score = [1, 0]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(tpr, [0, 1, 1])
    assert_array_almost_equal(fpr, [0, 0, 1])
    assert_almost_equal(roc_auc, 0.)

    # Constant scores give a chance-level AUC.
    y_true = [1, 0]
    y_score = [1, 1]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(tpr, [0, 1])
    assert_array_almost_equal(fpr, [0, 1])
    assert_almost_equal(roc_auc, 0.5)

    y_true = [1, 0]
    y_score = [1, 0]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(tpr, [0, 1])
    assert_array_almost_equal(fpr, [1, 1])
    assert_almost_equal(roc_auc, 1.)

    y_true = [1, 0]
    y_score = [0.5, 0.5]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    roc_auc = roc_auc_score(y_true, y_score)
    assert_array_almost_equal(tpr, [0, 1])
    assert_array_almost_equal(fpr, [0, 1])
    assert_almost_equal(roc_auc, .5)

    # Single-class targets: the score is undefined (ValueError) and the
    # undefined rate comes back as NaN.
    y_true = [0, 0]
    y_score = [0.25, 0.75]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    assert_raises(ValueError, roc_auc_score, y_true, y_score)
    assert_array_almost_equal(tpr, [0., 0.5, 1.])
    assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])

    y_true = [1, 1]
    y_score = [0.25, 0.75]
    tpr, fpr, _ = roc_curve(y_true, y_score)
    assert_raises(ValueError, roc_auc_score, y_true, y_score)
    assert_array_almost_equal(tpr, [np.nan, np.nan])
    assert_array_almost_equal(fpr, [0.5, 1.])

    # Multi-label classification task
    y_true = np.array([[0, 1], [0, 1]])
    y_score = np.array([[0, 1], [0, 1]])
    assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
    assert_raises(ValueError, roc_auc_score, y_true, y_score,
                  average="weighted")
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)

    y_true = np.array([[0, 1], [0, 1]])
    y_score = np.array([[0, 1], [1, 0]])
    assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
    assert_raises(ValueError, roc_auc_score, y_true, y_score,
                  average="weighted")
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)

    y_true = np.array([[1, 0], [0, 1]])
    y_score = np.array([[0, 1], [1, 0]])
    assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)

    y_true = np.array([[1, 0], [0, 1]])
    y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
    assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
    assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
    # Test Area Under Curve (AUC) computation on small hand-checked curves.
    cases = [
        ([0, 1], [0, 1], 0.5),
        ([1, 0], [0, 1], 0.5),
        ([1, 0, 0], [0, 1, 1], 0.5),
        ([0, 1], [1, 1], 1),
        ([0, 0.5, 1], [0, 0.5, 1], 0.5),
    ]
    for x, y, expected in cases:
        assert_array_almost_equal(auc(x, y), expected)
def test_auc_duplicate_values():
    # Test Area Under Curve (AUC) computation with duplicate values

    # auc() was previously sorting the x and y arrays according to the indices
    # from numpy.argsort(x), which was reordering the tied 0's in this example
    # and resulting in an incorrect area computation. This test detects the
    # error.
    x = [-2.0, 0.0, 0.0, 0.0, 1.0]
    y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
    y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
    y3 = [2.0, 1.0, 0.5, 0.0, 1.0]

    # All three orderings of the tied x=0 points must give the same area.
    for y in (y1, y2, y3):
        assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
    # Incompatible shapes
    assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])

    # Too few x values
    assert_raises(ValueError, auc, [0.0], [0.1])

    # x is not in order
    assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
    # Test that roc_auc_score function returns an error when trying
    # to compute AUC for non-binary class values.

    def check_error_messages(rng):
        """Assert roc_auc_score rejects degenerate and multiclass targets.

        Keeps the RNG call order of the original code (rand then randint)
        so the generated fixtures are byte-identical.
        """
        y_pred = rng.rand(10)
        # y_true contains only one class value
        for y_true in (np.zeros(10, dtype="int"),
                       np.ones(10, dtype="int"),
                       -np.ones(10, dtype="int")):
            assert_raise_message(ValueError, "ROC AUC score is not defined",
                                 roc_auc_score, y_true, y_pred)
        # y_true contains three different class values
        y_true = rng.randint(0, 3, size=10)
        assert_raise_message(ValueError, "multiclass format is not supported",
                             roc_auc_score, y_true, y_pred)

    check_error_messages(check_random_state(404))

    # The very same checks must hold with all warnings silenced (previously
    # this was a verbatim copy of the block above).
    clean_warning_registry()
    with warnings.catch_warnings(record=True):
        check_error_messages(check_random_state(404))
def test_precision_recall_curve():
    y_true, _, probas_pred = make_prediction(binary=True)
    _test_precision_recall_curve(y_true, probas_pred)

    # Use {-1, 1} for labels; make sure original labels aren't modified
    y_true[np.where(y_true == 0)] = -1
    y_true_copy = y_true.copy()
    _test_precision_recall_curve(y_true, probas_pred)
    assert_array_equal(y_true_copy, y_true)

    # Hand-checked toy fixture for a 4-sample problem with increasing scores.
    labels = [1, 0, 0, 1]
    predict_probas = [1, 2, 3, 4]
    p, r, t = precision_recall_curve(labels, predict_probas)
    assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
    assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
    assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
    # precision/recall carry one more point than thresholds (the final (1, 0)).
    assert_equal(p.size, r.size)
    assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
    y_true, _, probas_pred = make_prediction(binary=False)
    pos_label = 2

    # Passing pos_label explicitly must match binarizing the target first.
    p, r, thresholds = precision_recall_curve(y_true,
                                              probas_pred[:, pos_label],
                                              pos_label=pos_label)
    p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
                                                 probas_pred[:, pos_label])
    assert_array_almost_equal(p, p2)
    assert_array_almost_equal(r, r2)
    assert_array_almost_equal(thresholds, thresholds2)
    assert_equal(p.size, r.size)
    assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under PR curve
    p, r, thresholds = precision_recall_curve(y_true, probas_pred)
    precision_recall_auc = auc(r, p)
    assert_array_almost_equal(precision_recall_auc, 0.85, 2)
    # auc(recall, precision) must agree with average_precision_score and be
    # close to the reference implementation.
    assert_array_almost_equal(precision_recall_auc,
                              average_precision_score(y_true, probas_pred))
    assert_almost_equal(_average_precision(y_true, probas_pred),
                        precision_recall_auc, 1)
    assert_equal(p.size, r.size)
    assert_equal(p.size, thresholds.size + 1)

    # Smoke test in the case of proba having only one value
    p, r, thresholds = precision_recall_curve(y_true,
                                              np.zeros_like(probas_pred))
    precision_recall_auc = auc(r, p)
    assert_array_almost_equal(precision_recall_auc, 0.75, 3)
    assert_equal(p.size, r.size)
    assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
    # Contains non-binary labels
    assert_raises(ValueError, precision_recall_curve,
                  [0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
    """Exact PR-curve and average-precision values on tiny hand-checked
    binary and multi-label problems, with floating-point errors raised.
    """
    with np.errstate(all="raise"):
        # Binary classification
        # Perfect ranking.
        y_true = [0, 1]
        y_score = [0, 1]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [1, 1])
        assert_array_almost_equal(r, [1, 0])
        assert_almost_equal(auc_prc, 1.)
        # Perfectly wrong ranking.
        y_true = [0, 1]
        y_score = [1, 0]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [0.5, 0., 1.])
        assert_array_almost_equal(r, [1., 0., 0.])
        assert_almost_equal(auc_prc, 0.25)
        # All scores tied.
        y_true = [1, 0]
        y_score = [1, 1]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [0.5, 1])
        assert_array_almost_equal(r, [1., 0])
        assert_almost_equal(auc_prc, .75)
        y_true = [1, 0]
        y_score = [1, 0]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [1, 1])
        assert_array_almost_equal(r, [1, 0])
        assert_almost_equal(auc_prc, 1.)
        y_true = [1, 0]
        y_score = [0.5, 0.5]
        p, r, _ = precision_recall_curve(y_true, y_score)
        auc_prc = average_precision_score(y_true, y_score)
        assert_array_almost_equal(p, [0.5, 1])
        assert_array_almost_equal(r, [1, 0.])
        assert_almost_equal(auc_prc, .75)
        # Degenerate targets (single class) must raise.
        y_true = [0, 0]
        y_score = [0.25, 0.75]
        assert_raises(Exception, precision_recall_curve, y_true, y_score)
        assert_raises(Exception, average_precision_score, y_true, y_score)
        y_true = [1, 1]
        y_score = [0.25, 0.75]
        p, r, _ = precision_recall_curve(y_true, y_score)
        assert_almost_equal(average_precision_score(y_true, y_score), 1.)
        assert_array_almost_equal(p, [1., 1., 1.])
        assert_array_almost_equal(r, [1, 0.5, 0.])
        # Multi-label classification task
        # Macro/weighted averaging is undefined when a column has a single
        # class, so those must raise; samples/micro still work.
        y_true = np.array([[0, 1], [0, 1]])
        y_score = np.array([[0, 1], [0, 1]])
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="macro")
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="weighted")
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="samples"), 1.)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="micro"), 1.)
        y_true = np.array([[0, 1], [0, 1]])
        y_score = np.array([[0, 1], [1, 0]])
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="macro")
        assert_raises(Exception, average_precision_score, y_true, y_score,
                      average="weighted")
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="samples"), 0.625)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="micro"), 0.625)
        # Both columns contain both classes: every average is defined.
        y_true = np.array([[1, 0], [0, 1]])
        y_score = np.array([[0, 1], [1, 0]])
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="macro"), 0.25)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="weighted"), 0.25)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="samples"), 0.25)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="micro"), 0.25)
        y_true = np.array([[1, 0], [0, 1]])
        y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="macro"), 0.75)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="weighted"), 0.75)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="samples"), 0.75)
        assert_almost_equal(average_precision_score(y_true, y_score,
                            average="micro"), 0.75)
def test_score_scale_invariance():
    """Positive scaling or constant shifting of the scores must leave
    ranking-based metrics (ROC AUC, average precision) unchanged."""
    y_true, _, probas_pred = make_prediction(binary=True)
    for metric in (roc_auc_score, average_precision_score):
        baseline = metric(y_true, probas_pred)
        # Monotone transforms preserve the ranking of the scores.
        assert_equal(baseline, metric(y_true, 100 * probas_pred))
        assert_equal(baseline, metric(y_true, probas_pred - 10))
def check_lrap_toy(lrap_score):
    """Exact LRAP values on small hand-checked single-sample problems.

    `lrap_score` is any implementation with the signature
    lrap_score(y_true, y_score) -> float.
    """
    # Check on several small examples that it works
    assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
    assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
    assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
                        (2 / 3 + 1 / 1) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
                        (2 / 3 + 1 / 2) / 2)
    # Decreasing scores.
    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
                        (1 / 2 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
                        (1 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
    assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
    # Non-monotone scores.
    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
                        (1 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
                        (1 / 2 + 2 / 3) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
    assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
    # Tie handling
    assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
    assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
    assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
    assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
    assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
                        (2 / 3 + 1 / 2) / 2)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
                        (2 / 3 + 1 / 2) / 2)
    assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
    assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
    assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
                        3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
    """LRAP is defined as 1 when no label (or every label) is relevant."""
    random_state = check_random_state(0)
    for n_labels in range(2, 5):
        y_score = random_state.uniform(size=(1, n_labels))
        # Also check with fully tied (all-zero) scores.
        y_score_ties = np.zeros_like(y_score)
        # No relevant labels
        y_true = np.zeros((1, n_labels))
        assert_equal(lrap_score(y_true, y_score), 1.)
        assert_equal(lrap_score(y_true, y_score_ties), 1.)
        # Only relevant labels
        y_true = np.ones((1, n_labels))
        assert_equal(lrap_score(y_true, y_score), 1.)
        assert_equal(lrap_score(y_true, y_score_ties), 1.)
    # Degenerate case: only one label
    assert_almost_equal(lrap_score([[1], [0], [1], [0]],
                                   [[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    """LRAP must raise ValueError on malformed or mismatched inputs."""
    # Raise value error if not appropriate format
    assert_raises(ValueError, lrap_score,
                  [0, 1, 0], [0.25, 0.3, 0.2])
    assert_raises(ValueError, lrap_score, [0, 1, 2],
                  [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    assert_raises(ValueError, lrap_score, [(0), (1), (2)],
                  [[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
    assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
    assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
    # NOTE(review): this duplicates the third shape assertion above.
    assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
    """With all scores tied, LRAP equals the fraction of relevant labels."""
    for size in range(2, 10):
        tied_scores = np.ones((1, size))
        # Slide a window of `count` consecutive relevant labels across
        # every possible starting position.
        for count in range(1, size):
            for start in range(size - count):
                truth = np.zeros((1, size))
                truth[0, start:start + count] = 1
                assert_almost_equal(lrap_score(truth, tied_scores),
                                    count / size)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    """Closed-form LRAP checks with strictly decreasing, untied scores."""
    # Check that label ranking average precision works for various
    # Basic check with increasing label space size and decreasing score
    for n_labels in range(2, 10):
        # Scores are n_labels-1, n_labels-2, ..., 0 (strictly decreasing).
        y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
        # First and last
        y_true = np.zeros((1, n_labels))
        y_true[0, 0] = 1
        y_true[0, -1] = 1
        assert_almost_equal(lrap_score(y_true, y_score),
                            (2 / n_labels + 1) / 2)
        # Check for growing number of consecutive relevant label
        for n_relevant in range(1, n_labels):
            # Check for a bunch of position
            for pos in range(n_labels - n_relevant):
                y_true = np.zeros((1, n_labels))
                y_true[0, pos:pos + n_relevant] = 1
                # Closed form: relevant label r sits at rank pos+r+1 and has
                # r+1 relevant labels ranked at or above it.
                assert_almost_equal(lrap_score(y_true, y_score),
                                    sum((r + 1) / ((pos + r + 1) * n_relevant)
                                        for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
    """Simple implementation of label ranking average precision"""
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true)
    y_score = check_array(y_score)
    n_samples, n_labels = y_true.shape
    score = np.empty((n_samples, ))
    for i in range(n_samples):
        # The best rank corresponds to 1. Ranks greater than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
        unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
        n_ranks = unique_rank.size
        rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account:
        # e.g. two labels tied at rank 1 should both count as rank 2.
        corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
        rank = corr_rank[rank]
        relevant = y_true[i].nonzero()[0]
        # By convention, a sample with no relevant labels (or with all
        # labels relevant) scores 1.
        if relevant.size == 0 or relevant.size == n_labels:
            score[i] = 1
            continue
        score[i] = 0.
        for label in relevant:
            # Let's count the number of relevant labels with better rank
            # (smaller rank).
            n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
            # Weight by the rank of the actual label
            score[i] += n_ranked_above / rank[label]
        score[i] /= relevant.size
    return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
                                          n_samples=20, random_state=0):
    """Cross-check the library LRAP against the naive _my_lrap on random
    multi-label data, both with tied (sparse) and untied (uniform) scores.
    """
    _, y_true = make_multilabel_classification(n_features=1,
                                               allow_unlabeled=False,
                                               return_indicator=True,
                                               random_state=random_state,
                                               n_classes=n_classes,
                                               n_samples=n_samples)
    # Score with ties
    y_score = sparse_random_matrix(n_components=y_true.shape[0],
                                   n_features=y_true.shape[1],
                                   random_state=random_state)
    if hasattr(y_score, "toarray"):
        y_score = y_score.toarray()
    score_lrap = label_ranking_average_precision_score(y_true, y_score)
    score_my_lrap = _my_lrap(y_true, y_score)
    assert_almost_equal(score_lrap, score_my_lrap)
    # Uniform score
    random_state = check_random_state(random_state)
    y_score = random_state.uniform(size=(n_samples, n_classes))
    score_lrap = label_ranking_average_precision_score(y_true, y_score)
    score_my_lrap = _my_lrap(y_true, y_score)
    assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
    """Nose test generator: run every LRAP check against both the library
    implementation and the naive reference implementation."""
    for fn in [label_ranking_average_precision_score, _my_lrap]:
        yield check_lrap_toy, fn
        yield check_lrap_without_tie_and_increasing_score, fn
        yield check_lrap_only_ties, fn
        yield check_zero_or_all_relevant_labels, fn
    # Input validation only applies to the public implementation.
    yield check_lrap_error_raised, label_ranking_average_precision_score
    for n_samples, n_classes, random_state in product((1, 2, 8, 20),
                                                      (2, 5, 10),
                                                      range(1)):
        yield (check_alternative_lrap_implementation,
               label_ranking_average_precision_score,
               n_classes, n_samples, random_state)
def test_coverage_error():
    """Exact coverage_error values: how far down the score ranking one must
    go to cover all relevant labels (0 when none are relevant)."""
    # Toy case
    assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
    assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
    assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
    assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
    assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
    assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
    assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
    assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
    assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
    assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
    assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
    assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
    # Decreasing scores.
    assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
    assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
    assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
    assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
    assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
    assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
    assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
    assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
    # Non-monotone scores.
    assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
    assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
    assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
    assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
    assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
    assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
    assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
    assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
                                       [[0.1, 10., -3], [0, 1, 3]]),
                        (1 + 3) / 2.)
    assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
                                       [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
                        (1 + 3 + 3) / 3.)
    assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
                                       [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
                        (1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
    """coverage_error with tied scores: ties are covered pessimistically
    (a relevant label tied with others counts the whole tied group)."""
    assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
    assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
    assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
    assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
    assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
    assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
    assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
    """Exact label_ranking_loss values: fraction of mis-ordered
    (relevant, irrelevant) label pairs, averaged over samples."""
    assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
    assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
    assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
                        0)
    assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
                        0)
    assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
                        2 / 2)
    assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
                        2 / 2)
    # Undefined metrics - the ranking doesn't matter
    assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
    assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
    assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
    assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
    assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
                        0)
    assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
                        0)
    assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
                        0)
    assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
    assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
                                           [[0.1, 10., -3], [0, 1, 3]]),
                        (0 + 2 / 2) / 2.)
    assert_almost_equal(label_ranking_loss(
        [[0, 1, 0], [1, 1, 0], [0, 1, 1]],
        [[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
        (0 + 2 / 2 + 1 / 2) / 3.)
    assert_almost_equal(label_ranking_loss(
        [[0, 1, 0], [1, 1, 0], [0, 1, 1]],
        [[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
        (0 + 2 / 2 + 1 / 2) / 3.)
    # Sparse csr matrices
    assert_almost_equal(label_ranking_loss(
        csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
        [[0.1, 10, -3], [3, 1, 3]]),
        (0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    """label_ranking_loss must reject mismatched y_true/y_score shapes."""
    # Check that y_true.shape != y_score.shape raises the proper exception
    assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
    assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
    assert_raises(ValueError, label_ranking_loss,
                  [[0, 1], [0, 1]], [[0], [1]])
    assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
    assert_raises(ValueError, label_ranking_loss,
                  [[0], [1]], [[0, 1], [0, 1]])
    # NOTE(review): this duplicates the third shape assertion above.
    assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
    """label_ranking_loss with tied scores: a (relevant, irrelevant) pair
    with equal scores counts as mis-ordered."""
    # Tie handling
    assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
                        1 / 2)
    assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
    assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
badpass/ansible-modules-core | source_control/git.py | 47 | 30762 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: git
author:
- "Ansible Core Team"
- "Michael DeHaan"
version_added: "0.0.1"
short_description: Deploy software (or files) from git checkouts
description:
- Manage I(git) checkouts of repositories to deploy files or software.
options:
repo:
required: true
aliases: [ name ]
description:
- git, SSH, or HTTP protocol address of the git repository.
dest:
required: true
description:
- Absolute path of where the repository should be checked out to.
This parameter is required, unless C(clone) is set to C(no)
This change was made in version 1.8.3. Prior to this version,
the C(dest) parameter was always required.
version:
required: false
default: "HEAD"
description:
- What version of the repository to check out. This can be the
full 40-character I(SHA-1) hash, the literal string C(HEAD), a
branch name, or a tag name.
accept_hostkey:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.5"
description:
- if C(yes), adds the hostkey for the repo url if not already
added. If ssh_args contains "-o StrictHostKeyChecking=no",
this parameter is ignored.
ssh_opts:
required: false
default: None
version_added: "1.5"
description:
- Creates a wrapper script and exports the path as GIT_SSH
which git then automatically uses to override ssh arguments.
An example value could be "-o StrictHostKeyChecking=no"
key_file:
required: false
default: None
version_added: "1.5"
description:
- Specify an optional private key file to use for the checkout.
reference:
required: false
default: null
version_added: "1.4"
description:
- Reference repository (see "git clone --reference ...")
remote:
required: false
default: "origin"
description:
- Name of the remote.
refspec:
required: false
default: null
version_added: "1.9"
description:
- Add an additional refspec to be fetched.
If version is set to a I(SHA-1) not reachable from any branch
or tag, this option may be necessary to specify the ref containing
the I(SHA-1).
Uses the same syntax as the 'git fetch' command.
An example value could be "refs/meta/config".
force:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "0.7"
description:
- If C(yes), any modified files in the working
repository will be discarded. Prior to 0.7, this was always
'yes' and could not be disabled. Prior to 1.9, the default was
`yes`
depth:
required: false
default: null
version_added: "1.2"
description:
- Create a shallow clone with a history truncated to the specified
        number of revisions. The minimum possible value is C(1), otherwise
ignored.
clone:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.9"
description:
- If C(no), do not clone the repository if it does not exist locally
update:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.2"
description:
- If C(no), do not retrieve new revisions from the origin repository
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to git executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
bare:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.4"
description:
- if C(yes), repository will be created as a bare repo, otherwise
it will be a standard repo with a workspace.
recursive:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.6"
description:
- if C(no), repository will be cloned without the --recursive
option, skipping sub-modules.
track_submodules:
required: false
default: "no"
choices: ["yes", "no"]
version_added: "1.8"
description:
- if C(yes), submodules will track the latest commit on their
master branch (or other branch specified in .gitmodules). If
C(no), submodules will be kept at the revision specified by the
main project. This is equivalent to specifying the --remote flag
to git submodule update.
verify_commit:
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.0"
description:
- if C(yes), when cloning or checking out a C(version) verify the
signature of a GPG signed commit. This requires C(git) version>=2.1.0
to be installed. The commit MUST be signed and the public key MUST
be trusted in the GPG trustdb.
requirements:
- git (the command line tool)
notes:
- "If the task seems to be hanging, first verify remote host is in C(known_hosts).
SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
'''
EXAMPLES = '''
# Example git checkout from Ansible Playbooks
- git: repo=git://foosball.example.org/path/to/repo.git
dest=/srv/checkout
version=release-0.22
# Example read-write git checkout from github
- git: repo=ssh://git@github.com/mylogin/hello.git dest=/home/mylogin/hello
# Example just ensuring the repo checkout exists
- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout update=no
# Example just get information about the repository whether or not it has
# already been cloned locally.
- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout clone=no update=no
# Example checkout a github repo and use refspec to fetch all pull requests
- git: repo=https://github.com/ansible/ansible-examples.git dest=/src/ansible-examples refspec=+refs/pull/*:refs/heads/*
'''
import re
import tempfile
def get_submodule_update_params(module, git_path, cwd):
    """Discover the options supported by `git submodule update`.

    Runs `git submodule update --help` and scrapes the usage synopsis from
    stderr, e.g.:
        git submodule [--quiet] update [--init] [-N|--no-fetch] ...
    Returns the long option names with the leading `--` stripped.
    """
    rc, stdout, stderr = module.run_command(
        "%s submodule update --help" % (git_path), cwd=cwd)

    # The synopsis line for the update subcommand lives on stderr.
    usage_line = None
    for candidate in stderr.split('\n'):
        if 'git submodule [--quiet] update ' in candidate:
            usage_line = candidate

    supported = []
    if usage_line:
        # Strip the bracket/pipe syntax so the synopsis tokenizes cleanly.
        for old, new in (('[', ''), (']', ''), ('|', ' ')):
            usage_line = usage_line.replace(old, new)
        for token in shlex.split(usage_line):
            if token.startswith('--'):
                supported.append(token.replace('--', ''))
    return supported
def write_ssh_wrapper():
    """Create a temporary shell script suitable for use as GIT_SSH.

    The wrapper invokes ssh with the optional $GIT_KEY identity file and
    $GIT_SSH_OPTS options (see set_git_ssh), letting git's ssh invocations
    be parameterized through the environment.

    Returns the path of the executable wrapper script.
    """
    module_dir = get_module_path()
    try:
        # make sure we have full permission to the module_dir, which
        # may not be the case if we're sudo'ing to a non-root user
        if os.access(module_dir, os.W_OK|os.R_OK|os.X_OK):
            fd, wrapper_path = tempfile.mkstemp(prefix=module_dir + '/')
        else:
            raise OSError
    except (IOError, OSError):
        # Fall back to the default temporary directory.
        fd, wrapper_path = tempfile.mkstemp()
    fh = os.fdopen(fd, 'w+b')
    template = """#!/bin/sh
if [ -z "$GIT_SSH_OPTS" ]; then
BASEOPTS=""
else
BASEOPTS=$GIT_SSH_OPTS
fi
if [ -z "$GIT_KEY" ]; then
ssh $BASEOPTS "$@"
else
ssh -i "$GIT_KEY" $BASEOPTS "$@"
fi
"""
    fh.write(template)
    fh.close()
    # Make the wrapper executable for the owner.
    st = os.stat(wrapper_path)
    os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC)
    return wrapper_path
def set_git_ssh(ssh_wrapper, key_file, ssh_opts):
    """Export the GIT_SSH wrapper (and optional key/opts) via the environment.

    GIT_SSH is always pointed at *ssh_wrapper*; GIT_KEY and GIT_SSH_OPTS
    are cleared of stale values and only set when a value is supplied.
    """
    env = os.environ

    # git picks up the wrapper script through GIT_SSH.
    if env.get("GIT_SSH"):
        del env["GIT_SSH"]
    env["GIT_SSH"] = ssh_wrapper

    # The wrapper script reads GIT_KEY / GIT_SSH_OPTS at run time.
    if env.get("GIT_KEY"):
        del env["GIT_KEY"]
    if key_file:
        env["GIT_KEY"] = key_file

    if env.get("GIT_SSH_OPTS"):
        del env["GIT_SSH_OPTS"]
    if ssh_opts:
        env["GIT_SSH_OPTS"] = ssh_opts
def get_version(module, git_path, dest, ref="HEAD"):
    """Return the SHA-1 that *ref* resolves to in the checkout at *dest*."""
    rc, out, err = module.run_command(
        "%s rev-parse %s" % (git_path, ref), cwd=dest)
    # rev-parse prints the hash followed by a trailing newline.
    return out.rstrip('\n')
def get_submodule_versions(git_path, module, dest, version='HEAD'):
    """Map each submodule path to the SHA-1 it has checked out.

    Runs `git submodule foreach git rev-parse <version>` at *dest* and
    parses the alternating "Entering '<path>'" / "<sha1>" lines it prints.

    Returns a dict mapping submodule path -> 40-character SHA-1; calls
    module.fail_json() on any run or parse failure.
    """
    cmd = [git_path, 'submodule', 'foreach', git_path, 'rev-parse', version]
    (rc, out, err) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg='Unable to determine hashes of submodules')
    submodules = {}
    subm_name = None
    for line in out.splitlines():
        if line.startswith("Entering '"):
            # "Entering '<path>'" -- strip the prefix and the closing quote.
            subm_name = line[10:-1]
        elif len(line.strip()) == 40:
            if subm_name is None:
                # Fix: was a bare fail_json() with no msg, which produced an
                # unhelpful failure report.
                module.fail_json(msg='Submodule hash %s found with no '
                                     'preceding submodule name' % line.strip())
            submodules[subm_name] = line.strip()
            subm_name = None
        else:
            module.fail_json(msg='Unable to parse submodule hash line: %s' % line.strip())
    if subm_name is not None:
        module.fail_json(msg='Unable to find hash for submodule: %s' % subm_name)
    return submodules
def clone(git_path, module, repo, dest, remote, depth, version, bare,
          reference, refspec, verify_commit):
    ''' makes a new git repo if it does not already exist '''
    dest_dirname = os.path.dirname(dest)
    try:
        os.makedirs(dest_dirname)
    except OSError:
        # Fix: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt. Only "directory already exists" (and similar
        # filesystem errors surfaced later by git itself) are expected here.
        pass
    cmd = [ git_path, 'clone' ]
    if bare:
        cmd.append('--bare')
    else:
        # Name the remote at clone time instead of renaming afterwards.
        cmd.extend([ '--origin', remote ])
    # --branch works for both branches and tags; for raw SHA-1s we must
    # clone the default branch and check out the hash afterwards.
    if is_remote_branch(git_path, module, dest, repo, version) \
    or is_remote_tag(git_path, module, dest, repo, version):
        cmd.extend([ '--branch', version ])
    if depth:
        cmd.extend([ '--depth', str(depth) ])
    if reference:
        cmd.extend([ '--reference', str(reference) ])
    cmd.extend([ repo, dest ])
    module.run_command(cmd, check_rc=True, cwd=dest_dirname)
    if bare:
        # Bare clones ignore --origin, so add the remote by hand.
        if remote != 'origin':
            module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)
    if refspec:
        module.run_command([git_path, 'fetch', remote, refspec], check_rc=True, cwd=dest)
    if verify_commit:
        verify_commit_sign(git_path, module, dest, version)
def has_local_mods(module, git_path, dest, bare):
    """Return True if the working tree at *dest* has modified tracked files."""
    if bare:
        # A bare repository has no working tree, so nothing can be modified.
        return False
    rc, stdout, stderr = module.run_command("%s status -s" % (git_path,),
                                            cwd=dest)
    # Untracked files ("?? path") do not count as local modifications.
    changed = [entry for entry in stdout.splitlines()
               if not re.search('^\\?\\?.*$', entry)]
    return len(changed) > 0
def reset(git_path, module, dest):
    """Hard-reset the checkout at *dest* to HEAD.

    Discards any changes to tracked files in the working tree and returns
    the (rc, stdout, stderr) tuple from run_command.
    """
    return module.run_command("%s reset --hard HEAD" % (git_path,),
                              check_rc=True, cwd=dest)
def get_remote_head(git_path, module, dest, version, remote, bare):
    """Resolve *version* to the SHA-1 it points at on *remote*.

    Works both before a clone exists (when *remote* is the repo URL) and on
    an existing checkout. If *version* is neither HEAD, a remote branch nor
    a remote tag, it is assumed to be a SHA-1 and returned unchanged, since
    ls-remote cannot query a specific hash.
    """
    cloning = False
    cwd = None
    tag = False
    # When the "remote" equals the configured repo URL we are resolving
    # before any clone exists, so there is no working directory to use.
    if remote == module.params['repo']:
        cloning = True
    else:
        cwd = dest
    if version == 'HEAD':
        if cloning:
            # cloning the repo, just get the remote's HEAD version
            cmd = '%s ls-remote %s -h HEAD' % (git_path, remote)
        else:
            head_branch = get_head_branch(git_path, module, dest, remote, bare)
            cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, head_branch)
    elif is_remote_branch(git_path, module, dest, remote, version):
        cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
    elif is_remote_tag(git_path, module, dest, remote, version):
        tag = True
        # Trailing * also matches the "<tag>^{}" dereferenced-tag entry.
        cmd = '%s ls-remote %s -t refs/tags/%s*' % (git_path, remote, version)
    else:
        # appears to be a sha1. return as-is since it appears
        # cannot check for a specific sha1 on remote
        return version
    (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd)
    if len(out) < 1:
        module.fail_json(msg="Could not determine remote revision for %s" % version)
    if tag:
        # Find the dereferenced tag if this is an annotated tag.
        # NOTE(review): `tag` is reused here as the loop variable, shadowing
        # the boolean flag above.
        for tag in out.split('\n'):
            if tag.endswith(version + '^{}'):
                out = tag
                break
            elif tag.endswith(version):
                out = tag
    # ls-remote output is "<sha1>\t<ref>"; the first field is the hash.
    rev = out.split()[0]
    return rev
def is_remote_tag(git_path, module, dest, remote, version):
    """Return True when *version* exists as a tag on *remote*."""
    (rc, out, err) = module.run_command(
        '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version),
        check_rc=True, cwd=dest)
    # ls-remote prints matching ref lines; empty output means no such tag.
    return version in out
def get_branches(git_path, module, dest):
    """List local and remote-tracking branch names at *dest*.

    Mirrors `git branch -a` output line-by-line: the current branch keeps
    its "* " prefix, and a trailing empty string is included when the
    output ends with a newline (callers rely on plain membership tests).
    """
    (rc, out, err) = module.run_command('%s branch -a' % (git_path,), cwd=dest)
    if rc != 0:
        module.fail_json(msg="Could not determine branch data - received %s" % out)
    return [line.strip() for line in out.split('\n')]
def get_tags(git_path, module, dest):
    """List tag names at *dest*.

    Mirrors `git tag` output line-by-line; a trailing empty string is
    included when the output ends with a newline.
    """
    (rc, out, err) = module.run_command('%s tag' % (git_path,), cwd=dest)
    if rc != 0:
        module.fail_json(msg="Could not determine tag data - received %s" % out)
    return [line.strip() for line in out.split('\n')]
def is_remote_branch(git_path, module, dest, remote, version):
    """Return True when *version* exists as a branch head on *remote*."""
    (rc, out, err) = module.run_command(
        '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version),
        check_rc=True, cwd=dest)
    # Empty ls-remote output means the branch does not exist on the remote.
    return version in out
def is_local_branch(git_path, module, dest, branch):
    """Return True if *branch* exists locally, checked out or not."""
    branches = get_branches(git_path, module, dest)
    # `git branch -a` marks the currently checked-out branch with "* ".
    return branch in branches or '* %s' % branch in branches
def is_not_a_branch(git_path, module, dest):
    """Return True when HEAD is detached (git lists "* (no branch ...)")."""
    return any(entry.startswith('* ') and 'no branch' in entry
               for entry in get_branches(git_path, module, dest))
def get_head_branch(git_path, module, dest, remote, bare=False):
    '''
    Determine what branch HEAD is associated with.  This is partly
    taken from lib/ansible/utils/__init__.py.  It finds the correct
    path to .git/HEAD and reads from that file the branch that HEAD is
    associated with.  In the case of a detached HEAD, this will look
    up the branch in .git/refs/remotes/<remote>/HEAD.

    Returns the branch name, or '' when the .git file of a submodule
    cannot be parsed.
    '''
    if bare:
        repo_path = dest
    else:
        repo_path = os.path.join(dest, '.git')
    # Check if the .git is a file. If it is a file, it means that we are in
    # a submodule structure: the file points at the real git dir via 'gitdir'.
    if os.path.isfile(repo_path):
        try:
            # Use a context manager so the .git file handle is not leaked.
            with open(repo_path) as git_file:
                gitdir = yaml.safe_load(git_file).get('gitdir')
            # There is a possibility the .git file has an absolute path.
            if os.path.isabs(gitdir):
                repo_path = gitdir
            else:
                repo_path = os.path.join(repo_path.split('.git')[0], gitdir)
        except (IOError, AttributeError):
            return ''
    # Read .git/HEAD for the name of the branch.
    # If we're in a detached HEAD state, look up the branch associated with
    # the remote HEAD in .git/refs/remotes/<remote>/HEAD instead.
    head_path = os.path.join(repo_path, "HEAD")
    if is_not_a_branch(git_path, module, dest):
        head_path = os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD')
    # Context manager guarantees the handle is closed even if readline fails
    # (the original leaked it on exception).
    with open(head_path) as head_file:
        branch = head_file.readline().split('/')[-1].rstrip("\n")
    return branch
def set_remote_url(git_path, module, repo, dest, remote):
    ''' updates repo from remote sources '''
    # There is only one operation here; run it directly instead of looping
    # over a single-element command list.
    label = "set a new url %s for %s" % (repo, remote)
    command = [git_path, 'remote', 'set-url', remote, repo]
    (rc, out, err) = module.run_command(command, cwd=dest)
    if rc != 0:
        module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))
def fetch(git_path, module, repo, dest, version, remote, bare, refspec):
    '''
    updates repo from remote sources

    Runs ``git fetch`` against *remote* (after refreshing its URL): once in
    bare mode with explicit head/tag refspecs, twice in non-bare mode (the
    default fetch plus a tag/extra-refspec fetch).  Fails the module when
    any fetch command exits non-zero.
    '''
    # Make sure the remote URL is current before fetching from it.
    set_remote_url(git_path, module, repo, dest, remote)
    commands = []
    fetch_str = 'download remote objects and refs'
    if bare:
        refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
        if refspec:
            refspecs.append(refspec)
        commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs))
    else:
        # unlike in bare mode, there's no way to combine the
        # additional refspec with the default git fetch behavior,
        # so use two commands
        commands.append((fetch_str, [git_path, 'fetch', remote]))
        refspecs = ['+refs/tags/*:refs/tags/*']
        if refspec:
            refspecs.append(refspec)
        commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs))
    # Execute every queued fetch; abort the module on the first failure.
    for (label,command) in commands:
        (rc,out,err) = module.run_command(command, cwd=dest)
        if rc != 0:
            module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))
def submodules_fetch(git_path, module, remote, track_submodules, dest):
    """
    Fetch submodule changes and report whether anything needs updating.

    Returns True when a new submodule appeared in .gitmodules or an
    existing submodule's upstream moved; False otherwise.
    """
    changed = False

    gitmodules_path = os.path.join(dest, '.gitmodules')
    if not os.path.exists(gitmodules_path):
        # no submodules
        return changed

    # Scan .gitmodules for brand-new submodules and register host keys for
    # every submodule URL.  The context manager closes the file handle,
    # which the original version leaked.
    with open(gitmodules_path, 'r') as gitmodules_file:
        for line in gitmodules_file:
            # Check for new submodules
            if not changed and line.strip().startswith('path'):
                path = line.split('=', 1)[1].strip()
                # Check that dest/path/.git exists
                if not os.path.exists(os.path.join(dest, path, '.git')):
                    changed = True
            # add the submodule repo's hostkey
            if line.strip().startswith('url'):
                repo = line.split('=', 1)[1].strip()
                if module.params['ssh_opts'] is not None:
                    if "-o StrictHostKeyChecking=no" not in module.params['ssh_opts']:
                        add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
                else:
                    add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])

    # Check for updates to existing modules
    if not changed:
        # Fetch updates
        begin = get_submodule_versions(git_path, module, dest)
        cmd = [git_path, 'submodule', 'foreach', git_path, 'fetch']
        (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
        if rc != 0:
            module.fail_json(msg="Failed to fetch submodules: %s" % (out + err))
        if track_submodules:
            # Compare against submodule HEAD
            ### FIXME: determine this from .gitmodules
            version = 'master'
            after = get_submodule_versions(git_path, module, dest, '%s/%s'
                                           % (remote, version))
            if begin != after:
                changed = True
        else:
            # Compare against the superproject's expectation
            cmd = [git_path, 'submodule', 'status']
            (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
            if rc != 0:
                module.fail_json(msg='Failed to retrieve submodule status: %s' % (out + err))
            # A status line not starting with a space marks a submodule that
            # deviates from the commit recorded in the superproject.
            for line in out.splitlines():
                if line[0] != ' ':
                    changed = True
                    break
    return changed
def submodule_update(git_path, module, dest, track_submodules):
    ''' init and update any submodules '''
    # get the valid submodule params
    params = get_submodule_update_params(module, git_path, dest)

    # skip submodule commands if .gitmodules is not present
    if not os.path.exists(os.path.join(dest, '.gitmodules')):
        return (0, '', '')

    sync_cmd = [git_path, 'submodule', 'sync']
    (rc, out, err) = module.run_command(sync_cmd, check_rc=True, cwd=dest)

    # Build the update command, adding --remote only when supported and
    # submodule tracking was requested.
    update_cmd = [git_path, 'submodule', 'update', '--init', '--recursive']
    if 'remote' in params and track_submodules:
        update_cmd.append('--remote')
    (rc, out, err) = module.run_command(update_cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg="Failed to init/update submodules: %s" % out + err)
    return (rc, out, err)
def switch_version(git_path, module, dest, remote, version, verify_commit):
    """
    Check out *version* (branch, tag or sha), or the remote HEAD branch when
    version == 'HEAD', hard-resetting tracked branches to the remote state.

    Returns the (rc, stdout, stderr) of the final git command.
    """
    cmd = ''
    if version != 'HEAD':
        if is_remote_branch(git_path, module, dest, remote, version):
            if not is_local_branch(git_path, module, dest, version):
                # First checkout of a remote branch: create a tracking branch.
                cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version)
            else:
                # Branch already exists locally: check it out, then reset it
                # hard to the remote state.
                (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest)
                if rc != 0:
                    module.fail_json(msg="Failed to checkout branch %s" % version)
                cmd = "%s reset --hard %s/%s" % (git_path, remote, version)
        else:
            # Not a remote branch: assume tag or sha and check it out directly.
            cmd = "%s checkout --force %s" % (git_path, version)
    else:
        # version == 'HEAD': follow whatever branch the remote HEAD points at.
        branch = get_head_branch(git_path, module, dest, remote)
        (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest)
        if rc != 0:
            module.fail_json(msg="Failed to checkout branch %s" % branch)
        cmd = "%s reset --hard %s" % (git_path, remote)
    # Run whichever final checkout/reset command was selected above.
    (rc, out1, err1) = module.run_command(cmd, cwd=dest)
    if rc != 0:
        if version != 'HEAD':
            module.fail_json(msg="Failed to checkout %s" % (version))
        else:
            # `branch` is always bound here: it was set in the HEAD path above.
            module.fail_json(msg="Failed to checkout branch %s" % (branch))
    if verify_commit:
        verify_commit_sign(git_path, module, dest, version)
    return (rc, out1, err1)
def verify_commit_sign(git_path, module, dest, version):
    """Fail the module unless *version* carries a valid GPG signature."""
    verify_cmd = "%s verify-commit %s" % (git_path, version)
    (rc, out, err) = module.run_command(verify_cmd, cwd=dest)
    if rc != 0:
        module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version)
    return (rc, out, err)
# ===========================================
def main():
    """
    Module entry point: clone or update a git repository according to the
    module parameters, handle SSH wrappers/host keys and submodules, and
    exit via module.exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec = dict(
            dest=dict(),
            repo=dict(required=True, aliases=['name']),
            version=dict(default='HEAD'),
            remote=dict(default='origin'),
            refspec=dict(default=None),
            reference=dict(default=None),
            force=dict(default='no', type='bool'),
            depth=dict(default=None, type='int'),
            clone=dict(default='yes', type='bool'),
            update=dict(default='yes', type='bool'),
            verify_commit=dict(default='no', type='bool'),
            accept_hostkey=dict(default='no', type='bool'),
            key_file=dict(default=None, required=False),
            ssh_opts=dict(default=None, required=False),
            executable=dict(default=None),
            bare=dict(default='no', type='bool'),
            recursive=dict(default='yes', type='bool'),
            track_submodules=dict(default='no', type='bool'),
        ),
        supports_check_mode=True
    )

    dest = module.params['dest']
    repo = module.params['repo']
    version = module.params['version']
    remote = module.params['remote']
    refspec = module.params['refspec']
    force = module.params['force']
    depth = module.params['depth']
    update = module.params['update']
    allow_clone = module.params['clone']
    bare = module.params['bare']
    verify_commit = module.params['verify_commit']
    reference = module.params['reference']
    git_path = module.params['executable'] or module.get_bin_path('git', True)
    key_file = module.params['key_file']
    ssh_opts = module.params['ssh_opts']

    # Path of the git config file; its absence is what triggers a clone below.
    gitconfig = None
    if not dest and allow_clone:
        module.fail_json(msg="the destination directory must be specified unless clone=no")
    elif dest:
        dest = os.path.abspath(os.path.expanduser(dest))
        if bare:
            gitconfig = os.path.join(dest, 'config')
        else:
            gitconfig = os.path.join(dest, '.git', 'config')

    # make sure the key_file path is expanded for ~ and $HOME
    if key_file is not None:
        key_file = os.path.abspath(os.path.expanduser(key_file))

    # create a wrapper script and export
    # GIT_SSH=<path> as an environment variable
    # for git to use the wrapper script
    ssh_wrapper = None
    if key_file or ssh_opts:
        ssh_wrapper = write_ssh_wrapper()
        set_git_ssh(ssh_wrapper, key_file, ssh_opts)
        module.add_cleanup_file(path=ssh_wrapper)

    # add the git repo's hostkey (skipped when the user disabled strict
    # host key checking via ssh_opts)
    if module.params['ssh_opts'] is not None:
        if not "-o StrictHostKeyChecking=no" in module.params['ssh_opts']:
            add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
    else:
        add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])

    recursive = module.params['recursive']
    track_submodules = module.params['track_submodules']

    rc, out, err, status = (0, None, None, None)

    before = None
    local_mods = False
    repo_updated = None
    if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone):
        # if there is no git configuration, do a clone operation unless:
        # * the user requested no clone (they just want info)
        # * we're doing a check mode test
        # In those cases we do an ls-remote
        if module.check_mode or not allow_clone:
            remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
            module.exit_json(changed=True, before=before, after=remote_head)
        # there's no git config, so clone
        clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, verify_commit)
        repo_updated = True
    elif not update:
        # Just return having found a repo already in the dest path
        # this does no checking that the repo is the actual repo
        # requested.
        before = get_version(module, git_path, dest)
        module.exit_json(changed=False, before=before, after=before)
    else:
        # else do a pull
        local_mods = has_local_mods(module, git_path, dest, bare)
        before = get_version(module, git_path, dest)
        if local_mods:
            # failure should happen regardless of check mode
            if not force:
                module.fail_json(msg="Local modifications exist in repository (force=no).")
            # if force and in non-check mode, do a reset
            if not module.check_mode:
                reset(git_path, module, dest)

        # exit if already at desired sha version
        set_remote_url(git_path, module, repo, dest, remote)
        remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
        if before == remote_head:
            if local_mods:
                module.exit_json(changed=True, before=before, after=remote_head,
                    msg="Local modifications exist")
            elif is_remote_tag(git_path, module, dest, repo, version):
                # if the remote is a tag and we have the tag locally, exit early
                if version in get_tags(git_path, module, dest):
                    repo_updated = False
            else:
                repo_updated = False

        # repo_updated is still None when we could not prove we are current:
        # fetch (or report a pending change in check mode).
        if repo_updated is None:
            if module.check_mode:
                module.exit_json(changed=True, before=before, after=remote_head)
            fetch(git_path, module, repo, dest, version, remote, bare, refspec)
            repo_updated = True

    # switch to version specified regardless of whether
    # we got new revisions from the repository
    if not bare:
        switch_version(git_path, module, dest, remote, version, verify_commit)

    # Deal with submodules
    submodules_updated = False
    if recursive and not bare:
        submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest)

        if module.check_mode:
            if submodules_updated:
                module.exit_json(changed=True, before=before, after=remote_head, submodules_changed=True)
            else:
                module.exit_json(changed=False, before=before, after=remote_head)

        if submodules_updated:
            # Switch to version specified
            submodule_update(git_path, module, dest, track_submodules)

    # determine if we changed anything
    after = get_version(module, git_path, dest)
    changed = False
    if before != after or local_mods or submodules_updated:
        changed = True

    # cleanup the wrapper script
    if ssh_wrapper:
        try:
            os.remove(ssh_wrapper)
        except OSError:
            # No need to fail if the file already doesn't exist
            pass

    module.exit_json(changed=changed, before=before, after=after)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.known_hosts import *
main()
| gpl-3.0 |
poiati/django | django/core/serializers/xml_serializer.py | 184 | 15662 | """
XML serializer.
"""
from __future__ import unicode_literals
from collections import OrderedDict
from xml.dom import pulldom
from xml.sax import handler
from xml.sax.expatreader import ExpatParser as _ExpatParser
from django.apps import apps
from django.conf import settings
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, models
from django.utils.encoding import smart_text
from django.utils.xmlutils import (
SimplerXMLGenerator, UnserializableContentError,
)
class Serializer(base.Serializer):
    """
    Serializes a QuerySet to XML.
    """

    def indent(self, level):
        # Emit a newline plus `indent * level` spaces of ignorable whitespace,
        # but only when the caller asked for indented output.
        if self.options.get('indent') is not None:
            self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent') * level)

    def start_serialization(self):
        """
        Start serialization -- open the XML document and the root element.
        """
        self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
        self.xml.startDocument()
        self.xml.startElement("django-objects", {"version": "1.0"})

    def end_serialization(self):
        """
        End serialization -- end the document.
        """
        self.indent(0)
        self.xml.endElement("django-objects")
        self.xml.endDocument()

    def start_object(self, obj):
        """
        Called as each object is handled.
        """
        if not hasattr(obj, "_meta"):
            raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))

        self.indent(1)
        # Deferred instances are dynamically generated subclasses; record the
        # model they proxy for, not the generated class.
        model = obj._meta.proxy_for_model if obj._deferred else obj.__class__
        attrs = OrderedDict([("model", smart_text(model._meta))])
        if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
            obj_pk = obj._get_pk_val()
            if obj_pk is not None:
                attrs['pk'] = smart_text(obj_pk)

        self.xml.startElement("object", attrs)

    def end_object(self, obj):
        """
        Called after handling all fields for an object.
        """
        self.indent(1)
        self.xml.endElement("object")

    def handle_field(self, obj, field):
        """
        Called to handle each field on an object (except for ForeignKeys and
        ManyToManyFields)
        """
        self.indent(2)
        self.xml.startElement("field", OrderedDict([
            ("name", field.name),
            ("type", field.get_internal_type()),
        ]))

        # Get a "string version" of the object's data.
        if getattr(obj, field.name) is not None:
            try:
                self.xml.characters(field.value_to_string(obj))
            except UnserializableContentError:
                raise ValueError("%s.%s (pk:%s) contains unserializable characters" % (
                    obj.__class__.__name__, field.name, obj._get_pk_val()))
        else:
            # NULL values become an explicit <None/> marker element.
            self.xml.addQuickElement("None")

        self.xml.endElement("field")

    def handle_fk_field(self, obj, field):
        """
        Called to handle a ForeignKey (we need to treat them slightly
        differently from regular fields).
        """
        self._start_relational_field(field)
        related_att = getattr(obj, field.get_attname())
        if related_att is not None:
            if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
                related = getattr(obj, field.name)
                # If related object has a natural key, use it
                related = related.natural_key()
                # Iterable natural keys are rolled out as subelements
                for key_value in related:
                    self.xml.startElement("natural", {})
                    self.xml.characters(smart_text(key_value))
                    self.xml.endElement("natural")
            else:
                self.xml.characters(smart_text(related_att))
        else:
            self.xml.addQuickElement("None")
        self.xml.endElement("field")

    def handle_m2m_field(self, obj, field):
        """
        Called to handle a ManyToManyField. Related objects are only
        serialized as references to the object's PK (i.e. the related *data*
        is not dumped, just the relation).
        """
        # Explicit through models are serialized as their own objects, so
        # only auto-created (plain) M2M relations are emitted here.
        if field.remote_field.through._meta.auto_created:
            self._start_relational_field(field)
            if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
                # If the objects in the m2m have a natural key, use it
                def handle_m2m(value):
                    natural = value.natural_key()
                    # Iterable natural keys are rolled out as subelements
                    self.xml.startElement("object", {})
                    for key_value in natural:
                        self.xml.startElement("natural", {})
                        self.xml.characters(smart_text(key_value))
                        self.xml.endElement("natural")
                    self.xml.endElement("object")
            else:
                def handle_m2m(value):
                    self.xml.addQuickElement("object", attrs={
                        'pk': smart_text(value._get_pk_val())
                    })
            for relobj in getattr(obj, field.name).iterator():
                handle_m2m(relobj)

            self.xml.endElement("field")

    def _start_relational_field(self, field):
        """
        Helper to output the <field> element for relational fields
        """
        self.indent(2)
        self.xml.startElement("field", OrderedDict([
            ("name", field.name),
            ("rel", field.remote_field.__class__.__name__),
            ("to", smart_text(field.remote_field.model._meta)),
        ]))
class Deserializer(base.Deserializer):
    """
    Deserialize XML.
    """

    def __init__(self, stream_or_string, **options):
        super(Deserializer, self).__init__(stream_or_string, **options)
        # Stream XML events with pulldom so large documents need not be
        # fully loaded into memory; the parser is hardened (see below).
        self.event_stream = pulldom.parse(self.stream, self._make_parser())
        self.db = options.pop('using', DEFAULT_DB_ALIAS)
        self.ignore = options.pop('ignorenonexistent', False)

    def _make_parser(self):
        """Create a hardened XML parser (no custom/external entities)."""
        return DefusedExpatParser()

    def __next__(self):
        # Yield one DeserializedObject per <object> element in the stream.
        for event, node in self.event_stream:
            if event == "START_ELEMENT" and node.nodeName == "object":
                self.event_stream.expandNode(node)
                return self._handle_object(node)
        raise StopIteration

    def _handle_object(self, node):
        """
        Convert an <object> node to a DeserializedObject.
        """
        # Look up the model using the model loading mechanism. If this fails,
        # bail.
        Model = self._get_model_from_node(node, "model")

        # Start building a data dictionary from the object.
        data = {}
        if node.hasAttribute('pk'):
            data[Model._meta.pk.attname] = Model._meta.pk.to_python(
                node.getAttribute('pk'))

        # Also start building a dict of m2m data (this is saved as
        # {m2m_accessor_attribute : [list_of_related_objects]})
        m2m_data = {}

        field_names = {f.name for f in Model._meta.get_fields()}
        # Deserialize each field.
        for field_node in node.getElementsByTagName("field"):
            # If the field is missing the name attribute, bail (are you
            # sensing a pattern here?)
            field_name = field_node.getAttribute("name")
            if not field_name:
                raise base.DeserializationError("<field> node is missing the 'name' attribute")

            # Get the field from the Model. This will raise a
            # FieldDoesNotExist if, well, the field doesn't exist, which will
            # be propagated correctly unless ignorenonexistent=True is used.
            if self.ignore and field_name not in field_names:
                continue
            field = Model._meta.get_field(field_name)

            # As is usually the case, relation fields get the special treatment.
            if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel):
                m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
            elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel):
                data[field.attname] = self._handle_fk_field_node(field_node, field)
            else:
                # A <None/> child marks a NULL value; otherwise convert the
                # element's inner text through the field's to_python().
                if field_node.getElementsByTagName('None'):
                    value = None
                else:
                    value = field.to_python(getInnerText(field_node).strip())
                data[field.name] = value

        obj = base.build_instance(Model, data, self.db)

        # Return a DeserializedObject so that the m2m data has a place to live.
        return base.DeserializedObject(obj, m2m_data)

    def _handle_fk_field_node(self, node, field):
        """
        Handle a <field> node for a ForeignKey
        """
        # Check if there is a child node named 'None', returning None if so.
        if node.getElementsByTagName('None'):
            return None
        else:
            model = field.remote_field.model
            if hasattr(model._default_manager, 'get_by_natural_key'):
                keys = node.getElementsByTagName('natural')
                if keys:
                    # If there are 'natural' subelements, it must be a natural key
                    field_value = [getInnerText(k).strip() for k in keys]
                    obj = model._default_manager.db_manager(self.db).get_by_natural_key(*field_value)
                    obj_pk = getattr(obj, field.remote_field.field_name)
                    # If this is a natural foreign key to an object that
                    # has a FK/O2O as the foreign key, use the FK value
                    if field.remote_field.model._meta.pk.remote_field:
                        obj_pk = obj_pk.pk
                else:
                    # Otherwise, treat like a normal PK
                    field_value = getInnerText(node).strip()
                    obj_pk = model._meta.get_field(field.remote_field.field_name).to_python(field_value)
                return obj_pk
            else:
                field_value = getInnerText(node).strip()
                return model._meta.get_field(field.remote_field.field_name).to_python(field_value)

    def _handle_m2m_field_node(self, node, field):
        """
        Handle a <field> node for a ManyToManyField.
        """
        model = field.remote_field.model
        default_manager = model._default_manager
        if hasattr(default_manager, 'get_by_natural_key'):
            def m2m_convert(n):
                keys = n.getElementsByTagName('natural')
                if keys:
                    # If there are 'natural' subelements, it must be a natural key
                    field_value = [getInnerText(k).strip() for k in keys]
                    obj_pk = default_manager.db_manager(self.db).get_by_natural_key(*field_value).pk
                else:
                    # Otherwise, treat like a normal PK value.
                    obj_pk = model._meta.pk.to_python(n.getAttribute('pk'))
                return obj_pk
        else:
            m2m_convert = lambda n: model._meta.pk.to_python(n.getAttribute('pk'))
        # One PK (or natural-key-resolved PK) per <object> child element.
        return [m2m_convert(c) for c in node.getElementsByTagName("object")]

    def _get_model_from_node(self, node, attr):
        """
        Helper to look up a model from a <object model=...> or a <field
        rel=... to=...> node.
        """
        model_identifier = node.getAttribute(attr)
        if not model_identifier:
            raise base.DeserializationError(
                "<%s> node is missing the required '%s' attribute"
                % (node.nodeName, attr))
        try:
            return apps.get_model(model_identifier)
        except (LookupError, TypeError):
            raise base.DeserializationError(
                "<%s> node has invalid model identifier: '%s'"
                % (node.nodeName, model_identifier))
def getInnerText(node):
    """
    Get all the inner text of a DOM node (recursively).
    """
    # Text and CDATA children contribute their character data directly;
    # element children contribute their own recursively collected inner
    # text.  Other node types (comments, processing instructions) are
    # skipped entirely.
    pieces = []
    for child in node.childNodes:
        if child.nodeType in (child.TEXT_NODE, child.CDATA_SECTION_NODE):
            pieces.append(child.data)
        elif child.nodeType == child.ELEMENT_NODE:
            pieces.append(getInnerText(child))
    return "".join(pieces)
# Below code based on Christian Heimes' defusedxml
class DefusedExpatParser(_ExpatParser):
    """
    An expat parser hardened against XML bomb attacks.

    Forbids DTDs, external entity references
    """

    def __init__(self, *args, **kwargs):
        _ExpatParser.__init__(self, *args, **kwargs)
        # Disable resolution of external general and parameter entities.
        self.setFeature(handler.feature_external_ges, False)
        self.setFeature(handler.feature_external_pes, False)

    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
        # Any DOCTYPE at all is rejected outright.
        raise DTDForbidden(name, sysid, pubid)

    def entity_decl(self, name, is_parameter_entity, value, base,
                    sysid, pubid, notation_name):
        raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)

    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
        # expat 1.2
        raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name)

    def external_entity_ref_handler(self, context, base, sysid, pubid):
        raise ExternalReferenceForbidden(context, base, sysid, pubid)

    def reset(self):
        _ExpatParser.reset(self)
        # Re-install the forbidding handlers on the fresh underlying expat
        # parser created by the base class reset.
        parser = self._parser
        parser.StartDoctypeDeclHandler = self.start_doctype_decl
        parser.EntityDeclHandler = self.entity_decl
        parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
        parser.ExternalEntityRefHandler = self.external_entity_ref_handler
class DefusedXmlException(ValueError):
    """Base class for all errors raised by the hardened XML parser."""

    def __repr__(self):
        # Delegate to __str__ so subclasses only need to customise one method.
        return str(self)
class DTDForbidden(DefusedXmlException):
    """Document type definition is forbidden."""

    def __init__(self, name, sysid, pubid):
        super(DTDForbidden, self).__init__()
        # Identifiers of the rejected DTD, kept for error reporting.
        self.name = name
        self.sysid = sysid
        self.pubid = pubid

    def __str__(self):
        return "DTDForbidden(name='{}', system_id={!r}, public_id={!r})".format(
            self.name, self.sysid, self.pubid)
class EntitiesForbidden(DefusedXmlException):
    """Entity definition is forbidden."""

    def __init__(self, name, value, base, sysid, pubid, notation_name):
        super(EntitiesForbidden, self).__init__()
        # Full details of the rejected entity declaration.
        self.name = name
        self.value = value
        self.base = base
        self.sysid = sysid
        self.pubid = pubid
        self.notation_name = notation_name

    def __str__(self):
        # value/base/notation_name are stored on the instance but are not
        # included in the message text.
        tpl = "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})"
        return tpl.format(self.name, self.sysid, self.pubid)
class ExternalReferenceForbidden(DefusedXmlException):
    """Resolving an external reference is forbidden."""

    def __init__(self, context, base, sysid, pubid):
        super(ExternalReferenceForbidden, self).__init__()
        # Details of the rejected external reference, kept for reporting.
        self.context = context
        self.base = base
        self.sysid = sysid
        self.pubid = pubid

    def __str__(self):
        return "ExternalReferenceForbidden(system_id='{}', public_id={})".format(
            self.sysid, self.pubid)
| bsd-3-clause |
mrquim/mrquimrepo | repo/script.module.youtube.dl/lib/youtube_dl/extractor/audimedia.py | 67 | 3924 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
sanitized_Request,
)
class AudiMediaIE(InfoExtractor):
    """Extractor for videos hosted on the Audi MediaTV center (en/de)."""

    # <id> captures the URL slug used as the display id.
    _VALID_URL = r'https?://(?:www\.)?audi-mediacenter\.com/(?:en|de)/audimediatv/(?P<id>[^/?#]+)'
    _TEST = {
        'url': 'https://www.audi-mediacenter.com/en/audimediatv/60-seconds-of-audi-sport-104-2015-wec-bahrain-rookie-test-1467',
        'md5': '79a8b71c46d49042609795ab59779b66',
        'info_dict': {
            'id': '1565',
            'ext': 'mp4',
            'title': '60 Seconds of Audi Sport 104/2015 - WEC Bahrain, Rookie Test',
            'description': 'md5:60e5d30a78ced725f7b8d34370762941',
            'upload_date': '20151124',
            'timestamp': 1448354940,
            'duration': 74022,
            'view_count': int,
        }
    }
    # extracted from https://audimedia.tv/assets/embed/embedded-player.js (dataSourceAuthToken)
    _AUTH_TOKEN = 'e25b42847dba18c6c8816d5d8ce94c326e06823ebf0859ed164b3ba169be97f2'

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # The embedded-player element id encodes the real payload; both the
        # plain and backslash-escaped attribute forms occur in the markup.
        raw_payload = self._search_regex([
            r'class="amtv-embed"[^>]+id="([^"]+)"',
            r'class=\\"amtv-embed\\"[^>]+id=\\"([^"]+)\\"',
        ], webpage, 'raw payload')
        # Payload layout: <prefix>-<stage_mode>-<video_id>-<lang>.
        _, stage_mode, video_id, lang = raw_payload.split('-')

        # TODO: handle s and e stage_mode (live streams and ended live streams)
        if stage_mode not in ('s', 'e'):
            request = sanitized_Request(
                'https://audimedia.tv/api/video/v1/videos/%s?embed[]=video_versions&embed[]=thumbnail_image&where[content_language_iso]=%s' % (video_id, lang),
                headers={'X-Auth-Token': self._AUTH_TOKEN})
            json_data = self._download_json(request, video_id)['results']
            formats = []

            # HLS and HDS manifests, when present, expand to multiple formats.
            stream_url_hls = json_data.get('stream_url_hls')
            if stream_url_hls:
                formats.extend(self._extract_m3u8_formats(
                    stream_url_hls, video_id, 'mp4',
                    entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))

            stream_url_hds = json_data.get('stream_url_hds')
            if stream_url_hds:
                formats.extend(self._extract_f4m_formats(
                    stream_url_hds + '?hdcore=3.4.0',
                    video_id, f4m_id='hds', fatal=False))

            # Direct HTTP downloads/streams listed by the API.
            for video_version in json_data.get('video_versions'):
                video_version_url = video_version.get('download_url') or video_version.get('stream_url')
                if not video_version_url:
                    continue
                f = {
                    'url': video_version_url,
                    'width': int_or_none(video_version.get('width')),
                    'height': int_or_none(video_version.get('height')),
                    'abr': int_or_none(video_version.get('audio_bitrate')),
                    'vbr': int_or_none(video_version.get('video_bitrate')),
                }
                # The bitrate is only available embedded in the URL itself.
                bitrate = self._search_regex(r'(\d+)k', video_version_url, 'bitrate', default=None)
                if bitrate:
                    f.update({
                        'format_id': 'http-%s' % bitrate,
                    })
                formats.append(f)
            self._sort_formats(formats)

            return {
                'id': video_id,
                'title': json_data['title'],
                'description': json_data.get('subtitle'),
                'thumbnail': json_data.get('thumbnail_image', {}).get('file'),
                'timestamp': parse_iso8601(json_data.get('publication_date')),
                'duration': int_or_none(json_data.get('duration')),
                'view_count': int_or_none(json_data.get('view_count')),
                'formats': formats,
            }
| gpl-2.0 |
zimmermanncode/ganymede | ganymede/__init__.py | 2 | 2851 | # ganymede
#
# Hacking Jupyter's atmosphere
#
# Copyright (C) 2015 Stefan Zimmermann <zimmermann.code@gmail.com>
#
# ganymede is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ganymede is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ganymede. If not, see <http://www.gnu.org/licenses/>.
import sys
from base64 import b64encode
import json
from .__version__ import version as __version__
def load(shell=None, logo_src=None):
    """
    Generate and return Ganymede HTML containing CSS and JavaScript
    for modifying the Jupyter notebook web interface,
    wrapped in a ``IPython.display.HTML`` object:

    * Must be called with IPython `shell` instance as first argument.
    * Optionally takes a custom `logo_src` value for the ``src=`` attribute
      of Ganymede's HTML logo ``<img>`` element.
    """
    # make sure that .static pkg gets reloaded on %reload_ext ganymede
    # to recompile ganymede.coffee in development (non-installed) mode
    sys.modules.pop('ganymede.static', None)
    from ganymede.static import CSS, JS, TOUCH_PUNCH_JS, SVG

    if logo_src is None:
        # load Ganymede's default logo
        logo_src = 'data:image/svg+xml;base64,%s' \
            % b64encode(SVG.bytes()).decode('ascii')

    # import locally to make this module importable in setup.py
    # without further dependencies
    from IPython.display import HTML
    # NOTE: the {{ }} below escape literal braces for str.format; the last
    # <script> unloads Ganymede when its <style> element is removed.
    return HTML(u"""
    <style id="ganymede-style" type="text/css">
    {style}
    </style>
    <script type="text/javascript">
    {touch_punch}
    </script>
    <script type="text/javascript">
    {script}
    window.ganymede = new Ganymede({logo_src});
    </script>
    <script type="text/javascript">
    $('#ganymede-style').on('remove', function () {{
    window.ganymede.unload();
    }});
    </script>
    """.format(style=CSS.text('ascii'), script=JS.text('ascii'),
               logo_src=json.dumps(logo_src),
               touch_punch=TOUCH_PUNCH_JS.text('utf8')))
def load_ipython_extension(shell):
    """
    Called from IPython on ``%load_ext ganymede`` and ``%reload_ext ganymede``

    Calls :func:`ganymede.load` which does the actual magic.

    :param shell: the active IPython shell instance
       (forwarded unchanged to :func:`ganymede.load`).
    """
    # import locally to make this module importable in setup.py
    # without further dependencies
    from IPython.display import display
    # Render the generated HTML/CSS/JS into the notebook frontend.
    display(load(shell))
| gpl-3.0 |
bjorncooley/rainforest_makers | spirit/utils/forms.py | 2 | 1756 | #-*- coding: utf-8 -*-
from django import forms
from django.utils.html import conditional_escape, mark_safe
from django.utils.encoding import smart_text
class NestedModelChoiceField(forms.ModelChoiceField):
    """A ModelChoiceField that groups parents and childrens"""
    # TODO: subclass ModelChoiceIterator, remove _populate_choices()

    def __init__(self, related_name, parent_field, label_field, *args, **kwargs):
        """
        @related_name: related_name or "FOO_set"
        @parent_field: ForeignKey('self') field, use 'name_id' to save some queries
        @label_field: field for obj representation
        """
        super(NestedModelChoiceField, self).__init__(*args, **kwargs)
        self.related_name = related_name
        self.parent_field = parent_field
        self.label_field = label_field
        self._populate_choices()

    def _populate_choices(self):
        # This is *hackish* but simpler than subclassing ModelChoiceIterator
        # Build a flat choices list: each top-level parent followed
        # immediately by its children (prefetched to avoid N+1 queries).
        choices = [("", self.empty_label), ]
        kwargs = {self.parent_field: None, }
        queryset = self.queryset.filter(**kwargs)\
            .prefetch_related(self.related_name)
        for parent in queryset:
            choices.append((self.prepare_value(parent), self.label_from_instance(parent)))
            choices.extend([(self.prepare_value(children), self.label_from_instance(children))
                            for children in getattr(parent, self.related_name).all()])
        self.choices = choices

    def label_from_instance(self, obj):
        # Children are visually indented with a "--- " prefix; the label text
        # itself is escaped before being marked safe.
        level_indicator = u""
        if getattr(obj, self.parent_field):
            level_indicator = u"--- "
        return mark_safe(level_indicator + conditional_escape(smart_text(getattr(obj, self.label_field))))
Superjom/models-1 | generate_sequence_by_rnn_lm/network_conf.py | 4 | 1897 | import paddle.v2 as paddle
def rnn_lm(vocab_dim,
           emb_dim,
           hidden_size,
           stacked_rnn_num,
           rnn_type="lstm",
           is_infer=False):
    """
    RNN language model definition.

    :param vocab_dim: size of vocabulary.
    :type vocab_dim: int
    :param emb_dim: dimension of the embedding vector
    :type emb_dim: int
    :param hidden_size: number of hidden unit.
    :type hidden_size: int
    :param stacked_rnn_num: number of stacked rnn cell.
    :type stacked_rnn_num: int
    :param rnn_type: the type of RNN cell: "lstm" or "gru".
    :type rnn_type: str
    :param is_infer: build the inference network (returns the prediction for
        the last word) instead of the training network (returns the cost).
    :type is_infer: bool
    :return: cost layer of the model, or the last-word prediction layer
        when `is_infer` is True.
    :rtype: LayerOutput
    """
    # input layers
    input = paddle.layer.data(
        name="input", type=paddle.data_type.integer_value_sequence(vocab_dim))
    # The target sequence is only needed to compute the training cost.
    if not is_infer:
        target = paddle.layer.data(
            name="target",
            type=paddle.data_type.integer_value_sequence(vocab_dim))

    # embedding layer
    input_emb = paddle.layer.embedding(input=input, size=emb_dim)

    # rnn layer
    # NOTE: `rnn_cell if i else input_emb` feeds the embedding into the first
    # stacked cell (i == 0) and each previous cell's output into later ones.
    if rnn_type == "lstm":
        for i in range(stacked_rnn_num):
            rnn_cell = paddle.networks.simple_lstm(
                input=rnn_cell if i else input_emb, size=hidden_size)
    elif rnn_type == "gru":
        for i in range(stacked_rnn_num):
            rnn_cell = paddle.networks.simple_gru(
                input=rnn_cell if i else input_emb, size=hidden_size)
    else:
        raise Exception("rnn_type error!")

    # fc(full connected) and output layer
    output = paddle.layer.fc(input=[rnn_cell],
                             size=vocab_dim,
                             act=paddle.activation.Softmax())

    if is_infer:
        last_word = paddle.layer.last_seq(input=output)
        return last_word
    else:
        cost = paddle.layer.classification_cost(input=output, label=target)
        return cost
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.