repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
OsirisSPS/osiris-sps | client/share/plugins/AF9A4C281070FDB0F34CF417CDB168AB38C8A388/lib/plat-mac/lib-scriptpackages/StdSuites/AppleScript_Suite.py | 73 | 60956 | """Suite AppleScript Suite: Standard terms for AppleScript
Level 1, version 1
Generated from /Volumes/Sap/System Folder/Extensions/AppleScript
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'ascr'
class AppleScript_Suite_Events:
    """Event handlers for the standard AppleScript suite ('ascr').

    Machine-generated glue: each public method packs its arguments into an
    Apple event, sends it via self.send (provided by the aetools mixin this
    class is combined with), and returns the decoded direct-object result.
    The repeated send/decode boilerplate is factored into private helpers;
    each public method's interface and behavior are unchanged.
    """

    def _send_simple(self, _code, _subcode, _arguments, _attributes):
        """Send the event and decode the reply.

        Raises aetools.Error when the reply carries an error number ('errn');
        otherwise returns the direct-object result ('----') if present,
        else None.
        """
        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error(aetools.decodeerror(_arguments))
        # XXXX Optionally decode result
        if '----' in _arguments:
            return _arguments['----']

    def _unary_event(self, _code, _subcode, _object, _attributes, _arguments):
        """Common body for events taking exactly one direct object."""
        if _arguments:
            raise TypeError('No optional args expected')
        _arguments['----'] = _object
        return self._send_simple(_code, _subcode, _arguments, _attributes)

    def _no_object_event(self, _code, _subcode, _no_object, _attributes,
            _arguments):
        """Common body for events taking no direct object."""
        if _arguments:
            raise TypeError('No optional args expected')
        if _no_object is not None:
            raise TypeError('No direct arg expected')
        return self._send_simple(_code, _subcode, _arguments, _attributes)

    def _26_(self, _object, _attributes={}, **_arguments):
        """&: Concatenation
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', 'ccat', _object, _attributes,
                _arguments)

    def _2a_(self, _object, _attributes={}, **_arguments):
        """*: Multiplication
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', '*   ', _object, _attributes,
                _arguments)

    def _2b_(self, _object, _attributes={}, **_arguments):
        """+: Addition
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', '+   ', _object, _attributes,
                _arguments)

    def _2d_(self, _object, _attributes={}, **_arguments):
        """-: Subtraction
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', '-   ', _object, _attributes,
                _arguments)

    def _3c_(self, _object, _attributes={}, **_arguments):
        """<: Less than
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', '<   ', _object, _attributes,
                _arguments)

    def _3d_(self, _object, _attributes={}, **_arguments):
        """=: Equality
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', '=   ', _object, _attributes,
                _arguments)

    def _3e_(self, _object, _attributes={}, **_arguments):
        """>: Greater than
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', '>   ', _object, _attributes,
                _arguments)

    _argmap_Call_a5_subroutine = {
        'at' : 'at  ',
        'from_' : 'from',
        'for_' : 'for ',
        'to' : 'to  ',
        'thru' : 'thru',
        'through' : 'thgh',
        'by' : 'by  ',
        'on' : 'on  ',
        'into' : 'into',
        'onto' : 'onto',
        'between' : 'btwn',
        'against' : 'agst',
        'out_of' : 'outo',
        'instead_of' : 'isto',
        'aside_from' : 'asdf',
        'around' : 'arnd',
        'beside' : 'bsid',
        'beneath' : 'bnth',
        'under' : 'undr',
        'over' : 'over',
        'above' : 'abve',
        'below' : 'belw',
        'apart_from' : 'aprt',
        'about' : 'abou',
        'since' : 'snce',
        'given' : 'givn',
        'with' : 'with',
        'without' : 'wout',
    }

    def Call_a5_subroutine(self, _object=None, _attributes={}, **_arguments):
        """Call\xa5subroutine: A subroutine call
        Required argument: anything
        Keyword argument at: a preposition
        Keyword argument from_: a preposition
        Keyword argument for_: a preposition
        Keyword argument to: a preposition
        Keyword argument thru: a preposition
        Keyword argument through: a preposition
        Keyword argument by: a preposition
        Keyword argument on: a preposition
        Keyword argument into: a preposition
        Keyword argument onto: a preposition
        Keyword argument between: a preposition
        Keyword argument against: a preposition
        Keyword argument out_of: a preposition
        Keyword argument instead_of: a preposition
        Keyword argument aside_from: a preposition
        Keyword argument around: a preposition
        Keyword argument beside: a preposition
        Keyword argument beneath: a preposition
        Keyword argument under: a preposition
        Keyword argument over: a preposition
        Keyword argument above: a preposition
        Keyword argument below: a preposition
        Keyword argument apart_from: a preposition
        Keyword argument about: a preposition
        Keyword argument since: a preposition
        Keyword argument given: parameter:value pairs, comma-separated
        Keyword argument with: formal parameter set to true if matching actual parameter is provided
        Keyword argument without: formal parameter set to false if matching actual parmeter is provided
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        # Map Python-safe keyword names to their 4-char Apple event codes.
        aetools.keysubst(_arguments, self._argmap_Call_a5_subroutine)
        _arguments['----'] = _object
        return self._send_simple('ascr', 'psbr', _arguments, _attributes)

    def _5e_(self, _object, _attributes={}, **_arguments):
        """^: Exponentiation
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', '^   ', _object, _attributes,
                _arguments)

    def activate(self, _no_object=None, _attributes={}, **_arguments):
        """activate: Bring the targeted application program to the front
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        return self._no_object_event('misc', 'actv', _no_object, _attributes,
                _arguments)

    def and_(self, _object, _attributes={}, **_arguments):
        """and: Logical conjunction
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', 'AND ', _object, _attributes,
                _arguments)

    def as_(self, _object, _attributes={}, **_arguments):
        """as: Coercion
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', 'coer', _object, _attributes,
                _arguments)

    def contains(self, _object, _attributes={}, **_arguments):
        """contains: Containment
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', 'cont', _object, _attributes,
                _arguments)

    def div(self, _object, _attributes={}, **_arguments):
        """div: Quotient
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', 'div ', _object, _attributes,
                _arguments)

    def end_tell(self, _no_object=None, _attributes={}, **_arguments):
        """end tell: Record or log an \xd4end tell\xd5 statement
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        return self._no_object_event('ascr', 'tend', _no_object, _attributes,
                _arguments)

    def ends_with(self, _object, _attributes={}, **_arguments):
        """ends with: Ends with
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', 'ends', _object, _attributes,
                _arguments)

    _argmap_error = {
        'number' : 'errn',
        'partial_result' : 'ptlr',
        'from_' : 'erob',
        'to' : 'errt',
    }

    def error(self, _object=None, _attributes={}, **_arguments):
        """error: Raise an error
        Required argument: anything
        Keyword argument number: an error number
        Keyword argument partial_result: any partial result occurring before the error
        Keyword argument from_: the object that caused the error
        Keyword argument to: the desired class for a failed coercion
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        # Map Python-safe keyword names to their 4-char Apple event codes.
        aetools.keysubst(_arguments, self._argmap_error)
        _arguments['----'] = _object
        return self._send_simple('ascr', 'err ', _arguments, _attributes)

    def idle(self, _no_object=None, _attributes={}, **_arguments):
        """idle: Sent to a script application when it is idle
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: the number of seconds to wait for next idle event
        """
        return self._no_object_event('misc', 'idle', _no_object, _attributes,
                _arguments)

    def launch(self, _no_object=None, _attributes={}, **_arguments):
        """launch: Start an application for scripting
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        return self._no_object_event('ascr', 'noop', _no_object, _attributes,
                _arguments)

    def log(self, _object, _attributes={}, **_arguments):
        """log: Cause a comment to be logged
        Required argument: undocumented, typecode 'TEXT'
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        return self._unary_event('ascr', 'cmnt', _object, _attributes,
                _arguments)

    def mod(self, _object, _attributes={}, **_arguments):
        """mod: Remainder
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', 'mod ', _object, _attributes,
                _arguments)

    def negate(self, _object, _attributes={}, **_arguments):
        """negate: Numeric negation
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', 'neg ', _object, _attributes,
                _arguments)

    def not_(self, _object, _attributes={}, **_arguments):
        """not: Logical negation
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', 'NOT ', _object, _attributes,
                _arguments)

    def or_(self, _object, _attributes={}, **_arguments):
        """or: Logical disjunction
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', 'OR  ', _object, _attributes,
                _arguments)

    def start_log(self, _no_object=None, _attributes={}, **_arguments):
        """start log: Start event logging in the script editor
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        return self._no_object_event('ToyS', 'log1', _no_object, _attributes,
                _arguments)

    def starts_with(self, _object, _attributes={}, **_arguments):
        """starts with: Starts with
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', 'bgwt', _object, _attributes,
                _arguments)

    def stop_log(self, _no_object=None, _attributes={}, **_arguments):
        """stop log: Stop event logging in the script editor
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        return self._no_object_event('ToyS', 'log0', _no_object, _attributes,
                _arguments)

    def tell(self, _no_object=None, _attributes={}, **_arguments):
        """tell: Record or log a \xd4tell\xd5 statement
        Keyword argument _attributes: AppleEvent attribute dictionary
        """
        return self._no_object_event('ascr', 'tell', _no_object, _attributes,
                _arguments)

    def _ad_(self, _object, _attributes={}, **_arguments):
        """\xad: Inequality
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', '\xad   ', _object, _attributes,
                _arguments)

    def _b2_(self, _object, _attributes={}, **_arguments):
        """\xb2: Less than or equal to
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', '<=  ', _object, _attributes,
                _arguments)

    def _b3_(self, _object, _attributes={}, **_arguments):
        """\xb3: Greater than or equal to
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', '>=  ', _object, _attributes,
                _arguments)

    def _d6_(self, _object, _attributes={}, **_arguments):
        """\xd6: Division
        Required argument: an AE object reference
        Keyword argument _attributes: AppleEvent attribute dictionary
        Returns: anything
        """
        return self._unary_event('ascr', '/   ', _object, _attributes,
                _arguments)
# Generated component-item declarations: each class names an AppleScript
# type by its 4-char Apple event code ('want'); NProperty classes name a
# property code ('which') and its value type. Indentation restored so the
# docstrings and attributes belong to their classes.
class anything(aetools.ComponentItem):
    """anything - any class or reference """
    want = '****'

class pictures(aetools.ComponentItem):
    """pictures - """
    want = 'PICT'

picture = pictures

class styled_text(aetools.ComponentItem):
    """styled text - text with font, size, and style information """
    want = 'STXT'

styled_text = styled_text

class strings(aetools.ComponentItem):
    """strings - """
    want = 'TEXT'

string = strings

class alias(aetools.ComponentItem):
    """alias - a file on a disk or server. The file must exist when you check the syntax of your script. """
    want = 'alis'
class _Prop_POSIX_path(aetools.NProperty):
    """POSIX path - the POSIX path of the file """
    which = 'psxp'
    want = 'TEXT'

aliases = alias
# Month and primitive-type component items (indentation restored).
class April(aetools.ComponentItem):
    """April - the month of April """
    want = 'apr '

class August(aetools.ComponentItem):
    """August - the month of August """
    want = 'aug '

class booleans(aetools.ComponentItem):
    """booleans - """
    want = 'bool'

boolean = booleans

class RGB_colors(aetools.ComponentItem):
    """RGB colors - """
    want = 'cRGB'

RGB_color = RGB_colors
# The 'application' component item plus its global properties. Each
# _Prop_* class declares a property code; the module-level instance below
# it is the ready-to-use property object (indentation restored).
class application(aetools.ComponentItem):
    """application - specifies global properties of AppleScript """
    want = 'capp'
class _Prop_AppleScript(aetools.NProperty):
    """AppleScript - the top-level script object """
    which = 'ascr'
    want = 'scpt'
AppleScript = _Prop_AppleScript()
class _Prop_days(aetools.NProperty):
    """days - the number of seconds in a day """
    which = 'days'
    want = 'long'
days = _Prop_days()
class _Prop_hours(aetools.NProperty):
    """hours - the number of seconds in an hour """
    which = 'hour'
    want = 'long'
hours = _Prop_hours()
class _Prop_minutes(aetools.NProperty):
    """minutes - the number of seconds in a minute """
    which = 'min '
    want = 'long'
minutes = _Prop_minutes()
class _Prop_pi(aetools.NProperty):
    """pi - the constant pi """
    which = 'pi  '
    want = 'doub'
pi = _Prop_pi()
class _Prop_print_depth(aetools.NProperty):
    """print depth - the maximum depth to print """
    which = 'prdp'
    want = 'long'
print_depth = _Prop_print_depth()
class _Prop_print_length(aetools.NProperty):
    """print length - the maximum length to print """
    which = 'prln'
    want = 'long'
print_length = _Prop_print_length()
class _Prop_result(aetools.NProperty):
    """result - the last result of evaluation """
    which = 'rslt'
    want = '****'
result = _Prop_result()
class _Prop_return_(aetools.NProperty):
    """return - a return character """
    which = 'ret '
    want = 'cha '
return_ = _Prop_return_()
class _Prop_space(aetools.NProperty):
    """space - a space character """
    which = 'spac'
    want = 'cha '
space = _Prop_space()
class _Prop_tab(aetools.NProperty):
    """tab - a tab character """
    which = 'tab '
    want = 'cha '
tab = _Prop_tab()
class _Prop_text_item_delimiters(aetools.NProperty):
    """text item delimiters - the text item delimiters of a string """
    which = 'txdl'
    want = 'list'
text_item_delimiters = _Prop_text_item_delimiters()
class _Prop_weeks(aetools.NProperty):
    """weeks - the number of seconds in a week """
    which = 'week'
    want = 'long'
weeks = _Prop_weeks()

applications = application
app = application
# Measurement, text-run, and item component declarations (indentation
# restored; _Prop_* classes are module-level, as the generator emits them).
class upper_case(aetools.ComponentItem):
    """upper case - Text with lower case converted to upper case """
    want = 'case'

class cubic_centimeters(aetools.ComponentItem):
    """cubic centimeters - a volume measurement in SI cubic centimeters """
    want = 'ccmt'

cubic_centimetres = cubic_centimeters

class cubic_feet(aetools.ComponentItem):
    """cubic feet - a volume measurement in Imperial cubic feet """
    want = 'cfet'

class characters(aetools.ComponentItem):
    """characters - """
    want = 'cha '

character = characters

class writing_code_info(aetools.ComponentItem):
    """writing code info - script code and language code of text run """
    want = 'citl'
class _Prop_language_code(aetools.NProperty):
    """language code - the language code for the text """
    which = 'plcd'
    want = 'shor'
class _Prop_script_code(aetools.NProperty):
    """script code - the script code for the text """
    which = 'pscd'
    want = 'shor'

writing_code_infos = writing_code_info

class text_items(aetools.ComponentItem):
    """text items - """
    want = 'citm'

text_item = text_items

class cubic_meters(aetools.ComponentItem):
    """cubic meters - a volume measurement in SI cubic meters """
    want = 'cmet'

cubic_metres = cubic_meters

class centimeters(aetools.ComponentItem):
    """centimeters - a distance measurement in SI centimeters """
    want = 'cmtr'

centimetres = centimeters

class item(aetools.ComponentItem):
    """item - An item of any type """
    want = 'cobj'
class _Prop_id(aetools.NProperty):
    """id - the unique ID number of this object """
    which = 'ID  '
    want = 'long'

items = item

class C_strings(aetools.ComponentItem):
    """C strings - """
    want = 'cstr'

C_string = C_strings
# Text, measurement, month/weekday, and event component declarations
# (indentation restored).
class text(aetools.ComponentItem):
    """text - text with language and style information """
    want = 'ctxt'

class cubic_inches(aetools.ComponentItem):
    """cubic inches - a volume measurement in Imperial cubic inches """
    want = 'cuin'

class cubic_yards(aetools.ComponentItem):
    """cubic yards - a distance measurement in Imperial cubic yards """
    want = 'cyrd'

class December(aetools.ComponentItem):
    """December - the month of December """
    want = 'dec '

class degrees_Celsius(aetools.ComponentItem):
    """degrees Celsius - a temperature measurement in SI degrees Celsius """
    want = 'degc'

class degrees_Fahrenheit(aetools.ComponentItem):
    """degrees Fahrenheit - a temperature measurement in degrees Fahrenheit """
    want = 'degf'

class degrees_Kelvin(aetools.ComponentItem):
    """degrees Kelvin - a temperature measurement in degrees Kelvin """
    want = 'degk'

class reals(aetools.ComponentItem):
    """reals - """
    want = 'doub'

real = reals

class encoded_strings(aetools.ComponentItem):
    """encoded strings - """
    want = 'encs'

encoded_string = encoded_strings

class constants(aetools.ComponentItem):
    """constants - """
    want = 'enum'

constant = constants

class events(aetools.ComponentItem):
    """events - """
    want = 'evnt'

event = events

class February(aetools.ComponentItem):
    """February - the month of February """
    want = 'feb '

class feet(aetools.ComponentItem):
    """feet - a distance measurement in Imperial feet """
    want = 'feet'

class Friday(aetools.ComponentItem):
    """Friday - Friday """
    want = 'fri '

class file_specification(aetools.ComponentItem):
    """file specification - a file specification as used by the operating system """
    want = 'fss '

file_specifications = file_specification

class gallons(aetools.ComponentItem):
    """gallons - a volume measurement in Imperial gallons """
    want = 'galn'

class grams(aetools.ComponentItem):
    """grams - a mass measurement in SI meters """
    want = 'gram'

class handlers(aetools.ComponentItem):
    """handlers - """
    want = 'hand'

handler = handlers

class inches(aetools.ComponentItem):
    """inches - a distance measurement in Imperial inches """
    want = 'inch'

class international_text(aetools.ComponentItem):
    """international text - text that begins with a writing code """
    want = 'itxt'

international_text = international_text
# Month, measurement, keystroke, and date component declarations
# (indentation restored; keystroke and date carry _Prop_* property
# classes without module-level instances).
class January(aetools.ComponentItem):
    """January - the month of January """
    want = 'jan '

class July(aetools.ComponentItem):
    """July - the month of July """
    want = 'jul '

class June(aetools.ComponentItem):
    """June - the month of June """
    want = 'jun '

class reference_forms(aetools.ComponentItem):
    """reference forms - """
    want = 'kfrm'

reference_form = reference_forms

class kilograms(aetools.ComponentItem):
    """kilograms - a mass measurement in SI kilograms """
    want = 'kgrm'

class kilometers(aetools.ComponentItem):
    """kilometers - a distance measurement in SI kilometers """
    want = 'kmtr'

kilometres = kilometers

class keystroke(aetools.ComponentItem):
    """keystroke - a press of a key combination on a Macintosh keyboard """
    want = 'kprs'
class _Prop_key(aetools.NProperty):
    """key - the character for the key was pressed (ignoring modifiers) """
    which = 'kMsg'
    want = 'cha '
class _Prop_key_kind(aetools.NProperty):
    """key kind - the kind of key that was pressed """
    which = 'kknd'
    want = 'ekst'
class _Prop_modifiers(aetools.NProperty):
    """modifiers - the modifier keys pressed in combination """
    which = 'kMod'
    want = 'eMds'

keystrokes = keystroke

class pounds(aetools.ComponentItem):
    """pounds - a weight measurement in SI meters """
    want = 'lbs '

class date(aetools.ComponentItem):
    """date - Absolute date and time values """
    want = 'ldt '
class _Prop_date_string(aetools.NProperty):
    """date string - the date portion of a date-time value as text """
    which = 'dstr'
    want = 'TEXT'
class _Prop_day(aetools.NProperty):
    """day - the day of the month of a date """
    which = 'day '
    want = 'long'
class _Prop_month(aetools.NProperty):
    """month - the month of a date """
    which = 'mnth'
    want = 'mnth'
class _Prop_time(aetools.NProperty):
    """time - the time since midnight of a date """
    which = 'time'
    want = 'long'
class _Prop_time_string(aetools.NProperty):
    """time string - the time portion of a date-time value as text """
    which = 'tstr'
    want = 'TEXT'
class _Prop_weekday(aetools.NProperty):
    """weekday - the day of a week of a date """
    which = 'wkdy'
    want = 'wkdy'
class _Prop_year(aetools.NProperty):
    """year - the year of a date """
    which = 'year'
    want = 'long'

dates = date
# List, numeric, and machine component declarations (indentation
# restored). NOTE: the generated class name 'list' shadows the builtin at
# module scope — kept as-is because it is part of the generated API.
class list(aetools.ComponentItem):
    """list - An ordered collection of items """
    want = 'list'
class _Prop_length(aetools.NProperty):
    """length - the length of a list """
    which = 'leng'
    want = 'long'
class _Prop_rest(aetools.NProperty):
    """rest - all items of the list excluding first """
    which = 'rest'
    want = 'list'
class _Prop_reverse(aetools.NProperty):
    """reverse - the items of the list in reverse order """
    which = 'rvse'
    want = 'list'

lists = list

class liters(aetools.ComponentItem):
    """liters - a volume measurement in SI liters """
    want = 'litr'

litres = liters

class linked_list(aetools.ComponentItem):
    """linked list - An ordered collection of items """
    want = 'llst'

linked_lists = linked_list

class integers(aetools.ComponentItem):
    """integers - """
    want = 'long'

integer = integers

class list_or_record(aetools.ComponentItem):
    """list or record - a list or record """
    want = 'lr  '

class list_2c__record_or_text(aetools.ComponentItem):
    """list, record or text - a list, record or text """
    want = 'lrs '

class list_or_string(aetools.ComponentItem):
    """list or string - a list or string """
    want = 'ls  '

class machines(aetools.ComponentItem):
    """machines - """
    want = 'mach'

machine = machines
class March(aetools.ComponentItem):
"""March - the month of March """
want = 'mar '
class May(aetools.ComponentItem):
"""May - the month of May """
want = 'may '
class meters(aetools.ComponentItem):
"""meters - a distance measurement in SI meters """
want = 'metr'
metres = meters
class miles(aetools.ComponentItem):
"""miles - a distance measurement in Imperial miles """
want = 'mile'
class months(aetools.ComponentItem):
"""months - """
want = 'mnth'
month = months
class Monday(aetools.ComponentItem):
"""Monday - Monday """
want = 'mon '
class missing_values(aetools.ComponentItem):
"""missing values - """
want = 'msng'
missing_value = missing_values
class number_or_date(aetools.ComponentItem):
"""number or date - a number or date """
want = 'nd '
class number_2c__date_or_text(aetools.ComponentItem):
"""number, date or text - a number, date or text """
want = 'nds '
class numbers(aetools.ComponentItem):
"""numbers - """
want = 'nmbr'
number = numbers
class November(aetools.ComponentItem):
"""November - the month of November """
want = 'nov '
class number_or_string(aetools.ComponentItem):
"""number or string - a number or string """
want = 'ns '
class references(aetools.ComponentItem):
"""references - """
want = 'obj '
reference = references
class October(aetools.ComponentItem):
"""October - the month of October """
want = 'oct '
class ounces(aetools.ComponentItem):
# NOTE(review): the generated docstring text "a weight measurement in SI
# meters" is an upstream copy/paste error in the AEUT resource -- ounces
# are an Imperial weight unit. Left as generated to stay in sync with the
# generator's source resource.
"""ounces - a weight measurement in SI meters """
want = 'ozs '
class class_(aetools.ComponentItem):
"""class - the type of a value """
want = 'pcls'
class _Prop__3c_Inheritance_3e_(aetools.NProperty):
"""<Inheritance> - inherits some of its properties from this class """
which = 'c@#^'
want = 'type'
classes = class_
class prepositions(aetools.ComponentItem):
"""prepositions - """
want = 'prep'
preposition = prepositions
class properties(aetools.ComponentItem):
"""properties - """
want = 'prop'
property = properties
class writing_code(aetools.ComponentItem):
"""writing code - codes that identify the language and script system """
want = 'psct'
class Pascal_strings(aetools.ComponentItem):
"""Pascal strings - """
want = 'pstr'
Pascal_string = Pascal_strings
class quarts(aetools.ComponentItem):
"""quarts - a volume measurement in Imperial quarts """
want = 'qrts'
class data(aetools.ComponentItem):
"""data - an AppleScript raw data object """
want = 'rdat'
class records(aetools.ComponentItem):
"""records - """
want = 'reco'
record = records
class Saturday(aetools.ComponentItem):
"""Saturday - Saturday """
want = 'sat '
class seconds(aetools.ComponentItem):
"""seconds - more than one second """
want = 'scnd'
class script(aetools.ComponentItem):
"""script - An AppleScript script """
want = 'scpt'
class _Prop_name(aetools.NProperty):
"""name - the name of the script """
which = 'pnam'
want = 'TEXT'
class _Prop_parent(aetools.NProperty):
"""parent - its parent, i.e. the script that will handle events that this script doesn\xd5t """
which = 'pare'
want = 'scpt'
scripts = script
class September(aetools.ComponentItem):
"""September - the month of September """
want = 'sep '
class alias_or_string(aetools.ComponentItem):
"""alias or string - an alias or string """
want = 'sf '
class sounds(aetools.ComponentItem):
"""sounds - """
want = 'snd '
sound = sounds
class square_feet(aetools.ComponentItem):
"""square feet - an area measurement in Imperial square feet """
want = 'sqft'
class square_kilometers(aetools.ComponentItem):
"""square kilometers - an area measurement in SI square kilometers """
want = 'sqkm'
square_kilometres = square_kilometers
class square_miles(aetools.ComponentItem):
"""square miles - an area measurement in Imperial square miles """
want = 'sqmi'
class square_meters(aetools.ComponentItem):
"""square meters - an area measurement in SI square meters """
want = 'sqrm'
square_metres = square_meters
class square_yards(aetools.ComponentItem):
"""square yards - an area measurement in Imperial square yards """
want = 'sqyd'
class styled_Clipboard_text(aetools.ComponentItem):
"""styled Clipboard text - clipboard text with font, size, and style information """
want = 'styl'
styled_Clipboard_text = styled_Clipboard_text
class Sunday(aetools.ComponentItem):
"""Sunday - Sunday """
want = 'sun '
class styled_Unicode_text(aetools.ComponentItem):
"""styled Unicode text - styled text in the Unicode format """
want = 'sutx'
styled_Unicode_text = styled_Unicode_text
class Thursday(aetools.ComponentItem):
"""Thursday - Thursday """
want = 'thu '
class Tuesday(aetools.ComponentItem):
"""Tuesday - Tuesday """
want = 'tue '
class type_class(aetools.ComponentItem):
"""type class - the name of a particular class (or any four-character code) """
want = 'type'
class empty_ae_name_(aetools.ComponentItem):
""" - the undefined value """
want = 'undf'
class Unicode_text(aetools.ComponentItem):
"""Unicode text - text in the Unicode format (cannot be viewed without conversion) """
want = 'utxt'
Unicode_text = Unicode_text
class vector(aetools.ComponentItem):
"""vector - An ordered collection of items """
want = 'vect'
vectors = vector
class version(aetools.ComponentItem):
"""version - a version value """
want = 'vers'
class Wednesday(aetools.ComponentItem):
"""Wednesday - Wednesday """
want = 'wed '
class weekdays(aetools.ComponentItem):
"""weekdays - """
want = 'wkdy'
weekday = weekdays
class yards(aetools.ComponentItem):
"""yards - a distance measurement in Imperial yards """
want = 'yard'
class zones(aetools.ComponentItem):
"""zones - """
want = 'zone'
zone = zones
anything._superclassnames = []
anything._privpropdict = {
}
anything._privelemdict = {
}
pictures._superclassnames = []
pictures._privpropdict = {
}
pictures._privelemdict = {
}
styled_text._superclassnames = []
styled_text._privpropdict = {
}
styled_text._privelemdict = {
}
styled_text._superclassnames = []
styled_text._privpropdict = {
}
styled_text._privelemdict = {
}
strings._superclassnames = []
strings._privpropdict = {
}
strings._privelemdict = {
}
alias._superclassnames = []
alias._privpropdict = {
'POSIX_path' : _Prop_POSIX_path,
}
alias._privelemdict = {
}
April._superclassnames = []
April._privpropdict = {
}
April._privelemdict = {
}
August._superclassnames = []
August._privpropdict = {
}
August._privelemdict = {
}
booleans._superclassnames = []
booleans._privpropdict = {
}
booleans._privelemdict = {
}
RGB_colors._superclassnames = []
RGB_colors._privpropdict = {
}
RGB_colors._privelemdict = {
}
application._superclassnames = []
application._privpropdict = {
'AppleScript' : _Prop_AppleScript,
'days' : _Prop_days,
'hours' : _Prop_hours,
'minutes' : _Prop_minutes,
'pi' : _Prop_pi,
'print_depth' : _Prop_print_depth,
'print_length' : _Prop_print_length,
'result' : _Prop_result,
'return_' : _Prop_return_,
'space' : _Prop_space,
'tab' : _Prop_tab,
'text_item_delimiters' : _Prop_text_item_delimiters,
'weeks' : _Prop_weeks,
}
application._privelemdict = {
}
upper_case._superclassnames = []
upper_case._privpropdict = {
}
upper_case._privelemdict = {
}
cubic_centimeters._superclassnames = []
cubic_centimeters._privpropdict = {
}
cubic_centimeters._privelemdict = {
}
cubic_feet._superclassnames = []
cubic_feet._privpropdict = {
}
cubic_feet._privelemdict = {
}
characters._superclassnames = []
characters._privpropdict = {
}
characters._privelemdict = {
}
writing_code_info._superclassnames = []
writing_code_info._privpropdict = {
'language_code' : _Prop_language_code,
'script_code' : _Prop_script_code,
}
writing_code_info._privelemdict = {
}
text_items._superclassnames = []
text_items._privpropdict = {
}
text_items._privelemdict = {
}
cubic_meters._superclassnames = []
cubic_meters._privpropdict = {
}
cubic_meters._privelemdict = {
}
centimeters._superclassnames = []
centimeters._privpropdict = {
}
centimeters._privelemdict = {
}
item._superclassnames = []
item._privpropdict = {
'id' : _Prop_id,
}
item._privelemdict = {
}
C_strings._superclassnames = []
C_strings._privpropdict = {
}
C_strings._privelemdict = {
}
text._superclassnames = []
text._privpropdict = {
}
text._privelemdict = {
}
cubic_inches._superclassnames = []
cubic_inches._privpropdict = {
}
cubic_inches._privelemdict = {
}
cubic_yards._superclassnames = []
cubic_yards._privpropdict = {
}
cubic_yards._privelemdict = {
}
December._superclassnames = []
December._privpropdict = {
}
December._privelemdict = {
}
degrees_Celsius._superclassnames = []
degrees_Celsius._privpropdict = {
}
degrees_Celsius._privelemdict = {
}
degrees_Fahrenheit._superclassnames = []
degrees_Fahrenheit._privpropdict = {
}
degrees_Fahrenheit._privelemdict = {
}
degrees_Kelvin._superclassnames = []
degrees_Kelvin._privpropdict = {
}
degrees_Kelvin._privelemdict = {
}
reals._superclassnames = []
reals._privpropdict = {
}
reals._privelemdict = {
}
encoded_strings._superclassnames = []
encoded_strings._privpropdict = {
}
encoded_strings._privelemdict = {
}
constants._superclassnames = []
constants._privpropdict = {
}
constants._privelemdict = {
}
events._superclassnames = []
events._privpropdict = {
}
events._privelemdict = {
}
February._superclassnames = []
February._privpropdict = {
}
February._privelemdict = {
}
feet._superclassnames = []
feet._privpropdict = {
}
feet._privelemdict = {
}
Friday._superclassnames = []
Friday._privpropdict = {
}
Friday._privelemdict = {
}
file_specification._superclassnames = []
file_specification._privpropdict = {
'POSIX_path' : _Prop_POSIX_path,
}
file_specification._privelemdict = {
}
gallons._superclassnames = []
gallons._privpropdict = {
}
gallons._privelemdict = {
}
grams._superclassnames = []
grams._privpropdict = {
}
grams._privelemdict = {
}
handlers._superclassnames = []
handlers._privpropdict = {
}
handlers._privelemdict = {
}
inches._superclassnames = []
inches._privpropdict = {
}
inches._privelemdict = {
}
international_text._superclassnames = []
international_text._privpropdict = {
}
international_text._privelemdict = {
}
international_text._superclassnames = []
international_text._privpropdict = {
}
international_text._privelemdict = {
}
January._superclassnames = []
January._privpropdict = {
}
January._privelemdict = {
}
July._superclassnames = []
July._privpropdict = {
}
July._privelemdict = {
}
June._superclassnames = []
June._privpropdict = {
}
June._privelemdict = {
}
reference_forms._superclassnames = []
reference_forms._privpropdict = {
}
reference_forms._privelemdict = {
}
kilograms._superclassnames = []
kilograms._privpropdict = {
}
kilograms._privelemdict = {
}
kilometers._superclassnames = []
kilometers._privpropdict = {
}
kilometers._privelemdict = {
}
keystroke._superclassnames = []
keystroke._privpropdict = {
'key' : _Prop_key,
'key_kind' : _Prop_key_kind,
'modifiers' : _Prop_modifiers,
}
keystroke._privelemdict = {
}
pounds._superclassnames = []
pounds._privpropdict = {
}
pounds._privelemdict = {
}
date._superclassnames = []
date._privpropdict = {
'date_string' : _Prop_date_string,
'day' : _Prop_day,
'month' : _Prop_month,
'time' : _Prop_time,
'time_string' : _Prop_time_string,
'weekday' : _Prop_weekday,
'year' : _Prop_year,
}
date._privelemdict = {
}
list._superclassnames = []
list._privpropdict = {
'length' : _Prop_length,
'rest' : _Prop_rest,
'reverse' : _Prop_reverse,
}
list._privelemdict = {
}
liters._superclassnames = []
liters._privpropdict = {
}
liters._privelemdict = {
}
linked_list._superclassnames = []
linked_list._privpropdict = {
'length' : _Prop_length,
}
linked_list._privelemdict = {
}
integers._superclassnames = []
integers._privpropdict = {
}
integers._privelemdict = {
}
list_or_record._superclassnames = []
list_or_record._privpropdict = {
}
list_or_record._privelemdict = {
}
list_2c__record_or_text._superclassnames = []
list_2c__record_or_text._privpropdict = {
}
list_2c__record_or_text._privelemdict = {
}
list_or_string._superclassnames = []
list_or_string._privpropdict = {
}
list_or_string._privelemdict = {
}
machines._superclassnames = []
machines._privpropdict = {
}
machines._privelemdict = {
}
March._superclassnames = []
March._privpropdict = {
}
March._privelemdict = {
}
May._superclassnames = []
May._privpropdict = {
}
May._privelemdict = {
}
meters._superclassnames = []
meters._privpropdict = {
}
meters._privelemdict = {
}
miles._superclassnames = []
miles._privpropdict = {
}
miles._privelemdict = {
}
months._superclassnames = []
months._privpropdict = {
}
months._privelemdict = {
}
Monday._superclassnames = []
Monday._privpropdict = {
}
Monday._privelemdict = {
}
missing_values._superclassnames = []
missing_values._privpropdict = {
}
missing_values._privelemdict = {
}
number_or_date._superclassnames = []
number_or_date._privpropdict = {
}
number_or_date._privelemdict = {
}
number_2c__date_or_text._superclassnames = []
number_2c__date_or_text._privpropdict = {
}
number_2c__date_or_text._privelemdict = {
}
numbers._superclassnames = []
numbers._privpropdict = {
}
numbers._privelemdict = {
}
November._superclassnames = []
November._privpropdict = {
}
November._privelemdict = {
}
number_or_string._superclassnames = []
number_or_string._privpropdict = {
}
number_or_string._privelemdict = {
}
references._superclassnames = []
references._privpropdict = {
}
references._privelemdict = {
}
October._superclassnames = []
October._privpropdict = {
}
October._privelemdict = {
}
ounces._superclassnames = []
ounces._privpropdict = {
}
ounces._privelemdict = {
}
class_._superclassnames = ['type_class']
class_._privpropdict = {
'_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_,
}
class_._privelemdict = {
}
prepositions._superclassnames = []
prepositions._privpropdict = {
}
prepositions._privelemdict = {
}
properties._superclassnames = []
properties._privpropdict = {
}
properties._privelemdict = {
}
writing_code._superclassnames = []
writing_code._privpropdict = {
}
writing_code._privelemdict = {
}
Pascal_strings._superclassnames = []
Pascal_strings._privpropdict = {
}
Pascal_strings._privelemdict = {
}
quarts._superclassnames = []
quarts._privpropdict = {
}
quarts._privelemdict = {
}
data._superclassnames = []
data._privpropdict = {
}
data._privelemdict = {
}
records._superclassnames = []
records._privpropdict = {
}
records._privelemdict = {
}
Saturday._superclassnames = []
Saturday._privpropdict = {
}
Saturday._privelemdict = {
}
seconds._superclassnames = []
seconds._privpropdict = {
}
seconds._privelemdict = {
}
script._superclassnames = []
script._privpropdict = {
'name' : _Prop_name,
'parent' : _Prop_parent,
}
script._privelemdict = {
}
September._superclassnames = []
September._privpropdict = {
}
September._privelemdict = {
}
alias_or_string._superclassnames = []
alias_or_string._privpropdict = {
}
alias_or_string._privelemdict = {
}
sounds._superclassnames = []
sounds._privpropdict = {
}
sounds._privelemdict = {
}
square_feet._superclassnames = []
square_feet._privpropdict = {
}
square_feet._privelemdict = {
}
square_kilometers._superclassnames = []
square_kilometers._privpropdict = {
}
square_kilometers._privelemdict = {
}
square_miles._superclassnames = []
square_miles._privpropdict = {
}
square_miles._privelemdict = {
}
square_meters._superclassnames = []
square_meters._privpropdict = {
}
square_meters._privelemdict = {
}
square_yards._superclassnames = []
square_yards._privpropdict = {
}
square_yards._privelemdict = {
}
styled_Clipboard_text._superclassnames = []
styled_Clipboard_text._privpropdict = {
}
styled_Clipboard_text._privelemdict = {
}
styled_Clipboard_text._superclassnames = []
styled_Clipboard_text._privpropdict = {
}
styled_Clipboard_text._privelemdict = {
}
Sunday._superclassnames = []
Sunday._privpropdict = {
}
Sunday._privelemdict = {
}
styled_Unicode_text._superclassnames = []
styled_Unicode_text._privpropdict = {
}
styled_Unicode_text._privelemdict = {
}
styled_Unicode_text._superclassnames = []
styled_Unicode_text._privpropdict = {
}
styled_Unicode_text._privelemdict = {
}
Thursday._superclassnames = []
Thursday._privpropdict = {
}
Thursday._privelemdict = {
}
Tuesday._superclassnames = []
Tuesday._privpropdict = {
}
Tuesday._privelemdict = {
}
type_class._superclassnames = []
type_class._privpropdict = {
}
type_class._privelemdict = {
}
empty_ae_name_._superclassnames = []
empty_ae_name_._privpropdict = {
}
empty_ae_name_._privelemdict = {
}
Unicode_text._superclassnames = []
Unicode_text._privpropdict = {
}
Unicode_text._privelemdict = {
}
Unicode_text._superclassnames = []
Unicode_text._privpropdict = {
}
Unicode_text._privelemdict = {
}
vector._superclassnames = []
vector._privpropdict = {
'length' : _Prop_length,
}
vector._privelemdict = {
}
version._superclassnames = []
version._privpropdict = {
}
version._privelemdict = {
}
Wednesday._superclassnames = []
Wednesday._privpropdict = {
}
Wednesday._privelemdict = {
}
weekdays._superclassnames = []
weekdays._privpropdict = {
}
weekdays._privelemdict = {
}
yards._superclassnames = []
yards._privpropdict = {
}
yards._privelemdict = {
}
zones._superclassnames = []
zones._privpropdict = {
}
zones._privelemdict = {
}
_Enum_boov = {
'true' : 'true', # the true boolean value
'false' : 'fals', # the false boolean value
}
_Enum_cons = {
'case' : 'case', # case
'diacriticals' : 'diac', # diacriticals
'white_space' : 'whit', # white space
'hyphens' : 'hyph', # hyphens
'expansion' : 'expa', # expansion
'punctuation' : 'punc', # punctuation
'application_responses' : 'rmte', # remote event replies
}
_Enum_eMds = {
'option_down' : 'Kopt', #
'command_down' : 'Kcmd', #
'control_down' : 'Kctl', #
'shift_down' : 'Ksft', #
'caps_lock_down' : 'Kclk', #
}
_Enum_ekst = {
'escape_key' : 'ks5\x00', #
'delete_key' : 'ks3\x00', #
'tab_key' : 'ks0\x00', #
'return_key' : 'ks$\x00', #
'clear_key' : 'ksG\x00', #
'enter_key' : 'ksL\x00', #
'up_arrow_key' : 'ks~\x00', #
'down_arrow_key' : 'ks}\x00', #
'left_arrow_key' : 'ks{\x00', #
'right_arrow_key' : 'ks|\x00', #
'help_key' : 'ksr\x00', #
'home_key' : 'kss\x00', #
'page_up_key' : 'kst\x00', #
'page_down_key' : 'ksy\x00', #
'forward_del_key' : 'ksu\x00', #
'end_key' : 'ksw\x00', #
'F1_key' : 'ksz\x00', #
'F2_key' : 'ksx\x00', #
'F3_key' : 'ksc\x00', #
'F4_key' : 'ksv\x00', #
'F5_key' : 'ks`\x00', #
'F6_key' : 'ksa\x00', #
'F7_key' : 'ksb\x00', #
'F8_key' : 'ksd\x00', #
'F9_key' : 'kse\x00', #
'F10_key' : 'ksm\x00', #
'F11_key' : 'ksg\x00', #
'F12_key' : 'kso\x00', #
'F13_key' : 'ksi\x00', #
'F14_key' : 'ksk\x00', #
'F15_key' : 'ksq\x00', #
}
_Enum_misc = {
'current_application' : 'cura', # the current application
}
#
# Indices of types declared in this module
#
_classdeclarations = {
'****' : anything,
'PICT' : pictures,
'STXT' : styled_text,
'TEXT' : strings,
'alis' : alias,
'apr ' : April,
'aug ' : August,
'bool' : booleans,
'cRGB' : RGB_colors,
'capp' : application,
'case' : upper_case,
'ccmt' : cubic_centimeters,
'cfet' : cubic_feet,
'cha ' : characters,
'citl' : writing_code_info,
'citm' : text_items,
'cmet' : cubic_meters,
'cmtr' : centimeters,
'cobj' : item,
'cstr' : C_strings,
'ctxt' : text,
'cuin' : cubic_inches,
'cyrd' : cubic_yards,
'dec ' : December,
'degc' : degrees_Celsius,
'degf' : degrees_Fahrenheit,
'degk' : degrees_Kelvin,
'doub' : reals,
'encs' : encoded_strings,
'enum' : constants,
'evnt' : events,
'feb ' : February,
'feet' : feet,
'fri ' : Friday,
'fss ' : file_specification,
'galn' : gallons,
'gram' : grams,
'hand' : handlers,
'inch' : inches,
'itxt' : international_text,
'jan ' : January,
'jul ' : July,
'jun ' : June,
'kfrm' : reference_forms,
'kgrm' : kilograms,
'kmtr' : kilometers,
'kprs' : keystroke,
'lbs ' : pounds,
'ldt ' : date,
'list' : list,
'litr' : liters,
'llst' : linked_list,
'long' : integers,
'lr ' : list_or_record,
'lrs ' : list_2c__record_or_text,
'ls ' : list_or_string,
'mach' : machines,
'mar ' : March,
'may ' : May,
'metr' : meters,
'mile' : miles,
'mnth' : months,
'mon ' : Monday,
'msng' : missing_values,
'nd ' : number_or_date,
'nds ' : number_2c__date_or_text,
'nmbr' : numbers,
'nov ' : November,
'ns ' : number_or_string,
'obj ' : references,
'oct ' : October,
'ozs ' : ounces,
'pcls' : class_,
'prep' : prepositions,
'prop' : properties,
'psct' : writing_code,
'pstr' : Pascal_strings,
'qrts' : quarts,
'rdat' : data,
'reco' : records,
'sat ' : Saturday,
'scnd' : seconds,
'scpt' : script,
'sep ' : September,
'sf ' : alias_or_string,
'snd ' : sounds,
'sqft' : square_feet,
'sqkm' : square_kilometers,
'sqmi' : square_miles,
'sqrm' : square_meters,
'sqyd' : square_yards,
'styl' : styled_Clipboard_text,
'sun ' : Sunday,
'sutx' : styled_Unicode_text,
'thu ' : Thursday,
'tue ' : Tuesday,
'type' : type_class,
'undf' : empty_ae_name_,
'utxt' : Unicode_text,
'vect' : vector,
'vers' : version,
'wed ' : Wednesday,
'wkdy' : weekdays,
'yard' : yards,
'zone' : zones,
}
_propdeclarations = {
'ID ' : _Prop_id,
'ascr' : _Prop_AppleScript,
'c@#^' : _Prop__3c_Inheritance_3e_,
'day ' : _Prop_day,
'days' : _Prop_days,
'dstr' : _Prop_date_string,
'hour' : _Prop_hours,
'kMod' : _Prop_modifiers,
'kMsg' : _Prop_key,
'kknd' : _Prop_key_kind,
'leng' : _Prop_length,
'min ' : _Prop_minutes,
'mnth' : _Prop_month,
'pare' : _Prop_parent,
'pi ' : _Prop_pi,
'plcd' : _Prop_language_code,
'pnam' : _Prop_name,
'prdp' : _Prop_print_depth,
'prln' : _Prop_print_length,
'pscd' : _Prop_script_code,
'psxp' : _Prop_POSIX_path,
'rest' : _Prop_rest,
'ret ' : _Prop_return_,
'rslt' : _Prop_result,
'rvse' : _Prop_reverse,
'spac' : _Prop_space,
'tab ' : _Prop_tab,
'time' : _Prop_time,
'tstr' : _Prop_time_string,
'txdl' : _Prop_text_item_delimiters,
'week' : _Prop_weeks,
'wkdy' : _Prop_weekday,
'year' : _Prop_year,
}
_compdeclarations = {
}
_enumdeclarations = {
'boov' : _Enum_boov,
'cons' : _Enum_cons,
'eMds' : _Enum_eMds,
'ekst' : _Enum_ekst,
'misc' : _Enum_misc,
}
| gpl-3.0 |
jfpla/odoo | openerp/addons/base/workflow/workflow_report.py | 314 | 9001 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import itemgetter
import os
from openerp import report, tools
_logger = logging.getLogger(__name__)
# Render the workflow activities/transitions of *wkf_ids* into *graph*
# (a pydot Dot or Cluster).  Activities carrying an active workitem are
# drawn red; subflow activities are expanded -- once each, tracked via
# *processed_subflows* -- into nested clusters when *nested* is true.
# Returns ((start_node_id, signal_map), (stop_node_id, signal_map)) anchors
# that a parent graph uses to wire edges into/out of this (sub)graph.
# NOTE(review): indentation was stripped in this extract; structure is
# inferred from the control flow and left as-is.
def graph_get(cr, graph, wkf_ids, nested, workitem, witm_trans, processed_subflows):
import pydot
cr.execute('select * from wkf_activity where wkf_id in ('+','.join(['%s']*len(wkf_ids))+')', wkf_ids)
nodes = cr.dictfetchall()
activities = {}
actfrom = {}
actto = {}
for n in nodes:
activities[n['id']] = n
# Expand a not-yet-seen subflow into its own cluster and recurse.
if n['subflow_id'] and nested and n['subflow_id'] not in processed_subflows:
processed_subflows.add(n['subflow_id']) # don't create multiple times the same cluster.
cr.execute('select * from wkf where id=%s', (n['subflow_id'],))
wkfinfo = cr.dictfetchone()
graph2 = pydot.Cluster('subflow'+str(n['subflow_id']), fontsize='12', label = "\"Subflow: %s\\nOSV: %s\"" % ( n['name'], wkfinfo['osv']) )
(s1,s2) = graph_get(cr, graph2, [n['subflow_id']], True, workitem, witm_trans, processed_subflows)
graph.add_subgraph(graph2)
actfrom[n['id']] = s2
actto[n['id']] = s1
else:
# Plain activity node: grey fill for start/stop activities, red when
# a workitem currently sits on it, box shape for subflow activities.
args = {}
if n['flow_start'] or n['flow_stop']:
args['style']='filled'
args['color']='lightgrey'
args['label']=n['name']
workitems = ''
if n['id'] in workitem:
workitems = '\\nx ' + str(workitem[n['id']])
args['label'] += workitems
args['color'] = "red"
args['style']='filled'
if n['subflow_id']:
args['shape'] = 'box'
if nested and n['subflow_id'] in processed_subflows:
cr.execute('select * from wkf where id=%s', (n['subflow_id'],))
wkfinfo = cr.dictfetchone()
args['label'] = \
'\"Subflow: %s\\nOSV: %s\\n(already expanded)%s\"' % \
(n['name'], wkfinfo['osv'], workitems)
args['color'] = 'green'
args['style'] ='filled'
graph.add_node(pydot.Node(n['id'], **args))
actfrom[n['id']] = (n['id'],{})
actto[n['id']] = (n['id'],{})
node_ids = tuple(map(itemgetter('id'), nodes))
cr.execute('select * from wkf_transition where act_from IN %s ORDER BY sequence,id', (node_ids,))
transitions = cr.dictfetchall()
# One edge per transition; the label carries the condition (and the
# signal, drawn bold, when present).
for t in transitions:
if not t['act_to'] in activities:
continue
args = {
'label': str(t['condition']).replace(' or ', '\\nor ')
.replace(' and ','\\nand ')
}
if t['signal']:
args['label'] += '\\n'+str(t['signal'])
args['style'] = 'bold'
# Arrow decorations encode the split/join semantics of the activities.
if activities[t['act_from']]['split_mode']=='AND':
args['arrowtail']='box'
elif str(activities[t['act_from']]['split_mode'])=='OR ':
args['arrowtail']='inv'
if activities[t['act_to']]['join_mode']=='AND':
args['arrowhead']='crow'
# Transitions waiting on a signal for this instance are drawn red.
if t['id'] in witm_trans:
args['color'] = 'red'
activity_from = actfrom[t['act_from']][1].get(t['signal'], actfrom[t['act_from']][0])
activity_to = actto[t['act_to']][1].get(t['signal'], actto[t['act_to']][0])
graph.add_edge(pydot.Edge( str(activity_from) ,str(activity_to), fontsize='10', **args))
# Locate the flow-start activity and all flow-stop activities so callers
# can connect into/out of this graph.
cr.execute('select * from wkf_activity where flow_start=True and wkf_id in ('+','.join(['%s']*len(wkf_ids))+')', wkf_ids)
start = cr.fetchone()[0]
cr.execute("select 'subflow.'||name,id from wkf_activity where flow_stop=True and wkf_id in ("+','.join(['%s']*len(wkf_ids))+')', wkf_ids)
stop = cr.fetchall()
if stop:
stop = (stop[0][1], dict(stop))
else:
stop = ("stop",{})
return (start, {}), stop
# Draw every workflow attached to instance *inst_id* into *graph*,
# highlighting its current workitems and pending signal transitions.
# NOTE(review): indentation was stripped in this extract; structure is
# inferred from the control flow and left as-is.
def graph_instance_get(cr, graph, inst_id, nested=False):
cr.execute('select wkf_id from wkf_instance where id=%s', (inst_id,))
inst = cr.fetchall()
# Collect {activity_id: workitem_count}, recursing into subflow instances.
def workitem_get(instance):
cr.execute('select act_id,count(*) from wkf_workitem where inst_id=%s group by act_id', (instance,))
workitems = dict(cr.fetchall())
cr.execute('select subflow_id from wkf_workitem where inst_id=%s', (instance,))
for (subflow_id,) in cr.fetchall():
workitems.update(workitem_get(subflow_id))
return workitems
# Transition ids of this instance that are waiting for a signal.
def witm_get(instance):
cr.execute("select trans_id from wkf_witm_trans where inst_id=%s", (instance,))
return set(t[0] for t in cr.fetchall())
processed_subflows = set()
graph_get(cr, graph, [x[0] for x in inst], nested, workitem_get(inst_id), witm_get(inst_id), processed_subflows)
#
# TODO: pas clean: concurrent !!!
#
# Renders the workflow graph of one record to PDF: the pydot graph is
# emitted as PostScript and piped through the external ps2pdf tool; the
# resulting PDF bytes end up in self.result. Failure cases substitute a
# hand-written one-page PostScript message instead of the graph.
# NOTE(review): Python 2 code; indentation was stripped in this extract.
class report_graph_instance(object):
def __init__(self, cr, uid, ids, data):
# pydot is an optional dependency; fail loudly if it is missing.
try:
import pydot
except Exception,e:
_logger.warning(
'Import Error for pydot, you will not be able to render workflows.\n'
'Consider Installing PyDot or dependencies: http://dkbza.org/pydot.html.')
raise e
self.done = False
try:
cr.execute('select * from wkf where osv=%s limit 1',
(data['model'],))
wkfinfo = cr.dictfetchone()
if not wkfinfo:
# No workflow defined for this model: static PostScript message.
ps_string = '''%PS-Adobe-3.0
/inch {72 mul} def
/Times-Roman findfont 50 scalefont setfont
1.5 inch 15 inch moveto
(No workflow defined) show
showpage'''
else:
cr.execute('select i.id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where res_id=%s and osv=%s',(data['id'],data['model']))
inst_ids = cr.fetchall()
if not inst_ids:
ps_string = '''%PS-Adobe-3.0
/inch {72 mul} def
/Times-Roman findfont 50 scalefont setfont
1.5 inch 15 inch moveto
(No workflow instance defined) show
showpage'''
else:
# Build one Dot graph covering all instances of this record.
graph = pydot.Dot(graph_name=data['model'].replace('.','_'),
fontsize='16',
label="""\\\n\\nWorkflow: %s\\n OSV: %s""" % (wkfinfo['name'],wkfinfo['osv']),
size='7.3, 10.1', center='1', ratio='auto', rotate='0', rankdir='TB',
)
for inst_id in inst_ids:
inst_id = inst_id[0]
graph_instance_get(cr, graph, inst_id, data.get('nested', False))
ps_string = graph.create(prog='dot', format='ps')
except Exception:
_logger.exception('Exception in call:')
# string is in PS, like the success message would have been
ps_string = '''%PS-Adobe-3.0
/inch {72 mul} def
/Times-Roman findfont 50 scalefont setfont
1.5 inch 15 inch moveto
(No workflow available) show
showpage'''
# Convert the PostScript to PDF via the external ps2pdf tool.
if os.name == "nt":
prog = 'ps2pdf.bat'
else:
prog = 'ps2pdf'
args = (prog, '-', '-')
input, output = tools.exec_command_pipe(*args)
input.write(ps_string)
input.close()
self.result = output.read()
output.close()
self.done = True
def is_done(self):
# True once the PDF bytes are available in self.result.
return self.done
def get(self):
# Return the rendered PDF bytes, or None while rendering is incomplete.
if self.done:
return self.result
else:
return None
# report_int subclass exposing the instance-graph renderer under a report
# name; an instance is registered at import time below.
# NOTE(review): indentation was stripped in this extract.
class report_graph(report.interface.report_int):
def __init__(self, name, table):
report.interface.report_int.__init__(self, name)
self.table = table
def result(self):
# Returns a (success, data, format) triple; not ready -> all False.
if self.obj.is_done():
return True, self.obj.get(), 'pdf'
else:
return False, False, False
def create(self, cr, uid, ids, data, context=None):
# NOTE(review): self.obj is shared state on the registered singleton,
# so concurrent create() calls clobber each other -- the original
# file's "TODO: pas clean: concurrent" comment refers to this.
self.obj = report_graph_instance(cr, uid, ids, data)
return self.obj.get(), 'pdf'
report_graph('report.workflow.instance.graph', 'ir.workflow')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nelmiux/CarnotKE | jyhton/lib-python/2.7/plat-irix5/cddb.py | 66 | 7225 | # This file implements a class which forms an interface to the .cddb
# directory that is maintained by SGI's cdman program.
#
# Usage is as follows:
#
# import readcd
# r = readcd.Readcd()
# c = Cddb(r.gettrackinfo())
#
# Now you can use c.artist, c.title and c.track[trackno] (where trackno
# starts at 1). When the CD is not recognized, all values will be the empty
# string.
# It is also possible to set the above mentioned variables to new values.
# You can then use c.write() to write out the changed values to the
# .cdplayerrc file.
from warnings import warnpy3k
warnpy3k("the cddb module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import string, posix, os
_cddbrc = '.cddb'
_DB_ID_NTRACKS = 5
_dbid_map = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ@_=+abcdefghijklmnopqrstuvwxyz'
def _dbid(v):
if v >= len(_dbid_map):
return string.zfill(v, 2)
else:
return _dbid_map[v]
# Compute the textual database id ("hash") for a table of contents.  *toc*
# is either the raw TOC string (two-digit track count followed by four
# digits per track: minutes then seconds) or an already-parsed
# [(start, (min, sec)), ...] track list.
# NOTE(review): Python 2 code -- "min + sec / 60" relies on integer
# division; 'hash' and 'min' shadow builtins but are kept as in the
# original.  Indentation was stripped in this extract.
def tochash(toc):
if type(toc) == type(''):
# Parse the "mmss" pairs out of the string form (the first two
# characters -- the track count -- are skipped).
tracklist = []
for i in range(2, len(toc), 4):
tracklist.append((None,
(int(toc[i:i+2]),
int(toc[i+2:i+4]))))
else:
tracklist = toc
ntracks = len(tracklist)
# Id starts with the track count as two hex-ish digits.
hash = _dbid((ntracks >> 4) & 0xF) + _dbid(ntracks & 0xF)
if ntracks <= _DB_ID_NTRACKS:
nidtracks = ntracks
else:
nidtracks = _DB_ID_NTRACKS - 1
# Total playing time, normalised to minutes/seconds.
min = 0
sec = 0
for track in tracklist:
start, length = track
min = min + length[0]
sec = sec + length[1]
min = min + sec / 60
sec = sec % 60
hash = hash + _dbid(min) + _dbid(sec)
# Append the lengths of the first few tracks.
for i in range(nidtracks):
start, length = tracklist[i]
hash = hash + _dbid(length[0]) + _dbid(length[1])
return hash
# In-memory view of one album entry in SGI cdman's .cddb database.  On
# construction the matching "<id>.rdb" file (searched along $CDDB_PATH or
# $HOME/.cddb) is parsed into .artist, .title, .track[1..n],
# .trackartist[1..n] and .notes; write() saves the entry back out.
# NOTE(review): Python 2 code; indentation was stripped in this extract.
class Cddb:
def __init__(self, tracklist):
# Build the search path for the per-disc .rdb files.
if os.environ.has_key('CDDB_PATH'):
path = os.environ['CDDB_PATH']
cddb_path = path.split(',')
else:
home = os.environ['HOME']
cddb_path = [home + '/' + _cddbrc]
self._get_id(tracklist)
# First readable <id>.rdb on the path wins; remember it in self.file.
for dir in cddb_path:
file = dir + '/' + self.id + '.rdb'
try:
f = open(file, 'r')
self.file = file
break
except IOError:
pass
# The first two characters of the id encode the track count (hex).
ntracks = int(self.id[:2], 16)
self.artist = ''
self.title = ''
self.track = [None] + [''] * ntracks
self.trackartist = [None] + [''] * ntracks
self.notes = []
if not hasattr(self, 'file'):
return
# Parse entry lines of the form "name1.name2:<whitespace>value".
import re
reg = re.compile(r'^([^.]*)\.([^:]*):[\t ]+(.*)')
while 1:
line = f.readline()
if not line:
break
match = reg.match(line)
if not match:
print 'syntax error in ' + file
continue
name1, name2, value = match.group(1, 2, 3)
if name1 == 'album':
if name2 == 'artist':
self.artist = value
elif name2 == 'title':
self.title = value
elif name2 == 'toc':
if not self.toc:
self.toc = value
if self.toc != value:
print 'toc\'s don\'t match'
elif name2 == 'notes':
self.notes.append(value)
elif name1[:5] == 'track':
try:
trackno = int(name1[5:])
# NOTE(review): 'strings.atoi_error' is an undefined name -- if int()
# ever fails this raises NameError, not the intended parse error;
# should be ValueError.
except strings.atoi_error:
print 'syntax error in ' + file
continue
if trackno > ntracks:
print 'track number %r in file %r out of range' % (trackno, file)
continue
if name2 == 'title':
self.track[trackno] = value
elif name2 == 'artist':
self.trackartist[trackno] = value
f.close()
# Expand titles stored as ",suffix" shorthand by re-using the previous
# track's prefix up to its first comma.
for i in range(2, len(self.track)):
track = self.track[i]
# if track title starts with `,', use initial part
# of previous track's title
if track and track[0] == ',':
try:
off = self.track[i - 1].index(',')
except ValueError:
pass
else:
self.track[i] = self.track[i-1][:off] \
+ track
def _get_id(self, tracklist):
# fill in self.id and self.toc.
# if the argument is a string ending in .rdb, the part
# upto the suffix is taken as the id.
if type(tracklist) == type(''):
if tracklist[-4:] == '.rdb':
self.id = tracklist[:-4]
self.toc = ''
return
# Otherwise parse the raw TOC string into (start, (min, sec)) pairs.
t = []
for i in range(2, len(tracklist), 4):
t.append((None, \
(int(tracklist[i:i+2]), \
int(tracklist[i+2:i+4]))))
tracklist = t
ntracks = len(tracklist)
self.id = _dbid((ntracks >> 4) & 0xF) + _dbid(ntracks & 0xF)
if ntracks <= _DB_ID_NTRACKS:
nidtracks = ntracks
else:
nidtracks = _DB_ID_NTRACKS - 1
# Total playing time (Python 2 integer division assumed for sec / 60).
min = 0
sec = 0
for track in tracklist:
start, length = track
min = min + length[0]
sec = sec + length[1]
min = min + sec / 60
sec = sec % 60
self.id = self.id + _dbid(min) + _dbid(sec)
for i in range(nidtracks):
start, length = tracklist[i]
self.id = self.id + _dbid(length[0]) + _dbid(length[1])
self.toc = string.zfill(ntracks, 2)
for track in tracklist:
start, length = track
self.toc = self.toc + string.zfill(length[0], 2) + \
string.zfill(length[1], 2)
def write(self):
# Save the entry to $CDDB_WRITE_DIR (or $HOME/.cddb), keeping a "~"
# backup of any existing file.
import posixpath
if os.environ.has_key('CDDB_WRITE_DIR'):
dir = os.environ['CDDB_WRITE_DIR']
else:
dir = os.environ['HOME'] + '/' + _cddbrc
file = dir + '/' + self.id + '.rdb'
if posixpath.exists(file):
# make backup copy
posix.rename(file, file + '~')
f = open(file, 'w')
f.write('album.title:\t' + self.title + '\n')
f.write('album.artist:\t' + self.artist + '\n')
f.write('album.toc:\t' + self.toc + '\n')
for note in self.notes:
f.write('album.notes:\t' + note + '\n')
# Re-compress shared title prefixes back into ",suffix" shorthand.
prevpref = None
for i in range(1, len(self.track)):
if self.trackartist[i]:
f.write('track%r.artist:\t%s\n' % (i, self.trackartist[i]))
track = self.track[i]
try:
off = track.index(',')
# NOTE(review): typo -- 'ValuError' should be ValueError; as written a
# title without a comma raises NameError here instead of resetting
# prevpref.
except ValuError:
prevpref = None
else:
if prevpref and track[:off] == prevpref:
track = track[off:]
else:
prevpref = track[:off]
f.write('track%r.title:\t%s\n' % (i, track))
f.close()
| apache-2.0 |
durai145/youtube-dl | youtube_dl/extractor/lynda.py | 18 | 8769 | from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
clean_html,
int_or_none,
)
class LyndaBaseIE(InfoExtractor):
    """Base class for lynda.com extractors; handles account login."""

    _LOGIN_URL = 'https://www.lynda.com/login/login.aspx'
    _ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to provide lynda.com account credentials.'
    _NETRC_MACHINE = 'lynda'

    def _real_initialize(self):
        self._login()

    def _login(self):
        """Log in with the configured credentials, if any.

        Handles the "already logged in on two devices" conflict by
        logging the other device out. Raises ExtractorError (expected)
        on bad credentials or any other login failure.
        """
        (username, password) = self._get_login_info()
        if username is None:
            return

        login_form = {
            'username': username.encode('utf-8'),
            'password': password.encode('utf-8'),
            'remember': 'false',
            'stayPut': 'false'
        }
        request = compat_urllib_request.Request(
            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
        login_page = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        # Not (yet) logged in
        m = re.search(r'loginResultJson\s*=\s*\'(?P<json>[^\']+)\';', login_page)
        if m is not None:
            response = m.group('json')
            response_json = json.loads(response)
            state = response_json['state']

            if state == 'notlogged':
                raise ExtractorError(
                    'Unable to login, incorrect username and/or password',
                    expected=True)

            # This is when we get popup:
            # > You're already logged in to lynda.com on two devices.
            # > If you log in here, we'll log you out of another device.
            # So, we need to confirm this.
            if state == 'conflicted':
                confirm_form = {
                    'username': '',
                    'password': '',
                    'resolve': 'true',
                    'remember': 'false',
                    'stayPut': 'false',
                }
                request = compat_urllib_request.Request(
                    self._LOGIN_URL, compat_urllib_parse.urlencode(confirm_form).encode('utf-8'))
                login_page = self._download_webpage(
                    request, None,
                    'Confirming log in and log out from another device')

        # Consistency fix: the first pattern is now a raw string like its
        # siblings (same string value either way, since \s is not a
        # recognized Python escape).
        if all(not re.search(p, login_page) for p in (r'isLoggedIn\s*:\s*true', r'logout\.aspx', r'>Log out<')):
            if 'login error' in login_page:
                mobj = re.search(
                    r'(?s)<h1[^>]+class="topmost">(?P<title>[^<]+)</h1>\s*<div>(?P<description>.+?)</div>',
                    login_page)
                if mobj:
                    raise ExtractorError(
                        'lynda returned error: %s - %s'
                        % (mobj.group('title'), clean_html(mobj.group('description'))),
                        expected=True)
            raise ExtractorError('Unable to log in')
class LyndaIE(LyndaBaseIE):
    """Extractor for individual lynda.com videos (including embeds)."""

    IE_NAME = 'lynda'
    IE_DESC = 'lynda.com videos'
    _VALID_URL = r'https?://www\.lynda\.com/(?:[^/]+/[^/]+/\d+|player/embed)/(?P<id>\d+)'
    # NOTE(review): _NETRC_MACHINE is already set to the same value on
    # LyndaBaseIE; this redefinition looks redundant.
    _NETRC_MACHINE = 'lynda'

    # Matches timecodes like [00:00:01.23] or [00:00:01,23] in the
    # transcript entries returned by the ajax player API.
    _TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]'

    _TESTS = [{
        'url': 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html',
        'md5': 'ecfc6862da89489161fb9cd5f5a6fac1',
        'info_dict': {
            'id': '114408',
            'ext': 'mp4',
            'title': 'Using the exercise files',
            'duration': 68
        }
    }, {
        'url': 'https://www.lynda.com/player/embed/133770?tr=foo=1;bar=g;fizz=rt&fs=0',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Download the video JSON and build the info dict."""
        video_id = self._match_id(url)

        page = self._download_webpage(
            'http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id,
            video_id, 'Downloading video JSON')
        video_json = json.loads(page)

        # The API reports errors by including a 'Status' field.
        if 'Status' in video_json:
            raise ExtractorError(
                'lynda returned error: %s' % video_json['Message'], expected=True)

        if video_json['HasAccess'] is False:
            raise ExtractorError(
                'Video %s is only available for members. '
                % video_id + self._ACCOUNT_CREDENTIALS_HINT, expected=True)

        video_id = compat_str(video_json['ID'])
        duration = video_json['DurationInSeconds']
        title = video_json['Title']

        formats = []

        fmts = video_json.get('Formats')
        if fmts:
            formats.extend([
                {
                    'url': fmt['Url'],
                    'ext': fmt['Extension'],
                    'width': fmt['Width'],
                    'height': fmt['Height'],
                    'filesize': fmt['FileSize'],
                    'format_id': str(fmt['Resolution'])
                } for fmt in fmts])

        # PrioritizedStreams maps format ids (apparently the width, as
        # int_or_none below assumes) to direct video URLs.
        prioritized_streams = video_json.get('PrioritizedStreams')
        if prioritized_streams:
            formats.extend([
                {
                    'url': video_url,
                    'width': int_or_none(format_id),
                    'format_id': format_id,
                } for format_id, video_url in prioritized_streams['0'].items()
            ])

        self._check_formats(formats, video_id)
        self._sort_formats(formats)

        subtitles = self.extract_subtitles(video_id, page)

        return {
            'id': video_id,
            'title': title,
            'duration': duration,
            'subtitles': subtitles,
            'formats': formats
        }

    def _fix_subtitles(self, subs):
        """Convert the API transcript entries into SRT text.

        Each caption's end time is the next caption's start time, so the
        final entry is used only as a terminator. Returns None when no
        usable captions were found.
        """
        srt = ''
        seq_counter = 0
        for pos in range(0, len(subs) - 1):
            seq_current = subs[pos]
            m_current = re.match(self._TIMECODE_REGEX, seq_current['Timecode'])
            if m_current is None:
                continue
            seq_next = subs[pos + 1]
            m_next = re.match(self._TIMECODE_REGEX, seq_next['Timecode'])
            if m_next is None:
                continue
            appear_time = m_current.group('timecode')
            disappear_time = m_next.group('timecode')
            text = seq_current['Caption'].strip()
            if text:
                seq_counter += 1
                srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (seq_counter, appear_time, disappear_time, text)
        if srt:
            return srt

    def _get_subtitles(self, video_id, webpage):
        """Fetch the transcript and return it as English SRT subtitles."""
        url = 'http://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id
        subs = self._download_json(url, None, False)
        if subs:
            return {'en': [{'ext': 'srt', 'data': self._fix_subtitles(subs)}]}
        else:
            return {}
class LyndaCourseIE(LyndaBaseIE):
    """Extractor for whole lynda.com courses, returned as a playlist."""

    IE_NAME = 'lynda:course'
    IE_DESC = 'lynda.com online courses'

    # A course URL is identical to the URL of the course's
    # welcome/introduction video, so it is recognized by its path and
    # numeric course id.
    _VALID_URL = r'https?://(?:www|m)\.lynda\.com/(?P<coursepath>[^/]+/[^/]+/(?P<courseid>\d+))-\d\.html'

    def _real_extract(self, url):
        url_match = re.match(self._VALID_URL, url)
        course_path = url_match.group('coursepath')
        course_id = url_match.group('courseid')

        course_page = self._download_webpage(
            'http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
            course_id, 'Downloading course JSON')
        course_json = json.loads(course_page)

        if course_json.get('Status') == 'NotFound':
            raise ExtractorError(
                'Course %s does not exist' % course_id, expected=True)

        # Collect accessible video ids and count the locked ones.
        # Formats are not extracted here since the single-video API (via
        # LyndaIE below) is what provides them nowadays.
        locked_count = 0
        video_ids = []
        for chapter in course_json['Chapters']:
            for video in chapter['Videos']:
                if video['HasAccess'] is False:
                    locked_count += 1
                else:
                    video_ids.append(video['ID'])

        if locked_count:
            self._downloader.report_warning(
                '%s videos are only available for members (or paid members) and will not be downloaded. '
                % locked_count + self._ACCOUNT_CREDENTIALS_HINT)

        entries = []
        for video_id in video_ids:
            entries.append(self.url_result(
                'http://www.lynda.com/%s/%s-4.html' % (course_path, video_id),
                'Lynda'))

        return self.playlist_result(entries, course_id, course_json['Title'])
| unlicense |
derekjchow/models | research/syntaxnet/dragnn/python/lexicon_test.py | 3 | 2738 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SyntaxNet lexicon."""
import os
import os.path
import tensorflow as tf
from google.protobuf import text_format
from dragnn.python import lexicon
# Imported for FLAGS.tf_master, which is used in the lexicon module.
from syntaxnet import parser_trainer
from syntaxnet import task_spec_pb2
from syntaxnet import test_flags
# Expected lexicon context: a TaskSpec with one input per lexicon
# resource, each pointing at a file directly under the requested
# directory ('/tmp' in the test below).
_EXPECTED_CONTEXT = r"""
input { name: "word-map" Part { file_pattern: "/tmp/word-map" } }
input { name: "tag-map" Part { file_pattern: "/tmp/tag-map" } }
input { name: "tag-to-category" Part { file_pattern: "/tmp/tag-to-category" } }
input { name: "lcword-map" Part { file_pattern: "/tmp/lcword-map" } }
input { name: "category-map" Part { file_pattern: "/tmp/category-map" } }
input { name: "char-map" Part { file_pattern: "/tmp/char-map" } }
input { name: "char-ngram-map" Part { file_pattern: "/tmp/char-ngram-map" } }
input { name: "label-map" Part { file_pattern: "/tmp/label-map" } }
input { name: "prefix-table" Part { file_pattern: "/tmp/prefix-table" } }
input { name: "suffix-table" Part { file_pattern: "/tmp/suffix-table" } }
input { name: "known-word-map" Part { file_pattern: "/tmp/known-word-map" } }
"""
class LexiconTest(tf.test.TestCase):
  """Tests for lexicon context creation and lexicon building."""

  def testCreateLexiconContext(self):
    """create_lexicon_context() should list every lexicon resource."""
    expected_context = task_spec_pb2.TaskSpec()
    text_format.Parse(_EXPECTED_CONTEXT, expected_context)
    self.assertProtoEquals(
        lexicon.create_lexicon_context('/tmp'), expected_context)

  def testBuildLexicon(self):
    """build_lexicon() should run cleanly on an empty corpus."""
    empty_input_path = os.path.join(test_flags.temp_dir(), 'empty-input')
    lexicon_output_path = os.path.join(test_flags.temp_dir(), 'lexicon-output')

    # Create an empty corpus file.
    with open(empty_input_path, 'w'):
      pass

    # The directory may already exist when running locally multiple times.
    if not os.path.exists(lexicon_output_path):
      os.mkdir(lexicon_output_path)

    # Just make sure this doesn't crash; the lexicon builder op is already
    # exercised in its own unit test.
    lexicon.build_lexicon(lexicon_output_path, empty_input_path)
# Standard TensorFlow test entry point.
if __name__ == '__main__':
  tf.test.main()
| apache-2.0 |
sahutd/youtube-dl | youtube_dl/extractor/wsj.py | 104 | 3431 | # encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
)
class WSJIE(InfoExtractor):
    """Extractor for Wall Street Journal videos (video-api.wsj.com)."""

    _VALID_URL = r'https?://video-api\.wsj\.com/api-video/player/iframe\.html\?guid=(?P<id>[a-zA-Z0-9-]+)'
    IE_DESC = 'Wall Street Journal'
    _TEST = {
        'url': 'http://video-api.wsj.com/api-video/player/iframe.html?guid=1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
        'md5': '9747d7a6ebc2f4df64b981e1dde9efa9',
        'info_dict': {
            'id': '1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
            'ext': 'mp4',
            'upload_date': '20150202',
            'uploader_id': 'jdesai',
            'creator': 'jdesai',
            'categories': list,  # a long list
            'duration': 90,
            'title': 'Bills Coach Rex Ryan Updates His Old Jets Tattoo',
        },
    }

    def _real_extract(self, url):
        """Query the find_all_videos API and build the info dict."""
        video_id = self._match_id(url)

        # Progressive MP4 variants are exposed as video<br>kMP4Url fields.
        bitrates = [128, 174, 264, 320, 464, 664, 1264]
        api_url = (
            'http://video-api.wsj.com/api-video/find_all_videos.asp?'
            'type=guid&count=1&query=%s&'
            'fields=hls,adZone,thumbnailList,guid,state,secondsUntilStartTime,'
            'author,description,name,linkURL,videoStillURL,duration,videoURL,'
            'adCategory,catastrophic,linkShortURL,doctypeID,youtubeID,'
            'titletag,rssURL,wsj-section,wsj-subsection,allthingsd-section,'
            'allthingsd-subsection,sm-section,sm-subsection,provider,'
            'formattedCreationDate,keywords,keywordsOmniture,column,editor,'
            'emailURL,emailPartnerID,showName,omnitureProgramName,'
            'omnitureVideoFormat,linkRelativeURL,touchCastID,'
            'omniturePublishDate,%s') % (
                video_id, ','.join('video%dkMP4Url' % br for br in bitrates))
        info = self._download_json(api_url, video_id)['items'][0]

        # Thumbnails are conveniently in the correct format already
        thumbnails = info.get('thumbnailList')
        creator = info.get('author')
        uploader_id = info.get('editor')
        categories = info.get('keywords')
        duration = int_or_none(info.get('duration'))
        upload_date = unified_strdate(
            info.get('formattedCreationDate'), day_first=False)
        title = info.get('name', info.get('titletag'))

        formats = [{
            'format_id': 'f4m',
            'format_note': 'f4m (meta URL)',
            'url': info['videoURL'],
        }]
        if info.get('hls'):
            formats.extend(self._extract_m3u8_formats(
                info['hls'], video_id, ext='mp4',
                preference=0, entry_protocol='m3u8_native'))
        for br in bitrates:
            field = 'video%dkMP4Url' % br
            if info.get(field):
                formats.append({
                    'format_id': 'mp4-%d' % br,
                    'container': 'mp4',
                    'tbr': br,
                    'url': info[field],
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            # Fixed: 'formats' appeared twice in this dict literal; the
            # duplicate (identical) entry has been removed.
            'formats': formats,
            'thumbnails': thumbnails,
            'creator': creator,
            'uploader_id': uploader_id,
            'duration': duration,
            'upload_date': upload_date,
            'title': title,
            'categories': categories,
        }
| unlicense |
was4444/chromium.src | chrome/browser/web_dev_style/js_checker_test.py | 16 | 11434 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import js_checker
from os import path as os_path
import re
from sys import path as sys_path
import test_util
import unittest
_HERE = os_path.dirname(os_path.abspath(__file__))
sys_path.append(os_path.join(_HERE, '..', '..', '..', 'build'))
import find_depot_tools # pylint: disable=W0611
from testing_support.super_mox import SuperMoxTestBase
class JsCheckerTest(SuperMoxTestBase):
  """Unit tests for the individual style checks in js_checker.JSChecker."""

  def setUp(self):
    SuperMoxTestBase.setUp(self)

    input_api = self.mox.CreateMockAnything()
    input_api.re = re
    output_api = self.mox.CreateMockAnything()
    self.checker = js_checker.JSChecker(input_api, output_api)

  def ShouldFailConstCheck(self, line):
    """Checks that the 'const' checker flags |line| as a style error."""
    error = self.checker.ConstCheck(1, line)
    self.assertNotEqual('', error,
        'Should be flagged as style error: ' + line)
    self.assertEqual(test_util.GetHighlight(line, error), 'const')

  def ShouldPassConstCheck(self, line):
    """Checks that the 'const' checker doesn't flag |line| as a style error."""
    self.assertEqual('', self.checker.ConstCheck(1, line),
        'Should not be flagged as style error: ' + line)

  def testConstFails(self):
    """Lines the 'const' check must flag."""
    lines = [
        "const foo = 'bar';",
        " const bar = 'foo';",

        # Trying to use |const| as a variable name
        "var const = 0;",

        "var x = 5; const y = 6;",
        "for (var i=0, const e=10; i<e; i++) {",
        "for (const x=0; x<foo; i++) {",
        "while (const x = 7) {",
    ]
    for line in lines:
      self.ShouldFailConstCheck(line)

  def testConstPasses(self):
    """Lines the 'const' check must not flag."""
    lines = [
        # sanity check
        "var foo = 'bar'",

        # @const JsDoc tag
        "/** @const */ var SEVEN = 7;",

        # @const tag in multi-line comment
        " * @const",
        " * @const",

        # @constructor tag in multi-line comment
        " * @constructor",
        " * @constructor",

        # words containing 'const'
        "if (foo.constructor) {",
        "var deconstruction = 'something';",
        "var madeUpWordconst = 10;",

        # Strings containing the word |const|
        "var str = 'const at the beginning';",
        "var str = 'At the end: const';",

        # doing this one with regex is probably not practical
        #"var str = 'a const in the middle';",
    ]
    for line in lines:
      self.ShouldPassConstCheck(line)

  def ShouldFailChromeSendCheck(self, line):
    """Checks that the 'chrome.send' checker flags |line| as a style error."""
    error = self.checker.ChromeSendCheck(1, line)
    self.assertNotEqual('', error,
        'Should be flagged as style error: ' + line)
    self.assertEqual(test_util.GetHighlight(line, error), ', []')

  def ShouldPassChromeSendCheck(self, line):
    """Checks that the 'chrome.send' checker doesn't flag |line| as a style
       error.
    """
    self.assertEqual('', self.checker.ChromeSendCheck(1, line),
        'Should not be flagged as style error: ' + line)

  def testChromeSendFails(self):
    """chrome.send calls with a trailing empty array must be flagged."""
    lines = [
        "chrome.send('message', []);",
        " chrome.send('message', []);",
    ]
    for line in lines:
      self.ShouldFailChromeSendCheck(line)

  def testChromeSendPasses(self):
    """chrome.send calls with real argument expressions must pass."""
    lines = [
        "chrome.send('message', constructArgs('foo', []));",
        " chrome.send('message', constructArgs('foo', []));",
        "chrome.send('message', constructArgs([]));",
        " chrome.send('message', constructArgs([]));",
    ]
    for line in lines:
      self.ShouldPassChromeSendCheck(line)

  def ShouldFailEndJsDocCommentCheck(self, line):
    """Checks that the **/ checker flags |line| as a style error."""
    error = self.checker.EndJsDocCommentCheck(1, line)
    self.assertNotEqual('', error,
        'Should be flagged as style error: ' + line)
    self.assertEqual(test_util.GetHighlight(line, error), '**/')

  def ShouldPassEndJsDocCommentCheck(self, line):
    """Checks that the **/ checker doesn't flag |line| as a style error."""
    self.assertEqual('', self.checker.EndJsDocCommentCheck(1, line),
        'Should not be flagged as style error: ' + line)

  def testEndJsDocCommentFails(self):
    """JSDoc comments ended with **/ must be flagged."""
    lines = [
        "/** @override **/",
        "/** @type {number} @const **/",
        " **/",
        "**/ ",
    ]
    for line in lines:
      self.ShouldFailEndJsDocCommentCheck(line)

  def testEndJsDocCommentPasses(self):
    """Valid comment endings must not be flagged."""
    lines = [
        "/***************/",  # visual separators
        " */",  # valid JSDoc comment ends
        "*/ ",
        "/**/",  # funky multi-line comment enders
        "/** @override */",  # legit JSDoc one-liners
    ]
    for line in lines:
      self.ShouldPassEndJsDocCommentCheck(line)

  def ShouldFailExtraDotInGenericCheck(self, line):
    """Checks that Array.< or Object.< is flagged as a style nit."""
    error = self.checker.ExtraDotInGenericCheck(1, line)
    self.assertNotEqual('', error)
    self.assertTrue(test_util.GetHighlight(line, error).endswith(".<"))

  def testExtraDotInGenericFails(self):
    """Generic types written with a dot before < must be flagged."""
    lines = [
        "/** @private {!Array.<!Frobber>} */",
        "var a = /** @type {Object.<number>} */({});",
        "* @return {!Promise.<Change>}"
    ]
    for line in lines:
      self.ShouldFailExtraDotInGenericCheck(line)

  def ShouldFailGetElementByIdCheck(self, line):
    """Checks that the 'getElementById' checker flags |line| as a style
       error.
    """
    error = self.checker.GetElementByIdCheck(1, line)
    self.assertNotEqual('', error,
        'Should be flagged as style error: ' + line)
    self.assertEqual(test_util.GetHighlight(line, error),
                     'document.getElementById')

  def ShouldPassGetElementByIdCheck(self, line):
    """Checks that the 'getElementById' checker doesn't flag |line| as a style
       error.
    """
    self.assertEqual('', self.checker.GetElementByIdCheck(1, line),
        'Should not be flagged as style error: ' + line)

  def testGetElementByIdFails(self):
    """Direct document.getElementById uses must be flagged."""
    lines = [
        "document.getElementById('foo');",
        " document.getElementById('foo');",
        "var x = document.getElementById('foo');",
        "if (document.getElementById('foo').hidden) {",
    ]
    for line in lines:
      self.ShouldFailGetElementByIdCheck(line)

  def testGetElementByIdPasses(self):
    """getElementById on other documents must not be flagged."""
    lines = [
        "elem.ownerDocument.getElementById('foo');",
        " elem.ownerDocument.getElementById('foo');",
        "var x = elem.ownerDocument.getElementById('foo');",
        "if (elem.ownerDocument.getElementById('foo').hidden) {",
        "doc.getElementById('foo');",
        " doc.getElementById('foo');",
        "cr.doc.getElementById('foo');",
        " cr.doc.getElementById('foo');",
        "var x = doc.getElementById('foo');",
        "if (doc.getElementById('foo').hidden) {",
    ]
    for line in lines:
      self.ShouldPassGetElementByIdCheck(line)

  def ShouldFailInheritDocCheck(self, line):
    """Checks that the '@inheritDoc' checker flags |line| as a style error."""
    error = self.checker.InheritDocCheck(1, line)
    self.assertNotEqual('', error,
        msg='Should be flagged as style error: ' + line)
    self.assertEqual(test_util.GetHighlight(line, error), '@inheritDoc')

  def ShouldPassInheritDocCheck(self, line):
    """Checks that the '@inheritDoc' checker doesn't flag |line| as a style
       error.
    """
    self.assertEqual('', self.checker.InheritDocCheck(1, line),
        msg='Should not be flagged as style error: ' + line)

  def testInheritDocFails(self):
    """@inheritDoc (deprecated in favor of @override) must be flagged."""
    lines = [
        " /** @inheritDoc */",
        " * @inheritDoc",
    ]
    for line in lines:
      self.ShouldFailInheritDocCheck(line)

  def testInheritDocPasses(self):
    """Mere mentions of 'inheritDoc' outside JSDoc must not be flagged."""
    lines = [
        "And then I said, but I won't @inheritDoc! Hahaha!",
        " If your dad's a doctor, do you inheritDoc?",
        " What's up, inherit doc?",
        " this.inheritDoc(someDoc)",
    ]
    for line in lines:
      self.ShouldPassInheritDocCheck(line)

  def ShouldFailWrapperTypeCheck(self, line):
    """Checks that the use of wrapper types (i.e. new Number(), @type {Number})
       is a style error.
    """
    error = self.checker.WrapperTypeCheck(1, line)
    self.assertNotEqual('', error,
        msg='Should be flagged as style error: ' + line)
    highlight = test_util.GetHighlight(line, error)
    self.assertTrue(highlight in ('Boolean', 'Number', 'String'))

  def ShouldPassWrapperTypeCheck(self, line):
    """Checks that the wrapper type checker doesn't flag |line| as a style
       error.
    """
    self.assertEqual('', self.checker.WrapperTypeCheck(1, line),
        msg='Should not be flagged as style error: ' + line)

  def testWrapperTypePasses(self):
    """Lines the wrapper-type check must not flag."""
    lines = [
        "/** @param {!ComplexType} */",
        " * @type {Object}",
        " * @param {Function=} opt_callback",
        " * @param {} num Number of things to add to {blah}.",
        " * @return {!print_preview.PageNumberSet}",
        " /* @returns {Number} */",  # Should be /** @return {Number} */
        # Fixed: a missing trailing comma on the next line used to merge
        # these two strings into one test case via implicit concatenation.
        "* @param {!LocalStrings}",
        " Your type of Boolean is false!",
        " Then I parameterized her Number from her friend!",
        " A String of Pearls",
        " types.params.aBoolean.typeString(someNumber)",
    ]
    for line in lines:
      self.ShouldPassWrapperTypeCheck(line)

  def testWrapperTypeFails(self):
    """Wrapper types in JSDoc annotations must be flagged."""
    lines = [
        " /**@type {String}*/(string)",
        " * @param{Number=} opt_blah A number",
        "/** @private @return {!Boolean} */",
        " * @param {number|String}",
    ]
    for line in lines:
      self.ShouldFailWrapperTypeCheck(line)

  def ShouldFailVarNameCheck(self, line):
    """Checks that var unix_hacker, $dollar are style errors."""
    error = self.checker.VarNameCheck(1, line)
    self.assertNotEqual('', error,
        msg='Should be flagged as style error: ' + line)
    highlight = test_util.GetHighlight(line, error)
    # Fixed: dropped a stray trailing semicolon on this assertion.
    self.assertFalse('var ' in highlight)

  def ShouldPassVarNameCheck(self, line):
    """Checks that variableNamesLikeThis aren't style errors."""
    self.assertEqual('', self.checker.VarNameCheck(1, line),
        msg='Should not be flagged as style error: ' + line)

  def testVarNameFails(self):
    """Variable names with underscores must be flagged."""
    lines = [
        "var private_;",
        " var _super_private",
        " var unix_hacker = someFunc();",
    ]
    for line in lines:
      self.ShouldFailVarNameCheck(line)

  def testVarNamePasses(self):
    """Conventional camelCase (and grandfathered) names must pass."""
    lines = [
        " var namesLikeThis = [];",
        " for (var i = 0; i < 10; ++i) { ",
        "for (var i in obj) {",
        " var one, two, three;",
        " var magnumPI = {};",
        " var g_browser = 'da browzer';",
        "/** @const */ var Bla = options.Bla;",  # goog.scope() replacement.
        " var $ = function() {",                 # For legacy reasons.
        " var StudlyCaps = cr.define('bla')",    # Classes.
        " var SCARE_SMALL_CHILDREN = [",         # TODO(dbeam): add @const in
                                                 # front of all these vars like
        "/** @const */ CONST_VAR = 1;",          # this line has (<--).
    ]
    for line in lines:
      self.ShouldPassVarNameCheck(line)
# Standard unittest entry point.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
curiousguy13/shogun | tests/integration/generator/preproc.py | 22 | 2603 | """Generator for Preprocessors"""
import shogun.Library as library
from shogun.Kernel import GaussianKernel, CommWordStringKernel, \
CommUlongStringKernel
import fileop
import featop
import dataop
import category
def _compute (feats, params):
	"""Perform computations on kernel using preprocessors.

	@param feats dict with 'train' and 'test' features for the kernel
	@param params dict of kernel parameters; 'name' selects the kernel
		class and the optional 'args' entry supplies constructor args
	@return dict of testcase data ready to be written to file
	"""

	output=fileop.get_output(category.KERNEL, params)

	# Instantiate the kernel class named "<name>Kernel" on the train data.
	fun=eval(params['name']+'Kernel')
	if params.has_key('args'):
		kernel=fun(feats['train'], feats['train'], *params['args']['val'])
	else:
		kernel=fun(feats['train'], feats['train'])
	output['kernel_matrix_train']=kernel.get_kernel_matrix()
	# Re-initialize with the test features for the train/test matrix.
	kernel.init(feats['train'], feats['test'])
	output['kernel_matrix_test']=kernel.get_kernel_matrix()

	return output
def _run_string_complex (ftype):
	"""Run preprocessor applied on complex StringFeatures.

	@param ftype Feature type, like Word
	"""

	kernel_params={}
	kernel_params['name']='Comm'+ftype+'String'
	kernel_params['accuracy']=1e-9
	kernel_params['feature_class']='string_complex'
	kernel_params['feature_type']=ftype
	kernel_params['data']=dataop.get_dna()

	features=featop.get_features(
		kernel_params['feature_class'], kernel_params['feature_type'],
		kernel_params['data'])

	# string_complex gets preproc added implicitely on Word/Ulong feats
	testcase=_compute(features, kernel_params)

	preproc_params={
		'name': 'Sort'+ftype+'String'
	}
	testcase.update(fileop.get_output(category.PREPROC, preproc_params))
	fileop.write(category.PREPROC, testcase)
def _run_real (name, args=None):
	"""Run preprocessor applied on RealFeatures.

	@param name name of the preprocessor
	@param args argument list (in a dict) for the preprocessor
	"""

	kernel_params={
		'name': 'Gaussian',
		'accuracy': 1e-8,
		'data': dataop.get_rand(),
		'feature_class': 'simple',
		'feature_type': 'Real',
		'args': {'key': ('width',), 'val': (1.2,)}
	}
	features=featop.get_features(
		kernel_params['feature_class'], kernel_params['feature_type'],
		kernel_params['data'])

	# Attach the preprocessor under test to the features.
	if args:
		features=featop.add_preproc(name, features, *args['val'])
	else:
		features=featop.add_preproc(name, features)

	testcase=_compute(features, kernel_params)

	preproc_params={'name': name}
	if args:
		preproc_params['args']=args
	testcase.update(fileop.get_output(category.PREPROC, preproc_params))
	fileop.write(category.PREPROC, testcase)
def run():
	"""Run generator for all preprocessors."""
	_run_real('LogPlusOne')
	_run_real('NormOne')
	# PruneVarSubMean is exercised both without and with division by
	# the standard deviation.
	_run_real('PruneVarSubMean', {'key': ('divide',), 'val': (False,)})
	_run_real('PruneVarSubMean', {'key': ('divide',), 'val': (True,)})
	_run_string_complex('Word')
	_run_string_complex('Ulong')

	# Disabled generators, kept for reference:
	# _run_norm_derivative_lem3()
	# _run_pcacut()
| gpl-3.0 |
hifly/OpenUpgrade | addons/account_cancel/__init__.py | 702 | 1046 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cloudbearings/vitess | test/initial_sharding.py | 8 | 20125 | #!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
# This test simulates the first time a database has to be split:
# - we start with a keyspace with a single shard and a single table
# - we add and populate the sharding key
# - we set the sharding key in the topology
# - we clone into 2 instances
# - we enable filtered replication
# - we move all serving types
# - we scrap the source tablets
# - we remove the original shard
import base64
import logging
import threading
import struct
import time
import unittest
from vtdb import keyrange_constants
import environment
import utils
import tablet
# Sharding key type for this test: unsigned 64-bit integers, packed
# big-endian when a binary representation is needed.
keyspace_id_type = keyrange_constants.KIT_UINT64
pack_keyspace_id = struct.Struct('!Q').pack

# initial shard, covers everything
shard_master = tablet.Tablet()
shard_replica = tablet.Tablet()
shard_rdonly1 = tablet.Tablet()

# split shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly1 = tablet.Tablet()

# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly1 = tablet.Tablet()
def setUpModule():
  """Start the topo server and all tablet MySQL instances in parallel."""
  try:
    environment.topo_server().setup()

    setup_procs = [
        shard_master.init_mysql(),
        shard_replica.init_mysql(),
        shard_rdonly1.init_mysql(),
        shard_0_master.init_mysql(),
        shard_0_replica.init_mysql(),
        shard_0_rdonly1.init_mysql(),
        shard_1_master.init_mysql(),
        shard_1_replica.init_mysql(),
        shard_1_rdonly1.init_mysql(),
        ]
    utils.wait_procs(setup_procs)
  except:
    # Best-effort cleanup, then re-raise so the failure is reported.
    tearDownModule()
    raise
def tearDownModule():
  """Stop all MySQL instances and the topo server, then clean up files."""
  if utils.options.skip_teardown:
    return

  # Shut down all MySQL instances in parallel; errors are ignored so
  # one stuck tablet does not abort the rest of the teardown.
  teardown_procs = [
      shard_master.teardown_mysql(),
      shard_replica.teardown_mysql(),
      shard_rdonly1.teardown_mysql(),
      shard_0_master.teardown_mysql(),
      shard_0_replica.teardown_mysql(),
      shard_0_rdonly1.teardown_mysql(),
      shard_1_master.teardown_mysql(),
      shard_1_replica.teardown_mysql(),
      shard_1_rdonly1.teardown_mysql(),
      ]
  utils.wait_procs(teardown_procs, raise_on_error=False)

  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()

  shard_master.remove_tree()
  shard_replica.remove_tree()
  shard_rdonly1.remove_tree()
  shard_0_master.remove_tree()
  shard_0_replica.remove_tree()
  shard_0_rdonly1.remove_tree()
  shard_1_master.remove_tree()
  shard_1_replica.remove_tree()
  shard_1_rdonly1.remove_tree()
class TestInitialSharding(unittest.TestCase):
# create_schema will create the same schema on the keyspace
  # create_schema will create the same schema on the keyspace
  def _create_schema(self):
    """Apply the initial (unsharded) table schema to the keyspace."""
    create_table_template = '''create table %s(
id bigint auto_increment,
msg varchar(64),
primary key (id),
index by_msg (msg)
) Engine=InnoDB'''

    utils.run_vtctl(['ApplySchema',
                     '-sql=' + create_table_template % ('resharding1'),
                     'test_keyspace'],
                    auto_log=True)
def _add_sharding_key_to_schema(self):
if keyspace_id_type == keyrange_constants.KIT_BYTES:
t = 'varbinary(64)'
else:
t = 'bigint(20) unsigned'
sql = 'alter table %s add keyspace_id ' + t
utils.run_vtctl(['ApplySchema',
'-sql=' + sql % ('resharding1'),
'test_keyspace'],
auto_log=True)
def _mark_sharding_key_not_null(self):
if keyspace_id_type == keyrange_constants.KIT_BYTES:
t = 'varbinary(64)'
else:
t = 'bigint(20) unsigned'
sql = 'alter table %s modify keyspace_id ' + t + ' not null'
utils.run_vtctl(['ApplySchema',
'-sql=' + sql % ('resharding1'),
'test_keyspace'],
auto_log=True)
# _insert_startup_value inserts a value in the MySQL database before it
# is sharded
def _insert_startup_value(self, tablet, table, id, msg):
tablet.mquery('vt_test_keyspace', [
'begin',
'insert into %s(id, msg) values(%d, "%s")' % (table, id, msg),
'commit'
], write=True)
def _insert_startup_values(self):
self._insert_startup_value(shard_master, 'resharding1', 1, 'msg1')
self._insert_startup_value(shard_master, 'resharding1', 2, 'msg2')
self._insert_startup_value(shard_master, 'resharding1', 3, 'msg3')
  def _backfill_keyspace_id(self, tablet):
    # Backfill the sharding key for the startup rows: id=1 lands in the
    # low half of the keyspace (shard -80), ids 2 and 3 in the high half
    # (shard 80-).
    tablet.mquery('vt_test_keyspace', [
        'begin',
        'update resharding1 set keyspace_id=0x1000000000000000 where id=1',
        'update resharding1 set keyspace_id=0x9000000000000000 where id=2',
        'update resharding1 set keyspace_id=0xD000000000000000 where id=3',
        'commit'
        ], write=True)
# _insert_value inserts a value in the MySQL database along with the comments
# required for routing.
def _insert_value(self, tablet, table, id, msg, keyspace_id):
if keyspace_id_type == keyrange_constants.KIT_BYTES:
k = base64.b64encode(pack_keyspace_id(keyspace_id))
else:
k = '%d' % keyspace_id
tablet.mquery(
'vt_test_keyspace',
['begin',
'insert into %s(id, msg, keyspace_id) '
'values(%d, "%s", 0x%x) /* EMD keyspace_id:%s user_id:%d */' %
(table, id, msg, keyspace_id, k, id),
'commit'],
write=True)
def _get_value(self, tablet, table, id):
return tablet.mquery(
'vt_test_keyspace',
'select id, msg, keyspace_id from %s where id=%d' % (table, id))
def _check_value(self, tablet, table, id, msg, keyspace_id,
should_be_here=True):
result = self._get_value(tablet, table, id)
if keyspace_id_type == keyrange_constants.KIT_BYTES:
fmt = '%s'
keyspace_id = pack_keyspace_id(keyspace_id)
else:
fmt = '%x'
if should_be_here:
self.assertEqual(result, ((id, msg, keyspace_id),),
('Bad row in tablet %s for id=%d, keyspace_id=' +
fmt + ', row=%s') % (tablet.tablet_alias, id,
keyspace_id, str(result)))
else:
self.assertEqual(len(result), 0,
('Extra row in tablet %s for id=%d, keyspace_id=' +
fmt + ': %s') % (tablet.tablet_alias, id, keyspace_id,
str(result)))
# _is_value_present_and_correct tries to read a value.
# if it is there, it will check it is correct and return True if it is.
# if not correct, it will self.fail.
# if not there, it will return False.
def _is_value_present_and_correct(self, tablet, table, id, msg, keyspace_id):
result = self._get_value(tablet, table, id)
if len(result) == 0:
return False
if keyspace_id_type == keyrange_constants.KIT_BYTES:
fmt = '%s'
keyspace_id = pack_keyspace_id(keyspace_id)
else:
fmt = '%x'
self.assertEqual(result, ((id, msg, keyspace_id),),
('Bad row in tablet %s for id=%d, keyspace_id=' + fmt) % (
tablet.tablet_alias, id, keyspace_id))
return True
def _check_startup_values(self):
# check first value is in the right shard
for t in [shard_0_master, shard_0_replica, shard_0_rdonly1]:
self._check_value(t, 'resharding1', 1, 'msg1', 0x1000000000000000)
for t in [shard_1_master, shard_1_replica, shard_1_rdonly1]:
self._check_value(t, 'resharding1', 1, 'msg1',
0x1000000000000000, should_be_here=False)
# check second value is in the right shard
for t in [shard_0_master, shard_0_replica, shard_0_rdonly1]:
self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000,
should_be_here=False)
for t in [shard_1_master, shard_1_replica, shard_1_rdonly1]:
self._check_value(t, 'resharding1', 2, 'msg2', 0x9000000000000000)
# check third value is in the right shard too
for t in [shard_0_master, shard_0_replica, shard_0_rdonly1]:
self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000,
should_be_here=False)
for t in [shard_1_master, shard_1_replica, shard_1_rdonly1]:
self._check_value(t, 'resharding1', 3, 'msg3', 0xD000000000000000)
def _insert_lots(self, count, base=0):
for i in xrange(count):
self._insert_value(shard_master, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i)
self._insert_value(shard_master, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i)
# _check_lots returns how many of the values we have, in percents.
def _check_lots(self, count, base=0):
found = 0
for i in xrange(count):
if self._is_value_present_and_correct(shard_1_replica, 'resharding1',
10000 + base + i, 'msg-range1-%d' %
i, 0xA000000000000000 + base + i):
found += 1
if self._is_value_present_and_correct(shard_1_replica, 'resharding1',
20000 + base + i, 'msg-range2-%d' %
i, 0xE000000000000000 + base + i):
found += 1
percent = found * 100 / count / 2
logging.debug('I have %d%% of the data', percent)
return percent
def _check_lots_timeout(self, count, threshold, timeout, base=0):
while True:
value = self._check_lots(count, base=base)
if value >= threshold:
return
timeout = utils.wait_step('enough data went through', timeout)
# _check_lots_not_present makes sure no data is in the wrong shard
def _check_lots_not_present(self, count, base=0):
for i in xrange(count):
self._check_value(shard_0_replica, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i,
should_be_here=False)
self._check_value(shard_0_replica, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i,
should_be_here=False)
def test_resharding(self):
# create the keyspace with just one shard
utils.run_vtctl(['CreateKeyspace',
'test_keyspace'])
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', 'test_keyspace',
'keyspace_id', keyspace_id_type])
shard_master.init_tablet('master', 'test_keyspace', '0')
shard_replica.init_tablet('replica', 'test_keyspace', '0')
shard_rdonly1.init_tablet('rdonly', 'test_keyspace', '0')
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# create databases so vttablet can start behaving normally
for t in [shard_master, shard_replica, shard_rdonly1]:
t.create_db('vt_test_keyspace')
t.start_vttablet(wait_for_state=None)
# wait for the tablets
shard_master.wait_for_vttablet_state('SERVING')
shard_replica.wait_for_vttablet_state('SERVING')
shard_rdonly1.wait_for_vttablet_state('SERVING')
# reparent to make the tablets work
utils.run_vtctl(['InitShardMaster', 'test_keyspace/0',
shard_master.tablet_alias], auto_log=True)
# create the tables and add startup values
self._create_schema()
self._insert_startup_values()
# change the schema, backfill keyspace_id, and change schema again
self._add_sharding_key_to_schema()
self._backfill_keyspace_id(shard_master)
self._mark_sharding_key_not_null()
# create the split shards
shard_0_master.init_tablet('master', 'test_keyspace', '-80')
shard_0_replica.init_tablet('replica', 'test_keyspace', '-80')
shard_0_rdonly1.init_tablet('rdonly', 'test_keyspace', '-80')
shard_1_master.init_tablet('master', 'test_keyspace', '80-')
shard_1_replica.init_tablet('replica', 'test_keyspace', '80-')
shard_1_rdonly1.init_tablet('rdonly', 'test_keyspace', '80-')
# start vttablet on the split shards (no db created,
# so they're all not serving)
for t in [shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1]:
t.start_vttablet(wait_for_state=None)
for t in [shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1]:
t.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', 'test_keyspace/-80',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', 'test_keyspace/80-',
shard_1_master.tablet_alias], auto_log=True)
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n'
'Partitions(rdonly): -\n'
'Partitions(replica): -\n',
keyspace_id_type=keyspace_id_type)
# we need to create the schema, and the worker will do data copying
for keyspace_shard in ('test_keyspace/-80', 'test_keyspace/80-'):
utils.run_vtctl(['CopySchemaShard',
'--exclude_tables', 'unrelated',
shard_rdonly1.tablet_alias,
keyspace_shard],
auto_log=True)
utils.run_vtworker(['--cell', 'test_nj',
'--command_display_interval', '10ms',
'SplitClone',
'--exclude_tables', 'unrelated',
'--strategy=-populate_blp_checkpoint',
'--source_reader_count', '10',
'--min_table_size_for_split', '1',
'test_keyspace/0'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
# check the startup values are in the right place
self._check_startup_values()
# check the schema too
utils.run_vtctl(['ValidateSchemaKeyspace', 'test_keyspace'], auto_log=True)
# check the binlog players are running
logging.debug('Waiting for binlog players to start on new masters...')
shard_0_master.wait_for_binlog_player_count(1)
shard_1_master.wait_for_binlog_player_count(1)
# testing filtered replication: insert a bunch of data on shard 1,
# check we get most of it after a few seconds, wait for binlog server
# timeout, check we get all of it.
logging.debug('Inserting lots of data on source shard')
self._insert_lots(1000)
logging.debug('Checking 80 percent of data is sent quickly')
self._check_lots_timeout(1000, 80, 5)
logging.debug('Checking all data goes through eventually')
self._check_lots_timeout(1000, 100, 20)
logging.debug('Checking no data was sent the wrong way')
self._check_lots_not_present(1000)
# use vtworker to compare the data
logging.debug('Running vtworker SplitDiff for -80')
utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff', 'test_keyspace/-80'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_0_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
logging.debug('Running vtworker SplitDiff for 80-')
utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff', 'test_keyspace/80-'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.pause('Good time to test vtworker for diffs')
# check we can't migrate the master just yet
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'],
expect_fail=True)
# now serve rdonly from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'rdonly'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -\n',
keyspace_id_type=keyspace_id_type)
# then serve replica from the split shards
source_tablet = shard_replica
destination_tablets = [shard_0_replica, shard_1_replica]
utils.run_vtctl(
['MigrateServedTypes', 'test_keyspace/0', 'replica'], auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=keyspace_id_type)
# move replica back and forth
utils.run_vtctl(
['MigrateServedTypes', '-reverse', 'test_keyspace/0', 'replica'],
auto_log=True)
# After a backwards migration, queryservice should be enabled on
# source and disabled on destinations
utils.check_tablet_query_service(self, source_tablet, True, False)
utils.check_tablet_query_services(self, destination_tablets, False, True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -\n',
keyspace_id_type=keyspace_id_type)
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'replica'],
auto_log=True)
# After a forwards migration, queryservice should be disabled on
# source and enabled on destinations
utils.check_tablet_query_service(self, source_tablet, False, True)
utils.check_tablet_query_services(self, destination_tablets, True, False)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=keyspace_id_type)
# then serve master from the split shards
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/0', 'master'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=keyspace_id_type)
# check the binlog players are gone now
shard_0_master.wait_for_binlog_player_count(0)
shard_1_master.wait_for_binlog_player_count(0)
# make sure we can't delete a shard with tablets
utils.run_vtctl(['DeleteShard', 'test_keyspace/0'], expect_fail=True)
# scrap the original tablets in the original shard
for t in [shard_master, shard_replica, shard_rdonly1]:
utils.run_vtctl(['ScrapTablet', t.tablet_alias], auto_log=True)
tablet.kill_tablets([shard_master, shard_replica, shard_rdonly1])
for t in [shard_master, shard_replica, shard_rdonly1]:
utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
# rebuild the serving graph, all mentions of the old shards shoud be gone
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# delete the original shard
utils.run_vtctl(['DeleteShard', 'test_keyspace/0'], auto_log=True)
# kill everything else
tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_rdonly1,
shard_1_master, shard_1_replica, shard_1_rdonly1])
# Entry point: delegates flag parsing and test running to the shared
# vitess test utilities.
if __name__ == '__main__':
  utils.main()
| bsd-3-clause |
HeinleinSupport/check_mk | python-jinja2/lib/python/jinja2/lexer.py | 90 | 28559 | # -*- coding: utf-8 -*-
"""
jinja2.lexer
~~~~~~~~~~~~
This module implements a Jinja / Python combination lexer. The
`Lexer` class provided by this module is used to do some preprocessing
for Jinja.
On the one hand it filters out invalid operators like the bitshift
operators we don't allow in templates. On the other hand it separates
template code and python code in expressions.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
from collections import deque
from operator import itemgetter
from jinja2._compat import implements_iterator, intern, iteritems, text_type
from jinja2.exceptions import TemplateSyntaxError
from jinja2.utils import LRUCache
# cache for the lexers. Exists in order to be able to have multiple
# environments with the same lexer
_lexer_cache = LRUCache(50)

# static regular expressions
whitespace_re = re.compile(r'\s+', re.U)
string_re = re.compile(r"('([^'\\]*(?:\\.[^'\\]*)*)'"
                       r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
integer_re = re.compile(r'\d+')

try:
    # check if this Python supports Unicode identifiers
    compile('föö', '<unknown>', 'eval')
except SyntaxError:
    # no Unicode support, use ASCII identifiers
    name_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*')
    check_ident = False
else:
    # Unicode support, build a pattern to match valid characters, and set flag
    # to use str.isidentifier to validate during lexing
    from jinja2 import _identifier
    name_re = re.compile(r'[\w{0}]+'.format(_identifier.pattern))
    check_ident = True
    # remove the pattern from memory after building the regex
    import sys
    del sys.modules['jinja2._identifier']
    import jinja2
    del jinja2._identifier
    del _identifier

float_re = re.compile(r'(?<!\.)\d+\.\d+')
newline_re = re.compile(r'(\r\n|\r|\n)')

# internal the tokens and keep references to them
TOKEN_ADD = intern('add')
TOKEN_ASSIGN = intern('assign')
TOKEN_COLON = intern('colon')
TOKEN_COMMA = intern('comma')
TOKEN_DIV = intern('div')
TOKEN_DOT = intern('dot')
TOKEN_EQ = intern('eq')
TOKEN_FLOORDIV = intern('floordiv')
TOKEN_GT = intern('gt')
TOKEN_GTEQ = intern('gteq')
TOKEN_LBRACE = intern('lbrace')
TOKEN_LBRACKET = intern('lbracket')
TOKEN_LPAREN = intern('lparen')
TOKEN_LT = intern('lt')
TOKEN_LTEQ = intern('lteq')
TOKEN_MOD = intern('mod')
TOKEN_MUL = intern('mul')
TOKEN_NE = intern('ne')
TOKEN_PIPE = intern('pipe')
TOKEN_POW = intern('pow')
TOKEN_RBRACE = intern('rbrace')
TOKEN_RBRACKET = intern('rbracket')
TOKEN_RPAREN = intern('rparen')
TOKEN_SEMICOLON = intern('semicolon')
TOKEN_SUB = intern('sub')
TOKEN_TILDE = intern('tilde')
TOKEN_WHITESPACE = intern('whitespace')
TOKEN_FLOAT = intern('float')
TOKEN_INTEGER = intern('integer')
TOKEN_NAME = intern('name')
TOKEN_STRING = intern('string')
TOKEN_OPERATOR = intern('operator')
TOKEN_BLOCK_BEGIN = intern('block_begin')
TOKEN_BLOCK_END = intern('block_end')
TOKEN_VARIABLE_BEGIN = intern('variable_begin')
TOKEN_VARIABLE_END = intern('variable_end')
TOKEN_RAW_BEGIN = intern('raw_begin')
TOKEN_RAW_END = intern('raw_end')
TOKEN_COMMENT_BEGIN = intern('comment_begin')
TOKEN_COMMENT_END = intern('comment_end')
TOKEN_COMMENT = intern('comment')
TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
TOKEN_LINESTATEMENT_END = intern('linestatement_end')
TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
TOKEN_LINECOMMENT_END = intern('linecomment_end')
TOKEN_LINECOMMENT = intern('linecomment')
TOKEN_DATA = intern('data')
TOKEN_INITIAL = intern('initial')
TOKEN_EOF = intern('eof')

# bind operators to token types
operators = {
    '+': TOKEN_ADD,
    '-': TOKEN_SUB,
    '/': TOKEN_DIV,
    '//': TOKEN_FLOORDIV,
    '*': TOKEN_MUL,
    '%': TOKEN_MOD,
    '**': TOKEN_POW,
    '~': TOKEN_TILDE,
    '[': TOKEN_LBRACKET,
    ']': TOKEN_RBRACKET,
    '(': TOKEN_LPAREN,
    ')': TOKEN_RPAREN,
    '{': TOKEN_LBRACE,
    '}': TOKEN_RBRACE,
    '==': TOKEN_EQ,
    '!=': TOKEN_NE,
    '>': TOKEN_GT,
    '>=': TOKEN_GTEQ,
    '<': TOKEN_LT,
    '<=': TOKEN_LTEQ,
    '=': TOKEN_ASSIGN,
    '.': TOKEN_DOT,
    ':': TOKEN_COLON,
    '|': TOKEN_PIPE,
    ',': TOKEN_COMMA,
    ';': TOKEN_SEMICOLON
}

reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
assert len(operators) == len(reverse_operators), 'operators dropped'
# longest operators first so e.g. '**' wins over '*' in alternation
operator_re = re.compile('(%s)' % '|'.join(re.escape(x) for x in
                         sorted(operators, key=lambda x: -len(x))))

# token types the parser never sees (stripped in Lexer.wrap)
ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN, TOKEN_COMMENT,
                            TOKEN_COMMENT_END, TOKEN_WHITESPACE,
                            TOKEN_LINECOMMENT_BEGIN, TOKEN_LINECOMMENT_END,
                            TOKEN_LINECOMMENT])
# token types dropped entirely when their text is empty
ignore_if_empty = frozenset([TOKEN_WHITESPACE, TOKEN_DATA,
                             TOKEN_COMMENT, TOKEN_LINECOMMENT])
def _describe_token_type(token_type):
    """Map a token type to a human readable description.

    Operator token types render as their source form; a handful of
    structural types get fixed phrases; anything else is returned as-is.
    """
    if token_type in reverse_operators:
        return reverse_operators[token_type]
    descriptions = {
        TOKEN_COMMENT_BEGIN: 'begin of comment',
        TOKEN_COMMENT_END: 'end of comment',
        TOKEN_COMMENT: 'comment',
        TOKEN_LINECOMMENT: 'comment',
        TOKEN_BLOCK_BEGIN: 'begin of statement block',
        TOKEN_BLOCK_END: 'end of statement block',
        TOKEN_VARIABLE_BEGIN: 'begin of print statement',
        TOKEN_VARIABLE_END: 'end of print statement',
        TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
        TOKEN_LINESTATEMENT_END: 'end of line statement',
        TOKEN_DATA: 'template data / text',
        TOKEN_EOF: 'end of template',
    }
    return descriptions.get(token_type, token_type)
def describe_token(token):
    """Return a human readable description of the given token."""
    return token.value if token.type == 'name' else \
        _describe_token_type(token.type)
def describe_token_expr(expr):
    """Like `describe_token` but for token expressions.

    An expression is either a bare token type or ``'type:value'``;
    ``'name:xyz'`` describes itself as the value.
    """
    if ':' not in expr:
        return _describe_token_type(expr)
    token_type, value = expr.split(':', 1)
    if token_type == 'name':
        return value
    return _describe_token_type(token_type)
def count_newlines(value):
    """Count the number of newline characters in the string. This is
    useful for extensions that filter a stream.
    """
    return sum(1 for _match in newline_re.finditer(value))
def compile_rules(environment):
    """Compile the environment's delimiters into root lexing rules.

    Returns ``(name, escaped_pattern)`` pairs ordered longest delimiter
    first so overlapping delimiters are matched greedily.
    """
    escape = re.escape
    rules = [
        (len(environment.comment_start_string), 'comment',
         escape(environment.comment_start_string)),
        (len(environment.block_start_string), 'block',
         escape(environment.block_start_string)),
        (len(environment.variable_start_string), 'variable',
         escape(environment.variable_start_string)),
    ]

    if environment.line_statement_prefix is not None:
        rules.append((len(environment.line_statement_prefix), 'linestatement',
                      r'^[ \t\v]*' + escape(environment.line_statement_prefix)))
    if environment.line_comment_prefix is not None:
        rules.append((len(environment.line_comment_prefix), 'linecomment',
                      r'(?:^|(?<=\S))[^\S\r\n]*' +
                      escape(environment.line_comment_prefix)))

    rules.sort(reverse=True)
    return [rule[1:] for rule in rules]
class Failure(object):
    """Class that raises a `TemplateSyntaxError` if called.
    Used by the `Lexer` to specify known errors.
    """

    def __init__(self, message, cls=TemplateSyntaxError):
        # message text and exception class to raise; stored until a
        # lexing rule containing this Failure actually matches
        self.message = message
        self.error_class = cls

    def __call__(self, lineno, filename):
        # invoked by the tokenizer with position info when triggered
        raise self.error_class(self.message, lineno, filename)
class Token(tuple):
    """Immutable ``(lineno, type, value)`` token produced by the lexer."""
    __slots__ = ()
    # expose the three tuple slots as named read-only attributes
    lineno, type, value = (property(itemgetter(x)) for x in range(3))

    def __new__(cls, lineno, type, value):
        # intern the type string so later comparisons can use identity
        # (e.g. ``token.type is TOKEN_EOF``)
        return tuple.__new__(cls, (lineno, intern(str(type)), value))

    def __str__(self):
        # operators print as their source form, names as their value,
        # anything else as the raw type
        if self.type in reverse_operators:
            return reverse_operators[self.type]
        elif self.type == 'name':
            return self.value
        return self.type

    def test(self, expr):
        """Test a token against a token expression. This can either be a
        token type or ``'token_type:token_value'``. This can only test
        against string values and types.
        """
        # here we do a regular string equality check as test_any is usually
        # passed an iterable of not interned strings.
        if self.type == expr:
            return True
        elif ':' in expr:
            return expr.split(':', 1) == [self.type, self.value]
        return False

    def test_any(self, *iterable):
        """Test against multiple token expressions."""
        for expr in iterable:
            if self.test(expr):
                return True
        return False

    def __repr__(self):
        return 'Token(%r, %r, %r)' % (
            self.lineno,
            self.type,
            self.value
        )
@implements_iterator
class TokenStreamIterator(object):
    """The iterator for tokenstreams. Iterate over the stream
    until the eof token is reached.
    """

    def __init__(self, stream):
        self.stream = stream

    def __iter__(self):
        return self

    def __next__(self):
        token = self.stream.current
        # once EOF becomes the current token, close the stream and stop
        if token.type is TOKEN_EOF:
            self.stream.close()
            raise StopIteration()
        # advance the underlying stream and hand out the token we saw
        next(self.stream)
        return token
@implements_iterator
class TokenStream(object):
    """A token stream is an iterable that yields :class:`Token`\\s. The
    parser however does not iterate over it but calls :meth:`next` to go
    one token ahead. The current active token is stored as :attr:`current`.
    """

    def __init__(self, generator, name, filename):
        self._iter = iter(generator)
        # tokens pushed back via :meth:`push`; served before the iterator
        self._pushed = deque()
        self.name = name
        self.filename = filename
        self.closed = False
        self.current = Token(1, TOKEN_INITIAL, '')
        # prime `current` with the first real token
        next(self)

    def __iter__(self):
        return TokenStreamIterator(self)

    def __bool__(self):
        return bool(self._pushed) or self.current.type is not TOKEN_EOF
    __nonzero__ = __bool__  # py2

    eos = property(lambda x: not x, doc="Are we at the end of the stream?")

    def push(self, token):
        """Push a token back to the stream."""
        self._pushed.append(token)

    def look(self):
        """Look at the next token."""
        # advance, remember the lookahead, then restore stream state
        old_token = next(self)
        result = self.current
        self.push(result)
        self.current = old_token
        return result

    def skip(self, n=1):
        """Got n tokens ahead."""
        for x in range(n):
            next(self)

    def next_if(self, expr):
        """Perform the token test and return the token if it matched.
        Otherwise the return value is `None`.
        """
        if self.current.test(expr):
            return next(self)

    def skip_if(self, expr):
        """Like :meth:`next_if` but only returns `True` or `False`."""
        return self.next_if(expr) is not None

    def __next__(self):
        """Go one token ahead and return the old one.

        Use the built-in :func:`next` instead of calling this directly.
        """
        rv = self.current
        if self._pushed:
            self.current = self._pushed.popleft()
        elif self.current.type is not TOKEN_EOF:
            try:
                self.current = next(self._iter)
            except StopIteration:
                self.close()
        return rv

    def close(self):
        """Close the stream."""
        # replace the current token with EOF and drop the generator
        self.current = Token(self.current.lineno, TOKEN_EOF, '')
        self._iter = None
        self.closed = True

    def expect(self, expr):
        """Expect a given token type and return it. This accepts the same
        argument as :meth:`jinja2.lexer.Token.test`.
        """
        if not self.current.test(expr):
            expr = describe_token_expr(expr)
            if self.current.type is TOKEN_EOF:
                raise TemplateSyntaxError('unexpected end of template, '
                                          'expected %r.' % expr,
                                          self.current.lineno,
                                          self.name, self.filename)
            raise TemplateSyntaxError("expected token %r, got %r" %
                                      (expr, describe_token(self.current)),
                                      self.current.lineno,
                                      self.name, self.filename)
        try:
            return self.current
        finally:
            # advance past the token we are returning
            next(self)
def get_lexer(environment):
    """Return a lexer which is probably cached.

    The cache key is the full set of environment settings that influence
    lexing, so environments with identical syntax share one lexer.
    """
    key = (
        environment.block_start_string,
        environment.block_end_string,
        environment.variable_start_string,
        environment.variable_end_string,
        environment.comment_start_string,
        environment.comment_end_string,
        environment.line_statement_prefix,
        environment.line_comment_prefix,
        environment.trim_blocks,
        environment.lstrip_blocks,
        environment.newline_sequence,
        environment.keep_trailing_newline,
    )
    lexer = _lexer_cache.get(key)
    if lexer is not None:
        return lexer
    lexer = Lexer(environment)
    _lexer_cache[key] = lexer
    return lexer
class Lexer(object):
    """Class that implements a lexer for a given environment. Automatically
    created by the environment class, usually you don't have to do that.

    Note that the lexer is not automatically bound to an environment.
    Multiple environments can share the same lexer.
    """

    def __init__(self, environment):
        # shortcuts
        c = lambda x: re.compile(x, re.M | re.S)
        e = re.escape

        # lexing rules for tags; each rule is (regex, token type(s), new state)
        tag_rules = [
            (whitespace_re, TOKEN_WHITESPACE, None),
            (float_re, TOKEN_FLOAT, None),
            (integer_re, TOKEN_INTEGER, None),
            (name_re, TOKEN_NAME, None),
            (string_re, TOKEN_STRING, None),
            (operator_re, TOKEN_OPERATOR, None)
        ]

        # assemble the root lexing rule. because "|" is ungreedy
        # we have to sort by length so that the lexer continues working
        # as expected when we have parsing rules like <% for block and
        # <%= for variables. (if someone wants asp like syntax)
        # variables are just part of the rules if variable processing
        # is required.
        root_tag_rules = compile_rules(environment)

        # block suffix if trimming is enabled
        block_suffix_re = environment.trim_blocks and '\\n?' or ''

        # strip leading spaces if lstrip_blocks is enabled
        prefix_re = {}
        if environment.lstrip_blocks:
            # use '{%+' to manually disable lstrip_blocks behavior
            no_lstrip_re = e('+')
            # detect overlap between block and variable or comment strings
            block_diff = c(r'^%s(.*)' % e(environment.block_start_string))
            # make sure we don't mistake a block for a variable or a comment
            m = block_diff.match(environment.comment_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''
            m = block_diff.match(environment.variable_start_string)
            no_lstrip_re += m and r'|%s' % e(m.group(1)) or ''

            # detect overlap between comment and variable strings
            comment_diff = c(r'^%s(.*)' % e(environment.comment_start_string))
            m = comment_diff.match(environment.variable_start_string)
            no_variable_re = m and r'(?!%s)' % e(m.group(1)) or ''

            lstrip_re = r'^[ \t]*'
            block_prefix_re = r'%s%s(?!%s)|%s\+?' % (
                lstrip_re,
                e(environment.block_start_string),
                no_lstrip_re,
                e(environment.block_start_string),
            )
            comment_prefix_re = r'%s%s%s|%s\+?' % (
                lstrip_re,
                e(environment.comment_start_string),
                no_variable_re,
                e(environment.comment_start_string),
            )
            prefix_re['block'] = block_prefix_re
            prefix_re['comment'] = comment_prefix_re
        else:
            block_prefix_re = '%s' % e(environment.block_start_string)

        self.newline_sequence = environment.newline_sequence
        self.keep_trailing_newline = environment.keep_trailing_newline

        # global lexing rules, keyed by lexer state; each entry is a list
        # of (compiled regex, token type(s), new state) tuples
        self.rules = {
            'root': [
                # directives
                (c('(.*?)(?:%s)' % '|'.join(
                    [r'(?P<raw_begin>(?:\s*%s\-|%s)\s*raw\s*(?:\-%s\s*|%s))' % (
                        e(environment.block_start_string),
                        block_prefix_re,
                        e(environment.block_end_string),
                        e(environment.block_end_string)
                    )] + [
                        r'(?P<%s_begin>\s*%s\-|%s)' % (n, r, prefix_re.get(n, r))
                        for n, r in root_tag_rules
                    ])), (TOKEN_DATA, '#bygroup'), '#bygroup'),
                # data
                (c('.+'), TOKEN_DATA, None)
            ],
            # comments
            TOKEN_COMMENT_BEGIN: [
                (c(r'(.*?)((?:\-%s\s*|%s)%s)' % (
                    e(environment.comment_end_string),
                    e(environment.comment_end_string),
                    block_suffix_re
                )), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'),
                (c('(.)'), (Failure('Missing end of comment tag'),), None)
            ],
            # blocks
            TOKEN_BLOCK_BEGIN: [
                (c(r'(?:\-%s\s*|%s)%s' % (
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), TOKEN_BLOCK_END, '#pop'),
            ] + tag_rules,
            # variables
            TOKEN_VARIABLE_BEGIN: [
                (c(r'\-%s\s*|%s' % (
                    e(environment.variable_end_string),
                    e(environment.variable_end_string)
                )), TOKEN_VARIABLE_END, '#pop')
            ] + tag_rules,
            # raw block
            TOKEN_RAW_BEGIN: [
                (c(r'(.*?)((?:\s*%s\-|%s)\s*endraw\s*(?:\-%s\s*|%s%s))' % (
                    e(environment.block_start_string),
                    block_prefix_re,
                    e(environment.block_end_string),
                    e(environment.block_end_string),
                    block_suffix_re
                )), (TOKEN_DATA, TOKEN_RAW_END), '#pop'),
                (c('(.)'), (Failure('Missing end of raw directive'),), None)
            ],
            # line statements
            TOKEN_LINESTATEMENT_BEGIN: [
                (c(r'\s*(\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')
            ] + tag_rules,
            # line comments
            TOKEN_LINECOMMENT_BEGIN: [
                (c(r'(.*?)()(?=\n|$)'), (TOKEN_LINECOMMENT,
                 TOKEN_LINECOMMENT_END), '#pop')
            ]
        }

    def _normalize_newlines(self, value):
        """Called for strings and template data to normalize it to unicode."""
        return newline_re.sub(self.newline_sequence, value)

    def tokenize(self, source, name=None, filename=None, state=None):
        """Calls tokeniter + tokenize and wraps it in a token stream."""
        stream = self.tokeniter(source, name, filename, state)
        return TokenStream(self.wrap(stream, name, filename), name, filename)

    def wrap(self, stream, name=None, filename=None):
        """This is called with the stream as returned by `tokenize` and wraps
        every token in a :class:`Token` and converts the value.
        """
        for lineno, token, value in stream:
            if token in ignored_tokens:
                continue
            elif token == 'linestatement_begin':
                token = 'block_begin'
            elif token == 'linestatement_end':
                token = 'block_end'
            # we are not interested in those tokens in the parser
            elif token in ('raw_begin', 'raw_end'):
                continue
            elif token == 'data':
                value = self._normalize_newlines(value)
            elif token == 'keyword':
                token = value
            elif token == 'name':
                value = str(value)
                if check_ident and not value.isidentifier():
                    raise TemplateSyntaxError(
                        'Invalid character in identifier',
                        lineno, name, filename)
            elif token == 'string':
                # try to unescape string
                try:
                    value = self._normalize_newlines(value[1:-1]) \
                        .encode('ascii', 'backslashreplace') \
                        .decode('unicode-escape')
                except Exception as e:
                    msg = str(e).split(':')[-1].strip()
                    raise TemplateSyntaxError(msg, lineno, name, filename)
            elif token == 'integer':
                value = int(value)
            elif token == 'float':
                value = float(value)
            elif token == 'operator':
                token = operators[value]
            yield Token(lineno, token, value)

    def tokeniter(self, source, name, filename=None, state=None):
        """This method tokenizes the text and returns the tokens in a
        generator. Use this method if you just want to tokenize a template.

        Yields ``(lineno, token_type, value)`` triples.
        """
        source = text_type(source)
        lines = source.splitlines()
        if self.keep_trailing_newline and source:
            # splitlines() drops a trailing newline; re-add an empty line
            # so joining below preserves it
            for newline in ('\r\n', '\r', '\n'):
                if source.endswith(newline):
                    lines.append('')
                    break
        source = '\n'.join(lines)
        pos = 0
        lineno = 1
        stack = ['root']
        if state is not None and state != 'root':
            assert state in ('variable', 'block'), 'invalid state'
            stack.append(state + '_begin')
        else:
            state = 'root'
        statetokens = self.rules[stack[-1]]
        source_length = len(source)

        balancing_stack = []

        while 1:
            # tokenizer loop
            for regex, tokens, new_state in statetokens:
                m = regex.match(source, pos)
                # if no match we try again with the next rule
                if m is None:
                    continue

                # we only match blocks and variables if braces / parentheses
                # are balanced. continue parsing with the lower rule which
                # is the operator rule. do this only if the end tags look
                # like operators
                if balancing_stack and \
                   tokens in ('variable_end', 'block_end',
                              'linestatement_end'):
                    continue

                # tuples support more options
                if isinstance(tokens, tuple):
                    for idx, token in enumerate(tokens):
                        # failure group
                        if token.__class__ is Failure:
                            raise token(lineno, filename)
                        # bygroup is a bit more complex, in that case we
                        # yield for the current token the first named
                        # group that matched
                        elif token == '#bygroup':
                            for key, value in iteritems(m.groupdict()):
                                if value is not None:
                                    yield lineno, key, value
                                    lineno += value.count('\n')
                                    break
                            else:
                                raise RuntimeError('%r wanted to resolve '
                                                   'the token dynamically'
                                                   ' but no group matched'
                                                   % regex)
                        # normal group
                        else:
                            data = m.group(idx + 1)
                            if data or token not in ignore_if_empty:
                                yield lineno, token, data
                            lineno += data.count('\n')

                # strings as token just are yielded as it.
                else:
                    data = m.group()
                    # update brace/parentheses balance
                    if tokens == 'operator':
                        if data == '{':
                            balancing_stack.append('}')
                        elif data == '(':
                            balancing_stack.append(')')
                        elif data == '[':
                            balancing_stack.append(']')
                        elif data in ('}', ')', ']'):
                            if not balancing_stack:
                                raise TemplateSyntaxError('unexpected \'%s\'' %
                                                          data, lineno, name,
                                                          filename)
                            expected_op = balancing_stack.pop()
                            if expected_op != data:
                                raise TemplateSyntaxError('unexpected \'%s\', '
                                                          'expected \'%s\'' %
                                                          (data, expected_op),
                                                          lineno, name,
                                                          filename)
                    # yield items
                    if data or tokens not in ignore_if_empty:
                        yield lineno, tokens, data
                    lineno += data.count('\n')

                # fetch new position into new variable so that we can check
                # if there is a internal parsing error which would result
                # in an infinite loop
                pos2 = m.end()

                # handle state changes
                if new_state is not None:
                    # remove the uppermost state
                    if new_state == '#pop':
                        stack.pop()
                    # resolve the new state by group checking
                    elif new_state == '#bygroup':
                        for key, value in iteritems(m.groupdict()):
                            if value is not None:
                                stack.append(key)
                                break
                        else:
                            raise RuntimeError('%r wanted to resolve the '
                                               'new state dynamically but'
                                               ' no group matched' %
                                               regex)
                    # direct state name given
                    else:
                        stack.append(new_state)
                    statetokens = self.rules[stack[-1]]
                # we are still at the same position and no stack change.
                # this means a loop without break condition, avoid that and
                # raise error
                elif pos2 == pos:
                    raise RuntimeError('%r yielded empty string without '
                                       'stack change' % regex)
                # publish new function and start again
                pos = pos2
                break
            # if loop terminated without break we haven't found a single match
            # either we are at the end of the file or we have a problem
            else:
                # end of text
                if pos >= source_length:
                    return
                # something went wrong
                raise TemplateSyntaxError('unexpected char %r at %d' %
                                          (source[pos], pos), lineno,
                                          name, filename)
| gpl-2.0 |
DaanHoogland/cloudstack | tools/marvin/marvin/settings.py | 8 | 1204 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Settings for the XML test runner

# Use this setting to choose between a verbose and a non-verbose output.
# unittest-style verbosity levels are integers (0 = quiet, 1 = default,
# 2 = verbose). Was previously `2.` (a float) -- a sentence-ending period
# that slipped into the assignment; fixed to the intended int.
TEST_OUTPUT_VERBOSE = 2

# If your test methods contain docstrings, you can display such docstrings
# instead of displaying the test name (ex: module.TestCase.test_method).
# In order to use this feature, you have to enable verbose output by setting
# TEST_OUTPUT_VERBOSE = 2.
TEST_OUTPUT_DESCRIPTIONS = True
| apache-2.0 |
ares/robottelo | robottelo/ui/base.py | 2 | 32387 | # -*- encoding: utf-8 -*-
"""Base class for all UI operations"""
import logging
import time
from robottelo.helpers import escape_search
from robottelo.ui.locators import locators, common_locators, Locator
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import WebDriverWait
LOGGER = logging.getLogger(__name__)
class UIError(Exception):
    """Raised when a UI action could not be completed."""
class UINoSuchElementError(UIError):
    """Raised when a UI element cannot be located on the page."""
class UIPageSubmitionFailed(Exception):
    """Raised when submitting a UI page failed."""
class Base(object):
    """Base class for UI"""
    # Module-level logger shared by every page object.
    logger = LOGGER
    # Field used as the default search criterion; ``None`` falls back to
    # 'name' (see ``search``).
    search_key = None
    # Katello pages use 'kt_'-prefixed locators, plain Foreman pages do not.
    is_katello = False
    # Default timeouts (seconds) used when locating buttons / search results.
    button_timeout = 15
    result_timeout = 15
    # Entity-specific locators; subclasses override these when applicable.
    delete_locator = None
    actions_dropdown_locator = None
    def __init__(self, browser):
        """Sets up the browser object."""
        self.browser = browser
    def find_element(self, locator):
        """Wrapper around Selenium's WebDriver that allows you to search for an
        element in the web page.

        :param locator: (strategy, value) tuple identifying the element.
        :return: the matching WebElement if found and displayed, otherwise
            ``None`` (lookup failures are logged, never raised).
        """
        try:
            _webelement = self.browser.find_element(*locator)
            self.wait_for_ajax()
            if _webelement.is_displayed():
                return _webelement
            else:
                return None
        except NoSuchElementException as err:
            self.logger.debug(
                u'%s: Could not locate element %s: %s',
                type(err).__name__,
                locator[1],
                err
            )
        except TimeoutException as err:
            self.logger.debug(
                u'%s: Waiting for locator %s: %s',
                type(err).__name__,
                locator[1],
                err
            )
        return None
    def find_elements(self, locator):
        """Wrapper around Selenium's WebDriver that allows you to fetch list of
        elements in the web page.

        :param locator: (strategy, value) tuple identifying the elements.
        :return: list of the *displayed* matching WebElements (possibly
            empty), or ``None`` when the lookup itself fails (failures are
            logged, never raised).
        """
        try:
            _webelements = self.browser.find_elements(*locator)
            self.wait_for_ajax()
            webelements = []
            for _webelement in _webelements:
                if _webelement.is_displayed():
                    webelements.append(_webelement)
            return webelements
        except NoSuchElementException as err:
            self.logger.debug(
                u'%s: Could not locate the elements of %s: %s',
                type(err).__name__,
                locator[1],
                err
            )
        except TimeoutException as err:
            self.logger.debug(
                u'%s: Waiting for locator "%s": "%s"',
                type(err).__name__,
                locator[1],
                err
            )
        return None
    def _search_locator(self):
        """Specify element name locator which should be used in search
        procedure

        :raise: NotImplementedError unless overridden by a subclass.
        """
        raise NotImplementedError(
            'Subclasses must return locator of element to search')
    def navigate_to_entity(self):
        """Perform navigation to main page for specific entity

        :raise: NotImplementedError unless overridden by a subclass.
        """
        raise NotImplementedError('Subclasses must implement navigator method')
def search_and_click(self, element):
"""Helper method to perform the commonly used search then click"""
return self.click(self.search(element))
    def search(self, element, _raw_query=None, expecting_results=True):
        """Uses the search box to locate an element from a list of elements.

        :param element: either element name or a tuple, containing element name
            as a first element and all the rest variables required for element
            locator.
        :param _raw_query: (optional) custom search query. Can be used to find
            entity by some of its fields (e.g. 'hostgroup = foo' for entity
            named 'bar') or to combine complex queries (e.g.
            'name = foo and os = bar'). Note that this will ignore entity's
            default ``search_key``.
        :param expecting_results: Specify whether we expect to find any entity
            or not
        :return: the found WebElement, or ``None`` when nothing matched (or,
            on Foreman pages only, when the search box itself is absent).
        :raise: UINoSuchElementError on Katello pages missing the search box.
        """
        element_name = element[0] if isinstance(element, tuple) else element
        # Navigate to the page
        self.logger.debug(u'Searching for: %s', element_name)
        self.navigate_to_entity()
        # Provide search criterions or use default ones
        search_key = self.search_key or 'name'
        element_locator = self._search_locator()
        # Determine search box and search button locators depending on the type
        # of entity
        prefix = 'kt_' if self.is_katello else ''
        searchbox = self.wait_until_element(
            common_locators[prefix + 'search'],
            timeout=self.button_timeout
        )
        search_button_locator = common_locators[prefix + 'search_button']
        # Do not proceed if searchbox is not found
        if searchbox is None:
            # For katello, search box should be always present on the page
            # no matter we have entity on the page or not...
            if self.is_katello:
                raise UINoSuchElementError('Search box not found.')
            # ...but not for foreman
            return None
        # Pass the data into search field and push the search button if
        # applicable
        searchbox.clear()
        searchbox.send_keys(_raw_query or u'{0} = {1}'.format(
            search_key, escape_search(element_name)))
        # ensure mouse points at search button and no tooltips are covering it
        # before clicking
        self.perform_action_chain_move(search_button_locator)
        self.click(search_button_locator)
        # In case we expecting that search should not find any entity
        if expecting_results is False:
            return self.wait_until_element(
                common_locators[prefix + 'search_no_results'])
        # Make sure that found element is returned no matter it described by
        # its own locator or common one (locator can transform depending on
        # element name length)
        # Poll once per second, up to ``result_timeout`` seconds in total.
        for _ in range(self.result_timeout):
            for strategy, value in (
                element_locator,
                common_locators['select_filtered_entity']
            ):
                result = self.find_element((strategy, value % element))
                if result is not None:
                    return result
            time.sleep(1)
        return None
def delete(self, name, really=True, dropdown_present=False,
search_query=None):
"""Delete an added entity, handles both with and without dropdown."""
self.logger.debug(u'Deleting entity %s', name)
# Some overridden search methods do not support search queries,
# e.g. when page does not have search field. Skip search_query then.
if search_query:
searched = self.search(name, _raw_query=search_query)
else:
searched = self.search(name)
if not searched:
raise UIError(u'Could not search the entity "{0}"'.format(name))
if self.is_katello:
self.click(searched)
if self.delete_locator:
self.click(self.delete_locator)
else:
self.perform_entity_action('Remove')
if really:
self.click(common_locators['confirm_remove'])
else:
self.click(common_locators['cancel'])
else:
if dropdown_present:
if self.actions_dropdown_locator:
self.click(self.actions_dropdown_locator % name)
else:
self.click(
common_locators['select_action_dropdown'] % name)
if self.delete_locator:
self.click(self.delete_locator % name, wait_for_ajax=False)
else:
self.click(
common_locators['delete_button'] % name,
wait_for_ajax=False
)
self.handle_alert(really)
# Make sure that element is really removed from UI. It is necessary to
# verify that fact few times as sometimes 1 second is not enough for
# element to be actually deleted from DB
self.button_timeout = 3
self.result_timeout = 1
try:
for _ in range(3):
if search_query:
searched = self.search(name, _raw_query=search_query)
else:
searched = self.search(name)
if bool(searched) != really:
break
self.browser.refresh()
if bool(searched) == really:
raise UIError(
u'Delete functionality works improperly for "{0}" entity'
.format(name))
finally:
self.button_timeout = 15
self.result_timeout = 15
def clear_search_box(self):
"""Helper to clear text that was inputted into search box using
application button
"""
prefix = 'kt_' if self.is_katello else ''
self.click(common_locators[prefix + 'clear_search'])
    def create_a_bookmark(self, name=None, query=None, public=None,
                          searchbox_query=None):
        """Bookmark a search on current entity page

        :param name: (optional) name to give the bookmark.
        :param query: (optional) search query stored in the bookmark.
        :param public: (optional) whether the bookmark should be public.
        :param searchbox_query: (optional) text typed into the search box
            before the bookmark dialog is opened.
        :raise: UINoSuchElementError when the search box cannot be found.
        """
        self.navigate_to_entity()
        prefix = 'kt_' if self.is_katello else ''
        searchbox = self.wait_until_element(
            common_locators[prefix + 'search'],
            timeout=self.button_timeout
        )
        if searchbox is None:
            raise UINoSuchElementError('Search box not found.')
        if searchbox_query is not None:
            searchbox.clear()
            searchbox.send_keys(u'{0}'.format(escape_search(searchbox_query)))
        self.click(common_locators['search_dropdown'])
        self.click(locators['bookmark.new'])
        # Wait for the bookmark dialog to render before filling its fields.
        self.wait_until_element(locators['bookmark.name'])
        if name is not None:
            self.assign_value(locators['bookmark.name'], name)
        if query is not None:
            self.assign_value(locators['bookmark.query'], query)
        if public is not None:
            self.assign_value(locators['bookmark.public'], public)
        self.click(locators['bookmark.create'])
def handle_alert(self, really):
"""
Handles any alerts
"""
if really:
alert = self.browser.switch_to_alert()
alert.accept()
else:
alert = self.browser.switch_to_alert()
alert.dismiss()
    def select_deselect_entity(self, filter_key, loc, entity_list):
        """Function to select and deselect entity like OS, Partition Table,
        Arch from selection list or by selecting relevant checkbox.

        :param filter_key: key used to build the filter textbox locator.
        :param loc: locator template for an entity in the selection list.
        :param entity_list: names of entities whose selection state to toggle.
        """
        for entity in entity_list:
            # Scroll to top
            self.browser.execute_script('window.scroll(0, 0)')
            txt_field = self.wait_until_element(
                common_locators['filter'] % filter_key)
            self.logger.debug(u'Toggling entity %s select state', entity)
            # When a filter textbox exists, narrow the list first; otherwise
            # fall back to toggling the entity's checkbox directly.
            if txt_field:
                txt_field.clear()
                txt_field.send_keys(entity)
                self.click(loc % entity)
            else:
                self.click(common_locators['entity_checkbox'] % entity)
    def configure_entity(self, entity_list, filter_key, tab_locator=None,
                         new_entity_list=None, entity_select=True):
        """Configures entities like orgs, OS, ptable, Archs, Users, Usergroups.

        :param entity_list: entities to (de)select in the first pass.
        :param filter_key: key used to build the filter textbox locator.
        :param tab_locator: (optional) tab clicked before each pass.
        :param new_entity_list: (optional) entities selected in a second pass
            (this pass always uses the 'select' locator).
        :param entity_select: when ``True`` the first pass selects entities,
            otherwise it deselects them.
        """
        if entity_list is None:
            entity_list = []
        if new_entity_list is None:
            new_entity_list = []
        if entity_list:
            if tab_locator:
                self.click(tab_locator)
            if entity_select:
                entity_locator = common_locators['entity_select']
            else:
                entity_locator = common_locators['entity_deselect']
            self.select_deselect_entity(
                filter_key, entity_locator, entity_list)
        if new_entity_list:
            if tab_locator:
                self.click(tab_locator)
            entity_locator = common_locators['entity_select']
            self.select_deselect_entity(
                filter_key, entity_locator, new_entity_list)
    def wait_until_element_exists(self, locator, timeout=12,
                                  poll_frequency=0.5):
        """Wrapper around Selenium's WebDriver that allows you to pause your
        test until an element in the web page is present.

        :param locator: locator of the element to wait for.
        :param timeout: how long (seconds) to wait before giving up.
        :param poll_frequency: how often (seconds) to re-check for presence.
        :return: the element once present (visible or not), or ``None`` on
            timeout (the timeout is logged, never raised).
        """
        try:
            element = WebDriverWait(
                self.browser, timeout, poll_frequency
            ).until(
                expected_conditions.presence_of_element_located(locator),
                message=u'element %s is not present' % locator[1]
            )
            self.wait_for_ajax(poll_frequency=poll_frequency)
            return element
        except TimeoutException as err:
            self.logger.debug(
                u"%s: Waiting for element '%s' to exists. %s",
                type(err).__name__,
                locator[1],
                err
            )
            return None
    def wait_until_element(self, locator, timeout=12, poll_frequency=0.5):
        """Wrapper around Selenium's WebDriver that allows you to pause your
        test until an element in the web page is present and visible.

        :param locator: locator of the element to wait for.
        :param timeout: how long (seconds) to wait before giving up.
        :param poll_frequency: how often (seconds) to re-check visibility.
        :return: the element once visible, or ``None`` on timeout (the
            timeout is logged, never raised).
        """
        try:
            element = WebDriverWait(
                self.browser, timeout, poll_frequency
            ).until(
                expected_conditions.visibility_of_element_located(locator),
                message=u'element %s is not visible' % locator[1]
            )
            self.wait_for_ajax(poll_frequency=poll_frequency)
            return element
        except TimeoutException as err:
            self.logger.debug(
                u"%s: Waiting for element '%s' to display. %s",
                type(err).__name__,
                locator[1],
                err
            )
            return None
    def wait_until_element_is_clickable(
            self, locator, timeout=12, poll_frequency=0.5):
        """Wrapper around Selenium's WebDriver that allows you to pause your
        test until an element in the web page is present and can be clicked.

        :param locator: locator of the element to wait for.
        :param timeout: how long (seconds) to wait before giving up.
        :param poll_frequency: how often (seconds) to re-check clickability.
        :return: the clickable element, or ``None`` either on timeout or when
            the element carries a ``disabled`` attribute.
        """
        try:
            element = WebDriverWait(
                self.browser, timeout, poll_frequency
            ).until(
                expected_conditions.element_to_be_clickable(locator),
                message=u'element %s is not clickable' % locator[1]
            )
            self.wait_for_ajax(poll_frequency=poll_frequency)
            # Explicitly reject elements that are present but marked disabled.
            if element.get_attribute('disabled') == u'true':
                return None
            return element
        except TimeoutException as err:
            self.logger.debug(
                u'%s: Waiting for element "%s" to display or to be '
                u'clickable. %s',
                type(err).__name__,
                locator[1],
                err
            )
            return None
    def wait_until_element_is_not_visible(
            self, locator, timeout=12, poll_frequency=0.5):
        """Wrapper around Selenium's WebDriver that allows us to pause our test
        until specified element will disappear. That means that it will not be
        present and will not be visible anymore.

        :param locator: Locator of element under test
        :param timeout: How long this method should wait for the element to
            disappear (in seconds)
        :param poll_frequency: How frequently this method should check for the
            presence of the element (in seconds)
        :returns: If the page element still present after timeout expired,
            return None. Otherwise, return True.
        """
        try:
            WebDriverWait(self.browser, timeout, poll_frequency).until(
                expected_conditions.invisibility_of_element_located(locator))
            # Also let any AJAX triggered by the disappearance settle.
            self.wait_for_ajax(poll_frequency=poll_frequency)
            return True
        except TimeoutException as err:
            self.logger.debug(
                u"%s: Waiting for element '%s' to disappear. %s",
                type(err).__name__,
                locator[1],
                err
            )
            return None
def ajax_complete(self, driver):
"""
Checks whether an ajax call is completed.
"""
jquery_active = False
angular_active = False
try:
jquery_active = driver.execute_script('return jQuery.active') > 0
except WebDriverException:
pass
try:
angular_active = driver.execute_script(
u'return angular.element(document).injector().get("$http")'
u'.pendingRequests.length') > 0
except WebDriverException:
pass
return not (jquery_active or angular_active)
def wait_for_ajax(self, timeout=30, poll_frequency=0.5):
"""Waits for an ajax call to complete until timeout."""
WebDriverWait(
self.browser, timeout, poll_frequency
).until(
self.ajax_complete, 'Timeout waiting for page to load'
)
def scroll_page(self):
"""
Scrolls page up
"""
self.browser.execute_script('scroll(350, 0);')
    def scroll_right_pane(self):
        """
        Scrolls right pane down to find the save/submit button
        """
        # NOTE: the backslash continues the string literal across lines, so
        # the executed script contains the extra indentation whitespace;
        # whitespace between JS tokens is insignificant, so this is harmless.
        self.browser.execute_script("$('#panel_main').\
            data('jsp').scrollBy(0, 100);")
    def scroll_into_view(self, element):
        """ Scrolls current element into visible area of the browser window."""
        # Here aligntoTop=False option is set (the element is aligned with
        # the bottom of the visible area rather than the top).
        self.browser.execute_script(
            'arguments[0].scrollIntoView(false);',
            element,
        )
def input(self, target, newtext):
"""Function to replace text from textbox using a common locator or
WebElement
:param tuple || Locator || WebElement target: Either locator that
describes the element or element itself.
"""
if isinstance(target, (tuple, Locator)):
txt_field = self.wait_until_element(target)
else:
txt_field = target
txt_field.clear()
txt_field.send_keys(newtext)
self.wait_for_ajax()
    def set_parameter(self, param_name, param_value):
        """Function to set parameters for different entities like OS and Domain

        :param param_name: name of the parameter to add.
        :param param_value: value assigned to the new parameter.
        """
        self.click(common_locators['parameter_tab'])
        self.click(common_locators['add_parameter'])
        self.assign_value(common_locators['parameter_name'], param_name)
        self.assign_value(common_locators['parameter_value'], param_value)
        self.click(common_locators['submit'])
        self.logger.debug(u'Param: %s set to: %s', param_name, param_value)
    def remove_parameter(self, param_name):
        """Function to remove parameters for different entities like OS and
        Domain.

        :param param_name: name of the parameter to remove.
        """
        self.click(common_locators['parameter_tab'])
        self.click(common_locators['parameter_remove'] % param_name)
        self.click(common_locators['submit'])
        self.logger.debug(u'Removed param: %s', param_name)
    def edit_entity(self, edit_loc, edit_text_loc, entity_value, save_loc):
        """Function to edit the selected entity's text and save it.

        :param edit_loc: locator of the 'edit' control.
        :param edit_text_loc: locator of the editable text field.
        :param entity_value: new value to assign.
        :param save_loc: locator of the 'save' control.
        """
        self.click(edit_loc)
        self.assign_value(edit_text_loc, entity_value)
        self.click(save_loc)
    def set_limit(self, limit):
        """Specify content host limit value for host collection or activation
        key entities.

        :param limit: numeric limit value, or the string 'Unlimited' to
            leave the limit field untouched after toggling the checkbox.
        """
        self.click(common_locators['usage_limit_checkbox'])
        if limit != 'Unlimited':
            self.assign_value(common_locators['usage_limit'], limit)
    def select_repo(self, repo_name):
        """Select specific repository for packages or errata search
        functionality

        :param repo_name: name of the repository to select.
        """
        self.navigate_to_entity()
        self.select(common_locators['select_repo'], repo_name)
    def auto_complete_search(self, go_to_page, entity_locator, partial_name,
                             name, search_key):
        """Auto complete search by giving partial name of any entity.

        :param go_to_page: Navigates to the entities page.
        :param entity_locator: The locator of the entity.
        :param str partial_name: The partial name of the entity.
        :param str name: The name of the entity. Ex: org, loc
        :param str search_key: The search key for searching an entity. Ex: name
        :return: Returns the searched element.
        """
        go_to_page()
        # Type '<search_key> = <partial_name>', then pick the auto-completion
        # entry that matches ``name`` before pressing the search button.
        self.assign_value(
            common_locators['search'],
            search_key + " = " + partial_name
        )
        self.click(common_locators['auto_search'] % name)
        self.click(common_locators['search_button'])
        return self.wait_until_element(entity_locator % name)
    def check_all_values(self, go_to_page, entity_name, entity_locator,
                         tab_locator, context=None):
        """
        Checks whether the 'All values' checkbox is checked/selected.

        :param go_to_page: Navigates to the entities page.
        :param str entity_name: The name of the entity. Ex: org, loc
        :param entity_locator: The locator of the entity.
        :param tab_locator: The tab locator to switch to the entity's tab.
        :param context: (optional) value interpolated into the 'all_values'
            locator to address the proper checkbox.
        :return: Returns whether the element is checked/selected or not.
        :rtype: bool
        :raises robottelo.ui.base.UINoSuchElementError: If the entity is not
            found via search.
        """
        go_to_page()
        searched = self.search(entity_name)
        if searched is None:
            raise UINoSuchElementError('Entity not found via search.')
        searched.click()
        self.click(tab_locator)
        selected = self.find_element(
            common_locators['all_values'] % context).is_selected()
        return selected
def is_element_enabled(self, locator):
"""Check whether UI element is enabled or disabled
:param locator: The locator of the element.
:return: Returns True if element is enabled and False otherwise
"""
element = self.wait_until_element(locator)
if element is None:
return False
self.wait_for_ajax()
return element.is_enabled()
def is_element_visible(self, locator):
"""Check whether UI element is visible
:param locator: The locator of the element.
:return: Returns True if element is visible and False otherwise
"""
element = self.wait_until_element_exists(locator)
if element is None:
return False
self.wait_for_ajax()
return element.is_displayed()
def element_type(self, target):
"""Determine UI element type using locator/element tag
:param tuple || Locator || WebElement target: Either locator that
describes the element or element itself.
:return: Returns element type value
:rtype: str
"""
element_type = None
if isinstance(target, (tuple, Locator)):
element = self.wait_until_element(target)
else:
element = target
if element is not None:
element_type = element.tag_name.lower()
if (element_type == 'input' and
element.get_attribute('type') == 'checkbox'):
element_type = 'checkbox'
elif (element_type == 'input' and
element.get_attribute('type') == 'radio'):
element_type = 'radio'
elif (element_type == 'div' and
'ace_editor' in element.get_attribute('class')):
element_type = 'ace_editor'
return element_type
    def click(self, target, wait_for_ajax=True,
              ajax_timeout=30, waiter_timeout=12, scroll=True):
        """Locate the element described by the ``target`` and click on it.

        :param tuple || Locator || WebElement target: Could be either locator
            that describes the element or element itself.
        :param wait_for_ajax: Flag that indicates if should wait for AJAX after
            clicking on the element
        :param ajax_timeout: The amount of time that wait_for_ajax should wait.
            This will have effect if ``wait_for_ajax`` parameter is ``True``.
        :param waiter_timeout: The amount of time that wait_until_element
            should wait. That value should be specified when non-default delay
            is needed (e.g. long run procedures)
        :param scroll: Decide whether scroll to element in case it is located
            out of the page
        :raise: UINoSuchElementError if the element could not be found.
        """
        if isinstance(target, (tuple, Locator)):
            element = self.wait_until_element(target, timeout=waiter_timeout)
        else:
            element = target
        if element is None:
            raise UINoSuchElementError(
                '{0}: element {1} was not found while trying to click'
                .format(type(self).__name__, str(target))
            )
        # Required since from selenium 2.48.0. which makes Selenium more
        # closely resemble a user when interacting with elements.
        # Scrolling element into view before attempting to click solves this.
        # Behaviour can be changed with new selenium versions, so it is
        # necessary to validate that functionality in case click method stopped
        # to work as intended
        if scroll:
            self.scroll_into_view(element)
        element.click()
        if wait_for_ajax:
            self.wait_for_ajax(ajax_timeout)
    def select(self, target, list_value, wait_for_ajax=True, timeout=30,
               scroll=True, select_by='visible_text'):
        """Select the element. Current method supports both classical <select>
        tags and newer jquery-select elements

        :param tuple || Locator || WebElement target: Either locator that
            describes the element or element itself.
        :param list_value: The value to select from the dropdown
        :param wait_for_ajax: Flag that indicates if should wait for AJAX after
            clicking on the element
        :param timeout: The amount of time that wait_for_ajax should wait. This
            will have effect if ``wait_for_ajax`` parameter is ``True``.
        :param scroll: Decide whether scroll to element in case it is located
            out of the page
        :param select_by: method for select element in the list of options
            visible_text, index, value
        """
        # Check whether our select list element has <select> tag
        if self.element_type(target) == 'select':
            if isinstance(target, (tuple, Locator)):
                element = self.wait_until_element(target)
            else:
                element = target
            if scroll:
                self.scroll_into_view(element)
            select_element = Select(element)
            # Dispatches to Select.select_by_visible_text/index/value.
            getattr(select_element, 'select_by_%s' % select_by)(list_value)
            if wait_for_ajax:
                self.wait_for_ajax(timeout)
        # If no - treat it like jquery select list
        else:
            self.click(
                target,
                wait_for_ajax=wait_for_ajax,
                ajax_timeout=timeout,
                scroll=scroll,
            )
            self.assign_value(
                common_locators['select_list_search_box'], list_value)
            self.click(
                common_locators['entity_select_list'] % list_value,
                wait_for_ajax=wait_for_ajax,
                ajax_timeout=timeout,
                scroll=scroll,
            )
        self.logger.debug(u'Selected value %s on %s', list_value, str(target))
    def perform_action_chain_move(self, locator):
        """Moving the mouse to the middle of an element specified by locator
        parameter

        :param locator: The locator that describes the element.
        :raise: UINoSuchElementError if the element could not be found.
        """
        element = self.wait_until_element(locator)
        if element is None:
            raise UINoSuchElementError(
                u'Cannot move cursor to {0}: element with locator {1}'
                .format(type(self).__name__, locator)
            )
        # Scroll first so the mouse move targets the on-screen position.
        self.scroll_into_view(element)
        ActionChains(self.browser).move_to_element(element).perform()
        self.wait_for_ajax()
def perform_action_chain_move_by_offset(self, x=0, y=0):
"""Moving the mouse to an offset from current mouse position
:param x: X offset to move to
:param y: Y offset to move to
"""
ActionChains(self.browser).move_by_offset(x, y).perform()
self.wait_for_ajax()
    def assign_value(self, target, value):
        """Assign provided value to page element depending on the type of that
        element

        :param tuple || Locator || WebElement target: Either locator that
            describes the element or element itself.
        :param value: Value that needs to be assigned to the element
        :raise: ValueError if the element type is unknown to our code.
        """
        element_type = self.element_type(target)
        if element_type == 'input' or element_type == 'textarea':
            self.input(target, value)
        elif element_type == 'select' or element_type == 'span':
            self.select(target, value)
        elif element_type == 'checkbox' or element_type == 'radio':
            # Only click when the desired state differs from the current one.
            if isinstance(target, (tuple, Locator)):
                state = self.wait_until_element(target).is_selected()
            else:
                state = target.is_selected()
            if value != state:
                self.click(target)
        elif element_type == 'ace_editor':
            if isinstance(target, (tuple, Locator)):
                ace_edit_element = self.wait_until_element(target)
            else:
                ace_edit_element = target
            ace_edit_id = ace_edit_element.get_attribute("id")
            # NOTE(review): ``value`` is interpolated into the executed JS
            # verbatim; a value containing a single quote would break the
            # script. Acceptable for trusted test data, but worth confirming.
            self.browser.execute_script(
                "ace.edit('{0}').setValue('{1}');".format(ace_edit_id, value))
        else:
            raise ValueError(
                u'Provided target {0} is not supported by framework'
                .format(str(target))
            )
        self.logger.debug(u'Assigned value %s to %s', value, str(target))
def clear_entity_value(self, target):
"""Clear current value for provided page element
:param tuple || Locator || WebElement target: Either locator that
describes the element or element itself.
"""
element_type = self.element_type(target)
if element_type == 'input' or element_type == 'textarea':
self.input(target, '')
elif element_type == 'abbr':
self.click(target)
def get_selected_value(self, target):
"""Get currently selected value for select list
:param tuple || Locator || WebElement target: Either locator that
describes the element or element itself.
:return: Currently selected list element text
"""
if isinstance(target, (tuple, Locator)):
element = self.wait_until_element(target)
else:
element = target
selected_option = Select(element).first_selected_option
return selected_option.text
    def perform_entity_action(self, action_name):
        """Execute specified action from katello entity 'Select Action'
        dropdown

        :param action_name: Name of action to be executed (e.g.
            'Remove Product')
        :raise: UINoSuchElementError (via ``click``) when the dropdown or the
            named action is not present.
        """
        self.click(common_locators['kt_select_action_dropdown'])
        self.click(common_locators['select_action'] % action_name)
| gpl-3.0 |
teritos/tero-saas | alarm/events.py | 2 | 1503 | """Alarm Event names"""
import logging
import zope.event.classhandler
from base64 import b64decode
from vendors import onesignal
from vision.cloud import azure
from alarm.models import AlarmImage
from django.contrib.auth.models import User
logger = logging.getLogger('alarm') # pylint: disable=C0103
class Event(object):
    """An alarm event."""
    @classmethod
    def GetEventInstanceFromKwargs(cls, **kwargs):  # pylint: disable=C0103
        """Build an event whose attributes mirror the given kwargs."""
        instance = cls()
        for name, value in kwargs.items():
            setattr(instance, name, value)
        return instance
class MotionDetected(Event):
    """Event fired whenever camera motion is detected."""
@zope.event.classhandler.handler(MotionDetected)
def handle_motion_detection(event):
    """Handle motion detection.

    Persists the captured image, optionally runs human detection on it and
    notifies the alarm owner through OneSignal.
    """
    # Save image on DB
    image = AlarmImage.create_from_encoded_data(event.image64, event.filetype, event.alarm)
    # Notify using OneSignal API
    message = 'Movimiento detectado'
    # If human detection is enabled, check it
    if event.alarm.human_detection:
        tag_list = azure.find_humans_on(b64decode(event.image64))
        # Use the module logger instead of bare print() so detection details
        # end up in the configured 'alarm' log, not on stdout.
        logger.debug('Human detection tags: %s', tag_list)
        if tag_list:
            message = 'Intrusos detectados!!!'
        logger.debug(message)
    user = User.objects.get(username=event.username)
    logger.debug('%s en Alarma %s', message, event.alarm)
    onesignal.send_message(user, message, title='Tero [{}]'.format(event.sender), big_picture=image.full_url)
| gpl-3.0 |
carsongee/edx-platform | lms/djangoapps/django_comment_client/tests/test_models.py | 30 | 2542 | import django_comment_common.models as models
from django.test import TestCase
from django.test.utils import override_settings
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from opaque_keys.edx.locations import SlashSeparatedCourseKey
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class RoleClassTestCase(TestCase):
    """Tests permission handling on ``django_comment_common`` roles."""
    def setUp(self):
        # For course ID, syntax edx/classname/classdate is important
        # because xmodel.course_module.id_to_location looks for a string to split
        self.course_id = SlashSeparatedCourseKey("edX", "toy", "2012_Fall")
        self.student_role = models.Role.objects.get_or_create(name="Student",
                                                              course_id=self.course_id)[0]
        self.student_role.add_permission("delete_thread")
        self.student_2_role = models.Role.objects.get_or_create(name="Student",
                                                                course_id=self.course_id)[0]
        self.TA_role = models.Role.objects.get_or_create(name="Community TA",
                                                         course_id=self.course_id)[0]
        self.course_id_2 = SlashSeparatedCourseKey("edx", "6.002x", "2012_Fall")
        self.TA_role_2 = models.Role.objects.get_or_create(name="Community TA",
                                                           course_id=self.course_id_2)[0]
    class Dummy():
        # NOTE(review): appears unused within this test case; kept for
        # backward compatibility.
        def render_template(self):
            # ``self`` was previously missing from the signature, which
            # would raise TypeError if the method were called on an instance.
            pass
    def testHasPermission(self):
        # Whenever you add a permission to student_role,
        # Roles with the same FORUM_ROLE in same class also receives the same
        # permission.
        # Is this desirable behavior?
        self.assertTrue(self.student_role.has_permission("delete_thread"))
        self.assertTrue(self.student_2_role.has_permission("delete_thread"))
        self.assertFalse(self.TA_role.has_permission("delete_thread"))
    def testInheritPermissions(self):
        self.TA_role.inherit_permissions(self.student_role)
        self.assertTrue(self.TA_role.has_permission("delete_thread"))
        # Despite being from 2 different courses, TA_role_2 can still inherit
        # permissions from TA_role without error
        self.TA_role_2.inherit_permissions(self.TA_role)
class PermissionClassTestCase(TestCase):
    """Tests for the ``Permission`` model."""
    def setUp(self):
        self.permission = models.Permission.objects.get_or_create(name="test")[0]
    def testUnicode(self):
        # The string representation of a permission is simply its name.
        self.assertEqual(str(self.permission), "test")
| agpl-3.0 |
rjschwei/boto | boto/route53/__init__.py | 22 | 2862 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# this is here for backward compatibility
# originally, the Route53Connection class was defined here
from connection import Route53Connection
from boto.regioninfo import RegionInfo
class Route53RegionInfo(RegionInfo):
    def connect(self, **kw_params):
        """
        Connect to this Region's endpoint. Returns an connection
        object pointing to the endpoint associated with this region.
        You may pass any of the arguments accepted by the connection
        class's constructor as keyword arguments and they will be
        passed along to the connection object.

        :rtype: Connection object
        :return: The connection to this regions endpoint
        """
        # Implicitly returns None when no connection class is configured.
        if self.connection_cls:
            return self.connection_cls(host=self.endpoint, **kw_params)
def regions():
    """
    Get all available regions for the Route53 service.

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo` instances
    """
    # Route53 is a global service, so it exposes a single 'universal' region.
    universal = Route53RegionInfo(
        name='universal',
        endpoint='route53.amazonaws.com',
        connection_cls=Route53Connection,
    )
    return [universal]
def connect_to_region(region_name, **kw_params):
    """
    Given a valid region name, return a
    :class:`boto.route53.connection.Route53Connection`.

    :type: str
    :param region_name: The name of the region to connect to.

    :rtype: :class:`boto.route53.connection.Route53Connection` or ``None``
    :return: A connection to the given region, or None if an invalid region
        name is given
    """
    matching = [region for region in regions() if region.name == region_name]
    if matching:
        return matching[0].connect(**kw_params)
    return None
| mit |
brain-tec/sale-workflow | sale_automatic_workflow/sale.py | 9 | 3840 | # -*- coding: utf-8 -*-
###############################################################################
#
# sale_automatic_workflow for OpenERP
# Copyright (C) 2011 Akretion Sébastien BEAU <sebastien.beau@akretion.com>
# Copyright 2013 Camptocamp SA (Guewen Baconnier)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from openerp import models, fields, api, _
class sale_order(models.Model):
    """Sale order extension tying an order to an automatic workflow."""
    _inherit = "sale.order"

    workflow_process_id = fields.Many2one(comodel_name='sale.workflow.process',
                                          string='Automatic Workflow',
                                          ondelete='restrict')

    def _prepare_invoice(self, cr, uid, order, lines, context=None):
        """Propagate the order's workflow settings to the invoice values.

        Old-API override: keeps the ``cr, uid`` signature of the parent.
        """
        vals = super(sale_order, self)._prepare_invoice(
            cr, uid, order, lines, context=context)
        workflow = order.workflow_process_id
        if workflow:
            vals['workflow_process_id'] = workflow.id
            if workflow.invoice_date_is_order_date:
                # Invoice is dated like the order instead of "today".
                vals['date_invoice'] = order.date_order
            if workflow.property_journal_id:
                vals['journal_id'] = workflow.property_journal_id.id
        return vals

    def _prepare_order_picking(self, cr, uid, order, context=None):
        """Copy the workflow process onto the generated picking values."""
        vals = super(sale_order, self)._prepare_order_picking(
            cr, uid, order, context=context)
        workflow = order.workflow_process_id
        if workflow:
            vals['workflow_process_id'] = workflow.id
        return vals

    @api.onchange('workflow_process_id')
    def onchange_workflow_process_id(self):
        """Apply the defaults of the selected workflow on the sale order.

        Returns a client warning dict when the workflow defines a warning
        message, otherwise returns nothing.
        """
        workflow = self.workflow_process_id
        if not workflow:
            return
        if workflow.picking_policy:
            self.picking_policy = workflow.picking_policy
        if workflow.order_policy:
            self.order_policy = workflow.order_policy
        if workflow.invoice_quantity:
            self.invoice_quantity = workflow.invoice_quantity
        if workflow.section_id:
            self.section_id = workflow.section_id.id
        if workflow.warning:
            return {'warning': {'title': _('Workflow Warning'),
                                'message': workflow.warning}}

    @api.multi
    def test_create_invoice(self):
        """Workflow condition: test if an invoice should be created,
        based on the automatic workflow rules.

        Only 'manual' orders attached to a workflow are eligible.
        """
        self.ensure_one()
        workflow = self.workflow_process_id
        if not workflow or self.order_policy != 'manual':
            return False
        invoice_on = workflow.create_invoice_on
        if invoice_on == 'on_order_confirm':
            return True
        if invoice_on == 'on_picking_done':
            if self.shipped:
                return True
            # A sale order containing only service products is never
            # flagged "shipped": invoice it when no line is stockable.
            sale_obj = self.env['sale.order']
            checks = [sale_obj.test_no_product(order) for order in self]
            if checks.count(False) == 0:
                return True
        return False
| agpl-3.0 |
shinriyo/workalendar | workalendar/tests/test_america.py | 2 | 5679 | # -*- coding: utf-8 -*-
from datetime import date
from workalendar.tests import GenericCalendarTest
from workalendar.america import Brazil, BrazilSaoPauloState
from workalendar.america import BrazilSaoPauloCity
from workalendar.america import Mexico, Chile, Panama
class BrazilTest(GenericCalendarTest):
    cal_class = Brazil

    def test_year_2013(self):
        """Every Brazilian national holiday of 2013 must be present."""
        holidays = self.cal.holidays_set(2013)
        expected = [
            date(2013, 1, 1),    # new year
            date(2013, 4, 21),   # Tiradentes
            date(2013, 5, 1),    # Dia do trabalhador
            date(2013, 9, 7),    # Dia da Independência
            date(2013, 10, 12),  # Nossa Senhora Aparecida
            date(2013, 11, 2),   # Finados
            date(2013, 11, 15),  # Proclamação da República
            date(2013, 12, 25),  # Natal
        ]
        for day in expected:
            self.assertIn(day, holidays)
class SaoPauloStateTest(BrazilTest):
    cal_class = BrazilSaoPauloState

    def test_regional_2013(self):
        """The São Paulo state holiday is added on top of the national set."""
        holidays = self.cal.holidays_set(2013)
        # July 9th: Revolução Constitucionalista de 1932
        self.assertIn(date(2013, 7, 9), holidays)
class SaoPauloCityTest(SaoPauloStateTest):
    cal_class = BrazilSaoPauloCity

    def test_city_2013(self):
        """City-specific holidays on top of the state calendar."""
        holidays = self.cal.holidays_set(2013)
        expected = [
            date(2013, 1, 25),   # Aniversário da Cidade de São Paulo
            date(2013, 2, 12),   # Carnaval
            date(2013, 11, 20),  # Dia da Consciência Negra
            date(2013, 3, 29),   # Sexta-feira da Paixão
            date(2013, 3, 31),   # Páscoa
            date(2013, 5, 30),   # Corpus Christi
        ]
        for day in expected:
            self.assertIn(day, holidays)
class ChileTest(GenericCalendarTest):
    cal_class = Chile

    def test_holidays_2013(self):
        """Every fixed and movable Chilean holiday of 2013 is present."""
        holidays = self.cal.holidays_set(2013)
        expected = [
            (1, 1), (3, 29), (3, 30), (5, 1), (5, 21), (6, 29),
            (7, 16), (8, 15), (9, 18), (9, 19), (9, 20), (10, 12),
            (10, 31), (11, 1), (12, 8), (12, 25), (12, 31),
        ]
        for month, day in expected:
            self.assertIn(date(2013, month, day), holidays)

    def test_reformation_day(self):
        """Reformation day is observed on a shifted date in some years."""
        # 2012: observed on November 2nd rather than October 31st.
        holidays = self.cal.holidays_set(2012)
        self.assertNotIn(date(2012, 10, 31), holidays)
        self.assertIn(date(2012, 11, 2), holidays)
        # 2017: observed on October 27th rather than October 31st.
        holidays = self.cal.holidays_set(2017)
        self.assertNotIn(date(2017, 10, 31), holidays)
        self.assertIn(date(2017, 10, 27), holidays)
class MexicoTest(GenericCalendarTest):
    cal_class = Mexico

    def test_holidays_2013(self):
        holidays = self.cal.holidays_set(2013)
        self.assertIn(date(2013, 1, 1), holidays)
        self.assertIn(date(2013, 2, 4), holidays)  # Constitution day
        self.assertIn(date(2013, 3, 18), holidays)  # Benito Juárez's birthday
        self.assertIn(date(2013, 5, 1), holidays)  # Labour day
        self.assertIn(date(2013, 9, 16), holidays)  # Independence day
        self.assertIn(date(2013, 11, 18), holidays)  # Revolution day
        self.assertIn(date(2013, 12, 25), holidays)  # XMas
    def test_shift_to_monday(self):
        holidays = self.cal.holidays_set(2017)
        # New year on Sunday -> shift
        self.assertIn(date(2017, 1, 2), holidays)
        holidays = self.cal.holidays_set(2016)
        # XMas on sunday -> shift to monday
        self.assertIn(date(2016, 12, 26), holidays)
        # Same for Labour day
        self.assertIn(date(2016, 5, 2), holidays)
    def test_shift_to_friday(self):
        holidays = self.cal.holidays_set(2021)
        # January 1st 2022 is a saturday, so we shift to friday
        self.assertIn(date(2021, 12, 31), holidays)
        # Same for Labour day (May 1st, 2021 falls on a Saturday)
        self.assertIn(date(2021, 4, 30), holidays)
        holidays = self.cal.holidays_set(2021)
        # December 25th, 2021 is a Saturday, so we shift to friday
        self.assertIn(date(2021, 12, 24), holidays)
class PanamaTest(GenericCalendarTest):
    cal_class = Panama

    def test_holidays_2013(self):
        """Check the full 2013 Panamanian holiday calendar."""
        holidays = self.cal.holidays_set(2013)
        expected = [
            date(2013, 1, 1),    # new year
            date(2013, 1, 9),    # Martyrs day
            date(2013, 2, 12),   # carnival tuesday
            date(2013, 3, 29),   # good friday
            date(2013, 3, 30),   # easter saturday
            date(2013, 3, 31),   # easter sunday
            date(2013, 5, 1),    # labour day
            date(2013, 11, 3),   # independence day
            date(2013, 11, 5),   # colon day
            date(2013, 11, 10),  # Shout in Villa de los Santos
            date(2013, 12, 2),   # Independence from spain
            date(2013, 12, 8),   # mother day
            date(2013, 12, 25),  # XMas
        ]
        for day in expected:
            self.assertIn(day, holidays)
| mit |
tmerrick1/spack | var/spack/repos/builtin/packages/r-flashclust/package.py | 5 | 1665 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RFlashclust(RPackage):
    """flashClust: Implementation of optimal hierarchical clustering"""

    homepage = "https://CRAN.R-project.org/package=flashClust"
    url = "https://cran.r-project.org/src/contrib/flashClust_1.01-2.tar.gz"
    list_url = "https://cran.r-project.org/src/contrib/Archive/flashClust"

    # Checksum is the MD5 of the upstream tarball.
    version('1.01-2', '23409aeeef98bf35d0b3d5dd755fdeff')

    # Upstream DESCRIPTION requires R >= 2.3.0.
    depends_on('r@2.3.0:')
| lgpl-2.1 |
barbuza/django | django/db/models/options.py | 149 | 36502 | from __future__ import unicode_literals
import warnings
from bisect import bisect
from collections import OrderedDict, defaultdict
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db import connections
from django.db.models.fields import AutoField
from django.db.models.fields.proxy import OrderWrt
from django.db.models.fields.related import ManyToManyField
from django.utils import six
from django.utils.datastructures import ImmutableList, OrderedSet
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import (
force_text, python_2_unicode_compatible, smart_text,
)
from django.utils.functional import cached_property
from django.utils.lru_cache import lru_cache
from django.utils.text import camel_case_to_spaces
from django.utils.translation import override, string_concat
# Sentinel for _get_fields(include_parents=...): follow only proxy parents.
PROXY_PARENTS = object()

# Shared empty value for models with no reverse relations.
EMPTY_RELATION_TREE = tuple()

# Message attached to the ImmutableList instances returned by the _meta API.
IMMUTABLE_WARNING = (
    "The return type of '%s' should never be mutated. If you want to manipulate this list "
    "for your own use, make a copy first."
)

# All option names that may legally appear in a model's ``class Meta``.
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
                 'unique_together', 'permissions', 'get_latest_by',
                 'order_with_respect_to', 'app_label', 'db_tablespace',
                 'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
                 'index_together', 'apps', 'default_permissions',
                 'select_on_save', 'default_related_name',
                 'required_db_features', 'required_db_vendor')
class raise_deprecation(object):
    """Decorator that warns on every call to a deprecated unofficial API.

    Emits a RemovedInDjango110Warning naming ``suggested_alternative`` as
    the supported replacement, then delegates to the wrapped function.
    """

    def __init__(self, suggested_alternative):
        self.suggested_alternative = suggested_alternative

    def __call__(self, fn):
        def wrapper(*args, **kwargs):
            message = (
                "'%s is an unofficial API that has been deprecated. "
                "You may be able to replace it with '%s'" % (
                    fn.__name__,
                    self.suggested_alternative,
                )
            )
            # stacklevel=2 points the warning at the deprecated call site.
            warnings.warn(message, RemovedInDjango110Warning, stacklevel=2)
            return fn(*args, **kwargs)
        return wrapper
def normalize_together(option_together):
    """
    option_together can be either a tuple of tuples, or a single
    tuple of two strings. Normalize it to a tuple of tuples, so that
    calling code can uniformly expect that.
    """
    try:
        if not option_together:
            return ()
        if not isinstance(option_together, (tuple, list)):
            raise TypeError
        head = next(iter(option_together))
        # A single flat pair such as ('a', 'b') becomes (('a', 'b'),).
        if not isinstance(head, (tuple, list)):
            option_together = (option_together,)
        return tuple(tuple(pair) for pair in option_together)
    except TypeError:
        # An invalid value is handed back untouched; the check
        # framework reports the problem later.
        return option_together
def make_immutable_fields_list(name, data):
    # Wrap ``data`` in an ImmutableList whose mutation error message names
    # the _meta property (``name``) the list was obtained from.
    return ImmutableList(data, warning=IMMUTABLE_WARNING % name)
@python_2_unicode_compatible
class Options(object):
FORWARD_PROPERTIES = ('fields', 'many_to_many', 'concrete_fields',
'local_concrete_fields', '_forward_fields_map')
REVERSE_PROPERTIES = ('related_objects', 'fields_map', '_relation_tree')
    def __init__(self, meta, app_label=None):
        """Set every option to its default value; contribute_to_class()
        later overwrites these from the model's ``class Meta``."""
        self._get_fields_cache = {}
        self.proxied_children = []
        self.local_fields = []
        self.local_many_to_many = []
        self.virtual_fields = []
        self.model_name = None
        self.verbose_name = None
        self.verbose_name_plural = None
        self.db_table = ''
        self.ordering = []
        self._ordering_clash = False
        self.unique_together = []
        self.index_together = []
        self.select_on_save = False
        self.default_permissions = ('add', 'change', 'delete')
        self.permissions = []
        self.object_name = None
        self.app_label = app_label
        self.get_latest_by = None
        self.order_with_respect_to = None
        self.db_tablespace = settings.DEFAULT_TABLESPACE
        self.required_db_features = []
        self.required_db_vendor = None
        # ``meta`` is the raw ``class Meta`` object; deleted after
        # contribute_to_class() has consumed it.
        self.meta = meta
        self.pk = None
        self.has_auto_field = False
        self.auto_field = None
        self.abstract = False
        self.managed = True
        self.proxy = False
        # For any class that is a proxy (including automatically created
        # classes for deferred object loading), proxy_for_model tells us
        # which class this model is proxying. Note that proxy_for_model
        # can create a chain of proxy models. For non-proxy models, the
        # variable is always None.
        self.proxy_for_model = None
        # For any non-abstract class, the concrete class is the model
        # in the end of the proxy_for_model chain. In particular, for
        # concrete models, the concrete_model is always the class itself.
        self.concrete_model = None
        self.swappable = None
        self.parents = OrderedDict()
        self.auto_created = False
        # To handle various inheritance situations, we need to track where
        # managers came from (concrete or abstract base classes). `managers`
        # keeps a list of 3-tuples of the form:
        # (creation_counter, instance, abstract(=True))
        self.managers = []
        # List of all lookups defined in ForeignKey 'limit_choices_to' options
        # from *other* models. Needed for some admin checks. Internal use only.
        self.related_fkey_lookups = []
        # A custom app registry to use, if you're making a separate model set.
        self.apps = apps
        self.default_related_name = None
@lru_cache(maxsize=None)
def _map_model(self, link):
# This helper function is used to allow backwards compatibility with
# the previous API. No future methods should use this function.
# It maps a field to (field, model or related_model,) depending on the
# field type.
model = link.model._meta.concrete_model
if model is self.model:
model = None
return link, model
@lru_cache(maxsize=None)
def _map_model_details(self, link):
# This helper function is used to allow backwards compatibility with
# the previous API. No future methods should use this function.
# This function maps a field to a tuple of:
# (field, model or related_model, direct, is_m2m) depending on the
# field type.
direct = not link.auto_created or link.concrete
model = link.model._meta.concrete_model
if model is self.model:
model = None
m2m = link.is_relation and link.many_to_many
return link, model, direct, m2m
@property
def label(self):
return '%s.%s' % (self.app_label, self.object_name)
@property
def label_lower(self):
return '%s.%s' % (self.app_label, self.model_name)
    @property
    def app_config(self):
        # Don't go through get_app_config to avoid triggering imports.
        # A plain dict lookup also returns None (instead of raising) when
        # the app is not installed.
        return self.apps.app_configs.get(self.app_label)
    @property
    def installed(self):
        # True when the model's app is registered in the app registry.
        return self.app_config is not None
@property
def abstract_managers(self):
return [
(counter, instance.name, instance) for counter, instance, abstract
in self.managers if abstract
]
@property
def concrete_managers(self):
return [
(counter, instance.name, instance) for counter, instance, abstract
in self.managers if not abstract
]
    def contribute_to_class(self, cls, name):
        """Attach this Options object to the model class ``cls`` and apply
        the user's ``class Meta`` overrides on top of the defaults."""
        from django.db import connection
        from django.db.backends.utils import truncate_name
        cls._meta = self
        self.model = cls
        # First, construct the default values for these options.
        self.object_name = cls.__name__
        self.model_name = self.object_name.lower()
        self.verbose_name = camel_case_to_spaces(self.object_name)
        # Store the original user-defined values for each option,
        # for use when serializing the model definition
        self.original_attrs = {}
        # Next, apply any overridden values from 'class Meta'.
        if self.meta:
            meta_attrs = self.meta.__dict__.copy()
            for name in self.meta.__dict__:
                # Ignore any private attributes that Django doesn't care about.
                # NOTE: We can't modify a dictionary's contents while looping
                # over it, so we loop over the *original* dictionary instead.
                if name.startswith('_'):
                    del meta_attrs[name]
            for attr_name in DEFAULT_NAMES:
                if attr_name in meta_attrs:
                    setattr(self, attr_name, meta_attrs.pop(attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)
                elif hasattr(self.meta, attr_name):
                    setattr(self, attr_name, getattr(self.meta, attr_name))
                    self.original_attrs[attr_name] = getattr(self, attr_name)
            self.unique_together = normalize_together(self.unique_together)
            self.index_together = normalize_together(self.index_together)
            # verbose_name_plural is a special case because it uses a 's'
            # by default.
            if self.verbose_name_plural is None:
                self.verbose_name_plural = string_concat(self.verbose_name, 's')
            # order_with_respect_to and ordering are mutually exclusive.
            self._ordering_clash = bool(self.ordering and self.order_with_respect_to)
            # Any leftover attributes must be invalid.
            if meta_attrs != {}:
                raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
        else:
            self.verbose_name_plural = string_concat(self.verbose_name, 's')
        del self.meta
        # If the db_table wasn't provided, use the app_label + model_name.
        if not self.db_table:
            self.db_table = "%s_%s" % (self.app_label, self.model_name)
            self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
    def _prepare(self, model):
        """Finish option setup once all fields are known: resolve
        ``order_with_respect_to`` and guarantee a primary key exists."""
        if self.order_with_respect_to:
            # The app registry will not be ready at this point, so we cannot
            # use get_field().
            query = self.order_with_respect_to
            try:
                self.order_with_respect_to = next(
                    f for f in self._get_fields(reverse=False)
                    if f.name == query or f.attname == query
                )
            except StopIteration:
                raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, query))
            self.ordering = ('_order',)
            if not any(isinstance(field, OrderWrt) for field in model._meta.local_fields):
                model.add_to_class('_order', OrderWrt())
        else:
            self.order_with_respect_to = None
        if self.pk is None:
            if self.parents:
                # Promote the first parent link in lieu of adding yet another
                # field.
                field = next(six.itervalues(self.parents))
                # Look for a local field with the same name as the
                # first parent link. If a local field has already been
                # created, use it instead of promoting the parent
                already_created = [fld for fld in self.local_fields if fld.name == field.name]
                if already_created:
                    field = already_created[0]
                field.primary_key = True
                self.setup_pk(field)
            else:
                auto = AutoField(verbose_name='ID', primary_key=True,
                                 auto_created=True)
                model.add_to_class('id', auto)
    def add_field(self, field, virtual=False):
        """Register ``field`` on this model and expire the affected
        field caches."""
        # Insert the given field in the order in which it was created, using
        # the "creation_counter" attribute of the field.
        # Move many-to-many related fields from self.fields into
        # self.many_to_many.
        if virtual:
            self.virtual_fields.append(field)
        elif field.is_relation and field.many_to_many:
            self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
        else:
            self.local_fields.insert(bisect(self.local_fields, field), field)
            self.setup_pk(field)
        # If the field being added is a relation to another known field,
        # expire the cache on this field and the forward cache on the field
        # being referenced, because there will be new relationships in the
        # cache. Otherwise, expire the cache of references *to* this field.
        # The mechanism for getting at the related model is slightly odd -
        # ideally, we'd just ask for field.related_model. However, related_model
        # is a cached property, and all the models haven't been loaded yet, so
        # we need to make sure we don't cache a string reference.
        if field.is_relation and hasattr(field.remote_field, 'model') and field.remote_field.model:
            try:
                field.remote_field.model._meta._expire_cache(forward=False)
            except AttributeError:
                pass
            self._expire_cache()
        else:
            self._expire_cache(reverse=False)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def setup_proxy(self, target):
"""
Does the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
    def __repr__(self):
        # e.g. "<Options for User>"
        return '<Options for %s>' % self.object_name
    def __str__(self):
        # e.g. "auth.user" (lower-cased, like label_lower)
        return "%s.%s" % (smart_text(self.app_label), smart_text(self.model_name))
def can_migrate(self, connection):
"""
Return True if the model can/should be migrated on the `connection`.
`connection` can be either a real connection or a connection alias.
"""
if self.proxy or self.swapped or not self.managed:
return False
if isinstance(connection, six.string_types):
connection = connections[connection]
if self.required_db_vendor:
return self.required_db_vendor == connection.vendor
if self.required_db_features:
return all(getattr(connection.features, feat, False)
for feat in self.required_db_features)
return True
    @property
    def verbose_name_raw(self):
        """
        There are a few places where the untranslated verbose name is needed
        (so that we get the same value regardless of currently active
        locale).
        """
        # override(None) deactivates translation while force_text evaluates
        # the (possibly lazy) verbose_name.
        with override(None):
            return force_text(self.verbose_name)
    @property
    def swapped(self):
        """
        Has this model been swapped out for another? If so, return the model
        name of the replacement; otherwise, return None.
        For historical reasons, model name lookups using get_model() are
        case insensitive, so we make sure we are case insensitive here.
        """
        if self.swappable:
            # ``swappable`` holds a settings name, e.g. "AUTH_USER_MODEL".
            swapped_for = getattr(settings, self.swappable, None)
            if swapped_for:
                try:
                    swapped_label, swapped_object = swapped_for.split('.')
                except ValueError:
                    # setting not in the format app_label.model_name
                    # raising ImproperlyConfigured here causes problems with
                    # test cleanup code - instead it is raised in get_user_model
                    # or as part of validation.
                    return swapped_for
                if '%s.%s' % (swapped_label, swapped_object.lower()) != self.label_lower:
                    return swapped_for
        return None
    @cached_property
    def fields(self):
        """
        Returns a list of all forward fields on the model and its parents,
        excluding ManyToManyFields.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        # For legacy reasons, the fields property should only contain forward
        # fields that are not virtual or with a m2m cardinality. Therefore we
        # pass these three filters as filters to the generator.
        # The third lambda is a longwinded way of checking f.related_model - we don't
        # use that property directly because related_model is a cached property,
        # and all the models may not have been loaded yet; we don't want to cache
        # the string reference to the related_model.
        is_not_an_m2m_field = lambda f: not (f.is_relation and f.many_to_many)
        is_not_a_generic_relation = lambda f: not (f.is_relation and f.one_to_many)
        is_not_a_generic_foreign_key = lambda f: not (
            f.is_relation and f.many_to_one and not (hasattr(f.remote_field, 'model') and f.remote_field.model)
        )
        # The wrapper raises a helpful error if callers try to mutate the
        # cached list in place.
        return make_immutable_fields_list(
            "fields",
            (f for f in self._get_fields(reverse=False) if
             is_not_an_m2m_field(f) and is_not_a_generic_relation(f)
             and is_not_a_generic_foreign_key(f))
        )
    @cached_property
    def concrete_fields(self):
        """
        Returns a list of all concrete fields on the model and its parents.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        # Subset of ``fields`` with a database column (f.concrete is True).
        return make_immutable_fields_list(
            "concrete_fields", (f for f in self.fields if f.concrete)
        )
    @cached_property
    def local_concrete_fields(self):
        """
        Returns a list of all concrete fields on the model.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        # Like ``concrete_fields`` but restricted to fields declared on
        # this model itself (no parents).
        return make_immutable_fields_list(
            "local_concrete_fields", (f for f in self.local_fields if f.concrete)
        )
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_fields_with_model(self):
        # Deprecated pre-1.8 _meta API: pairs each field with its model.
        return [self._map_model(f) for f in self.get_fields()]
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_concrete_fields_with_model(self):
        # Deprecated pre-1.8 _meta API: concrete fields paired with models.
        return [self._map_model(f) for f in self.concrete_fields]
    @cached_property
    def many_to_many(self):
        """
        Returns a list of all many to many fields on the model and its parents.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this list.
        """
        # Forward fields only (reverse=False); reverse m2m relations are
        # exposed through ``related_objects`` instead.
        return make_immutable_fields_list(
            "many_to_many",
            (f for f in self._get_fields(reverse=False)
             if f.is_relation and f.many_to_many)
        )
    @cached_property
    def related_objects(self):
        """
        Returns all related objects pointing to the current model. The related
        objects can come from a one-to-one, one-to-many, or many-to-many field
        relation type.
        Private API intended only to be used by Django itself; get_fields()
        combined with filtering of field properties is the public API for
        obtaining this field list.
        """
        all_related_fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
        # Hidden relations are excluded, except m2m ones which are kept.
        return make_immutable_fields_list(
            "related_objects",
            (obj for obj in all_related_fields
             if not obj.hidden or obj.field.many_to_many)
        )
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_m2m_with_model(self):
        # Deprecated pre-1.8 _meta API: m2m fields paired with their models.
        return [self._map_model(f) for f in self.many_to_many]
    @cached_property
    def _forward_fields_map(self):
        """Map of field name (and attname, where available) to forward
        field instance; backs the fast path of get_field()."""
        res = {}
        fields = self._get_fields(reverse=False)
        for field in fields:
            res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. In the case of a concrete
            # field with relation, includes the *_id name too
            try:
                res[field.attname] = field
            except AttributeError:
                pass
        return res
    @cached_property
    def fields_map(self):
        """Map of name/attname to field for reverse fields (hidden ones
        included); used by get_field() when the forward lookup misses."""
        res = {}
        fields = self._get_fields(forward=False, include_hidden=True)
        for field in fields:
            res[field.name] = field
            # Due to the way Django's internals work, get_field() should also
            # be able to fetch a field by attname. In the case of a concrete
            # field with relation, includes the *_id name too
            try:
                res[field.attname] = field
            except AttributeError:
                pass
        return res
    def get_field(self, field_name, many_to_many=None):
        """
        Returns a field instance given a field name. The field can be either a
        forward or reverse field, unless many_to_many is specified; if it is,
        only forward fields will be returned.
        The many_to_many argument exists for backwards compatibility reasons;
        it has been deprecated and will be removed in Django 1.10.

        Raises FieldDoesNotExist when no field matches ``field_name``.
        """
        m2m_in_kwargs = many_to_many is not None
        if m2m_in_kwargs:
            # Always throw a warning if many_to_many is used regardless of
            # whether it alters the return type or not.
            warnings.warn(
                "The 'many_to_many' argument on get_field() is deprecated; "
                "use a filter on field.many_to_many instead.",
                RemovedInDjango110Warning
            )
        try:
            # In order to avoid premature loading of the relation tree
            # (expensive) we prefer checking if the field is a forward field.
            field = self._forward_fields_map[field_name]
            if many_to_many is False and field.many_to_many:
                raise FieldDoesNotExist(
                    '%s has no field named %r' % (self.object_name, field_name)
                )
            return field
        except KeyError:
            # If the app registry is not ready, reverse fields are
            # unavailable, therefore we throw a FieldDoesNotExist exception.
            if not self.apps.models_ready:
                raise FieldDoesNotExist(
                    "%s has no field named %r. The app cache isn't ready yet, "
                    "so if this is an auto-created related field, it won't "
                    "be available yet." % (self.object_name, field_name)
                )
        try:
            if m2m_in_kwargs:
                # Previous API does not allow searching reverse fields.
                raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))
            # Retrieve field instance by name from cached or just-computed
            # field map.
            return self.fields_map[field_name]
        except KeyError:
            raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, field_name))
    @raise_deprecation(suggested_alternative="get_field()")
    def get_field_by_name(self, name):
        # Deprecated pre-1.8 _meta API: legacy 4-tuple form of get_field().
        return self._map_model_details(self.get_field(name))
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_field_names(self):
        """Deprecated pre-1.8 _meta API: list of all field names and
        attnames, minus generic FKs and child-proxy relations."""
        names = set()
        fields = self.get_fields()
        for field in fields:
            # For backwards compatibility GenericForeignKey should not be
            # included in the results.
            if field.is_relation and field.many_to_one and field.related_model is None:
                continue
            # Relations to child proxy models should not be included.
            if (field.model != self.model and
                    field.model._meta.concrete_model == self.concrete_model):
                continue
            names.add(field.name)
            if hasattr(field, 'attname'):
                names.add(field.attname)
        return list(names)
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_related_objects(self, local_only=False, include_hidden=False,
                                include_proxy_eq=False):
        """Deprecated pre-1.8 _meta API: reverse relations to this model,
        excluding many-to-many ones."""
        include_parents = True if local_only is False else PROXY_PARENTS
        fields = self._get_fields(
            forward=False, reverse=True,
            include_parents=include_parents,
            include_hidden=include_hidden,
        )
        fields = (obj for obj in fields if not isinstance(obj.field, ManyToManyField))
        if include_proxy_eq:
            # Also pull in relations targeting proxies of this model.
            children = chain.from_iterable(c._relation_tree
                                           for c in self.concrete_model._meta.proxied_children
                                           if c is not self)
            relations = (f.remote_field for f in children
                         if include_hidden or not f.remote_field.field.remote_field.is_hidden())
            fields = chain(fields, relations)
        return list(fields)
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_related_objects_with_model(self, local_only=False, include_hidden=False,
                                           include_proxy_eq=False):
        # Deprecated pre-1.8 _meta API: reverse relations paired with models.
        return [
            self._map_model(f) for f in self.get_all_related_objects(
                local_only=local_only,
                include_hidden=include_hidden,
                include_proxy_eq=include_proxy_eq,
            )
        ]
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_related_many_to_many_objects(self, local_only=False):
        # Deprecated pre-1.8 _meta API: reverse m2m relations only.
        include_parents = True if local_only is not True else PROXY_PARENTS
        fields = self._get_fields(
            forward=False, reverse=True,
            include_parents=include_parents, include_hidden=True
        )
        return [obj for obj in fields if isinstance(obj.field, ManyToManyField)]
    @raise_deprecation(suggested_alternative="get_fields()")
    def get_all_related_m2m_objects_with_model(self):
        # Deprecated pre-1.8 _meta API: reverse m2m relations with models.
        fields = self._get_fields(forward=False, reverse=True, include_hidden=True)
        return [self._map_model(obj) for obj in fields if isinstance(obj.field, ManyToManyField)]
def get_base_chain(self, model):
"""
Return a list of parent classes leading to `model` (ordered from
closest to most distant ancestor). This has to handle the case where
`model` is a grandparent or even more distant relation.
"""
if not self.parents:
return []
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
return []
    def get_parent_list(self):
        """
        Returns all the ancestors of this model as a list ordered by MRO.
        Useful for determining if something is an ancestor, regardless of lineage.
        """
        # OrderedSet keeps the MRO ordering while removing duplicates that
        # can appear with diamond-shaped inheritance.
        result = OrderedSet(self.parents)
        for parent in self.parents:
            for ancestor in parent._meta.get_parent_list():
                result.add(ancestor)
        return list(result)
    def get_ancestor_link(self, ancestor):
        """
        Returns the field on the current model which points to the given
        "ancestor". This is possibly an indirect link (a pointer to a parent
        model, which points, eventually, to the ancestor). Used when
        constructing table joins for model inheritance.
        Returns None if the model isn't an ancestor of this one.
        """
        if ancestor in self.parents:
            return self.parents[ancestor]
        for parent in self.parents:
            # Tries to get a link field from the immediate parent
            parent_link = parent._meta.get_ancestor_link(ancestor)
            if parent_link:
                # In case of a proxied model, the first link
                # of the chain to the ancestor is that parent
                # links
                return self.parents[parent] or parent_link
def _populate_directed_relation_graph(self):
    """
    This method is used by each model to find its reverse objects. As this
    method is very expensive and is accessed frequently (it looks up every
    field in a model, in every app), it is computed on first access and then
    is set as a property on every model.
    """
    related_objects_graph = defaultdict(list)
    all_models = self.apps.get_models(include_auto_created=True)
    for model in all_models:
        # Abstract model's fields are copied to child models, hence we will
        # see the fields from the child models.
        if model._meta.abstract:
            continue
        # Only forward, locally-defined relational fields with a resolved
        # related model are candidates for the graph.
        fields_with_relations = (
            f for f in model._meta._get_fields(reverse=False, include_parents=False)
            if f.is_relation and f.related_model is not None
        )
        for f in fields_with_relations:
            # Skip fields whose remote model is still an unresolved lazy
            # string reference; they cannot be placed in the graph yet.
            if not isinstance(f.remote_field.model, six.string_types):
                related_objects_graph[f.remote_field.model._meta].append(f)
    for model in all_models:
        # Set the relation_tree using the internal __dict__. In this way
        # we avoid calling the cached property. In attribute lookup,
        # __dict__ takes precedence over a data descriptor (such as
        # @cached_property). This means that the _meta._relation_tree is
        # only called if related_objects is not in __dict__.
        related_objects = related_objects_graph[model._meta]
        model._meta.__dict__['_relation_tree'] = related_objects
    # It seems it is possible that self is not in all_models, so guard
    # against that with default for get().
    return self.__dict__.get('_relation_tree', EMPTY_RELATION_TREE)
@cached_property
def _relation_tree(self):
    # List of relation fields on other models that point at this model.
    # Computed lazily (the graph walk is expensive) and invalidated by
    # _expire_cache(), which deletes this cached property.
    return self._populate_directed_relation_graph()
def _expire_cache(self, forward=True, reverse=True):
# This method is usually called by apps.cache_clear(), when the
# registry is finalized, or when a new field is added.
properties_to_expire = []
if forward:
properties_to_expire.extend(self.FORWARD_PROPERTIES)
if reverse and not self.abstract:
properties_to_expire.extend(self.REVERSE_PROPERTIES)
for cache_key in properties_to_expire:
try:
delattr(self, cache_key)
except AttributeError:
pass
self._get_fields_cache = {}
def get_fields(self, include_parents=True, include_hidden=False):
    """
    Return a list of fields associated to the model. By default, includes
    forward and reverse fields, fields derived from inheritance, but not
    hidden fields. The returned fields can be changed using the parameters:
    - include_parents: include fields derived from inheritance
    - include_hidden: include fields that have a related_name that
      starts with a "+"
    """
    # The public False is translated to the internal PROXY_PARENTS sentinel
    # understood by _get_fields().
    effective_parents = PROXY_PARENTS if include_parents is False else include_parents
    return self._get_fields(
        include_parents=effective_parents,
        include_hidden=include_hidden,
    )
def _get_fields(self, forward=True, reverse=True, include_parents=True, include_hidden=False,
                seen_models=None):
    """
    Internal helper function to return fields of the model.
    * If forward=True, then fields defined on this model are returned.
    * If reverse=True, then relations pointing to this model are returned.
    * If include_hidden=True, then fields with is_hidden=True are returned.
    * The include_parents argument toggles if fields from parent models
      should be included. It has three values: True, False, and
      PROXY_PARENTS. When set to PROXY_PARENTS, the call will return all
      fields defined for the current model or any of its parents in the
      parent chain to the model's concrete model.
    * seen_models is only used internally during recursion, to avoid
      visiting the same parent model twice (diamond inheritance).
    """
    if include_parents not in (True, False, PROXY_PARENTS):
        raise TypeError("Invalid argument for include_parents: %s" % (include_parents,))
    # This helper function is used to allow recursion in ``get_fields()``
    # implementation and to provide a fast way for Django's internals to
    # access specific subsets of fields.
    # We must keep track of which models we have already seen. Otherwise we
    # could include the same field multiple times from different models.
    topmost_call = False
    if seen_models is None:
        seen_models = set()
        topmost_call = True
    seen_models.add(self.model)
    # Creates a cache key composed of all arguments
    cache_key = (forward, reverse, include_parents, include_hidden, topmost_call)
    try:
        # In order to avoid list manipulation. Always return a shallow copy
        # of the results.
        return self._get_fields_cache[cache_key]
    except KeyError:
        pass
    fields = []
    # Recursively call _get_fields() on each parent, with the same
    # options provided in this call.
    if include_parents is not False:
        for parent in self.parents:
            # In diamond inheritance it is possible that we see the same
            # model from two different routes. In that case, avoid adding
            # fields from the same parent again.
            if parent in seen_models:
                continue
            # Under PROXY_PARENTS, stop once we leave the proxy chain
            # (i.e. the parent has a different concrete model).
            if (parent._meta.concrete_model != self.concrete_model and
                    include_parents == PROXY_PARENTS):
                continue
            for obj in parent._meta._get_fields(
                    forward=forward, reverse=reverse, include_parents=include_parents,
                    include_hidden=include_hidden, seen_models=seen_models):
                # Skip the implicit parent-link one-to-one fields; they are
                # inheritance plumbing, not regular model fields.
                if hasattr(obj, 'parent_link') and obj.parent_link:
                    continue
                fields.append(obj)
    if reverse:
        # Tree is computed once and cached until the app cache is expired.
        # It is composed of a list of fields pointing to the current model
        # from other models.
        all_fields = self._relation_tree
        for field in all_fields:
            # If hidden fields should be included or the relation is not
            # intentionally hidden, add to the fields dict.
            if include_hidden or not field.remote_field.hidden:
                fields.append(field.remote_field)
    if forward:
        fields.extend(
            field for field in chain(self.local_fields, self.local_many_to_many)
        )
        # Virtual fields are recopied to each child model, and they get a
        # different model as field.model in each child. Hence we have to
        # add the virtual fields separately from the topmost call. If we
        # did this recursively similar to local_fields, we would get field
        # instances with field.model != self.model.
        if topmost_call:
            fields.extend(
                f for f in self.virtual_fields
            )
    # In order to avoid list manipulation. Always
    # return a shallow copy of the results
    fields = make_immutable_fields_list("get_fields()", fields)
    # Store result into cache for later access
    self._get_fields_cache[cache_key] = fields
    return fields
| bsd-3-clause |
Gui13/CouchPotatoServer | libs/tmdb3/locales.py | 34 | 20495 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------
# Name: locales.py Stores locale information for filtering results
# Python Library
# Author: Raymond Wagner
#-----------------------
from tmdb_exceptions import *
import locale
syslocale = None
class LocaleBase(object):
    """
    Base class for locale components (Language, Country, Locale).

    Each subclass keeps its own ``_stored`` registry mapping lowercased
    codes to instances; ``__init__`` self-registers the instance under
    every key given, and ``getstored`` looks instances back up by code.
    """
    __slots__ = ['__immutable']
    _stored = {}
    fallthrough = False

    def __init__(self, *keys):
        # Register this instance under every (case-insensitive) key.
        for key in keys:
            self._stored[key.lower()] = self
        self.__immutable = True

    def __setattr__(self, key, value):
        # NOTE(review): ``self.__immutable`` is stored under the name-mangled
        # slot '_LocaleBase__immutable', but this lookup uses the literal
        # string '__immutable', so the guard appears to never fire.
        # Preserved as-is to avoid changing observable behavior; confirm
        # upstream intent before changing.
        if getattr(self, '__immutable', False):
            raise NotImplementedError(self.__class__.__name__ +
                                      ' does not support modification.')
        super(LocaleBase, self).__setattr__(key, value)

    def __delattr__(self, key):
        # Same (currently inert) immutability guard as __setattr__.
        if getattr(self, '__immutable', False):
            raise NotImplementedError(self.__class__.__name__ +
                                      ' does not support modification.')
        super(LocaleBase, self).__delattr__(key)

    def __lt__(self, other):
        # NOTE(review): the comparison operators are reversed relative to the
        # string ordering (``<`` uses ``>``), which makes sorted() yield
        # descending string order — presumably intentional; confirm upstream.
        return (id(self) != id(other)) and (str(self) > str(other))

    def __gt__(self, other):
        return (id(self) != id(other)) and (str(self) < str(other))

    def __eq__(self, other):
        return (id(self) == id(other)) or (str(self) == str(other))

    @classmethod
    def getstored(cls, key):
        """Return the registered instance for ``key``; None passes through.

        Raises TMDBLocaleError for unknown or non-string codes.
        """
        if key is None:
            return None
        try:
            return cls._stored[key.lower()]
        # Was a bare ``except:``; narrowed to the failures that can actually
        # occur here (unknown code -> KeyError, key without .lower() ->
        # AttributeError) so unrelated errors are no longer swallowed.
        except (KeyError, AttributeError):
            raise TMDBLocaleError("'{0}' is not a known valid {1} code."
                                  .format(key, cls.__name__))
class Language(LocaleBase):
    """
    An ISO 639 language, registered in its class registry under both its
    two-letter (639-1) and three-letter (639-2) codes.
    """
    # NOTE(review): ISO639_2B and nativename are declared as slots but never
    # assigned (their assignments are commented out in __init__); accessing
    # them raises AttributeError.
    __slots__ = ['ISO639_1', 'ISO639_2', 'ISO639_2B', 'englishname',
                 'nativename']
    _stored = {}  # per-class registry, separate from other LocaleBase subclasses

    def __init__(self, iso1, iso2, ename):
        # Two-letter ISO 639-1 code, e.g. "en".
        self.ISO639_1 = iso1
        # Three-letter ISO 639-2 code; may hold slash-joined variants
        # such as "fre/fra" (stored as one literal registry key).
        self.ISO639_2 = iso2
        # self.ISO639_2B = iso2b
        # English name of the language.
        self.englishname = ename
        # self.nativename = nname
        # Registers the instance under both codes and freezes it.
        super(Language, self).__init__(iso1, iso2)

    def __str__(self):
        return self.ISO639_1

    def __repr__(self):
        return u"<Language '{0.englishname}' ({0.ISO639_1})>".format(self)
class Country(LocaleBase):
    """
    An ISO 3166-1 country, registered in its class registry under its
    alpha-2 code.
    """
    __slots__ = ['alpha2', 'name']
    _stored = {}  # per-class registry, separate from Language's

    def __init__(self, alpha2, name):
        # Two-letter ISO 3166-1 alpha-2 code, e.g. "US".
        self.alpha2 = alpha2
        # English country name.
        self.name = name
        # Registers the instance under its alpha-2 code and freezes it.
        super(Country, self).__init__(alpha2)

    def __str__(self):
        return self.alpha2

    def __repr__(self):
        return u"<Country '{0.name}' ({0.alpha2})>".format(self)
class Locale(LocaleBase):
    """
    A Language/Country pair plus a character encoding, used to filter
    results and to encode/decode strings at the I/O boundary.
    """
    __slots__ = ['language', 'country', 'encoding']

    def __init__(self, language, country, encoding):
        # Codes are resolved through the Language/Country registries;
        # invalid codes raise TMDBLocaleError via getstored().
        self.language = Language.getstored(language)
        self.country = Country.getstored(country)
        # Fall back to latin-1 when the system reports no encoding.
        self.encoding = encoding if encoding else 'latin-1'

    def __str__(self):
        return u"{0}_{1}".format(self.language, self.country)

    def __repr__(self):
        return u"<Locale {0.language}_{0.country}>".format(self)

    def encode(self, dat):
        """Encode using system default encoding for network/file output."""
        try:
            return dat.encode(self.encoding)
        except AttributeError:
            # not a string type, pass along
            return dat
        except UnicodeDecodeError:
            # Python 2: encoding a byte string performs an implicit decode
            # first, which can raise this; return unmodified and hope for
            # the best
            return dat

    def decode(self, dat):
        """Decode to system default encoding for internal use."""
        try:
            return dat.decode(self.encoding)
        except AttributeError:
            # not a string type, pass along
            return dat
        except UnicodeEncodeError:
            # Python 2: decoding a unicode string performs an implicit
            # encode first, which can raise this; return unmodified and
            # hope for the best
            return dat
def set_locale(language=None, country=None, fallthrough=False):
    """
    Set the module-wide locale. Missing language/country values are filled
    from the previously stored locale, or from the system default locale,
    or finally from 'en'/'US'.
    """
    global syslocale
    LocaleBase.fallthrough = fallthrough
    default_code, default_encoding = locale.getdefaultlocale()
    if (not language) or (not country):
        # Work out the fallback (language, country) pair.
        if syslocale is not None:
            fallback = (str(syslocale.language), str(syslocale.country))
        elif (default_code is None) or ('_' not in default_code):
            fallback = ('en', 'US')
        else:
            fallback = default_code.split('_')
        if language is None:
            language = fallback[0]
        if country is None:
            country = fallback[1]
    syslocale = Locale(language, country, default_encoding)
def get_locale(language=-1, country=-1):
    """
    Output a Locale built from the provided attributes, defaulting any
    unspecified one (-1 sentinel) from the stored system locale.
    """
    global syslocale
    # Base locale: the stored one, or a fresh default from the system.
    if syslocale is None:
        base = Locale(None, None, locale.getdefaultlocale()[1])
    else:
        base = syslocale
    # Both options defaulted: hand back the stored values directly.
    if (language == -1) and (country == -1):
        return base
    # Fill whichever single attribute was left at its default.
    if language == -1:
        language = base.language
    elif country == -1:
        country = base.country
    return Locale(language, country, base.encoding)
######## AUTOGENERATED LANGUAGE AND COUNTRY DATA BELOW HERE #########
Language("ab", "abk", u"Abkhazian")
Language("aa", "aar", u"Afar")
Language("af", "afr", u"Afrikaans")
Language("ak", "aka", u"Akan")
Language("sq", "alb/sqi", u"Albanian")
Language("am", "amh", u"Amharic")
Language("ar", "ara", u"Arabic")
Language("an", "arg", u"Aragonese")
Language("hy", "arm/hye", u"Armenian")
Language("as", "asm", u"Assamese")
Language("av", "ava", u"Avaric")
Language("ae", "ave", u"Avestan")
Language("ay", "aym", u"Aymara")
Language("az", "aze", u"Azerbaijani")
Language("bm", "bam", u"Bambara")
Language("ba", "bak", u"Bashkir")
Language("eu", "baq/eus", u"Basque")
Language("be", "bel", u"Belarusian")
Language("bn", "ben", u"Bengali")
Language("bh", "bih", u"Bihari languages")
Language("bi", "bis", u"Bislama")
Language("nb", "nob", u"Bokmål, Norwegian")
Language("bs", "bos", u"Bosnian")
Language("br", "bre", u"Breton")
Language("bg", "bul", u"Bulgarian")
Language("my", "bur/mya", u"Burmese")
Language("es", "spa", u"Castilian")
Language("ca", "cat", u"Catalan")
Language("km", "khm", u"Central Khmer")
Language("ch", "cha", u"Chamorro")
Language("ce", "che", u"Chechen")
Language("ny", "nya", u"Chewa")
Language("ny", "nya", u"Chichewa")
Language("zh", "chi/zho", u"Chinese")
Language("za", "zha", u"Chuang")
Language("cu", "chu", u"Church Slavic")
Language("cu", "chu", u"Church Slavonic")
Language("cv", "chv", u"Chuvash")
Language("kw", "cor", u"Cornish")
Language("co", "cos", u"Corsican")
Language("cr", "cre", u"Cree")
Language("hr", "hrv", u"Croatian")
Language("cs", "cze/ces", u"Czech")
Language("da", "dan", u"Danish")
Language("dv", "div", u"Dhivehi")
Language("dv", "div", u"Divehi")
Language("nl", "dut/nld", u"Dutch")
Language("dz", "dzo", u"Dzongkha")
Language("en", "eng", u"English")
Language("eo", "epo", u"Esperanto")
Language("et", "est", u"Estonian")
Language("ee", "ewe", u"Ewe")
Language("fo", "fao", u"Faroese")
Language("fj", "fij", u"Fijian")
Language("fi", "fin", u"Finnish")
Language("nl", "dut/nld", u"Flemish")
Language("fr", "fre/fra", u"French")
Language("ff", "ful", u"Fulah")
Language("gd", "gla", u"Gaelic")
Language("gl", "glg", u"Galician")
Language("lg", "lug", u"Ganda")
Language("ka", "geo/kat", u"Georgian")
Language("de", "ger/deu", u"German")
Language("ki", "kik", u"Gikuyu")
Language("el", "gre/ell", u"Greek, Modern (1453-)")
Language("kl", "kal", u"Greenlandic")
Language("gn", "grn", u"Guarani")
Language("gu", "guj", u"Gujarati")
Language("ht", "hat", u"Haitian")
Language("ht", "hat", u"Haitian Creole")
Language("ha", "hau", u"Hausa")
Language("he", "heb", u"Hebrew")
Language("hz", "her", u"Herero")
Language("hi", "hin", u"Hindi")
Language("ho", "hmo", u"Hiri Motu")
Language("hu", "hun", u"Hungarian")
Language("is", "ice/isl", u"Icelandic")
Language("io", "ido", u"Ido")
Language("ig", "ibo", u"Igbo")
Language("id", "ind", u"Indonesian")
Language("ia", "ina", u"Interlingua (International Auxiliary Language Association)")
Language("ie", "ile", u"Interlingue")
Language("iu", "iku", u"Inuktitut")
Language("ik", "ipk", u"Inupiaq")
Language("ga", "gle", u"Irish")
Language("it", "ita", u"Italian")
Language("ja", "jpn", u"Japanese")
Language("jv", "jav", u"Javanese")
Language("kl", "kal", u"Kalaallisut")
Language("kn", "kan", u"Kannada")
Language("kr", "kau", u"Kanuri")
Language("ks", "kas", u"Kashmiri")
Language("kk", "kaz", u"Kazakh")
Language("ki", "kik", u"Kikuyu")
Language("rw", "kin", u"Kinyarwanda")
Language("ky", "kir", u"Kirghiz")
Language("kv", "kom", u"Komi")
Language("kg", "kon", u"Kongo")
Language("ko", "kor", u"Korean")
Language("kj", "kua", u"Kuanyama")
Language("ku", "kur", u"Kurdish")
Language("kj", "kua", u"Kwanyama")
Language("ky", "kir", u"Kyrgyz")
Language("lo", "lao", u"Lao")
Language("la", "lat", u"Latin")
Language("lv", "lav", u"Latvian")
Language("lb", "ltz", u"Letzeburgesch")
Language("li", "lim", u"Limburgan")
Language("li", "lim", u"Limburger")
Language("li", "lim", u"Limburgish")
Language("ln", "lin", u"Lingala")
Language("lt", "lit", u"Lithuanian")
Language("lu", "lub", u"Luba-Katanga")
Language("lb", "ltz", u"Luxembourgish")
Language("mk", "mac/mkd", u"Macedonian")
Language("mg", "mlg", u"Malagasy")
Language("ms", "may/msa", u"Malay")
Language("ml", "mal", u"Malayalam")
Language("dv", "div", u"Maldivian")
Language("mt", "mlt", u"Maltese")
Language("gv", "glv", u"Manx")
Language("mi", "mao/mri", u"Maori")
Language("mr", "mar", u"Marathi")
Language("mh", "mah", u"Marshallese")
Language("ro", "rum/ron", u"Moldavian")
Language("ro", "rum/ron", u"Moldovan")
Language("mn", "mon", u"Mongolian")
Language("na", "nau", u"Nauru")
Language("nv", "nav", u"Navaho")
Language("nv", "nav", u"Navajo")
Language("nd", "nde", u"Ndebele, North")
Language("nr", "nbl", u"Ndebele, South")
Language("ng", "ndo", u"Ndonga")
Language("ne", "nep", u"Nepali")
Language("nd", "nde", u"North Ndebele")
Language("se", "sme", u"Northern Sami")
Language("no", "nor", u"Norwegian")
Language("nb", "nob", u"Norwegian Bokmål")
Language("nn", "nno", u"Norwegian Nynorsk")
Language("ii", "iii", u"Nuosu")
Language("ny", "nya", u"Nyanja")
Language("nn", "nno", u"Nynorsk, Norwegian")
Language("ie", "ile", u"Occidental")
Language("oc", "oci", u"Occitan (post 1500)")
Language("oj", "oji", u"Ojibwa")
Language("cu", "chu", u"Old Bulgarian")
Language("cu", "chu", u"Old Church Slavonic")
Language("cu", "chu", u"Old Slavonic")
Language("or", "ori", u"Oriya")
Language("om", "orm", u"Oromo")
Language("os", "oss", u"Ossetian")
Language("os", "oss", u"Ossetic")
Language("pi", "pli", u"Pali")
Language("pa", "pan", u"Panjabi")
Language("ps", "pus", u"Pashto")
Language("fa", "per/fas", u"Persian")
Language("pl", "pol", u"Polish")
Language("pt", "por", u"Portuguese")
Language("pa", "pan", u"Punjabi")
Language("ps", "pus", u"Pushto")
Language("qu", "que", u"Quechua")
Language("ro", "rum/ron", u"Romanian")
Language("rm", "roh", u"Romansh")
Language("rn", "run", u"Rundi")
Language("ru", "rus", u"Russian")
Language("sm", "smo", u"Samoan")
Language("sg", "sag", u"Sango")
Language("sa", "san", u"Sanskrit")
Language("sc", "srd", u"Sardinian")
Language("gd", "gla", u"Scottish Gaelic")
Language("sr", "srp", u"Serbian")
Language("sn", "sna", u"Shona")
Language("ii", "iii", u"Sichuan Yi")
Language("sd", "snd", u"Sindhi")
Language("si", "sin", u"Sinhala")
Language("si", "sin", u"Sinhalese")
Language("sk", "slo/slk", u"Slovak")
Language("sl", "slv", u"Slovenian")
Language("so", "som", u"Somali")
Language("st", "sot", u"Sotho, Southern")
Language("nr", "nbl", u"South Ndebele")
Language("es", "spa", u"Spanish")
Language("su", "sun", u"Sundanese")
Language("sw", "swa", u"Swahili")
Language("ss", "ssw", u"Swati")
Language("sv", "swe", u"Swedish")
Language("tl", "tgl", u"Tagalog")
Language("ty", "tah", u"Tahitian")
Language("tg", "tgk", u"Tajik")
Language("ta", "tam", u"Tamil")
Language("tt", "tat", u"Tatar")
Language("te", "tel", u"Telugu")
Language("th", "tha", u"Thai")
Language("bo", "tib/bod", u"Tibetan")
Language("ti", "tir", u"Tigrinya")
Language("to", "ton", u"Tonga (Tonga Islands)")
Language("ts", "tso", u"Tsonga")
Language("tn", "tsn", u"Tswana")
Language("tr", "tur", u"Turkish")
Language("tk", "tuk", u"Turkmen")
Language("tw", "twi", u"Twi")
Language("ug", "uig", u"Uighur")
Language("uk", "ukr", u"Ukrainian")
Language("ur", "urd", u"Urdu")
Language("ug", "uig", u"Uyghur")
Language("uz", "uzb", u"Uzbek")
Language("ca", "cat", u"Valencian")
Language("ve", "ven", u"Venda")
Language("vi", "vie", u"Vietnamese")
Language("vo", "vol", u"Volapük")
Language("wa", "wln", u"Walloon")
Language("cy", "wel/cym", u"Welsh")
Language("fy", "fry", u"Western Frisian")
Language("wo", "wol", u"Wolof")
Language("xh", "xho", u"Xhosa")
Language("yi", "yid", u"Yiddish")
Language("yo", "yor", u"Yoruba")
Language("za", "zha", u"Zhuang")
Language("zu", "zul", u"Zulu")
Country("AF", u"AFGHANISTAN")
Country("AX", u"ÅLAND ISLANDS")
Country("AL", u"ALBANIA")
Country("DZ", u"ALGERIA")
Country("AS", u"AMERICAN SAMOA")
Country("AD", u"ANDORRA")
Country("AO", u"ANGOLA")
Country("AI", u"ANGUILLA")
Country("AQ", u"ANTARCTICA")
Country("AG", u"ANTIGUA AND BARBUDA")
Country("AR", u"ARGENTINA")
Country("AM", u"ARMENIA")
Country("AW", u"ARUBA")
Country("AU", u"AUSTRALIA")
Country("AT", u"AUSTRIA")
Country("AZ", u"AZERBAIJAN")
Country("BS", u"BAHAMAS")
Country("BH", u"BAHRAIN")
Country("BD", u"BANGLADESH")
Country("BB", u"BARBADOS")
Country("BY", u"BELARUS")
Country("BE", u"BELGIUM")
Country("BZ", u"BELIZE")
Country("BJ", u"BENIN")
Country("BM", u"BERMUDA")
Country("BT", u"BHUTAN")
Country("BO", u"BOLIVIA, PLURINATIONAL STATE OF")
Country("BQ", u"BONAIRE, SINT EUSTATIUS AND SABA")
Country("BA", u"BOSNIA AND HERZEGOVINA")
Country("BW", u"BOTSWANA")
Country("BV", u"BOUVET ISLAND")
Country("BR", u"BRAZIL")
Country("IO", u"BRITISH INDIAN OCEAN TERRITORY")
Country("BN", u"BRUNEI DARUSSALAM")
Country("BG", u"BULGARIA")
Country("BF", u"BURKINA FASO")
Country("BI", u"BURUNDI")
Country("KH", u"CAMBODIA")
Country("CM", u"CAMEROON")
Country("CA", u"CANADA")
Country("CV", u"CAPE VERDE")
Country("KY", u"CAYMAN ISLANDS")
Country("CF", u"CENTRAL AFRICAN REPUBLIC")
Country("TD", u"CHAD")
Country("CL", u"CHILE")
Country("CN", u"CHINA")
Country("CX", u"CHRISTMAS ISLAND")
Country("CC", u"COCOS (KEELING) ISLANDS")
Country("CO", u"COLOMBIA")
Country("KM", u"COMOROS")
Country("CG", u"CONGO")
Country("CD", u"CONGO, THE DEMOCRATIC REPUBLIC OF THE")
Country("CK", u"COOK ISLANDS")
Country("CR", u"COSTA RICA")
Country("CI", u"CÔTE D'IVOIRE")
Country("HR", u"CROATIA")
Country("CU", u"CUBA")
Country("CW", u"CURAÇAO")
Country("CY", u"CYPRUS")
Country("CZ", u"CZECH REPUBLIC")
Country("DK", u"DENMARK")
Country("DJ", u"DJIBOUTI")
Country("DM", u"DOMINICA")
Country("DO", u"DOMINICAN REPUBLIC")
Country("EC", u"ECUADOR")
Country("EG", u"EGYPT")
Country("SV", u"EL SALVADOR")
Country("GQ", u"EQUATORIAL GUINEA")
Country("ER", u"ERITREA")
Country("EE", u"ESTONIA")
Country("ET", u"ETHIOPIA")
Country("FK", u"FALKLAND ISLANDS (MALVINAS)")
Country("FO", u"FAROE ISLANDS")
Country("FJ", u"FIJI")
Country("FI", u"FINLAND")
Country("FR", u"FRANCE")
Country("GF", u"FRENCH GUIANA")
Country("PF", u"FRENCH POLYNESIA")
Country("TF", u"FRENCH SOUTHERN TERRITORIES")
Country("GA", u"GABON")
Country("GM", u"GAMBIA")
Country("GE", u"GEORGIA")
Country("DE", u"GERMANY")
Country("GH", u"GHANA")
Country("GI", u"GIBRALTAR")
Country("GR", u"GREECE")
Country("GL", u"GREENLAND")
Country("GD", u"GRENADA")
Country("GP", u"GUADELOUPE")
Country("GU", u"GUAM")
Country("GT", u"GUATEMALA")
Country("GG", u"GUERNSEY")
Country("GN", u"GUINEA")
Country("GW", u"GUINEA-BISSAU")
Country("GY", u"GUYANA")
Country("HT", u"HAITI")
Country("HM", u"HEARD ISLAND AND MCDONALD ISLANDS")
Country("VA", u"HOLY SEE (VATICAN CITY STATE)")
Country("HN", u"HONDURAS")
Country("HK", u"HONG KONG")
Country("HU", u"HUNGARY")
Country("IS", u"ICELAND")
Country("IN", u"INDIA")
Country("ID", u"INDONESIA")
Country("IR", u"IRAN, ISLAMIC REPUBLIC OF")
Country("IQ", u"IRAQ")
Country("IE", u"IRELAND")
Country("IM", u"ISLE OF MAN")
Country("IL", u"ISRAEL")
Country("IT", u"ITALY")
Country("JM", u"JAMAICA")
Country("JP", u"JAPAN")
Country("JE", u"JERSEY")
Country("JO", u"JORDAN")
Country("KZ", u"KAZAKHSTAN")
Country("KE", u"KENYA")
Country("KI", u"KIRIBATI")
Country("KP", u"KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF")
Country("KR", u"KOREA, REPUBLIC OF")
Country("KW", u"KUWAIT")
Country("KG", u"KYRGYZSTAN")
Country("LA", u"LAO PEOPLE'S DEMOCRATIC REPUBLIC")
Country("LV", u"LATVIA")
Country("LB", u"LEBANON")
Country("LS", u"LESOTHO")
Country("LR", u"LIBERIA")
Country("LY", u"LIBYA")
Country("LI", u"LIECHTENSTEIN")
Country("LT", u"LITHUANIA")
Country("LU", u"LUXEMBOURG")
Country("MO", u"MACAO")
Country("MK", u"MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF")
Country("MG", u"MADAGASCAR")
Country("MW", u"MALAWI")
Country("MY", u"MALAYSIA")
Country("MV", u"MALDIVES")
Country("ML", u"MALI")
Country("MT", u"MALTA")
Country("MH", u"MARSHALL ISLANDS")
Country("MQ", u"MARTINIQUE")
Country("MR", u"MAURITANIA")
Country("MU", u"MAURITIUS")
Country("YT", u"MAYOTTE")
Country("MX", u"MEXICO")
Country("FM", u"MICRONESIA, FEDERATED STATES OF")
Country("MD", u"MOLDOVA, REPUBLIC OF")
Country("MC", u"MONACO")
Country("MN", u"MONGOLIA")
Country("ME", u"MONTENEGRO")
Country("MS", u"MONTSERRAT")
Country("MA", u"MOROCCO")
Country("MZ", u"MOZAMBIQUE")
Country("MM", u"MYANMAR")
Country("NA", u"NAMIBIA")
Country("NR", u"NAURU")
Country("NP", u"NEPAL")
Country("NL", u"NETHERLANDS")
Country("NC", u"NEW CALEDONIA")
Country("NZ", u"NEW ZEALAND")
Country("NI", u"NICARAGUA")
Country("NE", u"NIGER")
Country("NG", u"NIGERIA")
Country("NU", u"NIUE")
Country("NF", u"NORFOLK ISLAND")
Country("MP", u"NORTHERN MARIANA ISLANDS")
Country("NO", u"NORWAY")
Country("OM", u"OMAN")
Country("PK", u"PAKISTAN")
Country("PW", u"PALAU")
Country("PS", u"PALESTINIAN TERRITORY, OCCUPIED")
Country("PA", u"PANAMA")
Country("PG", u"PAPUA NEW GUINEA")
Country("PY", u"PARAGUAY")
Country("PE", u"PERU")
Country("PH", u"PHILIPPINES")
Country("PN", u"PITCAIRN")
Country("PL", u"POLAND")
Country("PT", u"PORTUGAL")
Country("PR", u"PUERTO RICO")
Country("QA", u"QATAR")
Country("RE", u"RÉUNION")
Country("RO", u"ROMANIA")
Country("RU", u"RUSSIAN FEDERATION")
Country("RW", u"RWANDA")
Country("BL", u"SAINT BARTHÉLEMY")
Country("SH", u"SAINT HELENA, ASCENSION AND TRISTAN DA CUNHA")
Country("KN", u"SAINT KITTS AND NEVIS")
Country("LC", u"SAINT LUCIA")
Country("MF", u"SAINT MARTIN (FRENCH PART)")
Country("PM", u"SAINT PIERRE AND MIQUELON")
Country("VC", u"SAINT VINCENT AND THE GRENADINES")
Country("WS", u"SAMOA")
Country("SM", u"SAN MARINO")
Country("ST", u"SAO TOME AND PRINCIPE")
Country("SA", u"SAUDI ARABIA")
Country("SN", u"SENEGAL")
Country("RS", u"SERBIA")
Country("SC", u"SEYCHELLES")
Country("SL", u"SIERRA LEONE")
Country("SG", u"SINGAPORE")
Country("SX", u"SINT MAARTEN (DUTCH PART)")
Country("SK", u"SLOVAKIA")
Country("SI", u"SLOVENIA")
Country("SB", u"SOLOMON ISLANDS")
Country("SO", u"SOMALIA")
Country("ZA", u"SOUTH AFRICA")
Country("GS", u"SOUTH GEORGIA AND THE SOUTH SANDWICH ISLANDS")
Country("SS", u"SOUTH SUDAN")
Country("ES", u"SPAIN")
Country("LK", u"SRI LANKA")
Country("SD", u"SUDAN")
Country("SR", u"SURINAME")
Country("SJ", u"SVALBARD AND JAN MAYEN")
Country("SZ", u"SWAZILAND")
Country("SE", u"SWEDEN")
Country("CH", u"SWITZERLAND")
Country("SY", u"SYRIAN ARAB REPUBLIC")
Country("TW", u"TAIWAN, PROVINCE OF CHINA")
Country("TJ", u"TAJIKISTAN")
Country("TZ", u"TANZANIA, UNITED REPUBLIC OF")
Country("TH", u"THAILAND")
Country("TL", u"TIMOR-LESTE")
Country("TG", u"TOGO")
Country("TK", u"TOKELAU")
Country("TO", u"TONGA")
Country("TT", u"TRINIDAD AND TOBAGO")
Country("TN", u"TUNISIA")
Country("TR", u"TURKEY")
Country("TM", u"TURKMENISTAN")
Country("TC", u"TURKS AND CAICOS ISLANDS")
Country("TV", u"TUVALU")
Country("UG", u"UGANDA")
Country("UA", u"UKRAINE")
Country("AE", u"UNITED ARAB EMIRATES")
Country("GB", u"UNITED KINGDOM")
Country("US", u"UNITED STATES")
Country("UM", u"UNITED STATES MINOR OUTLYING ISLANDS")
Country("UY", u"URUGUAY")
Country("UZ", u"UZBEKISTAN")
Country("VU", u"VANUATU")
Country("VE", u"VENEZUELA, BOLIVARIAN REPUBLIC OF")
Country("VN", u"VIET NAM")
Country("VG", u"VIRGIN ISLANDS, BRITISH")
Country("VI", u"VIRGIN ISLANDS, U.S.")
Country("WF", u"WALLIS AND FUTUNA")
Country("EH", u"WESTERN SAHARA")
Country("YE", u"YEMEN")
Country("ZM", u"ZAMBIA")
Country("ZW", u"ZIMBABWE")
| gpl-3.0 |
mbrukman/cloud-launcher | console/appengine/compute_api_base.py | 1 | 3530 | #!/usr/bin/python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Base classes and functions for the generated Compute API module."""
# Standard Python libraries.
import json
# Libraries we added to `third_party/python` via `requirements.txt`.
from apiclient import discovery as apiclient_discovery
from apiclient import errors as apiclient_errors
from apiclient import http as apiclient_http
import httplib2
# Libraries provided by App Engine.
import webapp2
# Local imports.
from oauth2helper import decorator
import safe_memcache as memcache
APP_NAME = 'Cloud Console'
APP_VERSION = '0.1'
USER_AGENT = '%s/%s (github.com/mbrukman/cloud-launcher/tree/master/console)' % (
APP_NAME, APP_VERSION)
# Timeout is in seconds.
MEMCACHE_TIMEOUT = 30
def Http():
    """Build an `httplib2.Http` (cached via memcache) with our User-Agent set."""
    transport = httplib2.Http(memcache)
    return apiclient_http.set_user_agent(transport, USER_AGENT)
class ComputeV1Base(webapp2.RequestHandler):
    """
    Base handler that proxies authenticated Compute Engine v1 API calls and
    writes the JSON result back to the client. GET results are cached in
    memcache (keyed by request path); POSTs are never cached.
    """

    def _execute_api_call(self, obj, method, args):
        """Run service.<obj>().<method>(**args) against the Compute v1 API.

        Returns (output_json, status_int, ok) where ok is False when the
        API raised an HttpError (status 403, error payload in the JSON).
        """
        response = {}
        http = decorator.credentials.authorize(Http())
        service = apiclient_discovery.build('compute', 'v1', http=http)
        try:
            response = service.__dict__[obj]().__dict__[
                method](**args).execute()
            return json.dumps(response, indent=2), 200, True
        except apiclient_errors.HttpError as e:
            error_body = {
                'error': repr(e),
                'response': response,
            }
            return json.dumps(error_body, indent=2), 403, False

    def _write_response(self, output, status_int):
        """Write `output` as the JSON HTTP response with the given status."""
        self.response.headers['Content-Type'] = 'application/json'
        self.response.status_int = status_int
        self.response.write(output)

    def _get(self, obj, method, args):
        """Serve a read-only API call, answering from memcache when possible."""
        memcache_key = self.request.path
        cached = memcache.get(memcache_key)
        if cached:
            self._write_response(cached, 200)
            return
        output, status_int, ok = self._execute_api_call(obj, method, args)
        self._write_response(output, status_int)
        # Only successful responses are cached.
        if ok:
            memcache.set(key=memcache_key, value=output, time=MEMCACHE_TIMEOUT)

    def _post(self, obj, method, args):
        """Serve a mutating API call; results are never cached."""
        output, status_int, _ = self._execute_api_call(obj, method, args)
        self._write_response(output, status_int)
| apache-2.0 |
anistark/mozillians | vendor-local/lib/python/south/management/commands/datamigration.py | 92 | 5038 | """
Data migration creation command
"""
from __future__ import print_function
import sys
import os
import re
from optparse import make_option
try:
set
except NameError:
from sets import Set as set
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import models
from django.conf import settings
from south.migration import Migrations
from south.exceptions import NoMigrations
from south.creator import freezer
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--freeze', action='append', dest='freeze_list', type='string',
help='Freeze the specified app(s). Provide an app name with each; use the option multiple times for multiple apps'),
make_option('--stdout', action='store_true', dest='stdout', default=False,
help='Print the migration to stdout instead of writing it to a file.'),
)
help = "Creates a new template data migration for the given app"
usage_str = "Usage: ./manage.py datamigration appname migrationname [--stdout] [--freeze appname]"
def handle(self, app=None, name="", freeze_list=None, stdout=False, verbosity=1, **options):
verbosity = int(verbosity)
# Any supposed lists that are None become empty lists
freeze_list = freeze_list or []
# --stdout means name = -
if stdout:
name = "-"
# Only allow valid names
if re.search('[^_\w]', name) and name != "-":
self.error("Migration names should contain only alphanumeric characters and underscores.")
# If not name, there's an error
if not name:
self.error("You must provide a name for this migration.\n" + self.usage_str)
if not app:
self.error("You must provide an app to create a migration for.\n" + self.usage_str)
# Ensure that verbosity is not a string (Python 3)
try:
verbosity = int(verbosity)
except ValueError:
self.error("Verbosity must be an number.\n" + self.usage_str)
# Get the Migrations for this app (creating the migrations dir if needed)
migrations = Migrations(app, force_creation=True, verbose_creation=verbosity > 0)
# See what filename is next in line. We assume they use numbers.
new_filename = migrations.next_filename(name)
# Work out which apps to freeze
apps_to_freeze = self.calc_frozen_apps(migrations, freeze_list)
# So, what's in this file, then?
file_contents = self.get_migration_template() % {
"frozen_models": freezer.freeze_apps_to_string(apps_to_freeze),
"complete_apps": apps_to_freeze and "complete_apps = [%s]" % (", ".join(map(repr, apps_to_freeze))) or ""
}
# - is a special name which means 'print to stdout'
if name == "-":
print(file_contents)
# Write the migration file if the name isn't -
else:
fp = open(os.path.join(migrations.migrations_dir(), new_filename), "w")
fp.write(file_contents)
fp.close()
print("Created %s." % new_filename, file=sys.stderr)
def calc_frozen_apps(self, migrations, freeze_list):
"""
Works out, from the current app, settings, and the command line options,
which apps should be frozen.
"""
apps_to_freeze = []
for to_freeze in freeze_list:
if "." in to_freeze:
self.error("You cannot freeze %r; you must provide an app label, like 'auth' or 'books'." % to_freeze)
# Make sure it's a real app
if not models.get_app(to_freeze):
self.error("You cannot freeze %r; it's not an installed app." % to_freeze)
# OK, it's fine
apps_to_freeze.append(to_freeze)
if getattr(settings, 'SOUTH_AUTO_FREEZE_APP', True):
apps_to_freeze.append(migrations.app_label())
return apps_to_freeze
def error(self, message, code=1):
"""
Prints the error, and exits with the given code.
"""
print(message, file=sys.stderr)
sys.exit(code)
    def get_migration_template(self):
        """Return the raw template string used to render new data migrations."""
        return MIGRATION_TEMPLATE
MIGRATION_TEMPLATE = """# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Don't use "from appname.models import ModelName".
# Use orm.ModelName to refer to models in this application,
# and orm['appname.ModelName'] for models in other applications.
def backwards(self, orm):
"Write your backwards methods here."
models = %(frozen_models)s
%(complete_apps)s
symmetrical = True
"""
| bsd-3-clause |
tomn46037/python-myfitnesspal | tests/test_client.py | 2 | 11535 | from collections import OrderedDict
import datetime
from measurement.measures import Energy, Weight
from mock import patch
from myfitnesspal import Client
from .base import MFPTestCase
class TestClient(MFPTestCase):
    """Unit tests for myfitnesspal's Client HTML-scraping helpers.

    All tests feed canned HTML fixtures into the client; ``login=False``
    keeps the constructor from performing any network authentication.

    Note: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    has been replaced with ``assertEqual`` throughout.
    """

    def setUp(self):
        self.arbitrary_username = 'alpha'
        self.arbitrary_password = 'beta'
        self.arbitrary_date1 = datetime.date(2015, 4, 20)
        self.arbitrary_date2 = datetime.date(2015, 4, 28)
        self.client = Client(
            self.arbitrary_username,
            self.arbitrary_password,
            login=False
        )
        super(TestClient, self).setUp()

    def test_get_measurement_ids(self):
        """Measurement names are mapped to their numeric ids."""
        document = self.get_html_document('measurements.html')

        actual_ids = self.client._get_measurement_ids(document)

        expected_ids = {
            "Weight": 1,
            "Body Fat": 91955886,
            "Butt": 92738807,
            "Bicep": 92738811,
            "Quad": 92738815,
            "Mid Section": 92738819,
            "Shoulders": 92738861,
        }
        self.assertEqual(
            expected_ids,
            actual_ids,
        )

    def test_get_meals(self):
        """The diary fixture yields the expected number of meals."""
        document = self.get_html_document('diary.html')

        meals = self.client._get_meals(document)

        self.assertEqual(
            len(meals),
            4,
        )

    def test_get_measurements(self):
        """Measurements are returned as an ordered date -> value mapping."""
        with patch.object(self.client, '_get_document_for_url') as get_doc:
            get_doc.return_value = self.get_html_document('measurements.html')
            actual_measurements = self.client.get_measurements(
                'Body Fat',
                self.arbitrary_date1,
                self.arbitrary_date2,
            )

        expected_measurements = OrderedDict(
            [
                (datetime.date(2015, 4, 28), 19.2),
                (datetime.date(2015, 4, 27), 19.2),
                (datetime.date(2015, 4, 26), 19.0),
                (datetime.date(2015, 4, 25), 18.7),
                (datetime.date(2015, 4, 23), 18.7),
                (datetime.date(2015, 4, 22), 18.4),
                (datetime.date(2015, 4, 21), 18.9),
                (datetime.date(2015, 4, 20), 19.1),
            ]
        )
        self.assertEqual(
            expected_measurements,
            actual_measurements,
        )

    def test_get_day_unit_unaware(self):
        """With unit_aware off, nutrition values are plain numbers."""
        self.client.unit_aware = False
        with patch.object(self.client, '_get_document_for_url') as get_doc:
            get_doc.return_value = self.get_html_document('diary.html')
            day = self.client.get_date(self.arbitrary_date1)

        expected_dict = {
            "lunch": [],
            "breakfast": [
                {
                    "nutrition_information": {
                        "sodium": 380,
                        "carbohydrates": 44,
                        "calories": 240,
                        "fat": 6,
                        "sugar": 8,
                        "protein": 10,
                    },
                    "name": "Dave's Killer Bread - Blues Bread, 2 slice"
                },
                {
                    "nutrition_information": {
                        "sodium": 100,
                        "carbohydrates": 0,
                        "calories": 100,
                        "fat": 11,
                        "sugar": 0,
                        "protein": 0,
                    },
                    "name": (
                        "Earth Balance - "
                        "Natural Buttery Spread - Original, 1 tbsp (14g)"
                    )
                }
            ],
            "dinner": [
                {
                    "nutrition_information": {
                        "sodium": 5,
                        "carbohydrates": 8,
                        "calories": 288,
                        "fat": 0,
                        "sugar": 0,
                        "protein": 0,
                    },
                    "name": "Wine - Pinot Noir Wine, 12 oz"
                },
                {
                    "nutrition_information": {
                        "sodium": 1166,
                        "carbohydrates": 64,
                        "calories": 690,
                        "fat": 48,
                        "sugar": 14,
                        "protein": 30,
                    },
                    "name": "Generic - Baked Macaroni and Cheese, 14 grams"
                }
            ],
            "snacks": [
                {
                    "nutrition_information": {
                        "sodium": 80,
                        "carbohydrates": 3,
                        "calories": 170,
                        "fat": 2,
                        "sugar": 2,
                        "protein": 36,
                    },
                    "name": "Mrm - Dutch Chocolate Whey Protein, 2 scoop"
                },
                {
                    "nutrition_information": {
                        "sodium": 338,
                        "carbohydrates": 36,
                        "calories": 203,
                        "fat": 6,
                        "sugar": 34,
                        "protein": 2,
                    },
                    "name": "Drinks - Almond Milk (Vanilla), 18 oz"
                },
                {
                    "nutrition_information": {
                        "sodium": 0,
                        "carbohydrates": 48,
                        "calories": 588,
                        "fat": 0,
                        "sugar": 0,
                        "protein": 0,
                    },
                    "name": (
                        "Dogfish Head 90 Minute Ipa - "
                        "Beer, India Pale Ale, 24 oz"
                    )
                }
            ]
        }
        actual_dict = day.get_as_dict()
        self.assertEqual(
            expected_dict,
            actual_dict,
        )
        self.assertEqual(
            day.date,
            self.arbitrary_date1,
        )
        self.assertEqual(
            day.goals,
            {
                'calories': 2500,
                'carbohydrates': 343,
                'fat': 84,
                'protein': 93,
                'sodium': 2500,
                'sugar': 50,
            }
        )
        self.assertEqual(
            day.totals,
            {
                'calories': 2279,
                'carbohydrates': 203,
                'fat': 73,
                'protein': 78,
                'sodium': 2069,
                'sugar': 58,
            }
        )

    def test_get_day(self):
        """With unit_aware on, values are wrapped in Weight/Energy measures."""
        self.client.unit_aware = True
        with patch.object(self.client, '_get_document_for_url') as get_doc:
            get_doc.return_value = self.get_html_document('diary.html')
            day = self.client.get_date(self.arbitrary_date1)

        expected_dict = {
            "lunch": [],
            "breakfast": [
                {
                    "nutrition_information": {
                        "sodium": Weight(mg=380),
                        "carbohydrates": Weight(g=44),
                        "calories": Energy(Calorie=240),
                        "fat": Weight(g=6),
                        "sugar": Weight(g=8),
                        "protein": Weight(g=10)
                    },
                    "name": "Dave's Killer Bread - Blues Bread, 2 slice"
                },
                {
                    "nutrition_information": {
                        "sodium": Weight(mg=100),
                        "carbohydrates": Weight(g=0),
                        "calories": Energy(Calorie=100),
                        "fat": Weight(g=11),
                        "sugar": Weight(g=0),
                        "protein": Weight(g=0)
                    },
                    "name": (
                        "Earth Balance - "
                        "Natural Buttery Spread - Original, 1 tbsp (14g)"
                    )
                }
            ],
            "dinner": [
                {
                    "nutrition_information": {
                        "sodium": Weight(mg=5),
                        "carbohydrates": Weight(g=8),
                        "calories": Energy(Calorie=288),
                        "fat": Weight(g=0),
                        "sugar": Weight(g=0),
                        "protein": Weight(g=0)
                    },
                    "name": "Wine - Pinot Noir Wine, 12 oz"
                },
                {
                    "nutrition_information": {
                        "sodium": Weight(mg=1166),
                        "carbohydrates": Weight(g=64),
                        "calories": Energy(Calorie=690),
                        "fat": Weight(g=48),
                        "sugar": Weight(g=14),
                        "protein": Weight(g=30)
                    },
                    "name": "Generic - Baked Macaroni and Cheese, 14 grams"
                }
            ],
            "snacks": [
                {
                    "nutrition_information": {
                        "sodium": Weight(mg=80),
                        "carbohydrates": Weight(g=3),
                        "calories": Energy(Calorie=170),
                        "fat": Weight(g=2),
                        "sugar": Weight(g=2),
                        "protein": Weight(g=36)
                    },
                    "name": "Mrm - Dutch Chocolate Whey Protein, 2 scoop"
                },
                {
                    "nutrition_information": {
                        "sodium": Weight(mg=338),
                        "carbohydrates": Weight(g=36),
                        "calories": Energy(Calorie=203),
                        "fat": Weight(g=6),
                        "sugar": Weight(g=34),
                        "protein": Weight(g=2)
                    },
                    "name": "Drinks - Almond Milk (Vanilla), 18 oz"
                },
                {
                    "nutrition_information": {
                        "sodium": Weight(mg=0),
                        "carbohydrates": Weight(g=48),
                        "calories": Energy(Calorie=588),
                        "fat": Weight(g=0),
                        "sugar": Weight(g=0),
                        "protein": Weight(g=0)
                    },
                    "name": (
                        "Dogfish Head 90 Minute Ipa - "
                        "Beer, India Pale Ale, 24 oz"
                    )
                }
            ]
        }
        actual_dict = day.get_as_dict()
        self.assertEqual(
            expected_dict,
            actual_dict,
        )
        self.assertEqual(
            day.date,
            self.arbitrary_date1,
        )
        self.assertEqual(
            day.goals,
            {
                'calories': Energy(Calorie=2500),
                'carbohydrates': Weight(g=343),
                'fat': Weight(g=84),
                'protein': Weight(g=93),
                'sodium': Weight(mg=2500),
                'sugar': Weight(g=50),
            }
        )
        self.assertEqual(
            day.totals,
            {
                'calories': Energy(Calorie=2279),
                'carbohydrates': Weight(g=203),
                'fat': Weight(g=73),
                'protein': Weight(g=78),
                'sodium': Weight(mg=2069),
                'sugar': Weight(g=58),
            }
        )
| mit |
SummerLW/Perf-Insight-Report | third_party/gsutil/gslib/util.py | 8 | 40698 | # -*- coding: utf-8 -*-
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Static data and helper functions."""
from __future__ import absolute_import
import collections
import errno
import logging
import math
import multiprocessing
import os
import pkgutil
import re
import struct
import sys
import tempfile
import textwrap
import threading
import traceback
import xml.etree.ElementTree as ElementTree
import boto
from boto import config
import boto.auth
from boto.exception import NoAuthHandlerFound
from boto.gs.connection import GSConnection
from boto.provider import Provider
from boto.pyami.config import BotoConfigLocations
import httplib2
from oauth2client.client import HAS_CRYPTO
from retry_decorator import retry_decorator
import gslib
from gslib.exception import CommandException
from gslib.storage_url import StorageUrlFromString
from gslib.translation_helper import AclTranslation
from gslib.translation_helper import GenerationFromUrlAndString
from gslib.translation_helper import S3_ACL_MARKER_GUID
from gslib.translation_helper import S3_DELETE_MARKER_GUID
from gslib.translation_helper import S3_MARKER_GUIDS
# Detect platform types.
PLATFORM = str(sys.platform).lower()
IS_WINDOWS = 'win32' in PLATFORM
IS_CYGWIN = 'cygwin' in PLATFORM
IS_LINUX = 'linux' in PLATFORM
IS_OSX = 'darwin' in PLATFORM

# Windows free-space checks (CheckFreeSpace) need these ctypes names.
# pylint: disable=g-import-not-at-top
if IS_WINDOWS:
  from ctypes import c_int
  from ctypes import c_uint64
  from ctypes import c_char_p
  from ctypes import c_wchar_p
  from ctypes import windll
  from ctypes import POINTER
  from ctypes import WINFUNCTYPE
  from ctypes import WinError

# pylint: disable=g-import-not-at-top
try:
  # This module doesn't necessarily exist on Windows.
  import resource
  HAS_RESOURCE_MODULE = True
except ImportError, e:
  HAS_RESOURCE_MODULE = False

# Binary size constants.
ONE_KIB = 1024
ONE_MIB = 1024 * 1024
TWO_MIB = 2 * ONE_MIB
EIGHT_MIB = 8 * ONE_MIB
TEN_MIB = 10 * ONE_MIB
DEFAULT_FILE_BUFFER_SIZE = 8 * ONE_KIB
_DEFAULT_LINES = 25

# By default, the timeout for SSL read errors is infinite. This could
# cause gsutil to hang on network disconnect, so pick a more reasonable
# timeout.
SSL_TIMEOUT = 60

# Start with a progress callback every 64 KiB during uploads/downloads (JSON
# API). Callback implementation should back off until it hits the maximum size
# so that callbacks do not create huge amounts of log output.
START_CALLBACK_PER_BYTES = 1024*64
MAX_CALLBACK_PER_BYTES = 1024*1024*100

# Upload/download files in 8 KiB chunks over the HTTP connection.
TRANSFER_BUFFER_SIZE = 1024*8

# Default number of progress callbacks during transfer (XML API).
XML_PROGRESS_CALLBACKS = 10

# For files >= this size, output a message indicating that we're running an
# operation on the file (like hashing or gzipping) so it does not appear to the
# user that the command is hanging.
MIN_SIZE_COMPUTE_LOGGING = 100*1024*1024  # 100 MiB

NO_MAX = sys.maxint
UTF8 = 'utf-8'

# Parses 'major.minor<suffix>' gsutil version strings (see CompareVersions).
VERSION_MATCHER = re.compile(r'^(?P<maj>\d+)(\.(?P<min>\d+)(?P<suffix>.*))?')

RELEASE_NOTES_URL = 'https://pub.storage.googleapis.com/gsutil_ReleaseNotes.txt'

# Binary exponentiation strings.
# Tuple layout: (power of 2, byte suffix, bit suffix[, short alias]).
_EXP_STRINGS = [
    (0, 'B', 'bit'),
    (10, 'KiB', 'Kibit', 'K'),
    (20, 'MiB', 'Mibit', 'M'),
    (30, 'GiB', 'Gibit', 'G'),
    (40, 'TiB', 'Tibit', 'T'),
    (50, 'PiB', 'Pibit', 'P'),
    (60, 'EiB', 'Eibit', 'E'),
]

global manager  # pylint: disable=global-at-module-level

# Lock and registry for the on-disk CA-certs temp file lazily created by
# GetCertsFile(); GetCleanupFiles() reads the registry at exit.
certs_file_lock = threading.Lock()
configured_certs_files = []
def _GenerateSuffixRegex():
  """Creates a suffix regex for human-readable byte counts.

  Returns:
    Tuple (suffix_to_si, matcher) where suffix_to_si maps each lowercase
    suffix (e.g. 'kib', 'k') to its index in _EXP_STRINGS and matcher is a
    compiled regex with 'num' and 'suffix' groups.
  """
  suffix_to_si = {}
  suffixes = []
  for index, exp_tuple in enumerate(_EXP_STRINGS):
    for suffix in (name.lower() for name in exp_tuple[1:]):
      suffix_to_si[suffix] = index
      suffixes.append(suffix)
  pattern = r'(?P<num>\d*\.\d+|\d+)\s*(?P<suffix>%s)?' % '|'.join(suffixes)
  return suffix_to_si, re.compile(pattern)
# Lowercase-suffix lookup table and matcher used by HumanReadableToBytes.
SUFFIX_TO_SI, MATCH_HUMAN_BYTES = _GenerateSuffixRegex()

SECONDS_PER_DAY = 3600 * 24

# On Unix-like systems, we will set the maximum number of open files to avoid
# hitting the limit imposed by the OS. This number was obtained experimentally.
MIN_ACCEPTABLE_OPEN_FILES_LIMIT = 1000

GSUTIL_PUB_TARBALL = 'gs://pub/gsutil.tar.gz'

Retry = retry_decorator.retry  # pylint: disable=invalid-name

# Cache the values from this check such that they're available to all callers
# without needing to run all the checks again (some of these, such as calling
# multiprocessing.Manager(), are expensive operations).
cached_multiprocessing_is_available = None
cached_multiprocessing_is_available_stack_trace = None
cached_multiprocessing_is_available_message = None
# Enum class for specifying listing style.
class ListingStyle(object):
  """Constants naming the available listing output styles."""
  SHORT = 'SHORT'
  LONG = 'LONG'
  LONG_LONG = 'LONG_LONG'
def UsingCrcmodExtension(crcmod):
  """Returns a truthy value when the compiled crcmod extension is usable.

  The GSUtil:test_assume_fast_crcmod config value acts as a test override;
  otherwise we inspect crcmod.crcmod._usingExtension.
  """
  assume_fast = boto.config.get('GSUtil', 'test_assume_fast_crcmod', None)
  inner = getattr(crcmod, 'crcmod', None)
  return assume_fast or (inner and getattr(inner, '_usingExtension', None))
def CheckFreeSpace(path):
  """Return path/drive free space (in bytes)."""
  if IS_WINDOWS:
    try:
      # Prefer the wide-character (Unicode) variant of GetDiskFreeSpaceEx.
      # pylint: disable=invalid-name
      get_disk_free_space_ex = WINFUNCTYPE(c_int, c_wchar_p,
                                           POINTER(c_uint64),
                                           POINTER(c_uint64),
                                           POINTER(c_uint64))
      get_disk_free_space_ex = get_disk_free_space_ex(
          ('GetDiskFreeSpaceExW', windll.kernel32), (
              (1, 'lpszPathName'),
              (2, 'lpFreeUserSpace'),
              (2, 'lpTotalSpace'),
              (2, 'lpFreeSpace'),))
    except AttributeError:
      # Fall back to the ANSI (bytes) variant when the wide-char entry point
      # is unavailable.
      get_disk_free_space_ex = WINFUNCTYPE(c_int, c_char_p,
                                           POINTER(c_uint64),
                                           POINTER(c_uint64),
                                           POINTER(c_uint64))
      get_disk_free_space_ex = get_disk_free_space_ex(
          ('GetDiskFreeSpaceExA', windll.kernel32), (
              (1, 'lpszPathName'),
              (2, 'lpFreeUserSpace'),
              (2, 'lpTotalSpace'),
              (2, 'lpFreeSpace'),))

    def GetDiskFreeSpaceExErrCheck(result, unused_func, args):
      # A zero result signals failure; otherwise return the
      # lpFreeUserSpace out-parameter (args[1]).
      if not result:
        raise WinError()
      return args[1].value
    get_disk_free_space_ex.errcheck = GetDiskFreeSpaceExErrCheck

    return get_disk_free_space_ex(os.getenv('SystemDrive'))
  else:
    # f_bavail: free blocks available to unprivileged users;
    # f_frsize: fundamental filesystem block (fragment) size.
    (_, f_frsize, _, _, f_bavail, _, _, _, _, _) = os.statvfs(path)
    return f_frsize * f_bavail
def CreateDirIfNeeded(dir_path, mode=0o777):
  """Creates a directory, suppressing already-exists errors.

  Args:
    dir_path: Path of the directory to create (intermediate directories are
        created as needed).
    mode: Permission bits for any directories created (default 0o777; the
        0o form is the same value as the old Python-2-only literal 0777 and
        is valid on both Python 2.6+ and Python 3).
  """
  if not os.path.exists(dir_path):
    try:
      # Unfortunately, even though we catch and ignore EEXIST, this call will
      # output a (needless) error message (no way to avoid that in Python).
      os.makedirs(dir_path, mode)
    # Ignore 'already exists' in case user tried to start up several
    # resumable uploads concurrently from a machine where no tracker dir had
    # yet been created.
    except OSError as e:
      if e.errno != errno.EEXIST:
        raise
def DivideAndCeil(dividend, divisor):
  """Returns ceil(dividend / divisor).

  Uses pure integer arithmetic to avoid the pitfalls of floating point
  that could otherwise yield the wrong result for large numbers.

  Args:
    dividend: Dividend for the operation.
    divisor: Divisor for the operation.

  Returns:
    Quotient, rounded up to the next integer when there is a remainder.
  """
  quotient, remainder = divmod(dividend, divisor)
  return quotient + 1 if remainder else quotient
def GetGsutilStateDir():
  """Returns the location of the directory for gsutil state files.

  Certain operations, such as cross-process credential sharing and
  resumable transfer tracking, need a known location for state files which
  are created by gsutil as-needed. Only data that must live at a static
  location belongs here.

  Returns:
    Path to the (created-if-needed) gsutil state directory.
  """
  default_dir = os.path.expanduser(os.path.join('~', '.gsutil'))
  state_dir = config.get('GSUtil', 'state_dir', default_dir)
  CreateDirIfNeeded(state_dir)
  return state_dir
def GetCredentialStoreFilename():
  """Returns the path of the cross-process credential store file."""
  return os.path.join(GetGsutilStateDir(), 'credstore')
def GetGceCredentialCacheFilename():
  """Returns the path of the GCE credential cache file."""
  return os.path.join(GetGsutilStateDir(), 'gcecredcache')
def GetTabCompletionLogFilename():
  """Returns the path of the tab-completion log file."""
  return os.path.join(GetGsutilStateDir(), 'tab-completion-logs')
def GetTabCompletionCacheFilename():
  """Returns the path of the tab-completion cache file.

  The containing directory is created with owner-only permissions
  (0o700 -- same value as the old Python-2-only literal 0700, but valid
  syntax on both Python 2.6+ and Python 3).
  """
  tab_completion_dir = os.path.join(GetGsutilStateDir(), 'tab-completion')
  # Limit read permissions on the directory to owner for privacy.
  CreateDirIfNeeded(tab_completion_dir, mode=0o700)
  return os.path.join(tab_completion_dir, 'cache')
def PrintTrackerDirDeprecationWarningIfNeeded():
  """Warns on stderr when the deprecated resumable_tracker_dir option is set."""
  # TODO: Remove this along with the tracker_dir config value 1 year after
  # 4.6 release date. Use state_dir instead.
  if not config.has_option('GSUtil', 'resumable_tracker_dir'):
    return
  sys.stderr.write('Warning: you have set resumable_tracker_dir in your '
                   '.boto configuration file. This configuration option is '
                   'deprecated; please use the state_dir configuration '
                   'option instead.\n')
# Import-time side effects: emit the tracker-dir deprecation warning and
# make sure the state directory exists before it is used below.
PrintTrackerDirDeprecationWarningIfNeeded()
CreateDirIfNeeded(GetGsutilStateDir())

# Name of file where we keep the timestamp for the last time we checked whether
# a new version of gsutil is available.
LAST_CHECKED_FOR_GSUTIL_UPDATE_TIMESTAMP_FILE = (
    os.path.join(GetGsutilStateDir(), '.last_software_update_check'))
def HasConfiguredCredentials():
  """Determines if boto credential/config file exists.

  Returns:
    True when static credentials (Google/AWS HMAC, OAuth2 refresh token, or
    a service account with crypto support) are present in the boto config;
    otherwise the auth handler found for the Google provider (or None), so
    the result is truthy only when some usable auth plugin exists.
  """
  has_goog_creds = (config.has_option('Credentials', 'gs_access_key_id') and
                    config.has_option('Credentials', 'gs_secret_access_key'))
  has_amzn_creds = (config.has_option('Credentials', 'aws_access_key_id') and
                    config.has_option('Credentials', 'aws_secret_access_key'))
  has_oauth_creds = (
      config.has_option('Credentials', 'gs_oauth2_refresh_token'))
  # Service-account creds only count when the crypto libraries they need
  # are importable (HAS_CRYPTO).
  has_service_account_creds = (
      HAS_CRYPTO and
      config.has_option('Credentials', 'gs_service_client_id') and
      config.has_option('Credentials', 'gs_service_key_file'))

  if (has_goog_creds or has_amzn_creds or has_oauth_creds or
      has_service_account_creds):
    return True

  valid_auth_handler = None
  try:
    valid_auth_handler = boto.auth.get_auth_handler(
        GSConnection.DefaultHost, config, Provider('google'),
        requested_capability=['s3'])
    # Exclude the no-op auth handler as indicating credentials are configured.
    # Note we can't use isinstance() here because the no-op module may not be
    # imported so we can't get a reference to the class type.
    if getattr(getattr(valid_auth_handler, '__class__', None),
               '__name__', None) == 'NoOpAuth':
      valid_auth_handler = None
  except NoAuthHandlerFound:
    pass

  return valid_auth_handler
def ConfigureNoOpAuthIfNeeded():
  """Sets up no-op auth handler if no boto credentials are configured.

  Raises:
    CommandException: if a service account is configured but the required
        crypto libraries (PyOpenSSL / PyCrypto >= 2.6) are missing.
  """
  if not HasConfiguredCredentials():
    if (config.has_option('Credentials', 'gs_service_client_id')
        and not HAS_CRYPTO):
      # A service account is configured but unusable; tailor the message to
      # whether we are running under the Cloud SDK wrapper.
      if os.environ.get('CLOUDSDK_WRAPPER') == '1':
        raise CommandException('\n'.join(textwrap.wrap(
            'Your gsutil is configured with an OAuth2 service account, but '
            'you do not have PyOpenSSL or PyCrypto 2.6 or later installed. '
            'Service account authentication requires one of these libraries; '
            'please reactivate your service account via the gcloud auth '
            'command and ensure any gcloud packages necessary for '
            'service accounts are present.')))
      else:
        raise CommandException('\n'.join(textwrap.wrap(
            'Your gsutil is configured with an OAuth2 service account, but '
            'you do not have PyOpenSSL or PyCrypto 2.6 or later installed. '
            'Service account authentication requires one of these libraries; '
            'please install either of them to proceed, or configure a '
            'different type of credentials with "gsutil config".')))
    else:
      # With no boto config file the user can still access publicly readable
      # buckets and objects.
      from gslib import no_op_auth_plugin  # pylint: disable=unused-variable
def GetConfigFilePath():
  """Returns the first readable boto config path, or 'no config found'."""
  for candidate in BotoConfigLocations:
    try:
      # Opening (and immediately closing) the file proves it is readable.
      with open(candidate, 'r'):
        return candidate
    except IOError:
      continue
  return 'no config found'
def GetBotoConfigFileList():
  """Returns list of boto config files that exist."""
  # Copy before appending: the previous code appended AWS_CREDENTIAL_FILE
  # directly to boto.pyami.config.BotoConfigLocations, mutating shared
  # module state and accumulating a duplicate entry on every call.
  config_paths = list(boto.pyami.config.BotoConfigLocations)
  if 'AWS_CREDENTIAL_FILE' in os.environ:
    config_paths.append(os.environ['AWS_CREDENTIAL_FILE'])
  # Dict keys de-duplicate paths that appear more than once.
  config_files = {}
  for config_path in config_paths:
    if os.path.exists(config_path):
      config_files[config_path] = 1
  return list(config_files)
def GetCertsFile():
  """Configures and returns the CA Certificates file.

  If one is already configured, use it. Otherwise, amend the configuration
  (in boto.config) to use the cert roots distributed with gsutil.

  Returns:
    string filename of the certs file to use.
  """
  certs_file = boto.config.get('Boto', 'ca_certificates_file', None)
  if not certs_file:
    # Serialize creation of the fallback certs file across threads.
    with certs_file_lock:
      if configured_certs_files:
        # A previous call already materialized a temp certs file; reuse it.
        disk_certs_file = configured_certs_files[0]
      else:
        disk_certs_file = os.path.abspath(
            os.path.join(gslib.GSLIB_DIR, 'data', 'cacerts.txt'))
        if not os.path.exists(disk_certs_file):
          # If the file is not present on disk, this means the gslib module
          # doesn't actually exist on disk anywhere. This can happen if it's
          # being imported from a zip file. Unfortunately, we have to copy the
          # certs file to a local temp file on disk because the underlying SSL
          # socket requires it to be a filesystem path.
          certs_data = pkgutil.get_data('gslib', 'data/cacerts.txt')
          if not certs_data:
            raise CommandException('Certificates file not found. Please '
                                   'reinstall gsutil from scratch')
          fd, fname = tempfile.mkstemp(suffix='.txt', prefix='gsutil-cacerts')
          f = os.fdopen(fd, 'w')
          f.write(certs_data)
          f.close()
          # Register the temp file so GetCleanupFiles() can remove it at exit.
          configured_certs_files.append(fname)
          disk_certs_file = fname
      certs_file = disk_certs_file
  return certs_file
def GetCleanupFiles():
  """Returns a list of temp files to delete (if possible) when program exits."""
  # Currently only the lazily-created CA-certs temp file(s) need cleanup.
  return list(configured_certs_files)
def ProxyInfoFromEnvironmentVar(proxy_env_var):
  """Reads proxy info from the environment and converts to httplib2.ProxyInfo.

  Args:
    proxy_env_var: Name of the environment variable to read, such as
        http_proxy or https_proxy.

  Returns:
    httplib2.ProxyInfo built from the environment value; a no-proxy
    ProxyInfo when the variable is unset/empty or not an http(s) variable.
  """
  proxy_url = os.environ.get(proxy_env_var)
  if not proxy_url or not proxy_env_var.lower().startswith('http'):
    return httplib2.ProxyInfo(httplib2.socks.PROXY_TYPE_HTTP, None, 0)
  protocol = proxy_env_var.lower().split('_')[0]
  if not proxy_url.lower().startswith('http'):
    # proxy_info_from_url requires a protocol, which is always http or https.
    proxy_url = '%s://%s' % (protocol, proxy_url)
  return httplib2.proxy_info_from_url(proxy_url, method=protocol)
def GetNewHttp(http_class=httplib2.Http, **kwargs):
  """Creates and returns a new httplib2.Http instance.

  Args:
    http_class: Optional custom Http class to use.
    **kwargs: Arguments to pass to http_class constructor.

  Returns:
    An initialized httplib2.Http instance.
  """
  # proxy_type=3 corresponds to httplib2.socks.PROXY_TYPE_HTTP.
  proxy_info = httplib2.ProxyInfo(
      proxy_type=3,
      proxy_host=boto.config.get('Boto', 'proxy', None),
      proxy_port=boto.config.getint('Boto', 'proxy_port', 0),
      proxy_user=boto.config.get('Boto', 'proxy_user', None),
      proxy_pass=boto.config.get('Boto', 'proxy_pass', None),
      proxy_rdns=boto.config.get('Boto', 'proxy_rdns', False))

  if not (proxy_info.proxy_host and proxy_info.proxy_port):
    # Fall back to using the environment variable.
    for proxy_env_var in ['http_proxy', 'https_proxy', 'HTTPS_PROXY']:
      if proxy_env_var in os.environ and os.environ[proxy_env_var]:
        proxy_info = ProxyInfoFromEnvironmentVar(proxy_env_var)
        # Assume proxy_rnds is True if a proxy environment variable exists.
        proxy_info.proxy_rdns = boto.config.get('Boto', 'proxy_rdns', True)
        break

  # Some installers don't package a certs file with httplib2, so use the
  # one included with gsutil.
  kwargs['ca_certs'] = GetCertsFile()
  # Use a non-infinite SSL timeout to avoid hangs during network flakiness.
  kwargs['timeout'] = SSL_TIMEOUT
  http = http_class(proxy_info=proxy_info, **kwargs)
  http.disable_ssl_certificate_validation = (not config.getbool(
      'Boto', 'https_validate_certificates'))
  return http
# Retry for 10 minutes with exponential backoff, which corresponds to
# the maximum Downtime Period specified in the GCS SLA
# (https://cloud.google.com/storage/sla)
def GetNumRetries():
  """Returns the retry count from the Boto:num_retries config (default 23)."""
  return config.getint('Boto', 'num_retries', 23)
def GetMaxRetryDelay():
  """Returns the max retry backoff from Boto:max_retry_delay (default 32)."""
  return config.getint('Boto', 'max_retry_delay', 32)
# Resumable downloads and uploads make one HTTP call per chunk (and must be
# in multiples of 256KiB). Overridable for testing.
def GetJsonResumableChunkSize():
  """Returns the JSON API resumable transfer chunk size, in bytes.

  The configured value is rounded up to a multiple of 256 KiB; a configured
  value of 0 means "use the minimum" (256 KiB). Defaults to 100 MiB.
  """
  chunk_size = config.getint('GSUtil', 'json_resumable_chunk_size',
                             1024*1024*100)
  if chunk_size == 0:
    chunk_size = 1024*256
  elif chunk_size % (1024*256) != 0:
    # BUG FIX: the original condition read `chunk_size % 1024*256L != 0`,
    # which parses as `(chunk_size % 1024) * 256` because % and * share
    # precedence and associate left-to-right -- so multiples of 1024 that
    # were not multiples of 256 KiB escaped rounding. Parenthesized now.
    # (The py2-only L suffixes are dropped; Python 2.6+ auto-promotes.)
    chunk_size += (1024*256 - (chunk_size % (1024*256)))
  return chunk_size
def _RoundToNearestExponent(num):
  """Returns (index into _EXP_STRINGS, num scaled by that power of two).

  The scaled value is rounded to two decimal places.
  """
  index = 0
  # Walk up the exponent table while the next unit still fits.
  while index + 1 < len(_EXP_STRINGS) and num >= 2 ** _EXP_STRINGS[index + 1][0]:
    index += 1
  scaled = round(float(num) / 2 ** _EXP_STRINGS[index][0], 2)
  return index, scaled
def MakeHumanReadable(num):
  """Generates human readable string for a number of bytes.

  Args:
    num: The number, in bytes.

  Returns:
    A string form of the number using size abbreviations (KiB, MiB, etc.).
  """
  exp_index, rounded = _RoundToNearestExponent(num)
  byte_suffix = _EXP_STRINGS[exp_index][1]
  return '%g %s' % (rounded, byte_suffix)
def MakeBitsHumanReadable(num):
  """Generates human readable string for a number of bits.

  Args:
    num: The number, in bits.

  Returns:
    A string form of the number using bit size abbreviations (kbit, Mbit, etc.)
  """
  exp_index, rounded = _RoundToNearestExponent(num)
  bit_suffix = _EXP_STRINGS[exp_index][2]
  return '%g %s' % (rounded, bit_suffix)
def HumanReadableToBytes(human_string):
  """Tries to convert a human-readable string to a number of bytes.

  Args:
    human_string: A string supplied by user, e.g. '1M', '3 GiB'.

  Returns:
    An integer containing the number of bytes.

  Raises:
    ValueError: on an invalid string.
  """
  normalized = human_string.lower()
  match = MATCH_HUMAN_BYTES.match(normalized)
  if not match:
    raise ValueError('Invalid byte string specified: %s' % normalized)
  num = float(match.group('num'))
  suffix = match.group('suffix')
  if suffix:
    # Scale by the power of two associated with the suffix.
    num *= 2.0 ** _EXP_STRINGS[SUFFIX_TO_SI[suffix]][0]
  return int(round(num))
def Percentile(values, percent, key=lambda x: x):
  """Find the percentile of a list of values.

  Taken from: http://code.activestate.com/recipes/511478/

  Args:
    values: a list of numeric values. Note that the values MUST BE already
        sorted.
    percent: a float value from 0.0 to 1.0.
    key: optional key function to compute value from each element of the list
        of values.

  Returns:
    The percentile of the values, or None for an empty list.
  """
  if not values:
    return None
  rank = (len(values) - 1) * percent
  lower = math.floor(rank)
  upper = math.ceil(rank)
  if lower == upper:
    # Rank falls exactly on an element.
    return key(values[int(rank)])
  # Linear interpolation between the two bracketing elements.
  return (key(values[int(lower)]) * (upper - rank) +
          key(values[int(upper)]) * (rank - lower))
def RemoveCRLFFromString(input_str):
  """Returns the input string with all \\n and \\r removed."""
  return input_str.replace('\r', '').replace('\n', '')
def UnaryDictToXml(message):
  """Generates XML representation of a nested dict.

  This dict contains exactly one top-level entry and an arbitrary number of
  2nd-level entries, e.g. capturing a WebsiteConfiguration message.

  Args:
    message: The dict encoding the message.

  Returns:
    XML string representation of the input dict.

  Raises:
    Exception: if dict contains more than one top-level entry.
  """
  if len(message) != 1:
    raise Exception('Expected dict of size 1, got size %d' % len(message))

  # BUG FIX: was `message.items()[0]`, which fails on Python 3 where
  # dict.items() is a non-indexable view; next(iter(...)) behaves
  # identically on Python 2.
  name, content = next(iter(message.items()))
  element_type = ElementTree.Element(name)
  # Children are emitted in sorted key order for deterministic output.
  for element_property, value in sorted(content.items()):
    node = ElementTree.SubElement(element_type, element_property)
    node.text = value
  return ElementTree.tostring(element_type)
def LookUpGsutilVersion(gsutil_api, url_str):
  """Looks up the gsutil version of the specified gsutil tarball URL.

  Version is specified in the metadata field set on that object.

  Args:
    gsutil_api: gsutil Cloud API to use when retrieving gsutil tarball.
    url_str: tarball URL to retrieve (such as 'gs://pub/gsutil.tar.gz').

  Returns:
    Version string if URL is a cloud URL containing x-goog-meta-gsutil-version
    metadata, else None.
  """
  url = StorageUrlFromString(url_str)
  if not url.IsCloudUrl():
    return None
  obj = gsutil_api.GetObjectMetadata(url.bucket_name, url.object_name,
                                     provider=url.scheme,
                                     fields=['metadata'])
  if not (obj.metadata and obj.metadata.additionalProperties):
    return None
  for prop in obj.metadata.additionalProperties:
    if prop.key == 'gsutil_version':
      return prop.value
  return None
def GetGsutilVersionModifiedTime():
  """Returns unix timestamp of when the VERSION file was last modified."""
  version_file = gslib.VERSION_FILE
  if not version_file:
    # No VERSION file available (e.g. non-disk install); report epoch.
    return 0
  return int(os.path.getmtime(version_file))
def IsRunningInteractively():
  """Returns True if currently running interactively on a TTY."""
  return all(stream.isatty() for stream in (sys.stdout, sys.stderr, sys.stdin))
def _HttpsValidateCertificatesEnabled():
  """Returns the Boto https_validate_certificates setting (default True).

  Renamed from the misspelled `_HttpsValidateCertifcatesEnabled`; the
  module-private helper's only call site is directly below.
  """
  return config.get('Boto', 'https_validate_certificates', True)

# Both settings are evaluated once at import time; later config changes
# do not affect these constants.
CERTIFICATE_VALIDATION_ENABLED = _HttpsValidateCertificatesEnabled()


def _BotoIsSecure():
  """Returns the Boto is_secure setting (default True)."""
  return config.get('Boto', 'is_secure', True)

BOTO_IS_SECURE = _BotoIsSecure()
def ResumableThreshold():
  """Returns the min object size (bytes) for resumable transfers.

  Read from the GSUtil:resumable_threshold boto config value; defaults to
  8 MiB.
  """
  return config.getint('GSUtil', 'resumable_threshold', EIGHT_MIB)
def AddAcceptEncoding(headers):
  """Adds accept-encoding:gzip to the dictionary of headers.

  Existing 'accept-encoding' entries are left untouched.
  """
  headers.setdefault('accept-encoding', 'gzip')
# pylint: disable=too-many-statements
def PrintFullInfoAboutObject(bucket_listing_ref, incl_acl=True):
  """Print full info for given object (like what displays for gsutil ls -L).

  Args:
    bucket_listing_ref: BucketListingRef being listed.
                        Must have ref_type OBJECT and a populated root_object
                        with the desired fields.
    incl_acl: True if ACL info should be output.

  Returns:
    Tuple (number of objects, object_length)

  Raises:
    Exception: if calling bug encountered.
  """
  url_str = bucket_listing_ref.url_string
  storage_url = StorageUrlFromString(url_str)
  obj = bucket_listing_ref.root_object

  # S3 delete markers carry no content; count them as zero objects/bytes and
  # annotate the printed URL.
  if (obj.metadata and S3_DELETE_MARKER_GUID in
      obj.metadata.additionalProperties):
    num_bytes = 0
    num_objs = 0
    url_str += '<DeleteMarker>'
  else:
    num_bytes = obj.size
    num_objs = 1

  print '%s:' % url_str.encode(UTF8)
  if obj.updated:
    print '\tCreation time:\t\t%s' % obj.updated.strftime(
        '%a, %d %b %Y %H:%M:%S GMT')
  if obj.cacheControl:
    print '\tCache-Control:\t\t%s' % obj.cacheControl
  if obj.contentDisposition:
    print '\tContent-Disposition:\t\t%s' % obj.contentDisposition
  if obj.contentEncoding:
    print '\tContent-Encoding:\t\t%s' % obj.contentEncoding
  if obj.contentLanguage:
    print '\tContent-Language:\t%s' % obj.contentLanguage
  print '\tContent-Length:\t\t%s' % obj.size
  print '\tContent-Type:\t\t%s' % obj.contentType
  if obj.componentCount:
    print '\tComponent-Count:\t%d' % obj.componentCount
  # Separate user metadata from gsutil's internal S3 marker properties.
  marker_props = {}
  if obj.metadata and obj.metadata.additionalProperties:
    non_marker_props = []
    for add_prop in obj.metadata.additionalProperties:
      if add_prop.key not in S3_MARKER_GUIDS:
        non_marker_props.append(add_prop)
      else:
        marker_props[add_prop.key] = add_prop.value
    if non_marker_props:
      print '\tMetadata:'
      for ap in non_marker_props:
        meta_string = '\t\t%s:\t\t%s' % (ap.key, ap.value)
        print meta_string.encode(UTF8)
  if obj.crc32c: print '\tHash (crc32c):\t\t%s' % obj.crc32c
  if obj.md5Hash: print '\tHash (md5):\t\t%s' % obj.md5Hash
  print '\tETag:\t\t\t%s' % obj.etag.strip('"\'')
  if obj.generation:
    generation_str = GenerationFromUrlAndString(storage_url, obj.generation)
    print '\tGeneration:\t\t%s' % generation_str
  if obj.metageneration:
    print '\tMetageneration:\t\t%s' % obj.metageneration
  if incl_acl:
    # JSON API won't return acls as part of the response unless we have
    # full control scope
    if obj.acl:
      print '\tACL:\t\t%s' % AclTranslation.JsonFromMessage(obj.acl)
    elif S3_ACL_MARKER_GUID in marker_props:
      # S3 ACLs are carried through in a marker metadata property.
      print '\tACL:\t\t%s' % marker_props[S3_ACL_MARKER_GUID]
    else:
      print ('\tACL:\t\t\tACCESS DENIED. Note: you need OWNER '
             'permission\n\t\t\t\ton the object to read its ACL.')
  return (num_objs, num_bytes)
def CompareVersions(first, second):
  """Compares the first and second gsutil version strings.

  For example, 3.33 > 3.7, and 4.1 is a greater major version than 3.33.
  Does not handle multiple periods (e.g. 3.3.4) or complicated suffixes
  (e.g., 3.3RC4 vs. 3.3RC5). A version string with a suffix is treated as
  less than its non-suffix counterpart (e.g. 3.32 > 3.32pre).

  Args:
    first: First gsutil version string.
    second: Second gsutil version string.

  Returns:
    (g, m):
      g is True if first known to be greater than second, else False.
      m is True if first known to be greater by at least 1 major version,
        else False.
  """
  # Same pattern as the module-level VERSION_MATCHER, inlined here so the
  # function is self-contained.
  version_re = re.compile(r'^(?P<maj>\d+)(\.(?P<min>\d+)(?P<suffix>.*))?')
  parsed1 = version_re.match(str(first))
  parsed2 = version_re.match(str(second))
  # If passed strings we don't know how to handle, be conservative.
  if not parsed1 or not parsed2:
    return (False, False)

  maj1 = int(parsed1.group('maj'))
  min1 = int(parsed1.group('min') or 0)
  suffix1 = parsed1.group('suffix')
  maj2 = int(parsed2.group('maj'))
  min2 = int(parsed2.group('min') or 0)
  suffix2 = parsed2.group('suffix')

  if (maj1, min1) == (maj2, min2):
    # Same numeric version: only a suffix-free vs. suffixed pair differs.
    return (bool(suffix2) and not suffix1, False)
  if maj1 > maj2:
    return (True, True)
  if maj1 == maj2 and min1 > min2:
    return (True, False)
  return (False, False)
def _IncreaseSoftLimitForResource(resource_name, fallback_value):
"""Sets a new soft limit for the maximum number of open files.
The soft limit is used for this process (and its children), but the
hard limit is set by the system and cannot be exceeded.
We will first try to set the soft limit to the hard limit's value; if that
fails, we will try to set the soft limit to the fallback_value iff this would
increase the soft limit.
Args:
resource_name: Name of the resource to increase the soft limit for.
fallback_value: Fallback value to be used if we couldn't set the
soft value to the hard value (e.g., if the hard value
is "unlimited").
Returns:
Current soft limit for the resource (after any changes we were able to
make), or -1 if the resource doesn't exist.
"""
# Get the value of the resource.
try:
(soft_limit, hard_limit) = resource.getrlimit(resource_name)
except (resource.error, ValueError):
# The resource wasn't present, so we can't do anything here.
return -1
# Try to set the value of the soft limit to the value of the hard limit.
if hard_limit > soft_limit: # Some OS's report 0 for "unlimited".
try:
resource.setrlimit(resource_name, (hard_limit, hard_limit))
return hard_limit
except (resource.error, ValueError):
# We'll ignore this and try the fallback value.
pass
# Try to set the value of the soft limit to the fallback value.
if soft_limit < fallback_value:
try:
resource.setrlimit(resource_name, (fallback_value, hard_limit))
return fallback_value
except (resource.error, ValueError):
# We couldn't change the soft limit, so just report the current
# value of the soft limit.
return soft_limit
else:
return soft_limit
def GetCloudApiInstance(cls, thread_state=None):
  """Gets a gsutil Cloud API instance.

  Since Cloud API implementations are not guaranteed to be thread-safe, each
  thread needs its own instance. These instances are passed to each thread
  via the thread pool logic in command.

  Args:
    cls: Command class to be used for single-threaded case.
    thread_state: Per thread state from this thread containing a gsutil
                  Cloud API instance.

  Returns:
    gsutil Cloud API instance.
  """
  # Per-thread state wins; otherwise fall back to the command's shared API.
  if thread_state:
    return thread_state
  return cls.gsutil_api
def GetFileSize(fp, position_to_eof=False):
  """Returns size of file, optionally leaving fp positioned at EOF."""
  # Remember where the caller's cursor was so it can be restored afterwards,
  # unless the caller explicitly asked to be left at EOF.
  restore_pos = None if position_to_eof else fp.tell()
  fp.seek(0, os.SEEK_END)
  size = fp.tell()
  if restore_pos is not None:
    fp.seek(restore_pos)
  return size
def GetStreamFromFileUrl(storage_url):
  """Returns a readable stream for a file URL: stdin for stream URLs,
  otherwise the named file opened in binary mode."""
  # Stream URLs have no named file to open, so hand back stdin directly.
  return (sys.stdin if storage_url.IsStream()
          else open(storage_url.object_name, 'rb'))
def UrlsAreForSingleProvider(url_args):
  """Tests whether the URLs are all for a single provider.

  Args:
    url_args: Strings to check.

  Returns:
    True if URLs are for single provider, False otherwise.
  """
  provider = None
  for url_str in url_args:
    scheme = StorageUrlFromString(url_str).scheme
    if not provider:
      # First (or first non-empty) scheme seen becomes the reference.
      provider = scheme
    elif scheme != provider:
      return False
  # An empty argument list yields no provider, hence False.
  return provider is not None
def HaveFileUrls(args_to_check):
  """Checks whether args_to_check contain any file URLs.

  Args:
    args_to_check: Command-line argument subset to check.

  Returns:
    True if args_to_check contains any file URLs.
  """
  # any() short-circuits on the first file URL, like the original loop.
  return any(StorageUrlFromString(url_str).IsFileUrl()
             for url_str in args_to_check)
def HaveProviderUrls(args_to_check):
  """Checks whether args_to_check contains any provider URLs (like 'gs://').

  Args:
    args_to_check: Command-line argument subset to check.

  Returns:
    True if args_to_check contains any provider URLs.
  """
  for parsed in (StorageUrlFromString(url_str) for url_str in args_to_check):
    # A provider URL is a cloud URL naming only the scheme (no bucket/object).
    if parsed.IsCloudUrl() and parsed.IsProvider():
      return True
  return False
# This must be defined at the module level for pickling across processes.
# Fields:
#   is_available: True iff the multiprocessing module can be used.
#   stack_trace: traceback text from the failed capability probe, or None.
MultiprocessingIsAvailableResult = collections.namedtuple(
    'MultiprocessingIsAvailableResult', ['is_available', 'stack_trace'])
def CheckMultiprocessingAvailableAndInit(logger=None):
  """Checks if multiprocessing is available.

  There are some environments in which there is no way to use multiprocessing
  logic that's built into Python (e.g., if /dev/shm is not available, then
  we can't create semaphores). This simply tries out a few things that will be
  needed to make sure the environment can support the pieces of the
  multiprocessing module that we need.

  If multiprocessing is available, this performs necessary initialization for
  multiprocessing. See gslib.command.InitializeMultiprocessingVariables for
  an explanation of why this is necessary.

  Args:
    logger: logging.logger to use for debug output.

  Returns:
    (multiprocessing_is_available, stack_trace):
      multiprocessing_is_available: True iff the multiprocessing module is
                                    available for use.
      stack_trace: The stack trace generated by the call we tried that failed.
  """
  # pylint: disable=global-variable-undefined
  global cached_multiprocessing_is_available
  global cached_multiprocessing_check_stack_trace
  global cached_multiprocessing_is_available_message
  # Memoized result from a previous probe: re-emit diagnostics and return it.
  if cached_multiprocessing_is_available is not None:
    if logger:
      logger.debug(cached_multiprocessing_check_stack_trace)
      logger.warn(cached_multiprocessing_is_available_message)
    return MultiprocessingIsAvailableResult(
        is_available=cached_multiprocessing_is_available,
        stack_trace=cached_multiprocessing_check_stack_trace)

  # Note: the Windows result is intentionally NOT cached in the module-level
  # globals; every call takes this early return.
  if IS_WINDOWS:
    message = """
Multiple processes are not supported on Windows. Operations requesting
parallelism will be executed with multiple threads in a single process only.
"""
    if logger:
      logger.warn(message)
    return MultiprocessingIsAvailableResult(is_available=False,
                                            stack_trace=None)

  stack_trace = None
  multiprocessing_is_available = True
  message = """
You have requested multiple processes for an operation, but the
required functionality of Python\'s multiprocessing module is not available.
Operations requesting parallelism will be executed with multiple threads in a
single process only.
"""
  try:
    # Fails if /dev/shm (or some equivalent thereof) is not available for use
    # (e.g., there's no implementation, or we can't write to it, etc.).
    try:
      multiprocessing.Value('i', 0)
    except:
      message += """
Please ensure that you have write access to both /dev/shm and /run/shm.
"""
      raise  # We'll handle this in one place below.

    # Manager objects and Windows are generally a pain to work with, so try it
    # out as a sanity check. This definitely works on some versions of Windows,
    # but it's certainly possible that there is some unknown configuration for
    # which it won't.
    global manager  # pylint: disable=global-variable-undefined
    manager = multiprocessing.Manager()

    # Check that the max number of open files is reasonable. Always check this
    # after we're sure that the basic multiprocessing functionality is
    # available, since this won't matter unless that's true.
    limit = -1
    if HAS_RESOURCE_MODULE:
      # Try to set this with both resource names - RLIMIT_NOFILE for most Unix
      # platforms, and RLIMIT_OFILE for BSD. Ignore AttributeError because the
      # "resource" module is not guaranteed to know about these names.
      try:
        limit = max(limit,
                    _IncreaseSoftLimitForResource(
                        resource.RLIMIT_NOFILE,
                        MIN_ACCEPTABLE_OPEN_FILES_LIMIT))
      except AttributeError:
        pass
      try:
        limit = max(limit,
                    _IncreaseSoftLimitForResource(
                        resource.RLIMIT_OFILE, MIN_ACCEPTABLE_OPEN_FILES_LIMIT))
      except AttributeError:
        pass

    if limit < MIN_ACCEPTABLE_OPEN_FILES_LIMIT:
      message += ("""
Your max number of open files, %s, is too low to allow safe multiprocessing.
On Linux you can fix this by adding something like "ulimit -n 10000" to your
~/.bashrc or equivalent file and opening a new terminal.

On MacOS, you may also need to run a command like this once (in addition to the
above instructions), which might require a restart of your system to take
effect:
  launchctl limit maxfiles 10000

Alternatively, edit /etc/launchd.conf with something like:
  limit maxfiles 10000 10000

""" % limit)
      raise Exception('Max number of open files, %s, is too low.' % limit)
  except:  # pylint: disable=bare-except
    # Any of the probes above failing means we fall back to threads only.
    stack_trace = traceback.format_exc()
    multiprocessing_is_available = False
    if logger is not None:
      logger.debug(stack_trace)
      logger.warn(message)

  # Set the cached values so that we never need to do this check again.
  cached_multiprocessing_is_available = multiprocessing_is_available
  cached_multiprocessing_check_stack_trace = stack_trace
  cached_multiprocessing_is_available_message = message
  return MultiprocessingIsAvailableResult(
      is_available=cached_multiprocessing_is_available,
      stack_trace=cached_multiprocessing_check_stack_trace)
def CreateLock():
  """Returns either a multiprocessing lock or a threading lock.

  Use Multiprocessing lock iff we have access to the parts of the
  multiprocessing module that are necessary to enable parallelism in
  operations.

  Returns:
    Multiprocessing or threading lock.
  """
  # When multiprocessing isn't usable we only ever run threads, so a plain
  # threading lock suffices.
  if not CheckMultiprocessingAvailableAndInit().is_available:
    return threading.Lock()
  return manager.Lock()
def IsCloudSubdirPlaceholder(url, blr=None):
  """Determines if URL is a cloud subdir placeholder.

  This function is needed because GUI tools (like the GCS cloud console) allow
  users to create empty "folders" by creating a placeholder object; and parts
  of gsutil need to treat those placeholder objects specially. For example,
  gsutil rsync needs to avoid downloading those objects because they can cause
  conflicts (see comments in rsync command for details).

  We currently detect two cases:
    - Cloud objects whose name ends with '_$folder$'
    - Cloud objects whose name ends with '/'

  Args:
    url: The URL to be checked.
    blr: BucketListingRef to check, or None if not available.
         If None, size won't be checked.

  Returns:
    True/False.
  """
  if not url.IsCloudUrl():
    return False
  name = url.url_string
  if name.endswith('_$folder$'):
    return True
  # Trailing-slash objects only count as placeholders when they are empty
  # (or when no listing ref is available to check the size).
  obj_size = blr.root_object.size if (blr and blr.IsObject()) else 0
  return name.endswith('/') and obj_size == 0
def GetTermLines():
  """Returns number of terminal lines."""
  # fcntl isn't supported in Windows.
  try:
    import fcntl  # pylint: disable=g-import-not-at-top
    import termios  # pylint: disable=g-import-not-at-top
  except ImportError:
    return _DEFAULT_LINES

  def _RowsForFd(fd):
    # TIOCGWINSZ yields (rows, cols) as two shorts; we only need rows.
    try:
      return struct.unpack(
          'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))[0]
    except:  # pylint: disable=bare-except
      return 0  # Failure (so will retry on different file descriptor below).

  # Try stdin, stdout, then stderr, in that order.
  rows = _RowsForFd(0) or _RowsForFd(1) or _RowsForFd(2)
  if not rows:
    # Last resort: ask the controlling terminal directly.
    try:
      ctty_fd = os.open(os.ctermid(), os.O_RDONLY)
      rows = _RowsForFd(ctty_fd)
      os.close(ctty_fd)
    except:  # pylint: disable=bare-except
      pass
  return int(rows or os.environ.get('LINES', _DEFAULT_LINES))
class GsutilStreamHandler(logging.StreamHandler):
  """A subclass of StreamHandler for use in gsutil."""

  def flush(self):
    # Python 2.6's logging module flushes every stream handler at exit; if
    # the handler's stream was already closed (our unit tests temporarily
    # redirect stderr to a file that is closed by shutdown time), that flush
    # raises ValueError. Python 2.7 fixed this upstream; swallow it here to
    # keep 2.6 quiet.
    try:
      logging.StreamHandler.flush(self)
    except ValueError:
      pass
def StdinIterator():
  """A generator function that returns lines from stdin."""
  for raw_line in sys.stdin:
    # Strip the trailing CRLF (and any other trailing whitespace).
    yield raw_line.rstrip()
| bsd-3-clause |
bahattincinic/arguman.org | web/blog/migrations/0001_initial.py | 7 | 1191 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import markitup.fields
class Migration(migrations.Migration):
    """Initial migration: creates the blog ``Post`` model."""

    # First migration for this app, so there are no dependencies.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=255, verbose_name='Name')),
                ('slug', models.SlugField(unique=True, max_length=255, verbose_name='Slug')),
                # Raw markup source; the rendered HTML lives in
                # _content_rendered below (no_rendered_field=True suppresses
                # markitup's auto-generated companion column).
                ('content', markitup.fields.MarkupField(no_rendered_field=True, verbose_name='Content')),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): auto_now and auto_now_add together is unusual;
                # recent Django versions reject this combination -- confirm
                # the intent is "set on create AND update" (auto_now alone).
                ('date_modified', models.DateTimeField(auto_now=True, auto_now_add=True)),
                ('is_published', models.BooleanField(default=True, verbose_name='Published')),
                ('_content_rendered', models.TextField(editable=False, blank=True)),
            ],
            options={
                # Newest posts first by default.
                'ordering': ('-date_created',),
            },
            bases=(models.Model,),
        ),
    ]
| mit |
benroeder/moviepy | moviepy/video/compositing/on_color.py | 16 | 1041 | from moviepy.video.VideoClip import ColorClip
from moviepy.video.compositing.CompositeVideoClip import CompositeVideoClip
def on_color(clip, size=None, color=(0, 0, 0), pos=None, col_opacity=None):
    """
    Returns a clip made of the current clip overlaid on a color
    clip of a possibly bigger size. Can serve to flatten transparent
    clips (ideal for previewing clips with masks).

    :param size: size of the final clip. By default it will be the
        size of the current clip.
    :param color: the background color of the final clip.
    :param pos: the position of the clip in the final clip. Defaults
        to ``'center'``.
    :param col_opacity: if set, the color background is given this
        opacity (via a mask), making the added zones semi-transparent.
    """
    if size is None:
        size = clip.size
    if pos is None:
        pos = 'center'
    colorclip = ColorClip(size, color)

    if col_opacity:
        colorclip = colorclip.with_mask().set_opacity(col_opacity)

    # Color layer first so the clip is composited on top of it; the composite
    # is marked transparent only when an opacity mask was attached.
    return CompositeVideoClip([colorclip, clip.set_pos(pos)],
                              transparent=(col_opacity is not None))
| mit |
jamestwebber/scipy | scipy/sparse/linalg/isolve/iterative.py | 6 | 27367 | """Iterative methods for solving linear systems"""
from __future__ import division, print_function, absolute_import
__all__ = ['bicg','bicgstab','cg','cgs','gmres','qmr']
import warnings
import numpy as np
from . import _iterative
from scipy.sparse.linalg.interface import LinearOperator
from .utils import make_system
from scipy._lib._util import _aligned_zeros
from scipy._lib._threadsafety import non_reentrant
# Maps numpy dtype characters to the single-letter prefix used to select the
# matching Fortran reverse-communication routine (e.g. 'd' -> 'dcgrevcom').
_type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'}
# Part of the docstring common to all iterative solvers
common_doc1 = \
"""
Parameters
----------
A : {sparse matrix, dense matrix, LinearOperator}"""
common_doc2 = \
"""b : {array, matrix}
Right hand side of the linear system. Has shape (N,) or (N,1).
Returns
-------
x : {array, matrix}
The converged solution.
info : integer
Provides convergence information:
0 : successful exit
>0 : convergence to tolerance not achieved, number of iterations
<0 : illegal input or breakdown
Other Parameters
----------------
x0 : {array, matrix}
Starting guess for the solution.
tol, atol : float, optional
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
The default for ``atol`` is ``'legacy'``, which emulates
a different legacy behavior.
.. warning::
The default value for `atol` will be changed in a future release.
For future compatibility, specify `atol` explicitly.
maxiter : integer
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse matrix, dense matrix, LinearOperator}
Preconditioner for A. The preconditioner should approximate the
inverse of A. Effective preconditioning dramatically improves the
rate of convergence, which implies that fewer iterations are needed
to reach a given error tolerance.
callback : function
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
"""
def _stoptest(residual, atol):
"""
Successful termination condition for the solvers.
"""
resid = np.linalg.norm(residual)
if resid <= atol:
return resid, 1
else:
return resid, 0
def _get_atol(tol, atol, bnrm2, get_residual, routine_name):
"""
Parse arguments for absolute tolerance in termination condition.
Parameters
----------
tol, atol : object
The arguments passed into the solver routine by user.
bnrm2 : float
2-norm of the rhs vector.
get_residual : callable
Callable ``get_residual()`` that returns the initial value of
the residual.
routine_name : str
Name of the routine.
"""
if atol is None:
warnings.warn("scipy.sparse.linalg.{name} called without specifying `atol`. "
"The default value will be changed in a future release. "
"For compatibility, specify a value for `atol` explicitly, e.g., "
"``{name}(..., atol=0)``, or to retain the old behavior "
"``{name}(..., atol='legacy')``".format(name=routine_name),
category=DeprecationWarning, stacklevel=4)
atol = 'legacy'
tol = float(tol)
if atol == 'legacy':
# emulate old legacy behavior
resid = get_residual()
if resid <= tol:
return 'exit'
if bnrm2 == 0:
return tol
else:
return tol * float(bnrm2)
else:
return max(float(atol), tol * float(bnrm2))
def set_docstring(header, Ainfo, footer='', atol_default='0'):
    """Returns a decorator that assembles a solver docstring from the shared
    boilerplate (common_doc1/common_doc2) plus a per-solver description."""
    def combine(fn):
        # Indent every line of the A-matrix description so it sits under the
        # 'A : ...' field opened by common_doc1.
        a_desc = ' ' + Ainfo.replace('\n', '\n ')
        fn.__doc__ = '\n'.join((header, common_doc1, a_desc, common_doc2,
                                footer))
        return fn
    return combine
@set_docstring('Use BIConjugate Gradient iteration to solve ``Ax = b``.',
               'The real or complex N-by-N matrix of the linear system.\n'
               'Alternatively, ``A`` can be a linear operator which can\n'
               'produce ``Ax`` and ``A^T x`` using, e.g.,\n'
               '``scipy.sparse.linalg.LinearOperator``.')
@non_reentrant()
def bicg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
    # Normalize A/M/x0/b to compatible operators/arrays; postprocess() maps
    # the solution back to the caller's input type.
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    n = len(b)
    if maxiter is None:
        maxiter = n*10

    matvec, rmatvec = A.matvec, A.rmatvec
    psolve, rpsolve = M.matvec, M.rmatvec
    ltr = _type_conv[x.dtype.char]
    # dtype-specific reverse-communication Fortran kernel (e.g. 'dbicgrevcom').
    revcom = getattr(_iterative, ltr + 'bicgrevcom')

    get_residual = lambda: np.linalg.norm(matvec(x) - b)
    atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'bicg')
    if atol == 'exit':
        # Legacy tolerance already satisfied by the initial guess.
        return postprocess(x), 0

    resid = atol
    ndx1 = 1
    ndx2 = -1
    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
    work = _aligned_zeros(6*n, dtype=x.dtype)
    ijob = 1
    info = 0
    ftflag = True
    iter_ = maxiter
    # Reverse-communication loop: the Fortran kernel returns an ijob code
    # asking us to perform one operation, then we call it again.
    while True:
        olditer = iter_
        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
            revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
        if callback is not None and iter_ > olditer:
            callback(x)
        # ndx1/ndx2 are 1-based offsets of the requested vectors in `work`.
        slice1 = slice(ndx1-1, ndx1-1+n)
        slice2 = slice(ndx2-1, ndx2-1+n)
        if (ijob == -1):
            # Kernel finished; deliver a final callback and stop.
            if callback is not None:
                callback(x)
            break
        elif (ijob == 1):
            # work[slice2] := sclr2*work[slice2] + sclr1 * A @ work[slice1]
            work[slice2] *= sclr2
            work[slice2] += sclr1*matvec(work[slice1])
        elif (ijob == 2):
            # Same update, but with A^T.
            work[slice2] *= sclr2
            work[slice2] += sclr1*rmatvec(work[slice1])
        elif (ijob == 3):
            # Apply the preconditioner.
            work[slice1] = psolve(work[slice2])
        elif (ijob == 4):
            # Apply the transposed preconditioner.
            work[slice1] = rpsolve(work[slice2])
        elif (ijob == 5):
            work[slice2] *= sclr2
            work[slice2] += sclr1*matvec(x)
        elif (ijob == 6):
            # Convergence check on the current residual vector.
            if ftflag:
                info = -1
                ftflag = False
            resid, info = _stoptest(work[slice1], atol)
        ijob = 2

    if info > 0 and iter_ == maxiter and not (resid <= atol):
        # info isn't set appropriately otherwise
        info = iter_

    return postprocess(x), info
@set_docstring('Use BIConjugate Gradient STABilized iteration to solve '
               '``Ax = b``.',
               'The real or complex N-by-N matrix of the linear system.\n'
               'Alternatively, ``A`` can be a linear operator which can\n'
               'produce ``Ax`` using, e.g.,\n'
               '``scipy.sparse.linalg.LinearOperator``.')
@non_reentrant()
def bicgstab(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
    # Normalize inputs; postprocess() maps x back to the caller's type.
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    n = len(b)
    if maxiter is None:
        maxiter = n*10

    matvec = A.matvec
    psolve = M.matvec
    ltr = _type_conv[x.dtype.char]
    # dtype-specific reverse-communication Fortran kernel.
    revcom = getattr(_iterative, ltr + 'bicgstabrevcom')

    get_residual = lambda: np.linalg.norm(matvec(x) - b)
    atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'bicgstab')
    if atol == 'exit':
        return postprocess(x), 0

    resid = atol
    ndx1 = 1
    ndx2 = -1
    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
    work = _aligned_zeros(7*n, dtype=x.dtype)
    ijob = 1
    info = 0
    ftflag = True
    iter_ = maxiter
    # Reverse-communication loop: perform the operation encoded by ijob,
    # then re-enter the kernel.
    while True:
        olditer = iter_
        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
            revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
        if callback is not None and iter_ > olditer:
            callback(x)
        # 1-based Fortran offsets into the flat work array.
        slice1 = slice(ndx1-1, ndx1-1+n)
        slice2 = slice(ndx2-1, ndx2-1+n)
        if (ijob == -1):
            if callback is not None:
                callback(x)
            break
        elif (ijob == 1):
            # work[slice2] := sclr2*work[slice2] + sclr1 * A @ work[slice1]
            work[slice2] *= sclr2
            work[slice2] += sclr1*matvec(work[slice1])
        elif (ijob == 2):
            # Apply the preconditioner.
            work[slice1] = psolve(work[slice2])
        elif (ijob == 3):
            work[slice2] *= sclr2
            work[slice2] += sclr1*matvec(x)
        elif (ijob == 4):
            # Convergence check; first call initializes info to -1.
            if ftflag:
                info = -1
                ftflag = False
            resid, info = _stoptest(work[slice1], atol)
        ijob = 2

    if info > 0 and iter_ == maxiter and not (resid <= atol):
        # info isn't set appropriately otherwise
        info = iter_

    return postprocess(x), info
@set_docstring('Use Conjugate Gradient iteration to solve ``Ax = b``.',
               'The real or complex N-by-N matrix of the linear system.\n'
               '``A`` must represent a hermitian, positive definite matrix.\n'
               'Alternatively, ``A`` can be a linear operator which can\n'
               'produce ``Ax`` using, e.g.,\n'
               '``scipy.sparse.linalg.LinearOperator``.')
@non_reentrant()
def cg(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
    # Normalize inputs; postprocess() maps x back to the caller's type.
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    n = len(b)
    if maxiter is None:
        maxiter = n*10

    matvec = A.matvec
    psolve = M.matvec
    ltr = _type_conv[x.dtype.char]
    # dtype-specific reverse-communication Fortran kernel.
    revcom = getattr(_iterative, ltr + 'cgrevcom')

    get_residual = lambda: np.linalg.norm(matvec(x) - b)
    atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'cg')
    if atol == 'exit':
        return postprocess(x), 0

    resid = atol
    ndx1 = 1
    ndx2 = -1
    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
    work = _aligned_zeros(4*n, dtype=x.dtype)
    ijob = 1
    info = 0
    ftflag = True
    iter_ = maxiter
    # Reverse-communication loop: perform the operation encoded by ijob,
    # then re-enter the kernel.
    while True:
        olditer = iter_
        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
            revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
        if callback is not None and iter_ > olditer:
            callback(x)
        # 1-based Fortran offsets into the flat work array.
        slice1 = slice(ndx1-1, ndx1-1+n)
        slice2 = slice(ndx2-1, ndx2-1+n)
        if (ijob == -1):
            if callback is not None:
                callback(x)
            break
        elif (ijob == 1):
            # work[slice2] := sclr2*work[slice2] + sclr1 * A @ work[slice1]
            work[slice2] *= sclr2
            work[slice2] += sclr1*matvec(work[slice1])
        elif (ijob == 2):
            # Apply the preconditioner.
            work[slice1] = psolve(work[slice2])
        elif (ijob == 3):
            work[slice2] *= sclr2
            work[slice2] += sclr1*matvec(x)
        elif (ijob == 4):
            # Convergence check; first call initializes info to -1.
            if ftflag:
                info = -1
                ftflag = False
            resid, info = _stoptest(work[slice1], atol)
            if info == 1 and iter_ > 1:
                # recompute residual and recheck, to avoid
                # accumulating rounding error
                work[slice1] = b - matvec(x)
                resid, info = _stoptest(work[slice1], atol)
        ijob = 2

    if info > 0 and iter_ == maxiter and not (resid <= atol):
        # info isn't set appropriately otherwise
        info = iter_

    return postprocess(x), info
@set_docstring('Use Conjugate Gradient Squared iteration to solve ``Ax = b``.',
               'The real-valued N-by-N matrix of the linear system.\n'
               'Alternatively, ``A`` can be a linear operator which can\n'
               'produce ``Ax`` using, e.g.,\n'
               '``scipy.sparse.linalg.LinearOperator``.')
@non_reentrant()
def cgs(A, b, x0=None, tol=1e-5, maxiter=None, M=None, callback=None, atol=None):
    # Normalize inputs; postprocess() maps x back to the caller's type.
    A, M, x, b, postprocess = make_system(A, M, x0, b)

    n = len(b)
    if maxiter is None:
        maxiter = n*10

    matvec = A.matvec
    psolve = M.matvec
    ltr = _type_conv[x.dtype.char]
    # dtype-specific reverse-communication Fortran kernel.
    revcom = getattr(_iterative, ltr + 'cgsrevcom')

    get_residual = lambda: np.linalg.norm(matvec(x) - b)
    atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'cgs')
    if atol == 'exit':
        return postprocess(x), 0

    resid = atol
    ndx1 = 1
    ndx2 = -1
    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
    work = _aligned_zeros(7*n, dtype=x.dtype)
    ijob = 1
    info = 0
    ftflag = True
    iter_ = maxiter
    # Reverse-communication loop: perform the operation encoded by ijob,
    # then re-enter the kernel.
    while True:
        olditer = iter_
        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
            revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
        if callback is not None and iter_ > olditer:
            callback(x)
        # 1-based Fortran offsets into the flat work array.
        slice1 = slice(ndx1-1, ndx1-1+n)
        slice2 = slice(ndx2-1, ndx2-1+n)
        if (ijob == -1):
            if callback is not None:
                callback(x)
            break
        elif (ijob == 1):
            # work[slice2] := sclr2*work[slice2] + sclr1 * A @ work[slice1]
            work[slice2] *= sclr2
            work[slice2] += sclr1*matvec(work[slice1])
        elif (ijob == 2):
            # Apply the preconditioner.
            work[slice1] = psolve(work[slice2])
        elif (ijob == 3):
            work[slice2] *= sclr2
            work[slice2] += sclr1*matvec(x)
        elif (ijob == 4):
            # Convergence check; first call initializes info to -1.
            if ftflag:
                info = -1
                ftflag = False
            resid, info = _stoptest(work[slice1], atol)
            if info == 1 and iter_ > 1:
                # recompute residual and recheck, to avoid
                # accumulating rounding error
                work[slice1] = b - matvec(x)
                resid, info = _stoptest(work[slice1], atol)
        ijob = 2

    if info == -10:
        # termination due to breakdown: check for convergence
        resid, ok = _stoptest(b - matvec(x), atol)
        if ok:
            info = 0

    if info > 0 and iter_ == maxiter and not (resid <= atol):
        # info isn't set appropriately otherwise
        info = iter_

    return postprocess(x), info
@non_reentrant()
def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None, callback=None,
          restrt=None, atol=None, callback_type=None):
    """
    Use Generalized Minimal RESidual iteration to solve ``Ax = b``.

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The real or complex N-by-N matrix of the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : {array, matrix}
        Right hand side of the linear system. Has shape (N,) or (N,1).

    Returns
    -------
    x : {array, matrix}
        The converged solution.
    info : int
        Provides convergence information:
            * 0 : successful exit
            * >0 : convergence to tolerance not achieved, number of iterations
            * <0 : illegal input or breakdown

    Other parameters
    ----------------
    x0 : {array, matrix}
        Starting guess for the solution (a vector of zeros by default).
    tol, atol : float, optional
        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
        The default for ``atol`` is ``'legacy'``, which emulates
        a different legacy behavior.

        .. warning::

           The default value for `atol` will be changed in a future release.
           For future compatibility, specify `atol` explicitly.
    restart : int, optional
        Number of iterations between restarts. Larger values increase
        iteration cost, but may be necessary for convergence.
        Default is 20.
    maxiter : int, optional
        Maximum number of iterations (restart cycles). Iteration will stop
        after maxiter steps even if the specified tolerance has not been
        achieved.
    M : {sparse matrix, dense matrix, LinearOperator}
        Inverse of the preconditioner of A. M should approximate the
        inverse of A and be easy to solve for (see Notes). Effective
        preconditioning dramatically improves the rate of convergence,
        which implies that fewer iterations are needed to reach a given
        error tolerance. By default, no preconditioner is used.
    callback : function
        User-supplied function to call after each iteration. It is called
        as `callback(args)`, where `args` are selected by `callback_type`.
    callback_type : {'x', 'pr_norm', 'legacy'}, optional
        Callback function argument requested:
          - ``x``: current iterate (ndarray), called on every restart
          - ``pr_norm``: relative (preconditioned) residual norm (float),
            called on every inner iteration
          - ``legacy`` (default): same as ``pr_norm``, but also changes the
            meaning of 'maxiter' to count inner iterations instead of restart
            cycles.
    restrt : int, optional
        DEPRECATED - use `restart` instead.

    See Also
    --------
    LinearOperator

    Notes
    -----
    A preconditioner, P, is chosen such that P is close to A but easy to solve
    for. The preconditioner parameter required by this routine is
    ``M = P^-1``. The inverse should preferably not be calculated
    explicitly. Rather, use the following template to produce M::

      # Construct a linear operator that computes P^-1 * x.
      import scipy.sparse.linalg as spla
      M_x = lambda x: spla.spsolve(P, x)
      M = spla.LinearOperator((n, n), M_x)

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import gmres
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = gmres(A, b)
    >>> print(exitCode)  # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True
    """

    # Change 'restrt' keyword to 'restart'
    if restrt is None:
        restrt = restart
    elif restart is not None:
        raise ValueError("Cannot specify both restart and restrt keywords. "
                         "Preferably use 'restart' only.")

    if callback is not None and callback_type is None:
        # Warn about 'callback_type' semantic changes.
        # Probably should be removed only in far future, Scipy 2.0 or so.
        # BUG FIX: the message previously contained literal '{name}'
        # placeholders because .format() was never called (compare the
        # analogous warning in _get_atol); interpolate the routine name.
        warnings.warn("scipy.sparse.linalg.gmres called without specifying `callback_type`. "
                      "The default value will be changed in a future release. "
                      "For compatibility, specify a value for `callback_type` explicitly, e.g., "
                      "``{name}(..., callback_type='pr_norm')``, or to retain the old behavior "
                      "``{name}(..., callback_type='legacy')``".format(name='gmres'),
                      category=DeprecationWarning, stacklevel=3)

    if callback_type is None:
        callback_type = 'legacy'

    if callback_type not in ('x', 'pr_norm', 'legacy'):
        raise ValueError("Unknown callback_type: {!r}".format(callback_type))

    if callback is None:
        # No callback registered; disable all callback dispatch below.
        callback_type = 'none'

    A, M, x, b, postprocess = make_system(A, M, x0, b)

    n = len(b)
    if maxiter is None:
        maxiter = n*10

    if restrt is None:
        restrt = 20
    # The Krylov subspace cannot usefully exceed the problem dimension.
    restrt = min(restrt, n)

    matvec = A.matvec
    psolve = M.matvec
    ltr = _type_conv[x.dtype.char]
    # dtype-specific reverse-communication Fortran kernel.
    revcom = getattr(_iterative, ltr + 'gmresrevcom')

    bnrm2 = np.linalg.norm(b)
    Mb_nrm2 = np.linalg.norm(psolve(b))
    get_residual = lambda: np.linalg.norm(matvec(x) - b)
    atol = _get_atol(tol, atol, bnrm2, get_residual, 'gmres')
    if atol == 'exit':
        return postprocess(x), 0

    if bnrm2 == 0:
        # Zero rhs: the exact solution is the zero vector.
        return postprocess(b), 0

    # Tolerance passed to GMRESREVCOM applies to the inner iteration
    # and deals with the left-preconditioned residual.
    ptol_max_factor = 1.0
    ptol = Mb_nrm2 * min(ptol_max_factor, atol / bnrm2)
    resid = np.nan
    presid = np.nan
    ndx1 = 1
    ndx2 = -1
    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
    work = _aligned_zeros((6+restrt)*n, dtype=x.dtype)
    work2 = _aligned_zeros((restrt+1)*(2*restrt+2), dtype=x.dtype)
    ijob = 1
    info = 0
    ftflag = True
    iter_ = maxiter
    old_ijob = ijob
    first_pass = True
    resid_ready = False
    iter_num = 1
    # Reverse-communication loop: perform the operation encoded by ijob,
    # then re-enter the kernel.
    while True:
        olditer = iter_
        x, iter_, presid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
            revcom(b, x, restrt, work, work2, iter_, presid, info, ndx1, ndx2, ijob, ptol)
        if callback_type == 'x' and iter_ != olditer:
            callback(x)
        # 1-based Fortran offsets into the flat work array.
        slice1 = slice(ndx1-1, ndx1-1+n)
        slice2 = slice(ndx2-1, ndx2-1+n)
        if (ijob == -1):  # gmres success, update last residual
            if callback_type in ('pr_norm', 'legacy'):
                if resid_ready:
                    callback(presid / bnrm2)
            elif callback_type == 'x':
                callback(x)
            break
        elif (ijob == 1):
            work[slice2] *= sclr2
            work[slice2] += sclr1*matvec(x)
        elif (ijob == 2):
            # Apply the preconditioner; a completed psolve after an inner
            # matvec means a fresh residual norm is available for callbacks.
            work[slice1] = psolve(work[slice2])
            if not first_pass and old_ijob == 3:
                resid_ready = True
            first_pass = False
        elif (ijob == 3):
            # work[slice2] := sclr2*work[slice2] + sclr1 * A @ work[slice1]
            work[slice2] *= sclr2
            work[slice2] += sclr1*matvec(work[slice1])
            if resid_ready:
                if callback_type in ('pr_norm', 'legacy'):
                    callback(presid / bnrm2)
                resid_ready = False
                iter_num = iter_num+1
        elif (ijob == 4):
            # Outer-loop convergence check on the true residual.
            if ftflag:
                info = -1
                ftflag = False
            resid, info = _stoptest(work[slice1], atol)

            # Inner loop tolerance control
            if info or presid > ptol:
                # Inner tolerance was not the limiting factor; relax it.
                ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)
            else:
                # Inner loop tolerance OK, but outer loop not.
                ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor)

            if resid != 0:
                ptol = presid * min(ptol_max_factor, atol / resid)
            else:
                ptol = presid * ptol_max_factor

        old_ijob = ijob
        ijob = 2

        if callback_type == 'legacy':
            # Legacy behavior: 'maxiter' counts inner iterations.
            if iter_num > maxiter:
                info = maxiter
                break

    if info >= 0 and not (resid <= atol):
        # info isn't set appropriately otherwise
        info = maxiter

    return postprocess(x), info
@non_reentrant()
def qmr(A, b, x0=None, tol=1e-5, maxiter=None, M1=None, M2=None, callback=None,
        atol=None):
    """Use Quasi-Minimal Residual iteration to solve ``Ax = b``.

    Parameters
    ----------
    A : {sparse matrix, dense matrix, LinearOperator}
        The real-valued N-by-N matrix of the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` and ``A^T x`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : {array, matrix}
        Right hand side of the linear system. Has shape (N,) or (N,1).

    Returns
    -------
    x : {array, matrix}
        The converged solution.
    info : integer
        Provides convergence information:
            0  : successful exit
            >0 : convergence to tolerance not achieved, number of iterations
            <0 : illegal input or breakdown

    Other Parameters
    ----------------
    x0 : {array, matrix}
        Starting guess for the solution.
    tol, atol : float, optional
        Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
        The default for ``atol`` is ``'legacy'``, which emulates
        a different legacy behavior.

        .. warning::

           The default value for `atol` will be changed in a future release.
           For future compatibility, specify `atol` explicitly.
    maxiter : integer
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M1 : {sparse matrix, dense matrix, LinearOperator}
        Left preconditioner for A.
    M2 : {sparse matrix, dense matrix, LinearOperator}
        Right preconditioner for A. Used together with the left
        preconditioner M1.  The matrix M1*A*M2 should have better
        conditioned than A alone.
    callback : function
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.

    See Also
    --------
    LinearOperator

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import qmr
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = qmr(A, b)
    >>> print(exitCode)            # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True
    """
    # Keep the original operator: it may expose psolve/rpsolve hooks that
    # make_system() does not carry over.
    A_ = A
    A, M, x, b, postprocess = make_system(A, None, x0, b)

    if M1 is None and M2 is None:
        if hasattr(A_,'psolve'):
            # Build the one-sided preconditioners from the operator's own
            # psolve/rpsolve methods.
            def left_psolve(b):
                return A_.psolve(b,'left')

            def right_psolve(b):
                return A_.psolve(b,'right')

            def left_rpsolve(b):
                return A_.rpsolve(b,'left')

            def right_rpsolve(b):
                return A_.rpsolve(b,'right')
            M1 = LinearOperator(A.shape, matvec=left_psolve, rmatvec=left_rpsolve)
            M2 = LinearOperator(A.shape, matvec=right_psolve, rmatvec=right_rpsolve)
        else:
            # No preconditioning requested: identity on both sides.
            def id(b):
                return b
            M1 = LinearOperator(A.shape, matvec=id, rmatvec=id)
            M2 = LinearOperator(A.shape, matvec=id, rmatvec=id)

    n = len(b)
    if maxiter is None:
        maxiter = n*10

    # Pick the dtype-specific Fortran reverse-communication routine
    # (e.g. 'd' -> dqmrrevcom).
    ltr = _type_conv[x.dtype.char]
    revcom = getattr(_iterative, ltr + 'qmrrevcom')

    get_residual = lambda: np.linalg.norm(A.matvec(x) - b)
    atol = _get_atol(tol, atol, np.linalg.norm(b), get_residual, 'qmr')
    if atol == 'exit':
        # Initial guess already satisfies the tolerance.
        return postprocess(x), 0

    resid = atol
    ndx1 = 1
    ndx2 = -1
    # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
    work = _aligned_zeros(11*n,x.dtype)
    ijob = 1
    info = 0
    ftflag = True
    iter_ = maxiter
    # Reverse-communication loop: revcom returns with an ijob code requesting
    # an operation; we perform it on work[slice1]/work[slice2] and re-enter.
    while True:
        olditer = iter_
        x, iter_, resid, info, ndx1, ndx2, sclr1, sclr2, ijob = \
            revcom(b, x, work, iter_, resid, info, ndx1, ndx2, ijob)
        if callback is not None and iter_ > olditer:
            callback(x)
        slice1 = slice(ndx1-1, ndx1-1+n)
        slice2 = slice(ndx2-1, ndx2-1+n)
        if (ijob == -1):
            # Finished (converged, broke down, or hit maxiter).
            if callback is not None:
                callback(x)
            break
        elif (ijob == 1):
            # work[slice2] := sclr2*work[slice2] + sclr1 * A @ work[slice1]
            work[slice2] *= sclr2
            work[slice2] += sclr1*A.matvec(work[slice1])
        elif (ijob == 2):
            # Same update with the transposed operator A^T.
            work[slice2] *= sclr2
            work[slice2] += sclr1*A.rmatvec(work[slice1])
        elif (ijob == 3):
            # Left-preconditioner solve.
            work[slice1] = M1.matvec(work[slice2])
        elif (ijob == 4):
            # Right-preconditioner solve.
            work[slice1] = M2.matvec(work[slice2])
        elif (ijob == 5):
            # Transposed left-preconditioner solve.
            work[slice1] = M1.rmatvec(work[slice2])
        elif (ijob == 6):
            # Transposed right-preconditioner solve.
            work[slice1] = M2.rmatvec(work[slice2])
        elif (ijob == 7):
            # work[slice2] := sclr2*work[slice2] + sclr1 * A @ x
            work[slice2] *= sclr2
            work[slice2] += sclr1*A.matvec(x)
        elif (ijob == 8):
            # Stopping test requested by the Fortran routine.
            if ftflag:
                info = -1
                ftflag = False
            resid, info = _stoptest(work[slice1], atol)
        ijob = 2
    if info > 0 and iter_ == maxiter and not (resid <= atol):
        # info isn't set appropriately otherwise
        info = iter_
    return postprocess(x), info
| bsd-3-clause |
brandonium21/snowflake | snowflakeEnv/lib/python2.7/site-packages/alembic/testing/fixtures.py | 2 | 4359 | # coding: utf-8
import io
import re
from sqlalchemy import create_engine, text, MetaData
import alembic
from alembic.compat import configparser
from alembic import util
from alembic.compat import string_types, text_type
from alembic.migration import MigrationContext
from alembic.environment import EnvironmentContext
from alembic.operations import Operations
from alembic.ddl.impl import _impls
from contextlib import contextmanager
from .plugin.plugin_base import SkipTest
from .assertions import _get_dialect, eq_
from . import mock
testing_config = configparser.ConfigParser()
testing_config.read(['test.cfg'])
# SQLAlchemy 0.9.4+ ships its own testing TestBase; for older versions we
# provide a minimal local stand-in exposing the same class-level hooks.
if not util.sqla_094:
    class TestBase(object):
        # A sequence of database names to always run, regardless of the
        # constraints below.
        __whitelist__ = ()

        # A sequence of requirement names matching testing.requires decorators
        __requires__ = ()

        # A sequence of dialect names to exclude from the test class.
        __unsupported_on__ = ()

        # If present, test class is only runnable for the *single* specified
        # dialect. If you need multiple, use __unsupported_on__ and invert.
        __only_on__ = None

        # A sequence of no-arg callables. If any are True, the entire testcase is
        # skipped.
        __skip_if__ = None

        def assert_(self, val, msg=None):
            assert val, msg

        # apparently a handful of tests are doing this....OK
        def setup(self):
            if hasattr(self, "setUp"):
                self.setUp()

        def teardown(self):
            if hasattr(self, "tearDown"):
                self.tearDown()
else:
    from sqlalchemy.testing.fixtures import TestBase
def capture_db():
    """Return a mock-strategy engine plus the list of SQL it "executes".

    The engine runs nothing; each statement is compiled against the
    engine's PostgreSQL dialect and its string form is appended to the
    returned list.
    """
    statements = []

    def record(sql, *multiparams, **params):
        statements.append(str(sql.compile(dialect=engine.dialect)))

    engine = create_engine("postgresql://", strategy="mock", executor=record)
    return engine, statements
_engs = {}
@contextmanager
def capture_context_buffer(**kw):
    """Patch ``EnvironmentContext.configure`` so offline SQL is captured.

    Yields the in-memory buffer (``BytesIO`` when ``bytes_io=True`` is
    passed, ``StringIO`` otherwise) that receives generated output; all
    remaining keyword arguments are forced into every configure() call,
    along with a sqlite dialect and the buffer itself.
    """
    wants_bytes = kw.pop('bytes_io', False)
    buffer_ = io.BytesIO() if wants_bytes else io.StringIO()

    kw['dialect_name'] = "sqlite"
    kw['output_buffer'] = buffer_

    original_configure = EnvironmentContext.configure

    def patched_configure(*arg, **opt):
        # Overlay our captured buffer / sqlite dialect onto the call.
        opt.update(**kw)
        return original_configure(*arg, **opt)

    with mock.patch.object(EnvironmentContext, "configure", patched_configure):
        yield buffer_
def op_fixture(dialect='default', as_sql=False, naming_convention=None):
    """Create a MigrationContext whose impl records compiled SQL strings.

    The returned context exposes ``assert_()`` / ``assert_contains()`` so
    tests can check exactly which statements an Operations call emits.
    Also installs an Operations proxy on ``alembic.op``.
    """
    impl = _impls[dialect]

    class Impl(impl):
        def __init__(self, dialect, as_sql):
            self.assertion = []
            self.dialect = dialect
            self.as_sql = as_sql
            # TODO: this might need to
            # be more like a real connection
            # as tests get more involved
            self.connection = mock.Mock(dialect=dialect)

        def _exec(self, construct, *args, **kw):
            # Compile the construct instead of executing it, and record the
            # SQL text with newlines/tabs stripped for easier comparison.
            if isinstance(construct, string_types):
                construct = text(construct)
            assert construct.supports_execution
            sql = text_type(construct.compile(dialect=self.dialect))

            sql = re.sub(r'[\n\t]', '', sql)
            self.assertion.append(
                sql
            )

    opts = {}
    if naming_convention:
        # MetaData(naming_convention=...) only exists in SQLAlchemy >= 0.9.2.
        if not util.sqla_092:
            raise SkipTest(
                "naming_convention feature requires "
                "sqla 0.9.2 or greater")
        opts['target_metadata'] = MetaData(naming_convention=naming_convention)

    class ctx(MigrationContext):
        def __init__(self, dialect='default', as_sql=False):
            self.dialect = _get_dialect(dialect)
            self.impl = Impl(self.dialect, as_sql)
            self.opts = opts
            self.as_sql = as_sql

        def assert_(self, *sql):
            # TODO: make this more flexible about
            # whitespace and such
            eq_(self.impl.assertion, list(sql))

        def assert_contains(self, sql):
            # Pass if the fragment occurs in any recorded statement.
            for stmt in self.impl.assertion:
                if sql in stmt:
                    return
            else:
                assert False, "Could not locate fragment %r in %r" % (
                    sql,
                    self.impl.assertion
                )

    context = ctx(dialect, as_sql)
    alembic.op._proxy = Operations(context)
    return context
| bsd-2-clause |
pietergreyling/amphtml | validator/validator_gen_md.py | 26 | 7813 | #
# Copyright 2015 The AMP HTML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the license.
#
"""Generates validator-generated.md"""
import os
def GenerateValidatorGeneratedMd(specfile, validator_pb2, text_format, out):
    """Main method for the markdown generator.

    This method reads the specfile and emits Markdown to sys.stdout.

    Args:
      specfile: Path to validator.protoascii.
      validator_pb2: The proto2 Python module generated from validator.proto.
      text_format: The text_format module from the protobuf package, e.g.
        google.protobuf.text_format.
      out: a list of lines to output (without the newline characters), to
        which this function will append.
    """
    # Document preamble: generation notice, project link, title.
    out.append('<!-- Generated by %s - do not edit. -->' %
               os.path.basename(__file__))
    out.append(
        '<!-- WARNING: This does not fully reflect validator.protoascii yet. -->')
    out.append('')
    out.append('[Accelerated Mobile Pages Project](https://www.ampproject.org)')
    out.append('')
    out.append('# validator.protoascii')
    out.append('')
    # Parse the text-format spec into a ValidatorRules proto message.
    # NOTE(review): the file handle from open() is never explicitly closed.
    rules = validator_pb2.ValidatorRules()
    text_format.Merge(open(specfile).read(), rules)
    if rules.HasField('spec_file_revision'):
        out.append('* spec file revision: %d' % rules.spec_file_revision)
    if rules.HasField('min_validator_revision_required'):
        out.append('* minimum validator revision required: %d' %
                   rules.min_validator_revision_required)
    out.append('')
    out.append('Allowed Tags')
    out.append('')
    out.append('[TOC]')
    out.append('')
    # Emit one Markdown section per TagSpec found in the rules.
    for (field_desc, field_val) in rules.ListFields():
        if field_desc.name == 'tags':
            for tag_spec in field_val:
                PrintTagSpec(validator_pb2, tag_spec, out)
def GetLayout(validator_pb2, layout_index):
    """Helper function that returns the AmpLayout.Layout name for a given index.

    See amp.validator.AmpLayout.Layout in validator.proto for details.

    Args:
      validator_pb2: The proto2 Python module generated from validator.proto.
      layout_index: integer representing a particular AmpLayout.Layout
    Returns:
      A string which represents the name for this supported layout.
    """
    layout_descriptor = validator_pb2.DESCRIPTOR.message_types_by_name['AmpLayout']
    enum_values = (layout_descriptor.fields_by_name['supported_layouts']
                   .enum_type.values)
    return enum_values[layout_index].name
def PrintAmpLayout(validator_pb2, amp_layout, out):
    """Prints a Markdown version of the given proto message (AmpLayout).

    See amp.validator.AmpLayout in validator.proto for details.

    Args:
      validator_pb2: The proto2 Python module generated from validator.proto.
      amp_layout: The AmpLayout message.
      out: A list of lines to output (without newline characters), to which
        this function will append.
    """
    out.extend('* %s' % UnicodeEscape(GetLayout(validator_pb2, layout))
               for layout in amp_layout.supported_layouts)
    if amp_layout.defines_default_width:
        out.append('* Defines Default Width')
    if amp_layout.defines_default_height:
        out.append('* Defines Default Height')
def PrintAttrSpec(attr_spec, out):
    """Prints a Markdown version of the given proto message (AttrSpec).

    See amp.validator.AttrSpec in validator.proto for details.

    Args:
      attr_spec: The AttrSpec message.
      out: A list of lines to output (without newline characters), to which
        this function will append.
    """
    out.append('* %s' % UnicodeEscape(attr_spec.name))
    # Sub-bullets for each optional constraint that is set on the attribute.
    if attr_spec.alternative_names:
        alternatives = RepeatedFieldToString(attr_spec.alternative_names)
        out.append(' * Alternative Names: %s' % alternatives)
    if attr_spec.mandatory:
        out.append(' * Mandatory')
    if attr_spec.mandatory_oneof:
        out.append(' * Mandatory One of: %s' % attr_spec.mandatory_oneof)
    if attr_spec.value:
        out.append(' * Required Value: %s' % attr_spec.value)
def PrintTagSpec(validator_pb2, tag_spec, out):
    """Prints a Markdown version of the given proto message (TagSpec).

    See amp.validator.TagSpec in validator.proto for details of proto message.

    Args:
      validator_pb2: The proto2 Python module generated from validator.proto.
      tag_spec: The TagSpec message.
      out: A list of lines to output (without newline characters), to which this
        function will append.
    """
    # Section header: tag name, optional spec name, deprecation marker, and
    # an explicit Markdown anchor built from both names.
    header = '## %s' % UnicodeEscape(tag_spec.tag_name)
    if tag_spec.spec_name and (tag_spec.tag_name != tag_spec.spec_name):
        header += ': %s' % UnicodeEscape(tag_spec.spec_name)
    if tag_spec.deprecation:
        header += ' (DEPRECATED)'
    header += '{#%s_%s}' % (UnicodeEscape(tag_spec.tag_name).replace(' ', '_'),
                            UnicodeEscape(tag_spec.spec_name).replace(' ', '_'))
    out.append('')
    out.append(header)
    out.append('')
    if tag_spec.deprecation:
        out.append('This is deprecated.')
        out.append('Please see [%s](%s)' %
                   (UnicodeEscape(tag_spec.deprecation), tag_spec.deprecation_url))
        out.append('')
    # One bullet per tag-level constraint that is set on this spec.
    if tag_spec.mandatory:
        out.append('* Mandatory')
    if tag_spec.mandatory_alternatives:
        out.append('* Mandatory Alternatives: %s' %
                   UnicodeEscape(tag_spec.mandatory_alternatives))
    if tag_spec.unique:
        out.append('* Unique')
    if tag_spec.mandatory_parent:
        out.append('* Mandatory Parent: %s' %
                   UnicodeEscape(tag_spec.mandatory_parent))
    if tag_spec.mandatory_ancestor:
        out.append('* Mandatory Ancestor: %s' %
                   UnicodeEscape(tag_spec.mandatory_ancestor))
    if tag_spec.mandatory_ancestor_suggested_alternative:
        out.append('* Mandatory Ancestor Alternative: %s' %
                   UnicodeEscape(tag_spec.mandatory_ancestor_suggested_alternative))
    if tag_spec.disallowed_ancestor:
        out.append('* Disallowed Ancestor: %s' %
                   RepeatedFieldToString(tag_spec.disallowed_ancestor))
    if tag_spec.also_requires_tag:
        out.append('* Also Requires: %s' %
                   RepeatedFieldToString(tag_spec.also_requires_tag))
    # Attribute sub-section, one bullet list entry per AttrSpec.
    if tag_spec.attrs:
        out.append('')
        out.append('Allowed Attributes:')
        out.append('')
        for attr_spec in tag_spec.attrs:
            PrintAttrSpec(attr_spec, out)
    # Layout sub-section, only if the AmpLayout message carries any data.
    if (tag_spec.amp_layout.supported_layouts or
            tag_spec.amp_layout.defines_default_width or
            tag_spec.amp_layout.defines_default_height):
        out.append('')
        out.append('Allowed Layouts:')
        out.append('')
        PrintAmpLayout(validator_pb2, tag_spec.amp_layout, out)
    if tag_spec.spec_url:
        out.append('')
        out.append('[Spec](%s)' % tag_spec.spec_url)
    out.append('')
def RepeatedFieldToString(field):
    """Helper function which converts a list into an escaped string.

    Args:
      field: A list of strings.
    Returns:
      A string, segmented by commas.
    """
    escaped_items = (UnicodeEscape(item) for item in field)
    return ', '.join(escaped_items)
def UnderscoreToTitleCase(under_score):
    """Helper function which converts under_score names to TitleCase.

    Args:
      under_score: A name, segmented by under_scores.
    Returns:
      The same name with each segment title-cased, joined by spaces.
    """
    return ' '.join(part.title() for part in under_score.split('_'))
def UnicodeEscape(string):
    """Helper function which escapes unicode characters.

    Args:
      string: A string which may contain unicode characters.
    Returns:
      An escaped string (via the 'unicode-escape' codec).
    """
    # Concatenating with '' first mirrors the original coercion step.
    coerced = '' + string
    return coerced.encode('unicode-escape')
| apache-2.0 |
sysbot/CouchPotatoServer | libs/html5lib/ihatexml.py | 1727 | 16581 | from __future__ import absolute_import, division, unicode_literals
import re
import warnings
from .constants import DataLossWarning
baseChar = """
[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] |
[#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] |
[#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] |
[#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 |
[#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] |
[#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] |
[#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] |
[#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] |
[#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 |
[#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] |
[#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] |
[#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D |
[#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] |
[#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] |
[#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] |
[#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] |
[#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] |
[#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] |
[#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 |
[#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] |
[#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] |
[#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] |
[#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] |
[#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] |
[#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] |
[#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] |
[#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] |
[#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] |
[#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | [#x0E32-#x0E33] |
[#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A |
#x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 |
#x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] |
#x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] |
[#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] |
[#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C |
#x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 |
[#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] |
[#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] |
[#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 |
[#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] |
[#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B |
#x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE |
[#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] |
[#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 |
[#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] |
[#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""
combiningCharacter = """
[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] |
[#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 |
[#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] |
[#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] |
#x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] |
[#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] |
[#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 |
#x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] |
[#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC |
[#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] |
#x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] |
[#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] |
[#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] |
[#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] |
[#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] |
[#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] |
#x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 |
[#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] |
#x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] |
[#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] |
[#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] |
#x3099 | #x309A"""
digit = """
[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] |
[#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] |
[#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] |
[#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
extender = """
#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 |
#[#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
letter = " | ".join([baseChar, ideographic])
# Without the
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
extender])
nameFirst = " | ".join([letter, "_"])
reChar = re.compile(r"#x([\d|A-F]{4,4})")
reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")
def charStringToList(chars):
    """Parse a "#xHHHH | [#xHHHH-#xHHHH] | c"-style class into [start, end] ranges.

    Each pipe-separated entry is either a single code point, an inclusive
    code-point range, or a literal single character; the result is the
    normalised (sorted, merged) list of inclusive ranges.
    """
    charRanges = [item.strip() for item in chars.split(" | ")]
    rv = []
    for item in charRanges:
        foundMatch = False
        # Try the single-point pattern first, then the range pattern.
        for regexp in (reChar, reCharRange):
            match = regexp.match(item)
            if match is not None:
                rv.append([hexToInt(item) for item in match.groups()])
                # A single code point becomes a degenerate [n, n] range.
                if len(rv[-1]) == 1:
                    rv[-1] = rv[-1] * 2
                foundMatch = True
                break
        if not foundMatch:
            # Anything else must be a literal one-character entry.
            assert len(item) == 1
            rv.append([ord(item)] * 2)
    rv = normaliseCharList(rv)
    return rv
def normaliseCharList(charList):
    """Sort [start, end] ranges and fuse adjacent/overlapping neighbours.

    Returns a new list; entries may alias (and mutate) items of the input.
    """
    ordered = sorted(charList)
    for bounds in ordered:
        assert bounds[1] >= bounds[0]

    merged = []
    index = 0
    total = len(ordered)
    while index < total:
        span = 1
        merged.append(ordered[index])
        # Absorb every following range that starts at or before end+1.
        while index + span < total and ordered[index + span][0] <= merged[-1][1] + 1:
            merged[-1][1] = ordered[index + span][1]
            span += 1
        index += span
    return merged
# We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)


def missingRanges(charList):
    """Return the complement of *charList* within the BMP (0..0xFFFF).

    Args:
        charList: normalised (sorted, non-overlapping) list of inclusive
            [start, end] code-point ranges.
    Returns:
        The list of inclusive [start, end] ranges covering every BMP code
        point not covered by ``charList``.
    """
    rv = []
    # BUG FIX: the original tested ``charList[0] != 0`` -- comparing a list
    # against 0, which is always true -- so a char list starting at code
    # point 0 produced an invalid leading [0, -1] range. Compare the first
    # range's start instead.
    if charList[0][0] != 0:
        rv.append([0, charList[0][0] - 1])
    # Gaps between consecutive ranges.
    for i, item in enumerate(charList[:-1]):
        rv.append([item[1] + 1, charList[i + 1][0] - 1])
    # Tail gap up to the top of the BMP.
    if charList[-1][1] != max_unicode:
        rv.append([charList[-1][1] + 1, max_unicode])
    return rv
def listToRegexpStr(charList):
    """Render [start, end] code-point ranges as a regex character class."""
    parts = []
    for start, end in charList:
        if start == end:
            parts.append(escapeRegexp(chr(start)))
        else:
            parts.append("%s-%s" % (escapeRegexp(chr(start)),
                                    escapeRegexp(chr(end))))
    return "[%s]" % "".join(parts)
def hexToInt(hex_str):
    """Parse a base-16 string such as "03AF" into an integer."""
    return int(hex_str, base=16)
def escapeRegexp(string):
    """Backslash-escape regex metacharacters in *string*.

    Note: the backslash itself is deliberately not in the special set, so
    escapes inserted here are never re-escaped.
    """
    specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
                         "[", "]", "|", "(", ")", "-")
    return "".join(
        "\\" + char if char in specialCharacters else char
        for char in string)
# output from the above
nonXmlNameBMPRegexp = re.compile('[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u0
f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
nonXmlNameFirstBMPRegexp = re.compile('[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u3
040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
# Simpler things
nonPubidCharRegexp = re.compile("[^\x20\x0D\x0Aa-zA-Z0-9\-\'()+,./:=?;!*#@$_%]")
class InfosetFilter(object):
    """Coerces names and character data that are legal in HTML but not in
    XML into XML-safe forms, emitting a DataLossWarning for each coercion.
    """

    # Matches the "UXXXXX" tokens produced by escapeChar so fromXmlName can
    # reverse the substitution.
    replacementRegexp = re.compile(r"U[\dA-F]{5,5}")

    def __init__(self, replaceChars=None,
                 dropXmlnsLocalName=False,
                 dropXmlnsAttrNs=False,
                 preventDoubleDashComments=False,
                 preventDashAtCommentEnd=False,
                 replaceFormFeedCharacters=True,
                 preventSingleQuotePubid=False):
        # NOTE(review): replaceChars is accepted but never read anywhere in
        # this class -- presumably kept for API compatibility; confirm.
        self.dropXmlnsLocalName = dropXmlnsLocalName
        self.dropXmlnsAttrNs = dropXmlnsAttrNs

        self.preventDoubleDashComments = preventDoubleDashComments
        self.preventDashAtCommentEnd = preventDashAtCommentEnd

        self.replaceFormFeedCharacters = replaceFormFeedCharacters

        self.preventSingleQuotePubid = preventSingleQuotePubid

        # Cache of original character -> "UXXXXX" replacement token.
        self.replaceCache = {}

    def coerceAttribute(self, name, namespace=None):
        """Return an XML-safe attribute name, or None to drop the attribute."""
        if self.dropXmlnsLocalName and name.startswith("xmlns:"):
            warnings.warn("Attributes cannot begin with xmlns", DataLossWarning)
            return None
        elif (self.dropXmlnsAttrNs and
              namespace == "http://www.w3.org/2000/xmlns/"):
            warnings.warn("Attributes cannot be in the xml namespace", DataLossWarning)
            return None
        else:
            return self.toXmlName(name)

    def coerceElement(self, name, namespace=None):
        """Return an XML-safe element name."""
        return self.toXmlName(name)

    def coerceComment(self, data):
        """Rewrite comment text so it contains no '--' sequences."""
        if self.preventDoubleDashComments:
            # Loop because "- -" insertion can in principle leave new pairs.
            while "--" in data:
                warnings.warn("Comments cannot contain adjacent dashes", DataLossWarning)
                data = data.replace("--", "- -")
        return data

    def coerceCharacters(self, data):
        """Replace form feeds (U+000C) in character data with spaces."""
        if self.replaceFormFeedCharacters:
            # The single replace() below removes every occurrence; this loop
            # only serves to emit one warning per original form feed.
            for i in range(data.count("\x0C")):
                warnings.warn("Text cannot contain U+000C", DataLossWarning)
            data = data.replace("\x0C", " ")
        # Other non-xml characters
        return data

    def coercePubid(self, data):
        """Replace characters not allowed in a PUBLIC identifier."""
        dataOutput = data
        for char in nonPubidCharRegexp.findall(data):
            warnings.warn("Coercing non-XML pubid", DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            dataOutput = dataOutput.replace(char, replacement)
        if self.preventSingleQuotePubid and dataOutput.find("'") >= 0:
            warnings.warn("Pubid cannot contain single quote", DataLossWarning)
            dataOutput = dataOutput.replace("'", self.getReplacementCharacter("'"))
        return dataOutput

    def toXmlName(self, name):
        """Rewrite *name* so every character is legal in an XML name.

        The first character is checked against the stricter name-start
        class; the remainder against the general name class.
        """
        nameFirst = name[0]
        nameRest = name[1:]
        m = nonXmlNameFirstBMPRegexp.match(nameFirst)
        if m:
            warnings.warn("Coercing non-XML name", DataLossWarning)
            nameFirstOutput = self.getReplacementCharacter(nameFirst)
        else:
            nameFirstOutput = nameFirst

        nameRestOutput = nameRest
        replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
        for char in replaceChars:
            warnings.warn("Coercing non-XML name", DataLossWarning)
            replacement = self.getReplacementCharacter(char)
            nameRestOutput = nameRestOutput.replace(char, replacement)
        return nameFirstOutput + nameRestOutput

    def getReplacementCharacter(self, char):
        """Return the cached "UXXXXX" token for *char*, creating it if new."""
        if char in self.replaceCache:
            replacement = self.replaceCache[char]
        else:
            replacement = self.escapeChar(char)
        return replacement

    def fromXmlName(self, name):
        """Reverse toXmlName: expand "UXXXXX" tokens back to characters."""
        for item in set(self.replacementRegexp.findall(name)):
            name = name.replace(item, self.unescapeChar(item))
        return name

    def escapeChar(self, char):
        """Encode *char* as "UXXXXX" (zero-padded hex code point), cache it."""
        replacement = "U%05X" % ord(char)
        self.replaceCache[char] = replacement
        return replacement

    def unescapeChar(self, charcode):
        """Decode a "UXXXXX" token back to its character."""
        return chr(int(charcode[1:], 16))
| gpl-3.0 |
scholer/py2cytoscape | py2cytoscape/data/network_view.py | 1 | 6734 | # -*- coding: utf-8 -*-
import json
import pandas as pd
import requests
from py2cytoscape.data.edge_view import EdgeView
from py2cytoscape.data.node_view import NodeView
from . import BASE_URL, HEADERS
from py2cytoscape.data.util_network import NetworkUtil
BASE_URL_NETWORK = BASE_URL + 'networks'
class CyNetworkView(object):
def __init__(self, network=None, suid=None):
if network is None:
raise ValueError('Network is required.')
# Validate required argument
if pd.isnull(suid):
raise ValueError("View SUID is missing.")
else:
self.__network = network
self.__id = suid
self.__url = BASE_URL_NETWORK + '/' \
+ str(self.__network.get_id()) + '/views/' + str(self.__id)
def get_id(self):
"""
Get session-unique ID of this network view
:return: SUID as integer
"""
return self.__id
    def get_model_id(self):
        """
        Get network model SUID

        :return: model SUID as integer
        """
        # Delegates to the CyNetwork object this view renders.
        return self.__network.get_id()
    def get_node_views(self):
        """Return all node views in this network view as NodeView objects."""
        return self.__get_views('nodes')
    def get_edge_views(self):
        """Return all edge views in this network view as EdgeView objects."""
        return self.__get_views('edges')
    def get_node_views_as_dict(self):
        """Return node views as {SUID: {visual_property: value}}."""
        return self.__get_views('nodes', format='dict')
    def get_edge_views_as_dict(self):
        """Return edge views as {SUID: {visual_property: value}}."""
        return self.__get_views('edges', format='dict')
    def get_network_view_as_dict(self):
        """Return network-level visual properties as {visual_property: value}."""
        return self.__get_views('network', format='dict')
    def get_node_views_as_dataframe(self):
        """Return node views in DataFrame form.

        NOTE(review): __get_views raises ValueError for the 'df' format as
        currently written, so this call cannot succeed -- verify intent.
        """
        return self.__get_views('nodes', format='df')
    def get_edge_views_as_dataframe(self):
        """Return edge views in DataFrame form.

        NOTE(review): __get_views raises ValueError for the 'df' format as
        currently written, so this call cannot succeed -- verify intent.
        """
        return self.__get_views('edges', format='df')
def __get_views(self, obj_type=None, format='view'):
url = self.__url + '/' + obj_type
views = requests.get(url).json()
if format is 'dict':
if obj_type is 'network':
return self.__get_network_view_dict(views)
else:
return self.__get_view_dict(views)
elif format is 'view':
return self.__get_view_objects(views, obj_type)
else:
raise ValueError('Format not supported: ' + format)
def __get_view_objects(self, views, obj_type):
view_list = []
if obj_type is 'nodes':
for view in views:
view = NodeView(self, view['SUID'], obj_type)
view_list.append(view)
elif obj_type is 'edges':
for view in views:
view = EdgeView(self, view['SUID'], obj_type)
view_list.append(view)
else:
raise ValueError('No such object type: ' + obj_type)
return view_list
def __get_view_dict(self, views):
# reformat return value to simple dict
view_dict = {}
for view in views:
key = view['SUID']
values = view['view']
# Flatten the JSON
key_val_pair = {}
for entry in values:
vp = entry['visualProperty']
value = entry['value']
key_val_pair[vp] = value
view_dict[key] = key_val_pair
return view_dict
def __get_view_df(self, views):
# reformat return value to simple dict
view_dict = {}
for view in views:
key = view['SUID']
values = view['view']
# Flatten the JSON
key_val_pair = {}
for entry in values:
vp = entry['visualProperty']
value = entry['value']
key_val_pair[vp] = value
view_dict[key] = key_val_pair
return view_dict
def __get_network_view_dict(self, values):
# reformat return value to simple dict
view_dict = {}
# Flatten the JSON
for entry in values:
vp = entry['visualProperty']
value = entry['value']
view_dict[vp] = value
return view_dict
def update_node_views(self, visual_property=None, values=None, key_type='suid'):
self.__update_views(visual_property, values, 'nodes', key_type)
def batch_update_node_views(self, value_dataframe=None):
self.__batch_update(value_dataframe, 'nodes')
def batch_update_edge_views(self, value_dataframe=None):
self.__batch_update(value_dataframe, 'edges')
def update_edge_views(self, visual_property=None, values=None, key_type='suid'):
self.__update_views(visual_property, values, 'edges', key_type)
def update_network_view(self, visual_property=None, value=None):
"""
Updates single value for Network-related VP.
:param visual_property:
:param value:
:return:
"""
new_value = [
{
"visualProperty": visual_property,
"value": value
}
]
requests.put(self.__url + '/network', data=json.dumps(new_value),
headers=HEADERS)
def __update_views(self, visual_property, values,
object_type=None, key_type='suid'):
if key_type is 'name':
name2suid = NetworkUtil.name2suid(self.__network)
body = []
for key in values.keys():
if key_type is 'name':
suid = name2suid[key]
if suid is None:
continue
else:
suid = key
new_value = self.__create_new_value(suid, visual_property,
values[key])
body.append(new_value)
requests.put(self.__url + '/' + object_type, data=json.dumps(body), headers=HEADERS)
def __create_new_value(self, suid, visual_property, value):
return {
"SUID": suid,
"view": [
{
"visualProperty": visual_property,
"value": value
}
]
}
def __batch_update(self, df, object_type=None):
body = []
columns = df.columns
for index, row in df.iterrows():
entry = {
'SUID': int(index),
'view': self.__create_new_values_from_row(columns, row)
}
body.append(entry)
requests.put(self.__url + '/' + object_type, data=json.dumps(body), headers=HEADERS)
def __create_new_values_from_row(self, columns, row):
views = []
for column in columns:
view = {
"visualProperty": column,
"value": row[column]
}
views.append(view)
return views
| mit |
13693100472/linux | tools/perf/scripts/python/failed-syscalls-by-pid.py | 1996 | 2233 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
# Usage string shown when too many command-line arguments are supplied.
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";

# Optional filters: restrict the report to one command name or one pid.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        # A numeric argument selects a pid; anything else is a comm name.
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

# Auto-vivifying nested dict: syscalls[comm][pid][syscall id][errno] -> count.
syscalls = autodict()
def trace_begin():
    # Called by perf before event processing starts (Python 2 print statement).
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called by perf once all events have been processed; emit the report.
    print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    common_callchain, id, ret):
    # Tracepoint handler for raw_syscalls:sys_exit; perf calls this once
    # per syscall return. Skip events not matching the optional filter.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return

    # Negative return values are failed syscalls: count per comm/pid/id/errno.
    if ret < 0:
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            # autodict leaf did not exist yet -- first failure of this kind.
            syscalls[common_comm][common_pid][id][ret] = 1
def syscalls__sys_exit(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, ret):
    # Compatibility handler for the legacy syscalls:sys_exit tracepoint,
    # whose signature has no common_callchain argument.
    #
    # BUG FIX: forwarding with **locals() omitted common_callchain, which
    # raw_syscalls__sys_exit requires, so any call raised TypeError.
    # Forward explicitly and pass None for the missing callchain.
    raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        None, id, ret)
def print_error_totals():
    # Render the summary table. Python 2 only: print statements, trailing
    # commas to suppress newlines, iteritems(), and a tuple-parameter
    # lambda (removed in Python 3).
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",

    print "%-30s %10s\n" % ("comm [pid]", "count"),
    print "%-30s %10s\n" % ("------------------------------", \
        "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print " syscall: %-16s\n" % syscall_name(id),
                ret_keys = syscalls[comm][pid][id].keys()
                # Sort errno buckets by count (descending), then errno.
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
                    print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
Spiderlover/Toontown | toontown/quest/QuestMapGlobals.py | 6 | 5065 | from pandac.PandaModules import Point3
# Quest-map placement data, keyed by the street's DNA storage file name
# ("<hood>_<branch zone>_english").

# Screen-relative offset at which the cog invasion info panel is drawn
# on each street's quest map.
CogInfoPosTable = {'toontown_central_2100_english': Point3(-0.3, 0, -0.2),
    'toontown_central_2200_english': Point3(0.2, 0, -0.2),
    'toontown_central_2300_english': Point3(0.35, 0, -0.2),
    'minnies_melody_land_4100_english': Point3(0.45, 0, -0.2),
    'minnies_melody_land_4200_english': Point3(0.3, 0, -0.3),
    'minnies_melody_land_4300_english': Point3(0.4, 0, -0.2),
    'donalds_dock_1100_english': Point3(0.35, 0, -0.2),
    'donalds_dock_1200_english': Point3(0.3, 0, -0.35),
    'donalds_dock_1300_english': Point3(0.3, 0, -0.25),
    'daisys_garden_5100_english': Point3(0.3, 0, -0.2),
    'daisys_garden_5200_english': Point3(0.3, 0, -0.2),
    'daisys_garden_5300_english': Point3(-0.3, 0, -0.3),
    'the_burrrgh_3100_english': Point3(0.37, 0, -0.2),
    'the_burrrgh_3200_english': Point3(0.35, 0, -0.35),
    'the_burrrgh_3300_english': Point3(0.3, 0, -0.2),
    'donalds_dreamland_9100_english': Point3(0.2, 0, -0.2),
    'donalds_dreamland_9200_english': Point3(-0.25, 0, -0.2)}

# World-space position of each street's Toon HQ marker.
HQPosTable = {'toontown_central_2100_english': Point3(-55, -350, 0),
    'toontown_central_2200_english': Point3(-385, 70, 0),
    'toontown_central_2300_english': Point3(490, 140, 0),
    'minnies_melody_land_4100_english': Point3(-330, -40, 0),
    'minnies_melody_land_4200_english': Point3(0, 555, 0),
    'minnies_melody_land_4300_english': Point3(326, 172, 5),
    'donalds_dock_1100_english': Point3(365, 100, 0),
    'donalds_dock_1200_english': Point3(-25, -410, 0),
    'donalds_dock_1300_english': Point3(210, -200, 0),
    'daisys_garden_5100_english': Point3(357.935, 28.45, 0),
    'daisys_garden_5200_english': Point3(345, 150, 0),
    'daisys_garden_5300_english': Point3(-138.597, 77.1236, -0.5),
    'the_burrrgh_3100_english': Point3(129.737, 51.9612, 0),
    'the_burrrgh_3200_english': Point3(230, 200, 0),
    'the_burrrgh_3300_english': Point3(268.052, 228.052, 0),
    'donalds_dreamland_9100_english': Point3(-250, -160, 0),
    'donalds_dreamland_9200_english': Point3(322.91, -67.1147, 0)}

# World-space position of each street's fishing spot marker.
FishingSpotPosTable = {'toontown_central_2100_english': Point3(4.45088, -658.174, 0.431656),
    'toontown_central_2200_english': Point3(-223.506, 161.474, 0.477789),
    'toontown_central_2300_english': Point3(512.4, -69.9997, 0.485466),
    'minnies_melody_land_4100_english': Point3(-570.662, -101.811, 1.9065),
    'minnies_melody_land_4200_english': Point3(-205.722, 238.179, 1.92272),
    'minnies_melody_land_4300_english': Point3(705.052, -26.4757, 1.93412),
    'donalds_dock_1100_english': Point3(368.148, -339.916, 0.419537),
    'donalds_dock_1200_english': Point3(-394.913, -243.085, 0.462966),
    'donalds_dock_1300_english': Point3(346.219, 88.2058, 0.643978),
    'daisys_garden_5100_english': Point3(162.961, 57.4756, 0.360527),
    'daisys_garden_5200_english': Point3(162.961, 113.59, 0.404519),
    'daisys_garden_5300_english': Point3(120.848, -55.6079, 0.388995),
    'the_burrrgh_3100_english': Point3(455.445, 15.6579, 0.376913),
    'the_burrrgh_3200_english': Point3(340.009, 462.856, 0.415845),
    'the_burrrgh_3300_english': Point3(54.17, 75.405, 0.141113),
    'donalds_dreamland_9100_english': Point3(106.796, -197.427, 0.354809),
    'donalds_dreamland_9200_english': Point3(239.646, -330.58, 0.347881)}

# Bounding corners [max corner, min corner] of each map, used to scale
# world coordinates into map coordinates. Note this table also includes
# the safezone ('toontown_central_sz_english'), unlike the tables above.
CornerPosTable = {'daisys_garden_5200_english': [Point3(781.219727, 476.963623, 0), Point3(-123.380096, -427.636261, 0)],
    'minnies_melody_land_4100_english': [Point3(-98.880981, 349.36908, 0), Point3(-658.680908, -210.430862, 0)],
    'donalds_dock_1300_english': [Point3(713.403076, 361.576324, 0), Point3(-83.596741, -435.423615, 0)],
    'toontown_central_sz_english': [Point3(225.733368, 209.740448, 0), Point3(-207.066559, -223.059525, 0)],
    'donalds_dreamland_9200_english': [Point3(420.817871, 204.719116, 0), Point3(-220.182007, -436.280853, 0)],
    'the_burrrgh_3100_english': [Point3(535.648132, 267.004578, 0), Point3(59.248276, -209.395325, 0)],
    'the_burrrgh_3300_english': [Point3(461.503418, 467.348511, 0), Point3(-27.896454, -22.051422, 0)],
    'toontown_central_2200_english': [Point3(1.816254, 335.417358, 0), Point3(-641.983643, -308.382629, 0)],
    'minnies_melody_land_4200_english': [Point3(283.292847, 658.077393, 0), Point3(-289.50708, 85.277466, 0)],
    'donalds_dock_1200_english': [Point3(34.281967, -47.717041, 0), Point3(-464.517944, -546.516968, 0)],
    'donalds_dock_1100_english': [Point3(596.439575, 146.409653, 0), Point3(28.039612, -421.990356, 0)],
    'minnies_melody_land_4300_english': [Point3(801.294067, 430.992218, 0), Point3(-5.505768, -375.807678, 0)],
    'daisys_garden_5300_english': [Point3(203.715881, 429.168274, 0), Point3(-390.284027, -164.83165, 0)],
    'donalds_dreamland_9100_english': [Point3(279.354584, 111.658554, 0), Point3(-423.445221, -591.141357, 0)],
    'daisys_garden_5100_english': [Point3(760.089722, 526.236206, 0), Point3(-139.510132, -373.3638, 0)],
    'toontown_central_2300_english': [Point3(870.224243, 536.165771, 0), Point3(-89.175751, -423.234344, 0)],
    'toontown_central_2100_english': [Point3(161.735336, -59.985107, 0), Point3(-512.664612, -734.385193, 0)],
    'the_burrrgh_3200_english': [Point3(429.647949, 561.224304, 0), Point3(-31.951935, 99.624283, 0)]}
| mit |
photoninger/ansible | lib/ansible/modules/network/f5/bigip_gtm_datacenter.py | 27 | 13431 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_gtm_datacenter
short_description: Manage Datacenter configuration in BIG-IP
description:
- Manage BIG-IP data center configuration. A data center defines the location
where the physical network components reside, such as the server and link
objects that share the same subnet on the network. This module is able to
manipulate the data center definitions in a BIG-IP.
version_added: "2.2"
options:
contact:
description:
- The name of the contact for the data center.
description:
description:
- The description of the data center.
location:
description:
- The location of the data center.
name:
description:
- The name of the data center.
required: True
state:
description:
- The virtual address state. If C(absent), an attempt to delete the
virtual address will be made. This will only succeed if this
virtual address is not in use by a virtual server. C(present) creates
the virtual address and enables it. If C(enabled), enable the virtual
address if it exists. If C(disabled), create the virtual address if
needed, and set state to C(disabled).
default: present
choices:
- present
- absent
- enabled
- disabled
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create data center "New York"
bigip_gtm_datacenter:
server: lb.mydomain.com
user: admin
password: secret
name: New York
location: 222 West 23rd
delegate_to: localhost
'''
RETURN = r'''
contact:
description: The contact that was set on the datacenter.
returned: changed
type: string
sample: admin@root.local
description:
description: The description that was set for the datacenter.
returned: changed
type: string
sample: Datacenter in NYC
enabled:
description: Whether the datacenter is enabled or not
returned: changed
type: bool
sample: true
disabled:
description: Whether the datacenter is disabled or not.
returned: changed
type: bool
sample: true
state:
description: State of the datacenter.
returned: changed
type: string
sample: disabled
location:
description: The location that is set for the datacenter.
returned: changed
type: string
sample: 222 West 23rd
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
HAS_DEVEL_IMPORTS = False
try:
# Sideband repository used for dev
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fqdn_name
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
HAS_DEVEL_IMPORTS = True
except ImportError:
# Upstream Ansible
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fqdn_name
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
    """Shared parameter definitions for the GTM datacenter module.

    Lists which attributes are diffed (``updatables``), reported back to
    the user (``returnables``), and sent to the REST API
    (``api_attributes``).
    """

    api_map = {}

    updatables = [
        'location', 'description', 'contact', 'state'
    ]

    returnables = [
        'location', 'description', 'contact', 'state', 'enabled', 'disabled'
    ]

    api_attributes = [
        'enabled', 'location', 'description', 'contact', 'disabled'
    ]

    def to_return(self):
        """Collect the returnable attributes as a filtered dict."""
        collected = {name: getattr(self, name) for name in self.returnables}
        return self._filter_params(collected)

    def api_params(self):
        """Collect the API attributes, translating module attribute names
        through ``api_map`` where a mapping exists."""
        collected = {}
        for api_attribute in self.api_attributes:
            source = self.api_map.get(api_attribute, api_attribute)
            collected[api_attribute] = getattr(self, source)
        return self._filter_params(collected)
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API."""

    @property
    def disabled(self):
        # Only an explicit True from the API counts; anything else is None.
        return True if self._values['disabled'] is True else None

    @property
    def enabled(self):
        return True if self._values['enabled'] is True else None
class ModuleParameters(Parameters):
    """Parameters as supplied by the user of the Ansible module."""

    @property
    def disabled(self):
        return True if self._values['state'] == 'disabled' else None

    @property
    def enabled(self):
        return True if self._values['state'] in ('enabled', 'present') else None

    @property
    def state(self):
        # 'present' is reported as-is; otherwise normalize to the
        # enabled/disabled form implied by the requested state.
        declared = self._values['state']
        if declared == 'present':
            return declared
        if self.enabled:
            return 'enabled'
        if self.disabled:
            return 'disabled'
        return declared
class Changes(Parameters):
    """Base class for change sets; ``to_return`` is best-effort."""

    def to_return(self):
        collected = {}
        try:
            for name in self.returnables:
                collected[name] = getattr(self, name)
            collected = self._filter_params(collected)
        except Exception:
            # Best effort: fall through with whatever was gathered so far.
            pass
        return collected
class UsableChanges(Changes):
    """Changes shaped for transmission to the device API; no extra
    transformation is needed beyond the ``Changes`` base behavior."""
    pass
class ReportableChanges(Changes):
    """Changes shaped for reporting back to the user: the state value is
    expanded into explicit enabled/disabled booleans."""

    @property
    def disabled(self):
        state = self._values['state']
        if state == 'disabled':
            return True
        if state in ('enabled', 'present'):
            return False
        return None

    @property
    def enabled(self):
        state = self._values['state']
        if state in ('enabled', 'present'):
            return True
        if state == 'disabled':
            return False
        return None
class Difference(object):
    """Computes which attributes differ between the desired configuration
    (``want``) and the device's current configuration (``have``)."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        # Prefer a dedicated diff property named after the parameter;
        # otherwise fall back to a plain inequality check.
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        wanted = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
        except AttributeError:
            # Device has no such attribute: the wanted value is the change.
            return wanted
        if wanted != current:
            return wanted

    @property
    def state(self):
        # State changes also carry the matching enabled/disabled flag so
        # the API payload stays consistent.
        if self.want.enabled != self.have.enabled:
            return dict(state=self.want.state, enabled=self.want.enabled)
        if self.want.disabled != self.have.disabled:
            return dict(state=self.want.state, disabled=self.want.disabled)
class ModuleManager(object):
    """Implements the module's state machine: compares the desired
    datacenter configuration (``want``) against the device's current
    configuration (``have``) and issues the necessary REST calls."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.pop('module', None)   # AnsibleModule instance
        self.client = kwargs.pop('client', None)   # F5 REST client
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def exec_module(self):
        """Entry point: dispatch on the requested state, then assemble the
        result dict (reportable changes + 'changed' flag)."""
        changed = False
        result = dict()
        state = self.want.state

        try:
            if state in ['present', 'enabled', 'disabled']:
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            # Surface REST-level failures as module failures.
            raise F5ModuleError(str(e))

        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Forward deprecation warnings collected during parameter parsing.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def _update_changed_options(self):
        """Diff ``want`` against ``have``; stash any differences in
        ``self.changes``. Returns True when something differs."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # Custom diff properties (e.g. 'state') return a dict of
                # several related keys; plain diffs return a scalar.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def present(self):
        # Create the datacenter if missing, otherwise reconcile it.
        if self.exists():
            return self.update()
        else:
            return self.create()

    def absent(self):
        changed = False
        if self.exists():
            changed = self.remove()
        return changed

    def read_current_from_device(self):
        """Load the datacenter's current attributes from the device."""
        resource = self.client.api.tm.gtm.datacenters.datacenter.load(
            name=self.want.name,
            partition=self.want.partition
        )
        result = resource.attrs
        return ApiParameters(params=result)

    def exists(self):
        result = self.client.api.tm.gtm.datacenters.datacenter.exists(
            name=self.want.name,
            partition=self.want.partition
        )
        return result

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            # Check mode: report "would change" without touching the device.
            return True
        self.update_on_device()
        return True

    def update_on_device(self):
        params = self.want.api_params()
        resource = self.client.api.tm.gtm.datacenters.datacenter.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.modify(**params)

    def create(self):
        self.have = ApiParameters()
        # Return value ignored on purpose: this call populates self.changes
        # so the created attributes are reported back to the user.
        self.should_update()
        if self.module.check_mode:
            return True
        self.create_on_device()
        # Verify the creation actually took effect on the device.
        if self.exists():
            return True
        else:
            raise F5ModuleError("Failed to create the datacenter")

    def create_on_device(self):
        params = self.want.api_params()
        self.client.api.tm.gtm.datacenters.datacenter.create(
            name=self.want.name,
            partition=self.want.partition,
            **params
        )

    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the deletion actually took effect on the device.
        if self.exists():
            raise F5ModuleError("Failed to delete the datacenter")
        return True

    def remove_from_device(self):
        resource = self.client.api.tm.gtm.datacenters.datacenter.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.delete()
class ArgumentSpec(object):
    """Builds the AnsibleModule argument specification for this module,
    layering the module-specific options over the common F5 options."""

    def __init__(self):
        self.supports_check_mode = True
        module_args = dict(
            contact=dict(),
            description=dict(),
            location=dict(),
            name=dict(required=True),
            state=dict(
                default='present',
                choices=['present', 'absent', 'disabled', 'enabled']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(module_args)
def main():
    """Module entry point: parse arguments, run the manager, and report."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    if not HAS_F5SDK:
        module.fail_json(msg="The python f5-sdk module is required")

    # BUG FIX: 'client' was only bound inside the try block. If
    # F5Client(**module.params) itself raised F5ModuleError, the except
    # branch's cleanup_tokens(client) hit an unbound name (NameError),
    # masking the real error.
    client = None
    try:
        client = F5Client(**module.params)
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        module.exit_json(**results)
    except F5ModuleError as ex:
        if client is not None:
            cleanup_tokens(client)
        module.fail_json(msg=str(ex))
eayunstack/horizon | openstack_dashboard/test/api_tests/lbaas_tests.py | 25 | 16430 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from neutronclient.v2_0 import client
neutronclient = client.Client
class LbaasApiTests(test.APITestCase):
@test.create_stubs({neutronclient: ('create_vip',)})
def test_vip_create(self):
vip1 = self.api_vips.first()
form_data = {'address': vip1['address'],
'name': vip1['name'],
'description': vip1['description'],
'subnet_id': vip1['subnet_id'],
'protocol_port': vip1['protocol_port'],
'protocol': vip1['protocol'],
'pool_id': vip1['pool_id'],
'session_persistence': vip1['session_persistence'],
'connection_limit': vip1['connection_limit'],
'admin_state_up': vip1['admin_state_up']
}
vip = {'vip': self.api_vips.first()}
neutronclient.create_vip({'vip': form_data}).AndReturn(vip)
self.mox.ReplayAll()
ret_val = api.lbaas.vip_create(self.request, **form_data)
self.assertIsInstance(ret_val, api.lbaas.Vip)
@test.create_stubs({neutronclient: ('create_vip',)})
def test_vip_create_skip_address_if_empty(self):
vip1 = self.api_vips.first()
vipform_data = {'name': vip1['name'],
'description': vip1['description'],
'subnet_id': vip1['subnet_id'],
'protocol_port': vip1['protocol_port'],
'protocol': vip1['protocol'],
'pool_id': vip1['pool_id'],
'session_persistence': vip1['session_persistence'],
'connection_limit': vip1['connection_limit'],
'admin_state_up': vip1['admin_state_up']
}
neutronclient.create_vip({'vip': vipform_data}).AndReturn(
{'vip': vipform_data})
self.mox.ReplayAll()
form_data = dict(vipform_data)
form_data['address'] = ""
ret_val = api.lbaas.vip_create(self.request, **form_data)
self.assertIsInstance(ret_val, api.lbaas.Vip)
@test.create_stubs({neutronclient: ('list_vips',)})
def test_vip_list(self):
vips = {'vips': [{'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'address': '10.0.0.100',
'name': 'vip1name',
'description': 'vip1description',
'subnet_id': '12381d38-c3eb-4fee-9763-12de3338041e',
'protocol_port': '80',
'protocol': 'HTTP',
'pool_id': '8913dde8-4915-4b90-8d3e-b95eeedb0d49',
'connection_limit': '10',
'admin_state_up': True
}, ]}
neutronclient.list_vips().AndReturn(vips)
self.mox.ReplayAll()
ret_val = api.lbaas.vip_list(self.request)
for v in ret_val:
self.assertIsInstance(v, api.lbaas.Vip)
self.assertTrue(v.id)
@test.create_stubs({neutronclient: ('show_vip', 'show_pool'),
api.neutron: ('subnet_get', 'port_get')})
def test_vip_get(self):
vip = self.api_vips.first()
neutronclient.show_vip(vip['id']).AndReturn({'vip': vip})
api.neutron.subnet_get(self.request, vip['subnet_id']
).AndReturn(self.subnets.first())
api.neutron.port_get(self.request, vip['port_id']
).AndReturn(self.ports.first())
neutronclient.show_pool(vip['pool_id']
).AndReturn({'pool': self.api_pools.first()})
self.mox.ReplayAll()
ret_val = api.lbaas.vip_get(self.request, vip['id'])
self.assertIsInstance(ret_val, api.lbaas.Vip)
self.assertIsInstance(ret_val.subnet, api.neutron.Subnet)
self.assertEqual(vip['subnet_id'], ret_val.subnet.id)
self.assertIsInstance(ret_val.port, api.neutron.Port)
self.assertEqual(vip['port_id'], ret_val.port.id)
self.assertIsInstance(ret_val.pool, api.lbaas.Pool)
self.assertEqual(self.api_pools.first()['id'], ret_val.pool.id)
@test.create_stubs({neutronclient: ('update_vip',)})
def test_vip_update(self):
form_data = {'address': '10.0.0.100',
'name': 'vip1name',
'description': 'vip1description',
'subnet_id': '12381d38-c3eb-4fee-9763-12de3338041e',
'protocol_port': '80',
'protocol': 'HTTP',
'pool_id': '8913dde8-4915-4b90-8d3e-b95eeedb0d49',
'connection_limit': '10',
'admin_state_up': True
}
vip = {'vip': {'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'address': '10.0.0.100',
'name': 'vip1name',
'description': 'vip1description',
'subnet_id': '12381d38-c3eb-4fee-9763-12de3338041e',
'protocol_port': '80',
'protocol': 'HTTP',
'pool_id': '8913dde8-4915-4b90-8d3e-b95eeedb0d49',
'connection_limit': '10',
'admin_state_up': True
}}
neutronclient.update_vip(vip['vip']['id'], form_data).AndReturn(vip)
self.mox.ReplayAll()
ret_val = api.lbaas.vip_update(self.request,
vip['vip']['id'], **form_data)
self.assertIsInstance(ret_val, api.lbaas.Vip)
@test.create_stubs({neutronclient: ('create_pool',)})
def test_pool_create(self):
form_data = {'name': 'pool1name',
'description': 'pool1description',
'subnet_id': '12381d38-c3eb-4fee-9763-12de3338041e',
'protocol': 'HTTP',
'lb_method': 'ROUND_ROBIN',
'admin_state_up': True,
'provider': 'dummy'
}
pool = {'pool': {'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'name': 'pool1name',
'description': 'pool1description',
'subnet_id': '12381d38-c3eb-4fee-9763-12de3338041e',
'protocol': 'HTTP',
'lb_method': 'ROUND_ROBIN',
'admin_state_up': True,
'provider': 'dummy'
}}
neutronclient.create_pool({'pool': form_data}).AndReturn(pool)
self.mox.ReplayAll()
ret_val = api.lbaas.pool_create(self.request, **form_data)
self.assertIsInstance(ret_val, api.lbaas.Pool)
@test.create_stubs({neutronclient: ('list_pools', 'list_vips'),
api.neutron: ('subnet_list',)})
def test_pool_list(self):
pools = {'pools': self.api_pools.list()}
subnets = self.subnets.list()
vips = {'vips': self.api_vips.list()}
neutronclient.list_pools().AndReturn(pools)
api.neutron.subnet_list(self.request).AndReturn(subnets)
neutronclient.list_vips().AndReturn(vips)
self.mox.ReplayAll()
ret_val = api.lbaas.pool_list(self.request)
for v in ret_val:
self.assertIsInstance(v, api.lbaas.Pool)
self.assertTrue(v.id)
@test.create_stubs({neutronclient: ('show_pool', 'show_vip',
'list_members',
'list_health_monitors',),
api.neutron: ('subnet_get',)})
def test_pool_get(self):
pool = self.pools.first()
subnet = self.subnets.first()
pool_dict = {'pool': self.api_pools.first()}
vip_dict = {'vip': self.api_vips.first()}
neutronclient.show_pool(pool.id).AndReturn(pool_dict)
api.neutron.subnet_get(self.request, subnet.id).AndReturn(subnet)
neutronclient.show_vip(pool.vip_id).AndReturn(vip_dict)
neutronclient.list_members(pool_id=pool.id).AndReturn(
{'members': self.api_members.list()})
neutronclient.list_health_monitors(id=pool.health_monitors).AndReturn(
{'health_monitors': [self.api_monitors.first()]})
self.mox.ReplayAll()
ret_val = api.lbaas.pool_get(self.request, pool.id)
self.assertIsInstance(ret_val, api.lbaas.Pool)
self.assertIsInstance(ret_val.vip, api.lbaas.Vip)
self.assertEqual(ret_val.vip.id, vip_dict['vip']['id'])
self.assertIsInstance(ret_val.subnet, api.neutron.Subnet)
self.assertEqual(ret_val.subnet.id, subnet.id)
self.assertEqual(2, len(ret_val.members))
self.assertIsInstance(ret_val.members[0], api.lbaas.Member)
self.assertEqual(1, len(ret_val.health_monitors))
self.assertIsInstance(ret_val.health_monitors[0],
api.lbaas.PoolMonitor)
@test.create_stubs({neutronclient: ('update_pool',)})
def test_pool_update(self):
form_data = {'name': 'pool1name',
'description': 'pool1description',
'subnet_id': '12381d38-c3eb-4fee-9763-12de3338041e',
'protocol': 'HTTPS',
'lb_method': 'LEAST_CONNECTION',
'admin_state_up': True
}
pool = {'pool': {'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'name': 'pool1name',
'description': 'pool1description',
'subnet_id': '12381d38-c3eb-4fee-9763-12de3338041e',
'protocol': 'HTTPS',
'lb_method': 'LEAST_CONNECTION',
'admin_state_up': True
}}
neutronclient.update_pool(pool['pool']['id'],
form_data).AndReturn(pool)
self.mox.ReplayAll()
ret_val = api.lbaas.pool_update(self.request,
pool['pool']['id'], **form_data)
self.assertIsInstance(ret_val, api.lbaas.Pool)
@test.create_stubs({neutronclient: ('create_health_monitor',)})
def test_pool_health_monitor_create(self):
form_data = {'type': 'PING',
'delay': '10',
'timeout': '10',
'max_retries': '10',
'admin_state_up': True
}
monitor = {'health_monitor': {
'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'type': 'PING',
'delay': '10',
'timeout': '10',
'max_retries': '10',
'admin_state_up': True}}
neutronclient.create_health_monitor({
'health_monitor': form_data}).AndReturn(monitor)
self.mox.ReplayAll()
ret_val = api.lbaas.pool_health_monitor_create(
self.request, **form_data)
self.assertIsInstance(ret_val, api.lbaas.PoolMonitor)
@test.create_stubs({neutronclient: ('list_health_monitors',)})
def test_pool_health_monitor_list(self):
monitors = {'health_monitors': [
{'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'type': 'PING',
'delay': '10',
'timeout': '10',
'max_retries': '10',
'http_method': 'GET',
'url_path': '/monitor',
'expected_codes': '200',
'admin_state_up': True}, ]}
neutronclient.list_health_monitors().AndReturn(monitors)
self.mox.ReplayAll()
ret_val = api.lbaas.pool_health_monitor_list(self.request)
for v in ret_val:
self.assertIsInstance(v, api.lbaas.PoolMonitor)
self.assertTrue(v.id)
@test.create_stubs({neutronclient: ('show_health_monitor',
'list_pools')})
def test_pool_health_monitor_get(self):
monitor = self.api_monitors.first()
neutronclient.show_health_monitor(
monitor['id']).AndReturn({'health_monitor': monitor})
neutronclient.list_pools(id=[p['pool_id'] for p in monitor['pools']]
).AndReturn({'pools': self.api_pools.list()})
self.mox.ReplayAll()
ret_val = api.lbaas.pool_health_monitor_get(
self.request, monitor['id'])
self.assertIsInstance(ret_val, api.lbaas.PoolMonitor)
self.assertEqual(2, len(ret_val.pools))
self.assertIsInstance(ret_val.pools[0], api.lbaas.Pool)
@test.create_stubs({neutronclient: ('create_member', )})
def test_member_create(self):
form_data = {'pool_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'address': '10.0.1.2',
'protocol_port': '80',
'weight': '10',
'admin_state_up': True
}
member = {'member':
{'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'pool_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'address': '10.0.1.2',
'protocol_port': '80',
'weight': '10',
'admin_state_up': True}}
neutronclient.create_member({'member': form_data}).AndReturn(member)
self.mox.ReplayAll()
ret_val = api.lbaas.member_create(self.request, **form_data)
self.assertIsInstance(ret_val, api.lbaas.Member)
    @test.create_stubs({neutronclient: ('list_members', 'list_pools')})
    def test_member_list(self):
        """member_list fetches members and pools (pools are presumably used
        to annotate each member -- confirm against the api wrapper)."""
        members = {'members': self.api_members.list()}
        pools = {'pools': self.api_pools.list()}
        neutronclient.list_members().AndReturn(members)
        neutronclient.list_pools().AndReturn(pools)
        self.mox.ReplayAll()
        ret_val = api.lbaas.member_list(self.request)
        for v in ret_val:
            self.assertIsInstance(v, api.lbaas.Member)
            self.assertTrue(v.id)
    @test.create_stubs({neutronclient: ('show_member', 'show_pool')})
    def test_member_get(self):
        """member_get retrieves a single member and its owning pool."""
        member = self.members.first()
        member_dict = {'member': self.api_members.first()}
        pool_dict = {'pool': self.api_pools.first()}
        neutronclient.show_member(member.id).AndReturn(member_dict)
        neutronclient.show_pool(member.pool_id).AndReturn(pool_dict)
        self.mox.ReplayAll()
        ret_val = api.lbaas.member_get(self.request, member.id)
        self.assertIsInstance(ret_val, api.lbaas.Member)
    @test.create_stubs({neutronclient: ('update_member',)})
    def test_member_update(self):
        """member_update forwards the form data to neutron's update_member."""
        form_data = {'pool_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
                     'address': '10.0.1.4',
                     'protocol_port': '80',
                     'weight': '10',
                     'admin_state_up': True
                     }
        # NOTE: the mocked response deliberately keeps the old address
        # ('10.0.1.2'); only the type of the wrapped return value is asserted.
        member = {'member': {'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
                             'pool_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
                             'address': '10.0.1.2',
                             'protocol_port': '80',
                             'weight': '10',
                             'admin_state_up': True
                             }}
        neutronclient.update_member(member['member']['id'],
                                    form_data).AndReturn(member)
        self.mox.ReplayAll()
        ret_val = api.lbaas.member_update(self.request,
                                          member['member']['id'], **form_data)
        self.assertIsInstance(ret_val, api.lbaas.Member)
| apache-2.0 |
bhargav2408/python-for-android | python-modules/twisted/twisted/names/test/test_names.py | 49 | 31329 | # -*- test-case-name: twisted.names.test.test_names -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.names.
"""
import socket, operator, copy
from twisted.trial import unittest
from twisted.internet import reactor, defer, error
from twisted.internet.defer import succeed
from twisted.names import client, server, common, authority, hosts, dns
from twisted.python import failure
from twisted.names.error import DNSFormatError, DNSServerError, DNSNameError
from twisted.names.error import DNSNotImplementedError, DNSQueryRefusedError
from twisted.names.error import DNSUnknownError
from twisted.names.dns import EFORMAT, ESERVER, ENAME, ENOTIMP, EREFUSED
from twisted.names.dns import Message
from twisted.names.client import Resolver
from twisted.names.test.test_client import StubPort
from twisted.python.compat import reduce
def justPayload(results):
    """
    Extract the payload of every record header in the answers section of
    a lookup result.

    @param results: a C{(answers, authority, additional)} tuple as fired by
        resolver lookup Deferreds; only the answers section is consumed.
    @return: a list of record payloads, in answer order.
    """
    answers = results[0]
    payloads = []
    for header in answers:
        payloads.append(header.payload)
    return payloads
class NoFileAuthority(authority.FileAuthority):
    """
    A L{FileAuthority} whose zone data is supplied in memory instead of
    being parsed from a zone file.
    """
    def __init__(self, soa, records):
        # Yes, skip FileAuthority -- its __init__ would try to load a file.
        # Initialize the resolver base directly and set the zone data by hand.
        common.ResolverBase.__init__(self)
        self.soa, self.records = soa, records
# SOA for the forward zone 'test-domain.com'; its 'expire' value (19283784)
# is the default TTL the server applies to records without an explicit TTL.
soa_record = dns.Record_SOA(
    mname = 'test-domain.com',
    rname = 'root.test-domain.com',
    serial = 100,
    refresh = 1234,
    minimum = 7654,
    expire = 19283784,
    retry = 15,
    ttl=1
)
# SOA for the reverse (in-addr.arpa) zone used by the PTR tests.
reverse_soa = dns.Record_SOA(
    mname = '93.84.28.in-addr.arpa',
    rname = '93.84.28.in-addr.arpa',
    serial = 120,
    refresh = 54321,
    minimum = 382,
    expire = 11193983,
    retry = 30,
    ttl=3
)
# SOA for 'my-domain.com'; no explicit ttl, so lookups fall back to 'expire'.
my_soa = dns.Record_SOA(
    mname = 'my-domain.com',
    rname = 'postmaster.test-domain.com',
    serial = 130,
    refresh = 12345,
    minimum = 1,
    expire = 999999,
    retry = 100,
    )
test_domain_com = NoFileAuthority(
soa = ('test-domain.com', soa_record),
records = {
'test-domain.com': [
soa_record,
dns.Record_A('127.0.0.1'),
dns.Record_NS('39.28.189.39'),
dns.Record_SPF('v=spf1 mx/30 mx:example.org/30 -all'),
dns.Record_SPF('v=spf1 +mx a:\0colo', '.example.com/28 -all not valid'),
dns.Record_MX(10, 'host.test-domain.com'),
dns.Record_HINFO(os='Linux', cpu='A Fast One, Dontcha know'),
dns.Record_CNAME('canonical.name.com'),
dns.Record_MB('mailbox.test-domain.com'),
dns.Record_MG('mail.group.someplace'),
dns.Record_TXT('A First piece of Text', 'a SecoNd piece'),
dns.Record_A6(0, 'ABCD::4321', ''),
dns.Record_A6(12, '0:0069::0', 'some.network.tld'),
dns.Record_A6(8, '0:5634:1294:AFCB:56AC:48EF:34C3:01FF', 'tra.la.la.net'),
dns.Record_TXT('Some more text, haha! Yes. \0 Still here?'),
dns.Record_MR('mail.redirect.or.whatever'),
dns.Record_MINFO(rmailbx='r mail box', emailbx='e mail box'),
dns.Record_AFSDB(subtype=1, hostname='afsdb.test-domain.com'),
dns.Record_RP(mbox='whatever.i.dunno', txt='some.more.text'),
dns.Record_WKS('12.54.78.12', socket.IPPROTO_TCP,
'\x12\x01\x16\xfe\xc1\x00\x01'),
dns.Record_NAPTR(100, 10, "u", "sip+E2U",
"!^.*$!sip:information@domain.tld!"),
dns.Record_AAAA('AF43:5634:1294:AFCB:56AC:48EF:34C3:01FF')],
'http.tcp.test-domain.com': [
dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool')
],
'host.test-domain.com': [
dns.Record_A('123.242.1.5'),
dns.Record_A('0.255.0.255'),
],
'host-two.test-domain.com': [
#
# Python bug
# dns.Record_A('255.255.255.255'),
#
dns.Record_A('255.255.255.254'),
dns.Record_A('0.0.0.0')
],
'cname.test-domain.com': [
dns.Record_CNAME('test-domain.com')
],
'anothertest-domain.com': [
dns.Record_A('1.2.3.4')],
}
)
reverse_domain = NoFileAuthority(
soa = ('93.84.28.in-addr.arpa', reverse_soa),
records = {
'123.93.84.28.in-addr.arpa': [
dns.Record_PTR('test.host-reverse.lookup.com'),
reverse_soa
]
}
)
my_domain_com = NoFileAuthority(
soa = ('my-domain.com', my_soa),
records = {
'my-domain.com': [
my_soa,
dns.Record_A('1.2.3.4', ttl='1S'),
dns.Record_NS('ns1.domain', ttl='2M'),
dns.Record_NS('ns2.domain', ttl='3H'),
dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl='4D')
]
}
)
class ServerDNSTestCase(unittest.TestCase):
    """
    Test cases for DNS server and client.

    A real DNS server (TCP and UDP on the same ephemeral port, loopback
    only) is started in L{setUp}; each test performs a lookup against it
    with a real L{client.Resolver}.
    """
    def setUp(self):
        """
        Start a DNS server serving the three in-memory authorities and
        create a resolver pointed at it.
        """
        self.factory = server.DNSServerFactory([
            test_domain_com, reverse_domain, my_domain_com
        ], verbose=2)
        p = dns.DNSDatagramProtocol(self.factory)
        # Loop until we find a port number that is free for both TCP and
        # UDP: listenTCP(0) picks an ephemeral port, then we try to bind
        # UDP to the same number and retry from scratch if that fails.
        while 1:
            listenerTCP = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
            # It's simpler to do the stop listening with addCleanup,
            # even though we might not end up using this TCP port in
            # the test (if the listenUDP below fails).  Cleaning up
            # this TCP port sooner than "cleanup time" would mean
            # adding more code to keep track of the Deferred returned
            # by stopListening.
            self.addCleanup(listenerTCP.stopListening)
            port = listenerTCP.getHost().port
            try:
                listenerUDP = reactor.listenUDP(port, p, interface="127.0.0.1")
            except error.CannotListenError:
                pass
            else:
                self.addCleanup(listenerUDP.stopListening)
                break
        self.listenerTCP = listenerTCP
        self.listenerUDP = listenerUDP
        self.resolver = client.Resolver(servers=[('127.0.0.1', port)])
    def tearDown(self):
        """
        Clean up any server connections associated with the
        L{DNSServerFactory} created in L{setUp}
        """
        # It'd be great if DNSServerFactory had a method that
        # encapsulated this task.  At least the necessary data is
        # available, though.
        for conn in self.factory.connections[:]:
            conn.transport.loseConnection()
    def namesTest(self, d, r):
        """
        Assert that the lookup Deferred C{d} fires with a result whose
        answer payloads are exactly the records in C{r} (order ignored).
        """
        self.response = None
        def setDone(response):
            self.response = response
        def checkResults(ignored):
            if isinstance(self.response, failure.Failure):
                raise self.response
            results = justPayload(self.response)
            assert len(results) == len(r), "%s != %s" % (map(str, results), map(str, r))
            for rec in results:
                assert rec in r, "%s not in %s" % (rec, map(str, r))
        d.addBoth(setDone)
        d.addCallback(checkResults)
        return d
    def testAddressRecord1(self):
        """Test simple DNS 'A' record queries"""
        return self.namesTest(
            self.resolver.lookupAddress('test-domain.com'),
            [dns.Record_A('127.0.0.1', ttl=19283784)]
        )
    def testAddressRecord2(self):
        """Test DNS 'A' record queries with multiple answers"""
        return self.namesTest(
            self.resolver.lookupAddress('host.test-domain.com'),
            [dns.Record_A('123.242.1.5', ttl=19283784), dns.Record_A('0.255.0.255', ttl=19283784)]
        )
    def testAddressRecord3(self):
        """Test DNS 'A' record queries with edge cases"""
        return self.namesTest(
            self.resolver.lookupAddress('host-two.test-domain.com'),
            [dns.Record_A('255.255.255.254', ttl=19283784), dns.Record_A('0.0.0.0', ttl=19283784)]
        )
    def testAuthority(self):
        """Test DNS 'SOA' record queries"""
        return self.namesTest(
            self.resolver.lookupAuthority('test-domain.com'),
            [soa_record]
        )
    def testMailExchangeRecord(self):
        """Test DNS 'MX' record queries"""
        return self.namesTest(
            self.resolver.lookupMailExchange('test-domain.com'),
            [dns.Record_MX(10, 'host.test-domain.com', ttl=19283784)]
        )
    def testNameserver(self):
        """Test DNS 'NS' record queries"""
        return self.namesTest(
            self.resolver.lookupNameservers('test-domain.com'),
            [dns.Record_NS('39.28.189.39', ttl=19283784)]
        )
    def testHINFO(self):
        """Test DNS 'HINFO' record queries"""
        return self.namesTest(
            self.resolver.lookupHostInfo('test-domain.com'),
            [dns.Record_HINFO(os='Linux', cpu='A Fast One, Dontcha know', ttl=19283784)]
        )
    def testPTR(self):
        """Test DNS 'PTR' record queries"""
        return self.namesTest(
            self.resolver.lookupPointer('123.93.84.28.in-addr.arpa'),
            [dns.Record_PTR('test.host-reverse.lookup.com', ttl=11193983)]
        )
    def testCNAME(self):
        """Test DNS 'CNAME' record queries"""
        return self.namesTest(
            self.resolver.lookupCanonicalName('test-domain.com'),
            [dns.Record_CNAME('canonical.name.com', ttl=19283784)]
        )
    def testCNAMEAdditional(self):
        """Test additional processing for CNAME records"""
        return self.namesTest(
            self.resolver.lookupAddress('cname.test-domain.com'),
            [dns.Record_CNAME('test-domain.com', ttl=19283784), dns.Record_A('127.0.0.1', ttl=19283784)]
        )
    def testMB(self):
        """Test DNS 'MB' record queries"""
        return self.namesTest(
            self.resolver.lookupMailBox('test-domain.com'),
            [dns.Record_MB('mailbox.test-domain.com', ttl=19283784)]
        )
    def testMG(self):
        """Test DNS 'MG' record queries"""
        return self.namesTest(
            self.resolver.lookupMailGroup('test-domain.com'),
            [dns.Record_MG('mail.group.someplace', ttl=19283784)]
        )
    def testMR(self):
        """Test DNS 'MR' record queries"""
        return self.namesTest(
            self.resolver.lookupMailRename('test-domain.com'),
            [dns.Record_MR('mail.redirect.or.whatever', ttl=19283784)]
        )
    def testMINFO(self):
        """Test DNS 'MINFO' record queries"""
        return self.namesTest(
            self.resolver.lookupMailboxInfo('test-domain.com'),
            [dns.Record_MINFO(rmailbx='r mail box', emailbx='e mail box', ttl=19283784)]
        )
    def testSRV(self):
        """Test DNS 'SRV' record queries"""
        return self.namesTest(
            self.resolver.lookupService('http.tcp.test-domain.com'),
            [dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl=19283784)]
        )
    def testAFSDB(self):
        """Test DNS 'AFSDB' record queries"""
        return self.namesTest(
            self.resolver.lookupAFSDatabase('test-domain.com'),
            [dns.Record_AFSDB(subtype=1, hostname='afsdb.test-domain.com', ttl=19283784)]
        )
    def testRP(self):
        """Test DNS 'RP' record queries"""
        return self.namesTest(
            self.resolver.lookupResponsibility('test-domain.com'),
            [dns.Record_RP(mbox='whatever.i.dunno', txt='some.more.text', ttl=19283784)]
        )
    def testTXT(self):
        """Test DNS 'TXT' record queries"""
        return self.namesTest(
            self.resolver.lookupText('test-domain.com'),
            [dns.Record_TXT('A First piece of Text', 'a SecoNd piece', ttl=19283784),
             dns.Record_TXT('Some more text, haha!  Yes.  \0  Still here?', ttl=19283784)]
        )
    def test_spf(self):
        """
        L{DNSServerFactory} can serve I{SPF} resource records.
        """
        return self.namesTest(
            self.resolver.lookupSenderPolicy('test-domain.com'),
            [dns.Record_SPF('v=spf1 mx/30 mx:example.org/30 -all', ttl=19283784),
            dns.Record_SPF('v=spf1 +mx a:\0colo', '.example.com/28 -all not valid', ttl=19283784)]
        )
    def testWKS(self):
        """Test DNS 'WKS' record queries"""
        return self.namesTest(
            self.resolver.lookupWellKnownServices('test-domain.com'),
            [dns.Record_WKS('12.54.78.12', socket.IPPROTO_TCP, '\x12\x01\x16\xfe\xc1\x00\x01', ttl=19283784)]
        )
    def testSomeRecordsWithTTLs(self):
        # The SOA comes back with its ttl set to its own 'expire' value,
        # while the other records keep their explicit string TTLs.
        result_soa = copy.copy(my_soa)
        result_soa.ttl = my_soa.expire
        return self.namesTest(
            self.resolver.lookupAllRecords('my-domain.com'),
            [result_soa,
             dns.Record_A('1.2.3.4', ttl='1S'),
             dns.Record_NS('ns1.domain', ttl='2M'),
             dns.Record_NS('ns2.domain', ttl='3H'),
             dns.Record_SRV(257, 16383, 43690, 'some.other.place.fool', ttl='4D')]
            )
    def testAAAA(self):
        """Test DNS 'AAAA' record queries (IPv6)"""
        return self.namesTest(
            self.resolver.lookupIPV6Address('test-domain.com'),
            [dns.Record_AAAA('AF43:5634:1294:AFCB:56AC:48EF:34C3:01FF', ttl=19283784)]
        )
    def testA6(self):
        """Test DNS 'A6' record queries (IPv6)"""
        return self.namesTest(
            self.resolver.lookupAddress6('test-domain.com'),
            [dns.Record_A6(0, 'ABCD::4321', '', ttl=19283784),
             dns.Record_A6(12, '0:0069::0', 'some.network.tld', ttl=19283784),
             dns.Record_A6(8, '0:5634:1294:AFCB:56AC:48EF:34C3:01FF', 'tra.la.la.net', ttl=19283784)]
        )
    def test_zoneTransfer(self):
        """
        Test DNS 'AXFR' queries (Zone transfer)
        """
        default_ttl = soa_record.expire
        results = [copy.copy(r) for r in reduce(operator.add, test_domain_com.records.values())]
        for r in results:
            if r.ttl is None:
                r.ttl = default_ttl
        return self.namesTest(
            # Drop the trailing SOA that terminates the transfer before
            # comparing against the flattened zone contents.
            self.resolver.lookupZone('test-domain.com').addCallback(lambda r: (r[0][:-1],)),
            results
        )
    def testSimilarZonesDontInterfere(self):
        """Tests that unrelated zones don't mess with each other."""
        return self.namesTest(
            self.resolver.lookupAddress("anothertest-domain.com"),
            [dns.Record_A('1.2.3.4', ttl=19283784)]
        )
    def test_NAPTR(self):
        """
        Test DNS 'NAPTR' record queries.
        """
        return self.namesTest(
            self.resolver.lookupNamingAuthorityPointer('test-domain.com'),
            [dns.Record_NAPTR(100, 10, "u", "sip+E2U",
                              "!^.*$!sip:information@domain.tld!",
                              ttl=19283784)])
class DNSServerFactoryTests(unittest.TestCase):
    """
    Tests for L{server.DNSServerFactory}.
    """
    def _messageReceivedTest(self, methodName, message):
        """
        Assert that the named method is called with the given message when
        it is passed to L{DNSServerFactory.messageReceived}.
        """
        # Make it appear to have some queries so that
        # DNSServerFactory.allowQuery allows it.
        message.queries = [None]
        receivedMessages = []
        def fakeHandler(message, protocol, address):
            receivedMessages.append((message, protocol, address))
        class FakeProtocol(object):
            def writeMessage(self, message):
                pass
        protocol = FakeProtocol()
        factory = server.DNSServerFactory(None)
        # Replace the real dispatch target with our recorder.
        setattr(factory, methodName, fakeHandler)
        factory.messageReceived(message, protocol)
        self.assertEqual(receivedMessages, [(message, protocol, None)])
    def test_notifyMessageReceived(self):
        """
        L{DNSServerFactory.messageReceived} passes messages with an opcode
        of C{OP_NOTIFY} on to L{DNSServerFactory.handleNotify}.
        """
        # RFC 1996, section 4.5
        opCode = 4
        self._messageReceivedTest('handleNotify', Message(opCode=opCode))
    def test_updateMessageReceived(self):
        """
        L{DNSServerFactory.messageReceived} passes messages with an opcode
        of C{OP_UPDATE} on to L{DNSServerFactory.handleOther}.
        This may change if the implementation ever covers update messages.
        """
        # RFC 2136, section 1.3
        opCode = 5
        self._messageReceivedTest('handleOther', Message(opCode=opCode))
    def test_connectionTracking(self):
        """
        The C{connectionMade} and C{connectionLost} methods of
        L{DNSServerFactory} cooperate to keep track of all
        L{DNSProtocol} objects created by a factory which are
        connected.
        """
        protoA, protoB = object(), object()
        factory = server.DNSServerFactory()
        factory.connectionMade(protoA)
        self.assertEqual(factory.connections, [protoA])
        factory.connectionMade(protoB)
        self.assertEqual(factory.connections, [protoA, protoB])
        factory.connectionLost(protoA)
        self.assertEqual(factory.connections, [protoB])
        factory.connectionLost(protoB)
        self.assertEqual(factory.connections, [])
class HelperTestCase(unittest.TestCase):
    """
    Tests for helpers in L{twisted.names.authority}.
    """
    def testSerialGenerator(self):
        """
        Successive calls to L{authority.getSerial} backed by the same state
        file yield strictly increasing serial numbers.
        """
        serialFile = self.mktemp()
        previous = authority.getSerial(serialFile)
        for _ in range(20):
            current = authority.getSerial(serialFile)
            self.failUnless(previous < current)
            previous = current
class AXFRTest(unittest.TestCase):
    """
    Tests for L{client.AXFRController}: zone-transfer results are collected
    correctly whether the server sends one big message or one message per
    record.
    """
    def setUp(self):
        self.results = None
        self.d = defer.Deferred()
        self.d.addCallback(self._gotResults)
        self.controller = client.AXFRController('fooby.com', self.d)
        # An AXFR stream is framed by the zone's SOA at both ends.
        self.soa = dns.RRHeader(name='fooby.com', type=dns.SOA, cls=dns.IN, ttl=86400, auth=False,
                                payload=dns.Record_SOA(mname='fooby.com',
                                                       rname='hooj.fooby.com',
                                                       serial=100,
                                                       refresh=200,
                                                       retry=300,
                                                       expire=400,
                                                       minimum=500,
                                                       ttl=600))
        self.records = [
            self.soa,
            dns.RRHeader(name='fooby.com', type=dns.NS, cls=dns.IN, ttl=700, auth=False,
                         payload=dns.Record_NS(name='ns.twistedmatrix.com', ttl=700)),
            dns.RRHeader(name='fooby.com', type=dns.MX, cls=dns.IN, ttl=700, auth=False,
                         payload=dns.Record_MX(preference=10, exchange='mail.mv3d.com', ttl=700)),
            dns.RRHeader(name='fooby.com', type=dns.A, cls=dns.IN, ttl=700, auth=False,
                         payload=dns.Record_A(address='64.123.27.105', ttl=700)),
            self.soa
            ]
    def _makeMessage(self):
        # hooray they all have the same message format
        return dns.Message(id=999, answer=1, opCode=0, recDes=0, recAv=1, auth=1, rCode=0, trunc=0, maxSize=0)
    def testBindAndTNamesStyle(self):
        # Bind style = One big single message
        m = self._makeMessage()
        m.queries = [dns.Query('fooby.com', dns.AXFR, dns.IN)]
        m.answers = self.records
        self.controller.messageReceived(m, None)
        self.assertEquals(self.results, self.records)
    def _gotResults(self, result):
        self.results = result
    def testDJBStyle(self):
        # DJB style = message per record
        records = self.records[:]
        while records:
            m = self._makeMessage()
            m.queries = [] # DJB *doesn't* specify any queries.. hmm..
            m.answers = [records.pop(0)]
            self.controller.messageReceived(m, None)
        self.assertEquals(self.results, self.records)
class HostsTestCase(unittest.TestCase):
    """
    Tests for the hosts(5)-file based resolver L{twisted.names.hosts}.
    """
    def setUp(self):
        # Write a small hosts file in the working directory for the
        # resolver to consume.
        f = open('EtcHosts', 'w')
        f.write('''
1.1.1.1    EXAMPLE EXAMPLE.EXAMPLETHING
1.1.1.2    HOOJY
::1        ip6thingy
''')
        f.close()
        self.resolver = hosts.Resolver('EtcHosts')
    def testGetHostByName(self):
        data = [('EXAMPLE', '1.1.1.1'),
                ('EXAMPLE.EXAMPLETHING', '1.1.1.1'),
                ('HOOJY', '1.1.1.2'),
                ]
        ds = [self.resolver.getHostByName(n).addCallback(self.assertEqual, ip)
              for n, ip in data]
        return defer.gatherResults(ds)
    def testLookupAddress(self):
        d = self.resolver.lookupAddress('HOOJY')
        d.addCallback(lambda x: self.assertEqual(x[0][0].payload.dottedQuad(),
                                                 '1.1.1.2'))
        return d
    def testIPv6(self):
        d = self.resolver.lookupIPV6Address('ip6thingy')
        d.addCallback(self.assertEqual, '::1')
        return d
    # Skipped until the hosts resolver grows IPv6 support.
    testIPv6.skip = 'IPv6 support is not in our hosts resolver yet'
    def testNotImplemented(self):
        return self.assertFailure(self.resolver.lookupMailExchange('EXAMPLE'),
                                  NotImplementedError)
    def testQuery(self):
        d = self.resolver.query(dns.Query('EXAMPLE'))
        d.addCallback(lambda x: self.assertEqual(x[0][0].payload.dottedQuad(),
                                                 '1.1.1.1'))
        return d
    def testNotFound(self):
        return self.assertFailure(self.resolver.lookupAddress('foueoa'),
                                  dns.DomainError)
    def test_searchFileFor(self):
        """
        L{searchFileFor} parses hosts(5) files and returns the address for
        the given name, or C{None} if the name is not found.
        """
        tmp = self.mktemp()
        f = open(tmp, 'w')
        f.write('127.0.1.1	helmut.example.org	helmut\n')
        f.write('# a comment\n')
        f.write('::1 localhost ip6-localhost ip6-loopback\n')
        f.close()
        self.assertEquals(hosts.searchFileFor(tmp, 'helmut'), '127.0.1.1')
        self.assertEquals(hosts.searchFileFor(tmp, 'ip6-localhost'), '::1')
        self.assertIdentical(hosts.searchFileFor(tmp, 'blah'), None)
class FakeDNSDatagramProtocol(object):
    """
    Test double for a DNS datagram protocol: records every query it is
    asked to make and immediately fails each one with a timeout, so retry
    behavior can be observed deterministically.
    """
    def __init__(self):
        # (address, queries, timeout, id) tuples, one per query() call.
        self.queries = []
        self.transport = StubPort()
    def query(self, address, queries, timeout=10, id=None):
        self.queries.append((address, queries, timeout, id))
        return defer.fail(dns.DNSQueryTimeoutError(queries))
    def removeResend(self, id):
        # Ignore this for the time being.
        pass
class RetryLogic(unittest.TestCase):
    """
    Tests for the round-robin/back-off retry behavior of
    L{client.Resolver} when every query times out.
    """
    testServers = [
        '1.2.3.4',
        '4.3.2.1',
        'a.b.c.d',
        'z.y.x.w']
    def testRoundRobinBackoff(self):
        addrs = [(x, 53) for x in self.testServers]
        r = client.Resolver(resolv=None, servers=addrs)
        r.protocol = proto = FakeDNSDatagramProtocol()
        return r.lookupAddress("foo.example.com"
            ).addCallback(self._cbRoundRobinBackoff
            ).addErrback(self._ebRoundRobinBackoff, proto
            )
    def _cbRoundRobinBackoff(self, result):
        raise unittest.FailTest("Lookup address succeeded, should have timed out")
    def _ebRoundRobinBackoff(self, failure, fakeProto):
        failure.trap(defer.TimeoutError)
        # Assert that each server is tried with a particular timeout
        # before the timeout is increased and the attempts are repeated.
        for t in (1, 3, 11, 45):
            tries = fakeProto.queries[:len(self.testServers)]
            del fakeProto.queries[:len(self.testServers)]
            tries.sort()
            expected = list(self.testServers)
            expected.sort()
            for ((addr, query, timeout, id), expectedAddr) in zip(tries, expected):
                self.assertEquals(addr, (expectedAddr, 53))
                self.assertEquals(timeout, t)
        # No queries beyond the four back-off rounds.
        self.failIf(fakeProto.queries)
class ResolvConfHandling(unittest.TestCase):
    """
    A missing or empty resolv.conf makes L{client.Resolver} fall back to
    the local nameserver 127.0.0.1.
    """
    def testMissing(self):
        resolvConf = self.mktemp()
        r = client.Resolver(resolv=resolvConf)
        self.assertEquals(r.dynServers, [('127.0.0.1', 53)])
        # Cancel the scheduled re-parse so the reactor is left clean.
        r._parseCall.cancel()
    def testEmpty(self):
        resolvConf = self.mktemp()
        # NOTE: file() is the Python 2 builtin; open() on Python 3.
        fObj = file(resolvConf, 'w')
        fObj.close()
        r = client.Resolver(resolv=resolvConf)
        self.assertEquals(r.dynServers, [('127.0.0.1', 53)])
        r._parseCall.cancel()
class FilterAnswersTests(unittest.TestCase):
    """
    Test L{twisted.names.client.Resolver.filterAnswers}'s handling of various
    error conditions it might encounter.
    """
    def setUp(self):
        # Create a resolver pointed at an invalid server - we won't be hitting
        # the network in any of these tests.
        self.resolver = Resolver(servers=[('0.0.0.0', 0)])
    def test_truncatedMessage(self):
        """
        Test that a truncated message results in an equivalent request made via
        TCP.
        """
        m = Message(trunc=True)
        m.addQuery('example.com')
        def queryTCP(queries):
            self.assertEqual(queries, m.queries)
            response = Message()
            response.answers = ['answer']
            response.authority = ['authority']
            response.additional = ['additional']
            return succeed(response)
        # Intercept the TCP fallback so no network traffic occurs.
        self.resolver.queryTCP = queryTCP
        d = self.resolver.filterAnswers(m)
        d.addCallback(
            self.assertEqual, (['answer'], ['authority'], ['additional']))
        return d
    def _rcodeTest(self, rcode, exc):
        # filterAnswers returns a Failure (not a Deferred) for error rcodes.
        m = Message(rCode=rcode)
        err = self.resolver.filterAnswers(m)
        err.trap(exc)
    def test_formatError(self):
        """
        Test that a message with a result code of C{EFORMAT} results in a
        failure wrapped around L{DNSFormatError}.
        """
        return self._rcodeTest(EFORMAT, DNSFormatError)
    def test_serverError(self):
        """
        Like L{test_formatError} but for C{ESERVER}/L{DNSServerError}.
        """
        return self._rcodeTest(ESERVER, DNSServerError)
    def test_nameError(self):
        """
        Like L{test_formatError} but for C{ENAME}/L{DNSNameError}.
        """
        return self._rcodeTest(ENAME, DNSNameError)
    def test_notImplementedError(self):
        """
        Like L{test_formatError} but for C{ENOTIMP}/L{DNSNotImplementedError}.
        """
        return self._rcodeTest(ENOTIMP, DNSNotImplementedError)
    def test_refusedError(self):
        """
        Like L{test_formatError} but for C{EREFUSED}/L{DNSQueryRefusedError}.
        """
        return self._rcodeTest(EREFUSED, DNSQueryRefusedError)
    def test_refusedErrorUnknown(self):
        """
        Like L{test_formatError} but for an unrecognized error code and
        L{DNSUnknownError}.
        """
        return self._rcodeTest(EREFUSED + 1, DNSUnknownError)
class AuthorityTests(unittest.TestCase):
    """
    Tests for the basic response record selection code in L{FileAuthority}
    (independent of its fileness).
    """
    def test_recordMissing(self):
        """
        If a L{FileAuthority} has a zone which includes an I{NS} record for a
        particular name and that authority is asked for another record for the
        same name which does not exist, the I{NS} record is not included in the
        authority section of the response.
        """
        authority = NoFileAuthority(
            soa=(str(soa_record.mname), soa_record),
            records={
                str(soa_record.mname): [
                    soa_record,
                    dns.Record_NS('1.2.3.4'),
                    ]})
        d = authority.lookupAddress(str(soa_record.mname))
        result = []
        d.addCallback(result.append)
        # FileAuthority lookups fire synchronously, so result is populated.
        answer, authority, additional = result[0]
        self.assertEquals(answer, [])
        self.assertEquals(
            authority, [
                dns.RRHeader(
                    str(soa_record.mname), soa_record.TYPE,
                    ttl=soa_record.expire, payload=soa_record,
                    auth=True)])
        self.assertEquals(additional, [])
    def _referralTest(self, method):
        """
        Create an authority and make a request against it.  Then verify that the
        result is a referral, including no records in the answers or additional
        sections, but with an I{NS} record in the authority section.
        """
        subdomain = 'example.' + str(soa_record.mname)
        nameserver = dns.Record_NS('1.2.3.4')
        authority = NoFileAuthority(
            soa=(str(soa_record.mname), soa_record),
            records={
                subdomain: [
                    nameserver,
                    ]})
        d = getattr(authority, method)(subdomain)
        result = []
        d.addCallback(result.append)
        answer, authority, additional = result[0]
        self.assertEquals(answer, [])
        self.assertEquals(
            authority, [dns.RRHeader(
                    subdomain, dns.NS, ttl=soa_record.expire,
                    payload=nameserver, auth=False)])
        self.assertEquals(additional, [])
    def test_referral(self):
        """
        When an I{NS} record is found for a child zone, it is included in the
        authority section of the response.  It is marked as non-authoritative if
        the authority is not also authoritative for the child zone (RFC 2181,
        section 6.1).
        """
        self._referralTest('lookupAddress')
    def test_allRecordsReferral(self):
        """
        A referral is also generated for a request of type C{ALL_RECORDS}.
        """
        self._referralTest('lookupAllRecords')
class NoInitialResponseTestCase(unittest.TestCase):
    """
    Failure behavior when a referral cannot be followed.
    """
    def test_no_answer(self):
        """
        If a request returns a L{dns.NS} response, but we can't connect to the
        given server, the request fails with the error returned at connection.
        """
        def query(self, *args):
            # Pop from the message list, so that it blows up if more queries
            # are run than expected.
            return succeed(messages.pop(0))
        def queryProtocol(self, *args, **kwargs):
            return defer.fail(socket.gaierror("Couldn't connect"))
        resolver = Resolver(servers=[('0.0.0.0', 0)])
        resolver._query = query
        messages = []
        # Let's patch dns.DNSDatagramProtocol.query, as there is no easy way to
        # customize it.
        self.patch(dns.DNSDatagramProtocol, "query", queryProtocol)
        # A single NS referral answer; following it triggers queryProtocol,
        # which fails with gaierror.
        records = [
            dns.RRHeader(name='fooba.com', type=dns.NS, cls=dns.IN, ttl=700,
                         auth=False,
                         payload=dns.Record_NS(name='ns.twistedmatrix.com',
                         ttl=700))]
        m = dns.Message(id=999, answer=1, opCode=0, recDes=0, recAv=1, auth=1,
                        rCode=0, trunc=0, maxSize=0)
        m.answers = records
        messages.append(m)
        return self.assertFailure(
            resolver.getHostByName("fooby.com"), socket.gaierror)
| apache-2.0 |
Tigerwhit4/taiga-back | taiga/auth/backends.py | 18 | 2934 | # Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Authentication backends for rest framework.
This module exposes two backends: session and token.
The first (session) is a modified version of standard
session authentication backend of restframework with
csrf token disabled.
And the second (token) implements own version of oauth2
like authentiacation but with selfcontained tokens. Thats
makes authentication totally stateles.
It uses django signing framework for create new
selfcontained tokens. This trust tokes from external
fraudulent modifications.
"""
import re
from django.conf import settings
from taiga.base.api.authentication import BaseAuthentication
from .tokens import get_user_for_token
class Session(BaseAuthentication):
    """
    Session-based authentication backend.

    Behaves like the standard
    `taiga.base.api.authentication.SessionAuthentication` but with the
    CSRF check disabled, because it exists only for the browsable API
    web interface. It is not used for regular API consumption and
    should be disabled in production.
    """
    def authenticate(self, request):
        """Return ``(user, None)`` for an active session user, else ``None``."""
        user = getattr(request._request, 'user', None)
        if user and user.is_active:
            return (user, None)
        return None
class Token(BaseAuthentication):
    """
    Stateless, self-contained token authentication backend that works
    similarly to OAuth2 bearer tokens.

    The token data is signed with the Django signing framework, which
    protects it against external tampering.
    """
    auth_rx = re.compile(r"^Bearer (.+)$")
    def authenticate(self, request):
        """Return ``(user, token)`` for a valid ``Bearer`` header, else ``None``."""
        if "HTTP_AUTHORIZATION" not in request.META:
            return None
        match = self.auth_rx.search(request.META["HTTP_AUTHORIZATION"])
        if match is None:
            return None
        token = match.group(1)
        max_age = getattr(settings, "MAX_AGE_AUTH_TOKEN", None)
        user = get_user_for_token(token, "authentication", max_age=max_age)
        return (user, token)
    def authenticate_header(self, request):
        """Challenge value for the ``WWW-Authenticate`` header on 401 responses."""
        return 'Bearer realm="api"'
| agpl-3.0 |
The-Fonz/adventure-track | backend/adventures/db.py | 1 | 6697 | import sys
import asyncio
import asyncpg
import unittest
import datetime
from os import environ
import jsonschema
from ..schemas import JSON_SCHEMA_LOCATION_GPS_POINT
from ..utils import db_test_case_factory, record_to_dict, records_to_dict, convert_to_datetime, getLogger, MicroserviceDb, friendlyhash
logger = getLogger('adventures.db')
SQL_CREATE_TABLE_ADVENTURES = '''
CREATE TABLE adventures
(
id SERIAL PRIMARY KEY,
name VARCHAR(255),
created TIMESTAMP,
-- User position/content will be visible after start, until stop
start TIMESTAMP,
-- Null if open-ended
stop TIMESTAMP,
description TEXT,
-- Friendly identifier for links
url_hash CHAR(8) UNIQUE,
-- Specific tracking stuff or similar
header_includes TEXT,
-- URL to image for showing on social media etc.
preview_img VARCHAR(255)
);
CREATE TABLE adventure_logos
(
id SERIAL PRIMARY KEY,
adventure_id INTEGER REFERENCES adventures(id),
name VARCHAR(255),
url VARCHAR(255),
imgsrc VARCHAR(255)
);
CREATE TYPE user_adventure_role AS ENUM ('athlete', 'content_creator');
CREATE TABLE adventures_users_link
(
id SERIAL PRIMARY KEY,
adventure_id INTEGER REFERENCES adventures(id),
-- Is owned by other microservice so no db constraint
user_id INTEGER,
role user_adventure_role
);
-- Fast joins
CREATE INDEX adventures_users_link_adventure_id_index ON adventures_users_link(adventure_id);
CREATE INDEX adventures_users_link_user_id_index ON adventures_users_link(user_id);
'''
class Db(MicroserviceDb):
    """Async data-access layer for adventures, backed by an asyncpg pool."""
    async def create_tables(self):
        """Create the adventures tables, indexes and enum types."""
        return await self.pool.execute(SQL_CREATE_TABLE_ADVENTURES)
    async def insert_adventure(self, adv):
        """Insert adventure dict *adv* and return the new row id.

        Only 'name' is required; 'start', 'stop' and 'description' are
        optional. 'created' and the friendly 'url_hash' are generated here.
        """
        created = datetime.datetime.utcnow()
        url_hash = await friendlyhash()
        id = await self.pool.fetchval('''
        INSERT INTO adventures (id, name, created, start, stop, description, url_hash) VALUES (DEFAULT, $1, $2, $3, $4, $5, $6)
        RETURNING id;
        ''', adv['name'], created, adv.get('start'), adv.get('stop'), adv.get('description'), url_hash)
        return id
    async def get_adventures(self, limit=100):
        """Get adventures as dicts, most relevant first (see query comment)."""
        # Order descending by stop datetime first, with open-ended first,
        # then order equal datetimes (all nulls) by start date
        recs = await self.pool.fetch('''
        SELECT * FROM adventures ORDER BY stop DESC NULLS FIRST, start DESC LIMIT $1;
        ''', limit)
        return await records_to_dict(recs)
    async def get_adventure_by_id(self, adv_id):
        """Get one adventure dict by id, with its logos under the 'logos' key."""
        rec = await self.pool.fetchrow('SELECT * FROM adventures WHERE id = $1;', adv_id)
        out = await record_to_dict(rec)
        recs_logos = await self.pool.fetch('SELECT * FROM adventure_logos WHERE adventure_id = $1;', adv_id)
        out['logos'] = await records_to_dict(recs_logos)
        return out
    async def get_adventure_by_hash(self, adv_hash):
        """Get one adventure dict by its friendly url hash, or None if absent."""
        # First look up id
        adv_id = await self.pool.fetchval('SELECT id FROM adventures WHERE url_hash = $1;', adv_hash)
        if not adv_id:
            return None
        # Now get by id, to make use of any joins that get_adventure_by_id does
        return await self.get_adventure_by_id(adv_id)
    async def insert_adventure_user_link(self, link):
        """Link a user to an adventure; role defaults to 'athlete'. Returns id."""
        id = await self.pool.fetchval('''
        INSERT INTO adventures_users_link (adventure_id, user_id, role) VALUES ($1, $2, $3) RETURNING id;
        ''', link['adventure_id'], link['user_id'], link.get('role', 'athlete'))
        return id
    async def get_adventure_user_link(self, link_id):
        """Get one adventure-user link dict by its id."""
        rec = await self.pool.fetchrow('''SELECT * FROM adventures_users_link WHERE id = $1;''', link_id)
        return await record_to_dict(rec)
    async def get_adventure_user_links_by_user_id(self, user_id):
        """Get all links for a user as a list of dicts, or None if there are none."""
        recs = await self.pool.fetch('''SELECT * FROM adventures_users_link WHERE user_id=$1;''', user_id)
        if not recs:
            return None
        return await records_to_dict(recs)
    async def get_adventure_links(self, adv_id):
        """Get all user links of an adventure as a list of dicts, or None."""
        recs = await self.pool.fetch('''SELECT * FROM adventures_users_link WHERE adventure_id = $1;''', adv_id)
        if not recs:
            return None
        return await records_to_dict(recs)
class AdventuresDbTestCase(db_test_case_factory(Db)):
    """Integration tests for Db against a live database.

    NOTE(review): ``self.lru`` presumably runs a coroutine to completion and
    ``self.awrap`` wraps one for assertRaises -- both come from
    db_test_case_factory; confirm there.
    """

    # Shared fixtures; tests must copy before mutating.
    adv = {
        'name': 'TestAdventure',
        'description': 'Hi there!'
    }
    link = {
        'adventure_id': -1,   # intentionally invalid FK for test_link_fail
        'user_id': -100
    }

    def test_insert_and_retrieve(self):
        # Round-trip an adventure and check the generated fields.
        adv_id = self.lru(self.db.insert_adventure(self.adv))
        self.assertIsInstance(adv_id, int)
        adv_db = self.lru(self.db.get_adventure_by_id(adv_id))
        self.assertEqual(len(adv_db['url_hash']), 8)
        self.assertDictContainsSubset(self.adv, adv_db)
        # Can fail if someone else inserted an adventure in the meantime, but very unlikely
        adv_recent = self.lru(self.db.get_adventures())[0]
        self.assertDictContainsSubset(self.adv, adv_recent)

    def test_link_fail(self):
        # Linking to a nonexistent adventure must violate the FK constraint.
        self.assertRaises(asyncpg.exceptions.ForeignKeyViolationError,
                          self.awrap(self.db.insert_adventure_user_link), self.link)

    def test_link(self):
        adv_id = self.lru(self.db.insert_adventure(self.adv))
        link = self.link.copy()   # do not mutate the shared class fixture
        link['adventure_id'] = adv_id
        link_id = self.lru(self.db.insert_adventure_user_link(link))
        self.assertIsInstance(link_id, int)
        link_retrieved = self.lru(self.db.get_adventure_user_link(link_id))
        # Test default
        self.assertEqual(link_retrieved['role'], 'athlete')
        # Test retrieving via adventure
        links = self.lru(self.db.get_adventure_links(adv_id))
        self.assertEqual(len(links), 1)
if __name__ == "__main__":
    import argparse

    # Small maintenance CLI: create the schema and/or run the test suite.
    parser = argparse.ArgumentParser()
    parser.add_argument('--create', action='store_true',
                        help="Create db tables and indexes")
    parser.add_argument('--test', action='store_true',
                        help="Test db")
    args = parser.parse_args()

    if args.create:
        loop = asyncio.get_event_loop()
        db = loop.run_until_complete(Db.create())
        loop.run_until_complete(db.create_tables())

    if args.test:
        # unittest must not see our custom flags, so hand it argv[0] only.
        unittest.main(verbosity=1, argv=sys.argv[:1])
| mit |
tiagocoutinho/qarbon | qarbon/qt/designer/plugins/base.py | 1 | 5728 | # ----------------------------------------------------------------------------
# This file is part of qarbon (http://qarbon.rtfd.org/)
#
# Copyright (c) 2013 European Synchrotron Radiation Facility, Grenoble, France
#
# Distributed under the terms of the GNU Lesser General Public License,
# either version 3 of the License, or (at your option) any later version.
# See LICENSE.txt for more info.
# ----------------------------------------------------------------------------
"""internal module defining base WidgetPlugin class to interface between
qarbon and QtDesigner"""
__all__ = ["DesignerBaseWidgetPlugin", "DesignerBaseContainerExtension"]
import inspect
from qarbon import log
from qarbon.external.qt import QtGui, QtDesigner
from qarbon.qt.gui.icon import getIcon
from qarbon.qt.designer.plugins.factory import ExtensionType, \
QarbonWidgetExtensionFactory, registerExtension
#
# Class is hidden to prevent QtDesigner from instantiating it
#
class DesignerBaseWidgetPlugin(QtDesigner.QPyDesignerCustomWidgetPlugin):
    """Qarbon's custom base widget plugin class for QtDesigner.

    Subclasses must provide a ``WidgetClass`` attribute; the widget class
    itself supplies metadata through ``getQtDesignerPluginInfo()`` (queried
    lazily via :meth:`getWidgetInfo`).
    """

    def __init__(self, parent=None):
        QtDesigner.QPyDesignerCustomWidgetPlugin.__init__(self)
        # Set to True once initialize() has run.
        self.__initialized = False

    def initialize(self, formEditor):
        """Register the container extension (if any) with Qt Designer.

        Overwrite if necessary, but call this base implementation if you
        want the generic extensions in your widget.  Safe to call twice.
        """
        if self.isInitialized():
            return
        if self.isContainer():
            log.debug("[start] registering container extension for %s...",
                      self.name())
            container_extension = self.getContainerExtensionClass()
            registerExtension(ExtensionType.ContainerExtension,
                              self.getWidgetClass(),
                              container_extension)
            manager = formEditor.extensionManager()
            # Keep a reference so the factory is not garbage collected.
            self.__extension_factory = QarbonWidgetExtensionFactory(manager)
            manager.registerExtensions(self.__extension_factory,
                                       ExtensionType.ContainerExtension.value)
            log.debug("[ done] registering container extension for %s",
                      self.name())
        self.__initialized = True

    def isInitialized(self):
        # True once initialize() has completed.
        return self.__initialized

    def getWidgetClass(self):
        # Subclasses are expected to define WidgetClass.
        return self.WidgetClass

    def _getWidgetClassName(self):
        return self.getWidgetClass().__name__

    def createWidget(self, parent):
        """Instantiate the widget; on failure log the error and return None."""
        try:
            w = self.getWidgetClass()(parent=parent)
        except Exception:
            log.error("Designer plugin error creating %s " \
                      "(see debug stream for details)", self.name())
            log.debug("Details:", exc_info=1)
            w = None
        return w

    def getWidgetInfo(self, key, dft=None):
        """Return one entry of the widget's designer plugin info (cached)."""
        if not hasattr(self, "_widgetInfo"):
            self._widgetInfo = self.getWidgetClass().getQtDesignerPluginInfo()
        return self._widgetInfo.get(key, dft)

    # --- QDesignerCustomWidgetInterface metadata accessors ---------------

    def name(self):
        return self._getWidgetClassName()

    def group(self):
        return self.getWidgetInfo("group", "Qarbon Widgets")

    def getIconName(self):
        return self.getWidgetInfo("icon")

    def icon(self):
        return getIcon(self.getIconName())

    def domXml(self):
        # Default object name: class name with a lower-cased first letter.
        name = str(self.name())
        lowerName = name[0].lower() + name[1:]
        return """<widget class="%s" name="%s" />\n""" % (name, lowerName)

    def includeFile(self):
        """Module Designer should import to obtain the widget class."""
        return inspect.getmodule(self.getWidgetClass()).__name__

    def toolTip(self):
        tooltip = self.getWidgetInfo("tooltip")
        if tooltip is None:
            tooltip = "A %s" % self._getWidgetClassName()
        return tooltip

    def whatsThis(self):
        whatsthis = self.getWidgetInfo("whatsthis")
        if whatsthis is None:
            whatsthis = "This is a %s widget" % self._getWidgetClassName()
        return whatsthis

    def isContainer(self):
        return self.getWidgetInfo("container", False)

    def getContainerExtensionClass(self):
        # Widgets may override via the "container_extension" info key.
        return self.getWidgetInfo("container_extension",
                                  DesignerBaseContainerExtension)
class DesignerBaseContainerExtension(QtDesigner.QPyDesignerContainerExtension):
    """Base class for qarbon container extensions.

    Remembers the widget it extends and exposes it via pluginWidget().
    """

    def __init__(self, widget, parent=None):
        super(DesignerBaseContainerExtension, self).__init__(parent)
        self._plugin_widget = widget

    def pluginWidget(self):
        """Return the widget this container extension operates on."""
        return self._plugin_widget
class DesignerBaseSingleContainerExtension(DesignerBaseContainerExtension):
    """Container extension for widgets that hold at most one content page.

    The page is pushed to the plugin widget through ``setContent()``; it is
    additionally tracked locally so count()/widget()/currentIndex() work.
    """

    def __init__(self, widget, parent=None):
        super(DesignerBaseSingleContainerExtension, self).__init__(widget,
                                                                   parent=parent)
        # The single content page, or None when empty.
        self.__content_widget = None

    def addWidget(self, widget):
        """Set *widget* as the single page; warn and refuse when full."""
        if self.count() > 0:
            QtGui.QMessageBox.warning(None, "Error adding page",
                                      "Cannot have more than one page",
                                      buttons=QtGui.QMessageBox.Ok,
                                      defaultButton=QtGui.QMessageBox.Ok)
            return
        self.pluginWidget().setContent(widget)
        self.__content_widget = widget

    def count(self):
        """Return the number of pages (0 or 1)."""
        if self.__content_widget is None:
            return 0
        return 1

    def currentIndex(self):
        if self.count() > 0:
            return 0
        return -1

    def insertWidget(self, index, widget):
        # Only one page is possible, so the index is irrelevant.
        self.addWidget(widget)

    def remove(self, index):
        self.pluginWidget().setContent(None)
        # Bug fix: also drop the local reference; otherwise count() keeps
        # reporting 1 and addWidget() refuses any new page after a removal.
        self.__content_widget = None

    def setCurrentIndex(self, index):
        # Single page: nothing to switch to.
        pass

    def widget(self, index):
        return self.__content_widget
| lgpl-3.0 |
anuruddhal/jinja2 | tests/test_ext.py | 22 | 17823 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.ext
~~~~~~~~~~~~~~~~~~~~
Tests for the extensions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import pytest
from jinja2 import Environment, DictLoader, contextfunction, nodes
from jinja2.exceptions import TemplateAssertionError
from jinja2.ext import Extension
from jinja2.lexer import Token, count_newlines
from jinja2._compat import BytesIO, itervalues, text_type
importable_object = 23
_gettext_re = re.compile(r'_\((.*?)\)(?s)')
# Templates for the old-style i18n API (gettext/ngettext exposed as plain
# globals on the environment).
i18n_templates = {
    'master.html': '<title>{{ page_title|default(_("missing")) }}</title>'
                   '{% block body %}{% endblock %}',
    'child.html': '{% extends "master.html" %}{% block body %}'
                  '{% trans %}watch out{% endtrans %}{% endblock %}',
    'plural.html': '{% trans user_count %}One user online{% pluralize %}'
                   '{{ user_count }} users online{% endtrans %}',
    'plural2.html': '{% trans user_count=get_user_count() %}{{ user_count }}s'
                    '{% pluralize %}{{ user_count }}p{% endtrans %}',
    'stringformat.html': '{{ _("User: %(num)s")|format(num=user_count) }}'
}

# Templates for the new-style i18n API (callables installed with
# install_gettext_callables; interpolation handled by gettext itself).
newstyle_i18n_templates = {
    'master.html': '<title>{{ page_title|default(_("missing")) }}</title>'
                   '{% block body %}{% endblock %}',
    'child.html': '{% extends "master.html" %}{% block body %}'
                  '{% trans %}watch out{% endtrans %}{% endblock %}',
    'plural.html': '{% trans user_count %}One user online{% pluralize %}'
                   '{{ user_count }} users online{% endtrans %}',
    'stringformat.html': '{{ _("User: %(num)s", num=user_count) }}',
    'ngettext.html': '{{ ngettext("%(num)s apple", "%(num)s apples", apples) }}',
    'ngettext_long.html': '{% trans num=apples %}{{ num }} apple{% pluralize %}'
                          '{{ num }} apples{% endtrans %}',
    'transvars1.html': '{% trans %}User: {{ num }}{% endtrans %}',
    'transvars2.html': '{% trans num=count %}User: {{ num }}{% endtrans %}',
    'transvars3.html': '{% trans count=num %}User: {{ count }}{% endtrans %}',
    'novars.html': '{% trans %}%(hello)s{% endtrans %}',
    'vars.html': '{% trans %}{{ foo }}%(foo)s{% endtrans %}',
    'explicitvars.html': '{% trans foo="42" %}%(foo)s{% endtrans %}'
}

# Fake message catalog used by the dummy gettext/ngettext below.
languages = {
    'de': {
        'missing': u'fehlend',
        'watch out': u'pass auf',
        'One user online': u'Ein Benutzer online',
        '%(user_count)s users online': u'%(user_count)s Benutzer online',
        'User: %(num)s': u'Benutzer: %(num)s',
        'User: %(count)s': u'Benutzer: %(count)s',
        '%(num)s apple': u'%(num)s Apfel',
        '%(num)s apples': u'%(num)s Äpfel'
    }
}
@contextfunction
def gettext(context, string):
    """Dummy translator: look *string* up in the catalog selected by the
    context's LANGUAGE variable, falling back to the string itself."""
    catalog = languages.get(context.get('LANGUAGE', 'en'), {})
    return catalog.get(string, string)
@contextfunction
def ngettext(context, s, p, n):
    """Dummy plural translator: singular form for n == 1, plural otherwise,
    both looked up in the catalog selected by the context's LANGUAGE."""
    catalog = languages.get(context.get('LANGUAGE', 'en'), {})
    if n == 1:
        return catalog.get(s, s)
    return catalog.get(p, p)
# Old-style environment: translation callables are plain template globals.
i18n_env = Environment(
    loader=DictLoader(i18n_templates),
    extensions=['jinja2.ext.i18n']
)
i18n_env.globals.update({
    '_': gettext,
    'gettext': gettext,
    'ngettext': ngettext
})

# New-style environment: callables registered via install_gettext_callables.
newstyle_i18n_env = Environment(
    loader=DictLoader(newstyle_i18n_templates),
    extensions=['jinja2.ext.i18n']
)
newstyle_i18n_env.install_gettext_callables(gettext, ngettext, newstyle=True)
class TestExtension(Extension):
    """Extension used by the tests: a ``{% test %}`` tag that dumps state."""

    tags = set(['test'])
    ext_attr = 42

    def parse(self, parser):
        # Compile {% test %} into a call of _dump with one node of each
        # interesting type, exercising EnvironmentAttribute, extension
        # attribute, ImportedName and ContextReference resolution.
        return nodes.Output([self.call_method('_dump', [
            nodes.EnvironmentAttribute('sandboxed'),
            self.attr('ext_attr'),
            nodes.ImportedName(__name__ + '.importable_object'),
            nodes.ContextReference()
        ])]).set_lineno(next(parser.stream).lineno)

    def _dump(self, sandboxed, ext_attr, imported_object, context):
        # Renders as e.g. 'False|42|23|{}'.
        return '%s|%s|%s|%s' % (
            sandboxed,
            ext_attr,
            imported_object,
            context.blocks
        )
class PreprocessorExtension(Extension):
    """Extension that rewrites the raw template source before lexing."""

    def preprocess(self, source, name, filename=None):
        # Turn the custom placeholder into a regular print expression.
        rewritten = source.replace('[[TEST]]', '({{ foo }})')
        return rewritten
class StreamFilterExtension(Extension):
    """Extension that rewrites ``_(...)`` markers found in template data
    into ``gettext(...)`` calls at the token-stream level."""

    def filter_stream(self, stream):
        # Only 'data' tokens can contain the _(...) markers.
        for token in stream:
            if token.type == 'data':
                for t in self.interpolate(token):
                    yield t
            else:
                yield token

    def interpolate(self, token):
        """Split one data token around each ``_(...)`` match, yielding the
        surrounding data plus synthesized gettext-call tokens."""
        pos = 0
        end = len(token.value)
        lineno = token.lineno
        while 1:
            match = _gettext_re.search(token.value, pos)
            if match is None:
                break
            value = token.value[pos:match.start()]
            if value:
                yield Token(lineno, 'data', value)
            # NOTE(review): counts newlines of the WHOLE token value, not just
            # the part consumed so far -- line numbers may drift on tokens
            # with several matches; confirm whether this is intended.
            lineno += count_newlines(token.value)
            yield Token(lineno, 'variable_begin', None)
            yield Token(lineno, 'name', 'gettext')
            yield Token(lineno, 'lparen', None)
            yield Token(lineno, 'string', match.group(1))
            yield Token(lineno, 'rparen', None)
            yield Token(lineno, 'variable_end', None)
            pos = match.end()
        if pos < end:
            yield Token(lineno, 'data', token.value[pos:])
@pytest.mark.ext
class TestExtensions():
def test_extend_late(self):
env = Environment()
env.add_extension('jinja2.ext.autoescape')
t = env.from_string(
'{% autoescape true %}{{ "<test>" }}{% endautoescape %}')
assert t.render() == '<test>'
def test_loop_controls(self):
env = Environment(extensions=['jinja2.ext.loopcontrols'])
tmpl = env.from_string('''
{%- for item in [1, 2, 3, 4] %}
{%- if item % 2 == 0 %}{% continue %}{% endif -%}
{{ item }}
{%- endfor %}''')
assert tmpl.render() == '13'
tmpl = env.from_string('''
{%- for item in [1, 2, 3, 4] %}
{%- if item > 2 %}{% break %}{% endif -%}
{{ item }}
{%- endfor %}''')
assert tmpl.render() == '12'
def test_do(self):
env = Environment(extensions=['jinja2.ext.do'])
tmpl = env.from_string('''
{%- set items = [] %}
{%- for char in "foo" %}
{%- do items.append(loop.index0 ~ char) %}
{%- endfor %}{{ items|join(', ') }}''')
assert tmpl.render() == '0f, 1o, 2o'
def test_with(self):
env = Environment(extensions=['jinja2.ext.with_'])
tmpl = env.from_string('''\
{% with a=42, b=23 -%}
{{ a }} = {{ b }}
{% endwith -%}
{{ a }} = {{ b }}\
''')
assert [x.strip() for x in tmpl.render(a=1, b=2).splitlines()] \
== ['42 = 23', '1 = 2']
def test_extension_nodes(self):
env = Environment(extensions=[TestExtension])
tmpl = env.from_string('{% test %}')
assert tmpl.render() == 'False|42|23|{}'
def test_identifier(self):
assert TestExtension.identifier == __name__ + '.TestExtension'
def test_rebinding(self):
original = Environment(extensions=[TestExtension])
overlay = original.overlay()
for env in original, overlay:
for ext in itervalues(env.extensions):
assert ext.environment is env
def test_preprocessor_extension(self):
env = Environment(extensions=[PreprocessorExtension])
tmpl = env.from_string('{[[TEST]]}')
assert tmpl.render(foo=42) == '{(42)}'
def test_streamfilter_extension(self):
env = Environment(extensions=[StreamFilterExtension])
env.globals['gettext'] = lambda x: x.upper()
tmpl = env.from_string('Foo _(bar) Baz')
out = tmpl.render()
assert out == 'Foo BAR Baz'
def test_extension_ordering(self):
class T1(Extension):
priority = 1
class T2(Extension):
priority = 2
env = Environment(extensions=[T1, T2])
ext = list(env.iter_extensions())
assert ext[0].__class__ is T1
assert ext[1].__class__ is T2
@pytest.mark.ext
class TestInternationalization():
    """Tests for the old-style i18n extension API (globals-based)."""

    def test_trans(self):
        tmpl = i18n_env.get_template('child.html')
        assert tmpl.render(LANGUAGE='de') == '<title>fehlend</title>pass auf'

    def test_trans_plural(self):
        tmpl = i18n_env.get_template('plural.html')
        assert tmpl.render(LANGUAGE='de', user_count=1) \
            == 'Ein Benutzer online'
        assert tmpl.render(LANGUAGE='de', user_count=2) == '2 Benutzer online'

    def test_trans_plural_with_functions(self):
        # The count expression must be evaluated exactly once.
        tmpl = i18n_env.get_template('plural2.html')

        def get_user_count():
            get_user_count.called += 1
            return 1
        get_user_count.called = 0
        assert tmpl.render(LANGUAGE='de', get_user_count=get_user_count) \
            == '1s'
        assert get_user_count.called == 1

    def test_complex_plural(self):
        tmpl = i18n_env.from_string(
            '{% trans foo=42, count=2 %}{{ count }} item{% '
            'pluralize count %}{{ count }} items{% endtrans %}')
        assert tmpl.render() == '2 items'
        # The pluralize target must be one of the trans variables.
        pytest.raises(TemplateAssertionError, i18n_env.from_string,
                      '{% trans foo %}...{% pluralize bar %}...{% endtrans %}')

    def test_trans_stringformatting(self):
        tmpl = i18n_env.get_template('stringformat.html')
        assert tmpl.render(LANGUAGE='de', user_count=5) == 'Benutzer: 5'

    def test_extract(self):
        # babel_extract must report line, function, message, comments.
        from jinja2.ext import babel_extract
        source = BytesIO('''
        {{ gettext('Hello World') }}
        {% trans %}Hello World{% endtrans %}
        {% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %}
        '''.encode('ascii'))  # make python 3 happy
        assert list(babel_extract(source,
                                  ('gettext', 'ngettext', '_'), [], {})) == [
            (2, 'gettext', u'Hello World', []),
            (3, 'gettext', u'Hello World', []),
            (4, 'ngettext', (u'%(users)s user', u'%(users)s users', None), [])
        ]

    def test_comment_extract(self):
        # Translator comments marked with the given tags are attached.
        from jinja2.ext import babel_extract
        source = BytesIO('''
        {# trans first #}
        {{ gettext('Hello World') }}
        {% trans %}Hello World{% endtrans %}{# trans second #}
        {#: third #}
        {% trans %}{{ users }} user{% pluralize %}{{ users }} users{% endtrans %}
        '''.encode('utf-8'))  # make python 3 happy
        assert list(babel_extract(source,
                                  ('gettext', 'ngettext', '_'),
                                  ['trans', ':'], {})) == [
            (3, 'gettext', u'Hello World', ['first']),
            (4, 'gettext', u'Hello World', ['second']),
            (6, 'ngettext', (u'%(users)s user', u'%(users)s users', None),
             ['third'])
        ]
@pytest.mark.ext
class TestNewstyleInternationalization():
def test_trans(self):
tmpl = newstyle_i18n_env.get_template('child.html')
assert tmpl.render(LANGUAGE='de') == '<title>fehlend</title>pass auf'
def test_trans_plural(self):
tmpl = newstyle_i18n_env.get_template('plural.html')
assert tmpl.render(LANGUAGE='de', user_count=1) \
== 'Ein Benutzer online'
assert tmpl.render(LANGUAGE='de', user_count=2) == '2 Benutzer online'
def test_complex_plural(self):
tmpl = newstyle_i18n_env.from_string(
'{% trans foo=42, count=2 %}{{ count }} item{% '
'pluralize count %}{{ count }} items{% endtrans %}')
assert tmpl.render() == '2 items'
pytest.raises(TemplateAssertionError, i18n_env.from_string,
'{% trans foo %}...{% pluralize bar %}...{% endtrans %}')
def test_trans_stringformatting(self):
tmpl = newstyle_i18n_env.get_template('stringformat.html')
assert tmpl.render(LANGUAGE='de', user_count=5) == 'Benutzer: 5'
def test_newstyle_plural(self):
tmpl = newstyle_i18n_env.get_template('ngettext.html')
assert tmpl.render(LANGUAGE='de', apples=1) == '1 Apfel'
assert tmpl.render(LANGUAGE='de', apples=5) == u'5 Äpfel'
def test_autoescape_support(self):
env = Environment(extensions=['jinja2.ext.autoescape',
'jinja2.ext.i18n'])
env.install_gettext_callables(
lambda x: u'<strong>Wert: %(name)s</strong>',
lambda s, p, n: s, newstyle=True)
t = env.from_string('{% autoescape ae %}{{ gettext("foo", name='
'"<test>") }}{% endautoescape %}')
assert t.render(ae=True) == '<strong>Wert: <test></strong>'
assert t.render(ae=False) == '<strong>Wert: <test></strong>'
def test_num_used_twice(self):
tmpl = newstyle_i18n_env.get_template('ngettext_long.html')
assert tmpl.render(apples=5, LANGUAGE='de') == u'5 Äpfel'
def test_num_called_num(self):
source = newstyle_i18n_env.compile('''
{% trans num=3 %}{{ num }} apple{% pluralize
%}{{ num }} apples{% endtrans %}
''', raw=True)
# quite hacky, but the only way to properly test that. The idea is
# that the generated code does not pass num twice (although that
# would work) for better performance. This only works on the
# newstyle gettext of course
assert re.search(r"l_ngettext, u?'\%\(num\)s apple', u?'\%\(num\)s "
r"apples', 3", source) is not None
def test_trans_vars(self):
t1 = newstyle_i18n_env.get_template('transvars1.html')
t2 = newstyle_i18n_env.get_template('transvars2.html')
t3 = newstyle_i18n_env.get_template('transvars3.html')
assert t1.render(num=1, LANGUAGE='de') == 'Benutzer: 1'
assert t2.render(count=23, LANGUAGE='de') == 'Benutzer: 23'
assert t3.render(num=42, LANGUAGE='de') == 'Benutzer: 42'
def test_novars_vars_escaping(self):
t = newstyle_i18n_env.get_template('novars.html')
assert t.render() == '%(hello)s'
t = newstyle_i18n_env.get_template('vars.html')
assert t.render(foo='42') == '42%(foo)s'
t = newstyle_i18n_env.get_template('explicitvars.html')
assert t.render() == '%(foo)s'
@pytest.mark.ext
class TestAutoEscape():
def test_scoped_setting(self):
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
tmpl = env.from_string('''
{{ "<HelloWorld>" }}
{% autoescape false %}
{{ "<HelloWorld>" }}
{% endautoescape %}
{{ "<HelloWorld>" }}
''')
assert tmpl.render().split() == \
[u'<HelloWorld>', u'<HelloWorld>', u'<HelloWorld>']
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=False)
tmpl = env.from_string('''
{{ "<HelloWorld>" }}
{% autoescape true %}
{{ "<HelloWorld>" }}
{% endautoescape %}
{{ "<HelloWorld>" }}
''')
assert tmpl.render().split() == \
[u'<HelloWorld>', u'<HelloWorld>', u'<HelloWorld>']
def test_nonvolatile(self):
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
tmpl = env.from_string('{{ {"foo": "<test>"}|xmlattr|escape }}')
assert tmpl.render() == ' foo="<test>"'
tmpl = env.from_string('{% autoescape false %}{{ {"foo": "<test>"}'
'|xmlattr|escape }}{% endautoescape %}')
assert tmpl.render() == ' foo="&lt;test&gt;"'
def test_volatile(self):
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
tmpl = env.from_string('{% autoescape foo %}{{ {"foo": "<test>"}'
'|xmlattr|escape }}{% endautoescape %}')
assert tmpl.render(foo=False) == ' foo="&lt;test&gt;"'
assert tmpl.render(foo=True) == ' foo="<test>"'
def test_scoping(self):
env = Environment(extensions=['jinja2.ext.autoescape'])
tmpl = env.from_string(
'{% autoescape true %}{% set x = "<x>" %}{{ x }}'
'{% endautoescape %}{{ x }}{{ "<y>" }}')
assert tmpl.render(x=1) == '<x>1<y>'
def test_volatile_scoping(self):
env = Environment(extensions=['jinja2.ext.autoescape'])
tmplsource = '''
{% autoescape val %}
{% macro foo(x) %}
[{{ x }}]
{% endmacro %}
{{ foo().__class__.__name__ }}
{% endautoescape %}
{{ '<testing>' }}
'''
tmpl = env.from_string(tmplsource)
assert tmpl.render(val=True).split()[0] == 'Markup'
assert tmpl.render(val=False).split()[0] == text_type.__name__
# looking at the source we should see <testing> there in raw
# (and then escaped as well)
env = Environment(extensions=['jinja2.ext.autoescape'])
pysource = env.compile(tmplsource, raw=True)
assert '<testing>\\n' in pysource
env = Environment(extensions=['jinja2.ext.autoescape'],
autoescape=True)
pysource = env.compile(tmplsource, raw=True)
assert '<testing>\\n' in pysource
| bsd-3-clause |
bud4/samba | source4/dsdb/tests/python/ldap_syntaxes.py | 8 | 16098 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Tests for LDAP syntaxes
import optparse
import sys
import time
import random
import uuid
sys.path.insert(0, "bin/python")
import samba
from samba.tests.subunitrun import SubunitOptions, TestProgram
import samba.getopt as options
from samba.auth import system_session
from ldb import SCOPE_BASE, SCOPE_SUBTREE, LdbError
from ldb import ERR_CONSTRAINT_VIOLATION
from ldb import ERR_INVALID_ATTRIBUTE_SYNTAX
from ldb import ERR_ENTRY_ALREADY_EXISTS
import samba.tests
# Command-line handling: standard samba option groups plus subunit options.
parser = optparse.OptionParser("ldap_syntaxes.py [options] <host>")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option_group(options.VersionOptions(parser))
# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
subunitopts = SubunitOptions(parser)
parser.add_option_group(subunitopts)
opts, args = parser.parse_args()

if len(args) < 1:
    parser.print_usage()
    sys.exit(1)

# Positional argument: the DC host to run the LDAP tests against.
host = args[0]

lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
class SyntaxTests(samba.tests.TestCase):
    """Tests for the Object(DN-String) (2.5.5.14) and Object(DN-Binary)
    (2.5.5.7) LDAP attribute syntaxes, run against a live Samba DC."""

    def setUp(self):
        super(SyntaxTests, self).setUp()
        self.ldb = samba.tests.connect_samdb(host, credentials=creds,
                                             session_info=system_session(lp), lp=lp)
        self.base_dn = self.ldb.domain_dn()
        self.schema_dn = self.ldb.get_schema_basedn().get_linearized()
        # Provision a throw-away attribute + objectClass for each syntax.
        self._setup_dn_string_test()
        self._setup_dn_binary_test()
    def _setup_dn_string_test(self):
        """Create a test attribute and objectClass using the
        Object(DN-String) syntax (attributeSyntax 2.5.5.14)."""
        # Timestamp suffix keeps schema names unique across test runs.
        attr_name = "test-Attr-DN-String" + time.strftime("%s", time.gmtime())
        attr_ldap_display_name = attr_name.replace("-", "")

        ldif = """
dn: CN=%s,%s""" % (attr_name, self.schema_dn) + """
ldapDisplayName: """ + attr_ldap_display_name + """
objectClass: top
objectClass: attributeSchema
cn: """ + attr_name + """
attributeId: 1.3.6.1.4.1.7165.4.6.1.1.""" + str(random.randint(1,100000)) + """
attributeSyntax: 2.5.5.14
omSyntax: 127
omObjectClass: \x2A\x86\x48\x86\xF7\x14\x01\x01\x01\x0C
isSingleValued: FALSE
schemaIdGuid: """ + str(uuid.uuid4()) + """
systemOnly: FALSE
"""
        self.ldb.add_ldif(ldif)

        # search for created attribute
        res = []
        res = self.ldb.search("cn=%s,%s" % (attr_name, self.schema_dn), scope=SCOPE_BASE, attrs=["*"])
        self.assertEquals(len(res), 1)
        self.assertEquals(res[0]["lDAPDisplayName"][0], attr_ldap_display_name)
        self.assertTrue("schemaIDGUID" in res[0])

        class_name = "test-Class-DN-String" + time.strftime("%s", time.gmtime())
        class_ldap_display_name = class_name.replace("-", "")

        ldif = """
dn: CN=%s,%s""" % (class_name, self.schema_dn) + """
objectClass: top
objectClass: classSchema
adminDescription: """ + class_name + """
adminDisplayName: """ + class_name + """
cn: """ + class_name + """
governsId: 1.3.6.1.4.1.7165.4.6.2.1.""" + str(random.randint(1,100000)) + """
schemaIdGuid: """ + str(uuid.uuid4()) + """
objectClassCategory: 1
subClassOf: organizationalPerson
systemMayContain: """ + attr_ldap_display_name + """
systemOnly: FALSE
"""
        self.ldb.add_ldif(ldif)

        # search for created objectclass
        res = []
        res = self.ldb.search("cn=%s,%s" % (class_name, self.schema_dn), scope=SCOPE_BASE, attrs=["*"])
        self.assertEquals(len(res), 1)
        self.assertEquals(res[0]["lDAPDisplayName"][0], class_ldap_display_name)
        self.assertEquals(res[0]["defaultObjectCategory"][0], res[0]["distinguishedName"][0])
        self.assertTrue("schemaIDGUID" in res[0])

        # store the class and the attribute
        self.dn_string_class_ldap_display_name = class_ldap_display_name
        self.dn_string_attribute = attr_ldap_display_name
        self.dn_string_class_name = class_name
    def _setup_dn_binary_test(self):
        """Create a test attribute and objectClass using the
        Object(DN-Binary) syntax (attributeSyntax 2.5.5.7)."""
        # Timestamp suffix keeps schema names unique across test runs.
        attr_name = "test-Attr-DN-Binary" + time.strftime("%s", time.gmtime())
        attr_ldap_display_name = attr_name.replace("-", "")

        ldif = """
dn: CN=%s,%s""" % (attr_name, self.schema_dn) + """
ldapDisplayName: """ + attr_ldap_display_name + """
objectClass: top
objectClass: attributeSchema
cn: """ + attr_name + """
attributeId: 1.3.6.1.4.1.7165.4.6.1.2.""" + str(random.randint(1,100000)) + """
attributeSyntax: 2.5.5.7
omSyntax: 127
omObjectClass: \x2A\x86\x48\x86\xF7\x14\x01\x01\x01\x0B
isSingleValued: FALSE
schemaIdGuid: """ + str(uuid.uuid4()) + """
systemOnly: FALSE
"""
        self.ldb.add_ldif(ldif)

        # search for created attribute
        res = []
        res = self.ldb.search("cn=%s,%s" % (attr_name, self.schema_dn), scope=SCOPE_BASE, attrs=["*"])
        self.assertEquals(len(res), 1)
        self.assertEquals(res[0]["lDAPDisplayName"][0], attr_ldap_display_name)
        self.assertTrue("schemaIDGUID" in res[0])

        class_name = "test-Class-DN-Binary" + time.strftime("%s", time.gmtime())
        class_ldap_display_name = class_name.replace("-", "")

        ldif = """
dn: CN=%s,%s""" % (class_name, self.schema_dn) + """
objectClass: top
objectClass: classSchema
adminDescription: """ + class_name + """
adminDisplayName: """ + class_name + """
cn: """ + class_name + """
governsId: 1.3.6.1.4.1.7165.4.6.2.2.""" + str(random.randint(1,100000)) + """
schemaIdGuid: """ + str(uuid.uuid4()) + """
objectClassCategory: 1
subClassOf: organizationalPerson
systemMayContain: """ + attr_ldap_display_name + """
systemOnly: FALSE
"""
        self.ldb.add_ldif(ldif)

        # search for created objectclass
        res = []
        res = self.ldb.search("cn=%s,%s" % (class_name, self.schema_dn), scope=SCOPE_BASE, attrs=["*"])
        self.assertEquals(len(res), 1)
        self.assertEquals(res[0]["lDAPDisplayName"][0], class_ldap_display_name)
        self.assertEquals(res[0]["defaultObjectCategory"][0], res[0]["distinguishedName"][0])
        self.assertTrue("schemaIDGUID" in res[0])

        # store the class and the attribute
        self.dn_binary_class_ldap_display_name = class_ldap_display_name
        self.dn_binary_attribute = attr_ldap_display_name
        self.dn_binary_class_name = class_name
    def _get_object_ldif(self, object_name, class_name, class_ldap_display_name, attr_name, attr_value):
        """Build the LDIF for a test object of *class_name* carrying one
        value of the attribute under test (*attr_name* + *attr_value*,
        where attr_value includes the leading ': ')."""
        # add object with correct syntax
        ldif = """
dn: CN=%s,CN=Users,%s""" % (object_name, self.base_dn) + """
objectClass: organizationalPerson
objectClass: person
objectClass: """ + class_ldap_display_name + """
objectClass: top
cn: """ + object_name + """
instanceType: 4
objectCategory: CN=%s,%s""" % (class_name, self.schema_dn) + """
distinguishedName: CN=%s,CN=Users,%s""" % (object_name, self.base_dn) + """
name: """ + object_name + """
""" + attr_name + attr_value + """
"""
        return ldif
def test_dn_string(self):
# add object with correct value
object_name1 = "obj-DN-String1" + time.strftime("%s", time.gmtime())
ldif = self._get_object_ldif(object_name1, self.dn_string_class_name, self.dn_string_class_ldap_display_name,
self.dn_string_attribute, ": S:5:ABCDE:" + self.base_dn)
self.ldb.add_ldif(ldif)
# search by specifying the DN part only
res = self.ldb.search(base=self.base_dn,
scope=SCOPE_SUBTREE,
expression="(%s=%s)" % (self.dn_string_attribute, self.base_dn))
self.assertEquals(len(res), 0)
# search by specifying the string part only
res = self.ldb.search(base=self.base_dn,
scope=SCOPE_SUBTREE,
expression="(%s=S:5:ABCDE)" % self.dn_string_attribute)
self.assertEquals(len(res), 0)
# search by DN+Stirng
res = self.ldb.search(base=self.base_dn,
scope=SCOPE_SUBTREE,
expression="(%s=S:5:ABCDE:%s)" % (self.dn_string_attribute, self.base_dn))
self.assertEquals(len(res), 1)
# add object with wrong format
object_name2 = "obj-DN-String2" + time.strftime("%s", time.gmtime())
ldif = self._get_object_ldif(object_name2, self.dn_string_class_name, self.dn_string_class_ldap_display_name,
self.dn_string_attribute, ": S:5:ABCD:" + self.base_dn)
try:
self.ldb.add_ldif(ldif)
except LdbError, (num, _):
self.assertEquals(num, ERR_INVALID_ATTRIBUTE_SYNTAX)
# add object with the same dn but with different string value in case
ldif = self._get_object_ldif(object_name1, self.dn_string_class_name, self.dn_string_class_ldap_display_name,
self.dn_string_attribute, ": S:5:abcde:" + self.base_dn)
try:
self.ldb.add_ldif(ldif)
except LdbError, (num, _):
self.assertEquals(num, ERR_ENTRY_ALREADY_EXISTS)
# add object with the same dn but with different string value
ldif = self._get_object_ldif(object_name1, self.dn_string_class_name, self.dn_string_class_ldap_display_name,
self.dn_string_attribute, ": S:5:FGHIJ:" + self.base_dn)
try:
self.ldb.add_ldif(ldif)
except LdbError, (num, _):
self.assertEquals(num, ERR_ENTRY_ALREADY_EXISTS)
# add object with the same dn but with different dn and string value
ldif = self._get_object_ldif(object_name1, self.dn_string_class_name, self.dn_string_class_ldap_display_name,
self.dn_string_attribute, ": S:5:FGHIJ:" + self.schema_dn)
try:
self.ldb.add_ldif(ldif)
except LdbError, (num, _):
self.assertEquals(num, ERR_ENTRY_ALREADY_EXISTS)
# add object with the same dn but with different dn value
ldif = self._get_object_ldif(object_name1, self.dn_string_class_name, self.dn_string_class_ldap_display_name,
self.dn_string_attribute, ": S:5:ABCDE:" + self.schema_dn)
try:
self.ldb.add_ldif(ldif)
except LdbError, (num, _):
self.assertEquals(num, ERR_ENTRY_ALREADY_EXISTS)
# add object with GUID instead of DN
object_name3 = "obj-DN-String3" + time.strftime("%s", time.gmtime())
ldif = self._get_object_ldif(object_name3, self.dn_string_class_name, self.dn_string_class_ldap_display_name,
self.dn_string_attribute, ": S:5:ABCDE:<GUID=%s>" % str(uuid.uuid4()))
try:
self.ldb.add_ldif(ldif)
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
# add object with SID instead of DN
object_name4 = "obj-DN-String4" + time.strftime("%s", time.gmtime())
ldif = self._get_object_ldif(object_name4, self.dn_string_class_name, self.dn_string_class_ldap_display_name,
self.dn_string_attribute, ": S:5:ABCDE:<SID=%s>" % self.ldb.get_domain_sid())
try:
self.ldb.add_ldif(ldif)
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
# add object with random string instead of DN
object_name5 = "obj-DN-String5" + time.strftime("%s", time.gmtime())
ldif = self._get_object_ldif(object_name5, self.dn_string_class_name, self.dn_string_class_ldap_display_name,
self.dn_string_attribute, ": S:5:ABCDE:randomSTRING")
try:
self.ldb.add_ldif(ldif)
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
def test_dn_binary(self):
# add obeject with correct value
object_name1 = "obj-DN-Binary1" + time.strftime("%s", time.gmtime())
ldif = self._get_object_ldif(object_name1, self.dn_binary_class_name, self.dn_binary_class_ldap_display_name,
self.dn_binary_attribute, ": B:4:1234:" + self.base_dn)
self.ldb.add_ldif(ldif)
# search by specifyingthe DN part
res = self.ldb.search(base=self.base_dn,
scope=SCOPE_SUBTREE,
expression="(%s=%s)" % (self.dn_binary_attribute, self.base_dn))
self.assertEquals(len(res), 0)
# search by specifying the binary part
res = self.ldb.search(base=self.base_dn,
scope=SCOPE_SUBTREE,
expression="(%s=B:4:1234)" % self.dn_binary_attribute)
self.assertEquals(len(res), 0)
# search by DN+Binary
res = self.ldb.search(base=self.base_dn,
scope=SCOPE_SUBTREE,
expression="(%s=B:4:1234:%s)" % (self.dn_binary_attribute, self.base_dn))
self.assertEquals(len(res), 1)
# add object with wrong format - 5 bytes instead of 4, 8, 16, 32...
object_name2 = "obj-DN-Binary2" + time.strftime("%s", time.gmtime())
ldif = self._get_object_ldif(object_name2, self.dn_binary_class_name, self.dn_binary_class_ldap_display_name,
self.dn_binary_attribute, ": B:5:67890:" + self.base_dn)
try:
self.ldb.add_ldif(ldif)
except LdbError, (num, _):
self.assertEquals(num, ERR_INVALID_ATTRIBUTE_SYNTAX)
# add object with the same dn but with different binary value
ldif = self._get_object_ldif(object_name1, self.dn_binary_class_name, self.dn_binary_class_ldap_display_name,
self.dn_binary_attribute, ": B:4:5678:" + self.base_dn)
try:
self.ldb.add_ldif(ldif)
except LdbError, (num, _):
self.assertEquals(num, ERR_ENTRY_ALREADY_EXISTS)
# add object with the same dn but with different binary and dn value
ldif = self._get_object_ldif(object_name1, self.dn_binary_class_name, self.dn_binary_class_ldap_display_name,
self.dn_binary_attribute, ": B:4:5678:" + self.schema_dn)
try:
self.ldb.add_ldif(ldif)
except LdbError, (num, _):
self.assertEquals(num, ERR_ENTRY_ALREADY_EXISTS)
# add object with the same dn but with different dn value
ldif = self._get_object_ldif(object_name1, self.dn_binary_class_name, self.dn_binary_class_ldap_display_name,
self.dn_binary_attribute, ": B:4:1234:" + self.schema_dn)
try:
self.ldb.add_ldif(ldif)
except LdbError, (num, _):
self.assertEquals(num, ERR_ENTRY_ALREADY_EXISTS)
# add object with GUID instead of DN
object_name3 = "obj-DN-Binary3" + time.strftime("%s", time.gmtime())
ldif = self._get_object_ldif(object_name3, self.dn_binary_class_name, self.dn_binary_class_ldap_display_name,
self.dn_binary_attribute, ": B:4:1234:<GUID=%s>" % str(uuid.uuid4()))
try:
self.ldb.add_ldif(ldif)
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
# add object with SID instead of DN
object_name4 = "obj-DN-Binary4" + time.strftime("%s", time.gmtime())
ldif = self._get_object_ldif(object_name4, self.dn_binary_class_name, self.dn_binary_class_ldap_display_name,
self.dn_binary_attribute, ": B:4:1234:<SID=%s>" % self.ldb.get_domain_sid())
try:
self.ldb.add_ldif(ldif)
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
# add object with random string instead of DN
object_name5 = "obj-DN-Binary5" + time.strftime("%s", time.gmtime())
ldif = self._get_object_ldif(object_name5, self.dn_binary_class_name, self.dn_binary_class_ldap_display_name,
self.dn_binary_attribute, ": B:4:1234:randomSTRING")
try:
self.ldb.add_ldif(ldif)
except LdbError, (num, _):
self.assertEquals(num, ERR_CONSTRAINT_VIOLATION)
# Run this module's tests under the subunit test runner when executed directly.
TestProgram(module=__name__, opts=subunitopts)
| gpl-3.0 |
raytung/Slice | Slice/helper.py | 1 | 1240 | from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
def get_sorted_model(request, model):
    """Order *model* according to the request's ``sort`` query parameter.

    Unknown or missing sort keys leave *model* untouched.  Keys ending in
    ``_lth`` sort ascending, ``_htl`` descending.
    """
    # Dispatch table: sort key -> order_by() field expression.
    field_by_sort = {
        "price_lth": 'cost_per_unit',
        "price_htl": '-cost_per_unit',
        "end_lth": 'end_date',
        "end_htl": '-end_date',
        "alpha_lth": 'title',
        "alpha_htl": '-title',
        "end_date_lth": 'end_date',
        "end_date_htl": '-end_date',
        "start_date_lth": 'start_date',
        "start_date_htl": '-start_date',
    }
    sort_key = request.GET.get('sort', None)
    if sort_key in field_by_sort:
        return model.order_by(field_by_sort[sort_key])
    return model
def get_paginator(obj, request, per_page=5):
    """Paginate *obj* using the request's ``page`` parameter.

    Returns a ``(page, num_pages)`` tuple.  A non-numeric page falls back
    to the first page; an out-of-range page falls back to the last one.
    """
    paginator = Paginator(obj, per_page)
    requested = request.GET.get('page')
    try:
        current_page = paginator.page(requested)
    except PageNotAnInteger:
        # Missing/non-numeric page number -> first page.
        current_page = paginator.page(1)
    except EmptyPage:
        # Page number beyond the end -> clamp to the last page.
        current_page = paginator.page(paginator.num_pages)
    return current_page, paginator.num_pages
| mit |
pompiduskus/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
# NOTE(review): sklearn.cross_validation was removed in later scikit-learn
# releases (moved to sklearn.model_selection); this exercise targets the old API.
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set (50/50):
docs_train, docs_test, y_train, y_test = train_test_split(
    dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens (raw counts, no IDF weighting)
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer;
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report (precision/recall/F1 per language)
print(metrics.classification_report(y_test, y_predicted,
                                    target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences (English, French, German):
sentences = [
    u'This is a language detection test.',
    u'Ceci est un test de d\xe9tection de la langue.',
    u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
    print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
django-silk/silk | project/tests/test_config_meta.py | 2 | 1493 | from django.test import TestCase
from mock import NonCallableMock
from silk.collector import DataCollector
from silk.middleware import SilkyMiddleware
from .util import delete_all_models
from silk.config import SilkyConfig
from silk.models import Request
def fake_get_response():
    """Factory returning a zero-argument stand-in for Django's
    ``get_response`` callable, which always answers ``'hello world'``."""
    def _respond():
        return 'hello world'
    return _respond
class TestConfigMeta(TestCase):
    """Verify that SILKY_META toggles collection of silk's self-profiling
    metadata (meta_time / meta_num_queries / meta_time_spent_queries)."""
    def _mock_response(self):
        # Build a minimal non-callable mock that looks enough like a Django
        # HttpResponse for SilkyMiddleware._process_response to accept it.
        response = NonCallableMock()
        response._headers = {}
        response.status_code = 200
        response.queries = []
        # header lookup delegates to the (empty) _headers dict
        response.get = response._headers.get
        response.content = ''
        return response
    def _execute_request(self):
        # Run one fake request/response cycle through the middleware and
        # return the single silk Request record it produced.
        delete_all_models(Request)
        DataCollector().configure(Request.objects.create())
        response = self._mock_response()
        SilkyMiddleware(fake_get_response)._process_response('', response)
        self.assertTrue(response.status_code == 200)
        objs = Request.objects.all()
        self.assertEqual(objs.count(), 1)
        r = objs[0]
        return r
    def test_enabled(self):
        # With SILKY_META on, at least one meta_* field should be populated.
        SilkyConfig().SILKY_META = True
        r = self._execute_request()
        self.assertTrue(r.meta_time is not None or
                        r.meta_num_queries is not None or
                        r.meta_time_spent_queries is not None)
    def test_disabled(self):
        # With SILKY_META off, no meta timing should be recorded.
        SilkyConfig().SILKY_META = False
        r = self._execute_request()
        self.assertFalse(r.meta_time)
| mit |
GinnyN/towerofdimensions-django | django-social-auth/social_auth/utils.py | 5 | 7928 | import time
import random
import hashlib
import urlparse
import urllib
from urllib2 import urlopen
import logging
from collections import defaultdict
from datetime import timedelta, tzinfo
from django.conf import settings
from django.db.models import Model
from django.contrib.contenttypes.models import ContentType
from django.utils.functional import SimpleLazyObject
# --- Compatibility shims -----------------------------------------------------
# Prefer the OS-level CSPRNG when available; fall back to the default Mersenne
# Twister (re-seeded below with secret material) otherwise.
try:
    random = random.SystemRandom()
    using_sysrandom = True
except NotImplementedError:
    using_sysrandom = False
# get_random_string: use Django's implementation when present (>= 1.4).
try:
    from django.utils.crypto import get_random_string as random_string
except ImportError: # django < 1.4
    # Implementation borrowed from django 1.4
    def random_string(length=12,
                      allowed_chars='abcdefghijklmnopqrstuvwxyz'
                                    'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
        # Re-seed with state + time + SECRET_KEY so a weak PRNG is not
        # trivially predictable across processes.
        if not using_sysrandom:
            random.seed(hashlib.sha256('%s%s%s' % (random.getstate(),
                                                   time.time(),
                                                   settings.SECRET_KEY))
                        .digest())
        return ''.join([random.choice(allowed_chars) for i in range(length)])
# UTC tzinfo: use Django's singleton when present (>= 1.4).
try:
    from django.utils.timezone import utc as django_utc
except ImportError: # django < 1.4
    class UTC(tzinfo):
        """UTC implementation taken from django 1.4."""
        def __repr__(self):
            return '<UTC>'
        def utcoffset(self, dt):
            return timedelta(0)
        def tzname(self, dt):
            return 'UTC'
        def dst(self, dt):
            return timedelta(0)
    django_utc = UTC()
# Constant-time string comparison (timing-attack resistant equality).
try:
    from django.utils.crypto import constant_time_compare as ct_compare
except ImportError: # django < 1.4
    def ct_compare(val1, val2):
        if len(val1) != len(val2):
            return False
        result = 0
        for x, y in zip(val1, val2):
            result |= ord(x) ^ ord(y)
        return result == 0
# Sentinel used by SimpleLazyObject to mark "not yet evaluated".
try:
    from django.utils.functional import empty
except ImportError: # django < 1.4
    empty = None
# Public aliases under the names the rest of this package uses.
get_random_string = random_string
constant_time_compare = ct_compare
utc = django_utc
def sanitize_log_data(secret, data=None, leave_characters=4):
    """Mask a secret for safe logging.

    All but the first *leave_characters* characters of *secret* are replaced
    with ``'*'``.  When *data* is given, every occurrence of *secret* inside
    it is substituted with the masked value and the resulting string is
    returned; otherwise the masked secret itself is returned.
    """
    masked = secret[:leave_characters] + '*' * (len(secret) - leave_characters)
    if data:
        return data.replace(secret, masked)
    return masked
def sanitize_redirect(host, redirect_to):
    """
    Given the hostname and an untrusted URL to redirect to, test it to make
    sure it isn't garbage/harmful and return it, else return None, similar
    to how django.contrib.auth.views does it.

    >>> print sanitize_redirect('myapp.com', None)
    None
    >>> print sanitize_redirect('myapp.com', '')
    None
    >>> print sanitize_redirect('myapp.com', {})
    None
    >>> print sanitize_redirect('myapp.com', 'http://notmyapp.com/path/')
    None
    >>> print sanitize_redirect('myapp.com', 'http://myapp.com/path/')
    http://myapp.com/path/
    >>> print sanitize_redirect('myapp.com', '/path/')
    /path/
    """
    # Quick sanity check: reject empty/None targets outright.
    if not redirect_to:
        return None
    # Heavier check: refuse redirection to any other host.
    try:
        target_host = urlparse.urlparse(redirect_to)[1]
    except TypeError:  # redirect_to is not a parseable string
        return None
    return None if (target_host and target_host != host) else redirect_to
def group_backend_by_type(items, key=lambda x: x):
    """Group *items* into ``'openid'`` / ``'oauth'`` / ``'oauth2'`` buckets
    according to the backend class that *key(item)* names."""
    # Imported lazily: social_auth.backends imports this module back.
    from social_auth.backends import \
        get_backends, OpenIdAuth, BaseOAuth, BaseOAuth2
    backends = get_backends()
    grouped = defaultdict(list)
    for item in items:
        backend_cls = backends[key(item)]
        # Order matters: BaseOAuth2 subclasses BaseOAuth, so test it first.
        if issubclass(backend_cls, OpenIdAuth):
            grouped['openid'].append(item)
        elif issubclass(backend_cls, BaseOAuth2):
            grouped['oauth2'].append(item)
        elif issubclass(backend_cls, BaseOAuth):
            grouped['oauth'].append(item)
    return dict(grouped)
def setting(name, default=None):
    """Return the value of the Django setting *name*, or *default* if the
    setting is not defined."""
    if hasattr(settings, name):
        return getattr(settings, name)
    return default
def backend_setting(backend, name, default=None):
    """Resolve a per-backend setting.

    Lookup order:
      1. ``<BACKEND_NAME>_<name>`` (backend name upper-cased, dashes
         replaced with underscores),
      2. plain ``name``,
      3. *default*.
    """
    prefixed = '{0}_{1}'.format(
        backend.AUTH_BACKEND.name.upper().replace('-', '_'),
        name
    )
    for candidate in (prefixed, name):
        if hasattr(settings, candidate):
            return setting(candidate)
    return default
# Module-level logger for the social_auth package.
# BUG FIX (cleanup): the previous "logger = None" followed by "if not logger:"
# was a dead guard -- the condition was always true.  Configure directly.
logger = logging.getLogger('SocialAuth')
logger.setLevel(logging.DEBUG)
def log(level, *args, **kwargs):
    """Forward *args*/*kwargs* to the module logger method named by *level*
    ('debug', 'error', 'exception' or 'warn')."""
    handler = {'debug': logger.debug,
               'error': logger.error,
               'exception': logger.exception,
               'warn': logger.warn}[level]
    handler(*args, **kwargs)
def model_to_ctype(val):
    """Convert a Model instance into a small dict (pk + content type pk)
    from which the instance can be retrieved later; pass other values
    through unchanged."""
    if not isinstance(val, Model):
        return val
    return {
        'pk': val.pk,
        'ctype': ContentType.objects.get_for_model(val).pk,
    }
def ctype_to_model(val):
    """Inverse of model_to_ctype(): rebuild the model instance from a
    ``{'pk': ..., 'ctype': ...}`` dict; pass anything else through."""
    is_instance_ref = isinstance(val, dict) and 'pk' in val and 'ctype' in val
    if is_instance_ref:
        model_cls = ContentType.objects.get_for_id(val['ctype']).model_class()
        return model_cls.objects.get(pk=val['pk'])
    return val
def clean_partial_pipeline(request):
    """Drop any partial-pipeline state stored in the user's session."""
    session_key = setting('SOCIAL_AUTH_PARTIAL_PIPELINE_KEY',
                          'partial_pipeline')
    # Only pop when present so the session isn't flagged as modified
    # unnecessarily.
    if session_key in request.session:
        request.session.pop(session_key, None)
def log_exceptions_to_messages(request, backend, err):
    """Surface *err* to the user through django.contrib.messages, if that
    app is installed; otherwise do nothing."""
    if 'django.contrib.messages' not in setting('INSTALLED_APPS'):
        return
    from django.contrib.messages.api import error
    error(request, unicode(err),
          extra_tags='social-auth %s' % backend.AUTH_BACKEND.name)
def url_add_parameters(url, params):
    """Return *url* with *params* appended to its query string.

    Existing query parameters are preserved; a parameter already present in
    the URL will simply appear twice.  An empty/None *params* returns the
    URL unchanged.
    """
    if not params:
        return url
    fragments = list(urlparse.urlparse(url))
    # BUG FIX: materialize params.items() explicitly.  On Python 2 this is
    # identical; on Python 3 dict views cannot be concatenated to a list.
    query_pairs = urlparse.parse_qsl(fragments[4]) + list(params.items())
    fragments[4] = urllib.urlencode(query_pairs)
    return urlparse.urlunparse(fragments)
class LazyDict(SimpleLazyObject):
    """Lazy dict initialization.

    SimpleLazyObject proxies attribute access but not item access; these
    overrides force evaluation of the wrapped object (``_setup()``) on the
    first ``[]`` read or write.  ``empty`` is the Django sentinel meaning
    "not yet evaluated".
    """
    def __getitem__(self, name):
        if self._wrapped is empty:
            self._setup()
        return self._wrapped[name]
    def __setitem__(self, name, value):
        if self._wrapped is empty:
            self._setup()
        self._wrapped[name] = value
def dsa_urlopen(*args, **kwargs):
    """Wrapper around urllib2.urlopen that applies the
    SOCIAL_AUTH_URLOPEN_TIMEOUT setting as the default timeout (an explicit
    ``timeout`` keyword always wins)."""
    configured_timeout = setting('SOCIAL_AUTH_URLOPEN_TIMEOUT')
    if configured_timeout:
        kwargs.setdefault('timeout', configured_timeout)
    return urlopen(*args, **kwargs)
# When run as a script, execute the doctest examples embedded in this module
# (e.g. those in sanitize_redirect's docstring).
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| bsd-3-clause |
lmorchard/badger | apps/badges/views.py | 1 | 17108 | import hashlib
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.http import HttpResponseForbidden
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.template.defaultfilters import slugify
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext, ugettext_lazy as _
from django.core import validators
from django.core.exceptions import ValidationError
from django.contrib.auth.models import User
from badges.models import Badge, BadgeNomination
from badges.models import BadgeAward, BadgeAwardee
from badges.models import badge_file_path
from badges.forms import BadgeForm, BadgeNominationForm
from badges.forms import BadgeNominationDecisionForm
from notification import models as notification
from django.core.exceptions import ObjectDoesNotExist
from socialconnect import publishing
from socialconnect.models import UserOauthAssociation
from avatar.templatetags.avatar_tags import avatar_url
from badges.templatetags.badge_tags import badge_url
from voting.models import Vote
def index(request):
    """Browse badges, listing the top 25 by vote score."""
    # TODO: This needs some heavy caching:
    top_badges = Vote.objects.get_top(Badge, 25)
    context = {'badge_list': top_badges}
    return render_to_response('badges/badge_index.html', context,
                              context_instance=RequestContext(request))
#import pinax.apps.profiles.views
def profile(request, username, template_name="profiles/profile.html",
            extra_context=None):
    """Much simplified version of the Pinax profile view, minus friendship
    and invitation features.

    Shows *username*'s profile and awarded badges.  Hidden badges are
    included only when the viewer is allowed to see them AND explicitly
    requests them via ``?show_hidden``.
    """
    other_user = get_object_or_404(User, username=username)
    # Owner and staff/superusers may see hidden awards.
    can_show_hidden = (
        request.user == other_user or
        request.user.is_staff or
        request.user.is_superuser
    )
    show_hidden = (
        can_show_hidden and
        request.GET.get('show_hidden', False) is not False
    )
    awarded_badges = list(Badge.objects.get_badges_for_user(other_user, show_hidden))
    # BUG FIX (cleanup): the previous nested if/else computing is_me reduced
    # to this single expression; anonymous users are never "me".
    is_me = bool(request.user.is_authenticated() and
                 request.user == other_user)
    return render_to_response(template_name, {
        "can_show_hidden": can_show_hidden,
        "show_hidden": show_hidden,
        "profile_user": other_user,
        "awarded_badges": awarded_badges,
        "is_me": is_me,
        "other_user": other_user,
    }, context_instance=RequestContext(request))
#return pinax.apps.profiles.views.profile(request, username, template_name, {
# "can_show_hidden": can_show_hidden,
# "show_hidden": show_hidden,
# "profile_user": user,
# "awarded_badges": awarded_badges,
#})
@login_required
def create(request):
    """Create a new badge.

    GET renders an empty BadgeForm; POST validates it, saves the badge with
    the current user as creator and a slug derived from the title, and
    stores an optional uploaded image under an md5-hashed filename.  On
    success, redirects to the new badge's detail page; an invalid POST
    re-renders the bound form with errors.
    """
    if request.method == "POST":
        form = BadgeForm(request.POST)
        if form.is_valid():
            new_badge = form.save(commit=False)
            new_badge.creator = request.user
            new_badge.slug = slugify(new_badge.title)
            if 'main_image' in request.FILES:
                # Hash the uploaded filename to get a stable, safe storage path.
                path = badge_file_path(slug=new_badge.slug,
                    filename=hashlib.md5(request.FILES['main_image'].name).hexdigest())
                new_badge.main_image = path
                # NOTE(review): new_file (the storage-assigned name) is unused;
                # presumably path never collides -- verify.
                new_file = new_badge.main_image.storage.save(path,
                    request.FILES['main_image'])
            new_badge.save()
            return HttpResponseRedirect(reverse(
                'badges.views.badge_details',
                args=(new_badge.slug,)
            ))
    else:
        form = BadgeForm()
    return render_to_response('badges/create.html', {
        'form': form
    }, context_instance=RequestContext(request))
@login_required
def edit(request, badge_slug):
    """Edit an existing badge.

    Mirrors create() but operates on a bound instance and requires the
    'editing' permission from Badge.get_permissions().  Note: saving
    re-derives the slug from the (possibly changed) title and resets the
    creator to the current editor.
    """
    badge = get_object_or_404(Badge, slug=badge_slug)
    perms = badge.get_permissions(request.user)
    if not perms['editing']:
        return HttpResponseForbidden(_('access denied'))
    if request.method == "POST":
        form = BadgeForm(request.POST, instance=badge)
        if form.is_valid():
            new_badge = form.save(commit=False)
            # NOTE(review): overwrites the original creator with the editor --
            # confirm this is intended.
            new_badge.creator = request.user
            new_badge.slug = slugify(new_badge.title)
            if 'main_image' in request.FILES:
                # Same hashed-filename storage scheme as create().
                path = badge_file_path(slug=new_badge.slug,
                    filename=hashlib.md5(request.FILES['main_image'].name).hexdigest())
                new_badge.main_image = path
                new_file = new_badge.main_image.storage.save(path,
                    request.FILES['main_image'])
            new_badge.save()
            return HttpResponseRedirect(reverse(
                'badges.views.badge_details',
                args=(new_badge.slug,)
            ))
    else:
        form = BadgeForm(instance=badge)
    return render_to_response('badges/edit.html', {
        'form': form
    }, context_instance=RequestContext(request))
def badge_details(request, badge_slug):
    """Show details on a badge: pending nominations (scoped by permission),
    users already awarded, and the viewer's own unclaimed awards."""
    badge = get_object_or_404(Badge, slug=badge_slug)
    nomination_form = BadgeNominationForm()
    perms = badge.get_permissions(request.user)
    if not request.user.is_authenticated():
        # Anonymous viewers see no nomination list at all.
        nominations = None
    elif badge.allows_nomination_listing_by(request.user):
        # List all pending (not yet approved) nominations for this badge
        nominations = BadgeNomination.objects.filter(badge=badge).exclude(approved=True)
    else:
        # List only the viewer's own pending nominations for this badge
        nominations = BadgeNomination.objects.filter(badge=badge,
            nominator=request.user).exclude(approved=True)
    award_users = list(BadgeAward.objects.get_users_for_badge(badge))
    if not request.user.is_authenticated():
        unclaimed_awards = []
    else:
        # Awards granted to the viewer that are still waiting to be claimed.
        unclaimed_awards = BadgeAward.objects.filter(claimed=False,
            ignored=False, badge=badge, awardee__user=request.user)
    return render_to_response('badges/badge_detail.html', {
        'badge': badge,
        'nomination_form': nomination_form,
        'nominations': nominations,
        'award_users': award_users,
        'permissions': perms,
        'unclaimed_awards': unclaimed_awards
    }, context_instance=RequestContext(request))
@login_required
def nomination_create(request, badge_slug):
    """Nominate a user (or email address) for a badge.

    POST with ``action_nominate`` validates the nomination form, resolves
    the nominee into a BadgeAwardee (existing user or bare email) and
    records the nomination; otherwise the form is (re-)rendered.
    """
    badge = get_object_or_404(Badge, slug=badge_slug)
    nomination_form = BadgeNominationForm()
    perms = badge.get_permissions(request.user)
    if request.method == "POST":
        # BUG FIX: was ``request.user.is_authenticated`` without the call --
        # the bound method is always truthy.  The rest of this module calls
        # it as a method, so do the same here (redundant under
        # @login_required, but now actually meaningful).
        if (request.user.is_authenticated() and
                request.POST.get('action_nominate', None) is not None):
            if not perms['nomination']:
                return HttpResponseForbidden(_('access denied'))
            nomination_form = BadgeNominationForm(request.POST)
            nomination_form.context = {"badge": badge, "nominator": request.user}
            if nomination_form.is_valid():
                nominee_value = nomination_form.cleaned_data['nominee']
                # Accepts either an existing username or a raw email address.
                badge_awardee, created = \
                    BadgeAwardee.objects.get_or_create_by_user_or_email(
                        nominee_value)
                nomination = badge.nominate(request.user, badge_awardee,
                    nomination_form.cleaned_data['reason_why'])
                messages.add_message(request, messages.SUCCESS,
                    ugettext("%s nominated for %s" % (nomination.nominee, badge)))
                return HttpResponseRedirect(reverse(
                    'badges.views.badge_details', args=(badge.slug,)))
    return render_to_response('badges/nomination_create.html', {
        'badge': badge,
        'nomination_form': nomination_form,
        'permissions': perms,
    }, context_instance=RequestContext(request))
def nomination_details(request, badge_slug, nomination_id):
    """Display details on a nomination and handle approve/reject POSTs.

    ``do_jump`` accumulates where to redirect after a decision: True means
    "honour the 'jump' POST field (default: award page)", the string
    'badge' forces the badge page, False means no decision was taken and
    the detail page is re-rendered.
    """
    badge = get_object_or_404(Badge, slug=badge_slug)
    nomination = get_object_or_404(BadgeNomination, badge=badge, id=nomination_id)
    perms = nomination.get_permissions(request.user)
    if not perms['viewing']:
        return HttpResponseForbidden(_('access denied'))
    if request.method == "POST":
        do_jump = False
        decision_form = BadgeNominationDecisionForm(request.POST)
        if request.POST.get('action_approve', None) is not None:
            if not perms['approval']:
                return HttpResponseForbidden(_('access denied'))
            # Approval creates the actual award; reason_why is optional here.
            new_award = nomination.approve(request.user,
                request.POST.get('reason_why', ''))
            messages.add_message(
                request, messages.SUCCESS,
                ugettext("nomination approved for %s" % (new_award))
            )
            do_jump = True
        # Rejection requires a valid decision form (mandatory reason).
        if decision_form.is_valid() and request.POST.get('action_reject', None) is not None:
            if not perms["rejection"]:
                return HttpResponseForbidden(_('access denied'))
            messages.add_message(
                request, messages.SUCCESS,
                ugettext("nomination rejected for %s" % (nomination))
            )
            nomination.reject(request.user,
                decision_form.cleaned_data['reason_why'])
            do_jump = 'badge'
        if do_jump is not False:
            if do_jump is True:
                jump_val = request.POST.get('jump', 'award')
            else:
                jump_val = do_jump
            if jump_val == 'badge':
                return HttpResponseRedirect(reverse('badge_details',
                    args=(badge.slug,)))
            else:
                return HttpResponseRedirect(reverse('badge_nomination',
                    args=(badge.slug, nomination.id,)))
    else:
        decision_form = BadgeNominationDecisionForm()
    return render_to_response('badges/nomination_detail.html', {
        'nomination': nomination,
        'decision_form': decision_form,
        'permissions': perms
    }, context_instance=RequestContext(request))
def award_details(request, badge_slug, awardee_name, award_id):
    """Display details on an award and handle claim/reject/ignore POSTs.

    *awardee_name* may be either an email address (awardee not yet a site
    user) or a username; validate_email is used to tell them apart.
    """
    badge = get_object_or_404(Badge, slug=badge_slug)
    try:
        # If this parses as an email, look the award up by awardee email...
        validators.validate_email(awardee_name)
        award = get_object_or_404(BadgeAward, badge=badge, id=award_id,
            awardee__email=awardee_name)
    except ValidationError:
        # ...otherwise treat it as a username.
        # NOTE(review): User.objects.get raises DoesNotExist (HTTP 500) for an
        # unknown username instead of a 404 -- verify whether that's intended.
        awardee_user = User.objects.get(username__exact=awardee_name)
        award = get_object_or_404(BadgeAward, badge=badge, id=award_id,
            awardee__user=awardee_user)
    perms = award.get_permissions(request.user)
    if not perms['viewing']:
        return HttpResponseForbidden(_('access denied'))
    if request.method == "POST":
        do_jump = False
        # Only an unclaimed award may be acted on, and only by its claimant.
        if award.claimed == False and award.allows_claim_by(request.user):
            if request.POST.get('action_reject_award', None) is not None:
                award.reject(request.user)
                messages.add_message(request, messages.SUCCESS,
                    _("Badge award rejected"))
                do_jump = "badge"
            elif request.POST.get('action_ignore_award', None) is not None:
                award.ignore(request.user)
                messages.add_message(request, messages.SUCCESS,
                    _("Badge award ignored"))
                do_jump = "badge"
            elif request.POST.get('action_claim_award', None) is not None:
                # Optionally share the claim to selected social services
                # before marking the award claimed.
                publish_services = request.POST.getlist('publish')
                if publish_services:
                    _publish_award(award, request, publish_services)
                award.claim(request.user)
                messages.add_message(request, messages.SUCCESS,
                    _("Badge award claimed"))
                do_jump = True
        if do_jump is not False:
            # True -> honour the 'jump' field; 'badge' -> badge page.
            if do_jump is True:
                jump_val = request.POST.get('jump', 'award')
            else:
                jump_val = do_jump
            if jump_val == 'badge':
                return HttpResponseRedirect(reverse('badge_details',
                    args=(badge.slug,)))
            else:
                return HttpResponseRedirect(reverse('badge_award',
                    args=(award.badge.slug, award.awardee.user.username,
                        award.id,)))
    return render_to_response('badges/award_detail.html', {
        'badge': badge,
        'award': award,
        'awardee': award.awardee,
        'permissions': perms
    }, context_instance=RequestContext(request))
def _publish_award(award, request, services):
    """Share an award claim to each of the user's connected OAuth services.

    Failures are reported per-service via the messages framework and never
    abort the remaining services; a service with no stored association for
    this user is silently skipped.
    """
    if not services:
        return
    for service in services:
        try:
            assoc = UserOauthAssociation.objects.get(
                user=request.user, auth_type=service)
            publishing.publish(
                assoc,
                # TODO: Templatize this badge claimed message and / or allow user customization
                _('I just claimed the badge "%s"!') % award.badge.title,
                request.build_absolute_uri(award.get_absolute_url()),
                request.build_absolute_uri(badge_url(award.badge, 64)),
                award.badge.title,
                award.badge.description
            )
            messages.add_message(request, messages.SUCCESS,
                _("Published update to %s") % (service))
        except publishing.PublisherFailed, e:
            # TODO: Log a service failure here?
            # TODO: Create a flash notification for failures (eg. de-auth'd, etc)
            messages.add_message(request, messages.ERROR,
                _("Publishing to %s failed because: %s") % (service, e.message))
            continue
        except UserOauthAssociation.DoesNotExist:
            # No association for this service; nothing to publish.
            continue
def award_history(request, badge_slug, awardee_name):
    """Detailed history of claimed, non-hidden awards of one badge to one
    user, newest first."""
    badge = get_object_or_404(Badge, slug=badge_slug)
    award_user = get_object_or_404(User, username__exact=awardee_name)
    awards = (BadgeAward.objects
              .filter(badge=badge, awardee__user=award_user, claimed=True)
              .exclude(hidden=True)
              .order_by('-updated_at'))
    context = {
        'badge': badge,
        'awards': awards,
        'award_user': award_user,
    }
    return render_to_response('badges/award_list.html', context,
                              context_instance=RequestContext(request))
@login_required
def awardee_verify(request, awardee_claim_code):
    """Accept verification of an awardee identity given a valid claim code.

    After verification, redirect to the single unclaimed award if there is
    exactly one, otherwise to the notifications page.
    """
    awardee = get_object_or_404(BadgeAwardee, claim_code=awardee_claim_code)
    if not awardee.verify(request.user):
        return HttpResponseForbidden(_('not yours'))
    awards = BadgeAward.objects.filter(awardee=awardee).exclude(claimed=True)
    # Evaluate count() once instead of issuing two COUNT queries.
    unclaimed_count = awards.count()
    if unclaimed_count == 1:
        # If there's a single award, just redirect to it.
        messages.add_message(request, messages.SUCCESS,
            _("Award eligibility confirmed"))
        return HttpResponseRedirect(awards[0].get_absolute_url())
    elif unclaimed_count > 1:
        # If multiple awards, redirect to notifications. Might be confusing.
        messages.add_message(request, messages.SUCCESS,
            _("Multiple awards confirmed, check your notifications."))
        return HttpResponseRedirect(reverse('notification_notices'))
    # BUG FIX: with zero unclaimed awards this view previously fell off the
    # end and returned None (a server error in Django); redirect to the
    # notifications page instead.
    return HttpResponseRedirect(reverse('notification_notices'))
@login_required
def award_show_hide_bulk(request, badge_slug, awardee_name):
    """Show or hide, in bulk, all of a user's claimed awards of one badge.

    The action ('hide' or 'show') comes from POST or GET, defaulting to
    'hide'; the change is only applied on a confirmed POST, otherwise a
    confirmation page is rendered.
    """
    badge = get_object_or_404(Badge, slug=badge_slug)
    award_user = get_object_or_404(User, username__exact=awardee_name)
    awards = BadgeAward.objects.filter(badge=badge,
        awardee__user=award_user, claimed=True).order_by('-updated_at')
    # Get the permissions for all awards, gather the ones allowing showhide
    perms_set = ( ( a, a.get_permissions(request.user) ) for a in awards )
    awards_to_showhide = [ p[0] for p in perms_set if p[1]['showhide'] ]
    if len(awards_to_showhide) == 0:
        # No awards allowed showhide, so forbidden overall
        return HttpResponseForbidden(_('access denied'))
    action = request.POST.get('action', request.GET.get('action', 'hide'))
    if request.method == "POST":
        if request.POST.get('confirm', False) is not False:
            # Map the requested action onto the BadgeAward.hide()/show()
            # method name and invoke it on every eligible award.
            action_method = (action == 'hide') and 'hide' or 'show'
            for award in awards_to_showhide:
                getattr(award, action_method)()
            return HttpResponseRedirect(reverse(
                'profile_detail', args=[request.user.username]
            ))
    return render_to_response('badges/award_show_hide.html', {
        'action': action,
        'badge': badge,
        'awards': awards_to_showhide,
        'award_user': award_user,
    }, context_instance=RequestContext(request))
@login_required
def award_show_hide_single(request, badge_slug, awardee_name, award_id):
    # TODO: not implemented -- placeholder returning None (Django treats a
    # None response as an error if this URL is ever routed).
    pass
| bsd-3-clause |
gferreira/hTools2_extension | hTools2.roboFontExt/lib/hTools2/extras/equalize.py | 1 | 4304 | # -*- coding: utf-8 -*-
# equalize.py
# by Jens Kutilek
# https://github.com/jenskutilek/Curve-Equalizer
#-----------------------
# EQMethods/geometry.py
#-----------------------
from math import atan2, cos, pi, sin, sqrt
# helper functions
def getTriangleArea(a, b, c):
    # Twice the signed area of triangle (a, b, c): the z-component of the
    # cross product (b - a) x (c - a).  Positive when c lies to the left of
    # the directed line a -> b.
    ab_x = b.x - a.x
    ab_y = b.y - a.y
    ac_x = c.x - a.x
    ac_y = c.y - a.y
    return ab_x * ac_y - ac_x * ab_y
def isOnLeft(a, b, c):
    """Return True if c lies strictly to the left of the directed line a -> b."""
    # Idiom fix: return the comparison directly instead of if/return True/False.
    return getTriangleArea(a, b, c) > 0
def isOnRight(a, b, c):
    """Return True if c lies strictly to the right of the directed line a -> b."""
    # Idiom fix: return the comparison directly instead of if/return True/False.
    return getTriangleArea(a, b, c) < 0
def isCollinear(a, b, c):
    """Return True if a, b and c lie on one straight line (zero signed area).

    Note: exact comparison with 0 -- only robust for integer coordinates.
    """
    # Idiom fix: return the comparison directly instead of if/return True/False.
    return getTriangleArea(a, b, c) == 0
def distance(p0, p1, doRound=False):
    """Euclidean distance between two points, optionally rounded to an int."""
    dx = p0.x - p1.x
    dy = p0.y - p1.y
    d = sqrt(dx ** 2 + dy ** 2)
    return int(round(d)) if doRound else d
# Triangle Geometry
def getTriangleAngles(p0, p1, p2, p3):
    """Return (alpha, beta, gamma) for the triangle spanned by a cubic
    segment: alpha at p0 between chord p0-p3 and handle p0-p1, gamma at p3
    between chord and handle p3-p2, beta = pi - alpha - gamma."""
    chord_angle = atan2(p3.y - p0.y, p3.x - p0.x)
    handle_angle = atan2(p1.y - p0.y, p1.x - p0.x)
    alpha = chord_angle - handle_angle
    # NOTE: gamma is measured with swapped atan2 arguments (x, y) in the
    # original implementation; that convention is preserved here.
    gamma = (atan2(p3.x - p0.x, p3.y - p0.y)
             - atan2(p3.x - p2.x, p3.y - p2.y))
    beta = pi - alpha - gamma
    return alpha, beta, gamma
def getTriangleSides(p0, p1, p2, p3):
    """Return the triangle side lengths (a, b, c) via the law of sines,
    where b is the chord p0-p3, a is opposite alpha and c opposite gamma."""
    alpha, beta, gamma = getTriangleAngles(p0, p1, p2, p3)
    chord = abs(distance(p0, p3))
    side_a = chord * sin(alpha) / sin(beta)
    side_c = chord * sin(gamma) / sin(beta)
    return side_a, chord, side_c
def getNewCoordinates(targetPoint, referencePoint, alternateReferencePoint, distance):
    """Return the (x, y) at *distance* from referencePoint in the direction
    of targetPoint; if target and reference coincide (direction undefined),
    use alternateReferencePoint for the direction instead."""
    coincident = (targetPoint.y == referencePoint.y
                  and targetPoint.x == referencePoint.x)
    direction = alternateReferencePoint if coincident else targetPoint
    phi = atan2(direction.y - referencePoint.y,
                direction.x - referencePoint.x)
    return (referencePoint.x + cos(phi) * distance,
            referencePoint.y + sin(phi) * distance)
#----------------------
# EQMethods/Balance.py
#----------------------
from math import atan2
def eqBalance(p0, p1, p2, p3):
    """Balance the two off-curve handles of the cubic segment p0..p3 so both
    have the same length relative to their triangle sides.

    p1 and p2 are mutated in place (their .x/.y are reassigned) and also
    returned.  Segments whose handle angles are too close (under 45 deg
    apart) or whose handles straddle the chord are left untouched.
    """
    # check angles of the bcps
    # in-point BCPs will report angle = 0
    alpha = atan2(p1.y - p0.y, p1.x - p0.x)
    beta = atan2(p2.y - p3.y, p2.x - p3.x)
    # 0.7853981633974483 == pi/4, i.e. 45°
    if abs(alpha - beta) >= 0.7853981633974483: # 45°
        # only proceed if both handles lie on the same side of the chord p0-p3
        if isOnLeft(p0, p3, p1) and isOnLeft(p0, p3, p2) or isOnRight(p0, p3, p1) and isOnRight(p0, p3, p2):
            a, b, c = getTriangleSides(p0, p1, p2, p3)
            # Current handle lengths as fractions of their triangle sides
            ca = distance(p3, p2) / a
            cc = distance(p0, p1) / c
            # Make the new handle length the average of both fractions
            handle_percentage = (ca + cc) / 2.0
            # Scale triangle sides a and c by the averaged handle length
            a = a * handle_percentage
            c = c * handle_percentage
            # move first control point (in place)
            p1.x, p1.y = getNewCoordinates(p1, p0, p2, c)
            # move second control point (in place; uses the updated p1)
            p2.x, p2.y = getNewCoordinates(p2, p3, p1, a)
    return p1, p2
#-----------------
# added functions
#-----------------
def equalize_curves(reference_glyph):
    '''Balance handles in glyph.'''
    # NOTE(review): modify_glyph is an alias of reference_glyph, so the
    # edits below mutate the very glyph being iterated -- presumably
    # intended, but worth confirming against the RoboFont glyph API.
    modify_glyph = reference_glyph
    reference_glyph.prepareUndo(undoTitle="equalize curves")
    for contourIndex in range(len(reference_glyph.contours)):
        reference_contour = reference_glyph.contours[contourIndex]
        modify_contour = modify_glyph.contours[contourIndex]
        for i in range(len(reference_contour.segments)):
            reference_segment = reference_contour[i]
            modify_segment = modify_contour[i]
            if reference_segment.type == "curve":
                # first pt is last pt of previous segment
                p0 = modify_contour[i-1][-1]
                if len(modify_segment.points) == 3:
                    # Cubic segment: balance its two off-curve points.
                    p1, p2, p3 = modify_segment.points
                    p1, p2 = eqBalance(p0, p1, p2, p3)
                    # Snap the moved control points back to integer grid.
                    p1.round()
                    p2.round()
    reference_glyph.update()
    reference_glyph.performUndo()
| bsd-3-clause |
Novasoft-India/OperERP-AM-Motors | openerp/addons/web/session.py | 54 | 6589 | import datetime
import babel
import dateutil.relativedelta
import logging
import time
import traceback
import sys
import xmlrpclib
import openerp
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# OpenERPSession RPC openerp backend access
#----------------------------------------------------------
class AuthenticationError(Exception):
    """Raised when the RPC login/authentication check fails."""
    pass
class SessionExpiredException(Exception):
    """Raised when an RPC call is attempted on an unbound (expired) session."""
    pass
class Service(object):
    """Thin RPC proxy for one backend service: any attribute access yields
    a callable that forwards the call through the owning session's
    ``send`` with the service name prepended."""
    def __init__(self, session, service_name):
        # The session performs the actual dispatch; we only remember names.
        self.session = session
        self.service_name = service_name
    def __getattr__(self, method):
        # Build a forwarder lazily for whatever method name was requested.
        def forward(*args):
            return self.session.send(self.service_name, method, *args)
        return forward
class Model(object):
    """RPC proxy for one OpenERP model: method calls are forwarded through
    the session's 'object' service via execute_kw."""
    def __init__(self, session, model):
        self.session = session
        self.model = model
        # Proxy for the generic 'object' RPC service used by execute_kw.
        self.proxy = self.session.proxy('object')
    def __getattr__(self, method):
        def proxy(*args, **kw):
            # Forward the call with the session's stored credentials.
            result = self.proxy.execute_kw(self.session._db, self.session._uid, self.session._password, self.model, method, args, kw)
            # reorder read
            if method == "read":
                # The server does not guarantee record order for 'read';
                # restore the order of the requested ids (args[0]),
                # silently dropping ids that were not returned.
                if isinstance(result, list) and len(result) > 0 and "id" in result[0]:
                    index = {}
                    for r in result:
                        index[r['id']] = r
                    result = [index[x] for x in args[0] if x in index]
            return result
        return proxy
    def search_read(self, domain=None, fields=None, offset=0, limit=None, order=None, context=None):
        # Convenience wrapper: search for matching ids, then read them.
        record_ids = self.search(domain or [], offset, limit or False, order or False, context or {})
        if not record_ids: return []
        records = self.read(record_ids, fields or [], context or {})
        return records
class OpenERPSession(object):
    """
    An OpenERP RPC session, a given user can own multiple such sessions
    in a web session.
    .. attribute:: context
        The session context, a ``dict``. Can be reloaded by calling
        :meth:`openerpweb.openerpweb.OpenERPSession.get_context`
    .. attribute:: domains_store
        A ``dict`` matching domain keys to evaluable (but non-literal) domains.
        Used to store references to non-literal domains which need to be
        round-tripped to the client browser.
    """
    def __init__(self):
        self._creation_time = time.time()
        self._db = False
        self._uid = False
        self._login = False
        self._password = False
        self._suicide = False
        self.context = {}
        self.jsonp_requests = {} # FIXME use a LRU
    def send(self, service_name, method, *args):
        # Dispatch one RPC call, translating backend exceptions into
        # xmlrpclib Faults so the transport layer can serialise them.
        code_string = u"warning -- %s\n\n%s"
        try:
            return openerp.netsvc.dispatch_rpc(service_name, method, args)
        except openerp.osv.osv.except_osv, e:
            raise xmlrpclib.Fault(code_string % (e.name, e.value), '')
        except openerp.exceptions.Warning, e:
            raise xmlrpclib.Fault(code_string % ("Warning", e), '')
        except openerp.exceptions.AccessError, e:
            raise xmlrpclib.Fault(code_string % ("AccessError", e), '')
        except openerp.exceptions.AccessDenied, e:
            raise xmlrpclib.Fault('AccessDenied', openerp.tools.ustr(e))
        except openerp.exceptions.DeferredException, e:
            # Deferred exceptions carry their own traceback; format it.
            formatted_info = "".join(traceback.format_exception(*e.traceback))
            raise xmlrpclib.Fault(openerp.tools.ustr(e), formatted_info)
        except Exception, e:
            formatted_info = "".join(traceback.format_exception(*(sys.exc_info())))
            raise xmlrpclib.Fault(openerp.tools.ustr(e), formatted_info)
    def proxy(self, service):
        # Return a Service wrapper whose method calls go through send().
        return Service(self, service)
    def bind(self, db, uid, login, password):
        # Remember credentials for all subsequent RPC calls on this session.
        self._db = db
        self._uid = uid
        self._login = login
        self._password = password
    def authenticate(self, db, login, password, env=None):
        uid = self.proxy('common').authenticate(db, login, password, env)
        self.bind(db, uid, login, password)
        # Only load the user's context once authentication succeeded.
        if uid: self.get_context()
        return uid
    def assert_valid(self, force=False):
        """
        Ensures this session is valid (logged into the openerp server)
        """
        if self._uid and not force:
            return
        # TODO use authenticate instead of login
        uid = self.proxy("common").login(self._db, self._login, self._password)
        if not uid:
            raise AuthenticationError("Authentication failure")
    def ensure_valid(self):
        if self._uid:
            try:
                self.assert_valid(True)
            except Exception:
                # Stored credentials are stale: forget the cached uid.
                self._uid = None
    def execute(self, model, func, *l, **d):
        self.assert_valid()
        model = self.model(model)
        r = getattr(model, func)(*l, **d)
        return r
    def exec_workflow(self, model, id, signal):
        self.assert_valid()
        r = self.proxy('object').exec_workflow(self._db, self._uid, self._password, model, signal, id)
        return r
    def model(self, model):
        """ Get an RPC proxy for the object ``model``, bound to this session.
        :param model: an OpenERP model name
        :type model: str
        :rtype: a model object
        """
        if self._db == False:
            raise SessionExpiredException("Session expired")
        return Model(self, model)
    def get_context(self):
        """ Re-initializes the current user's session context (based on
        his preferences) by calling res.users.get_context() with the old
        context
        :returns: the new context
        """
        assert self._uid, "The user needs to be logged-in to initialize his context"
        self.context = self.model('res.users').context_get() or {}
        self.context['uid'] = self._uid
        self._fix_lang(self.context)
        return self.context
    def _fix_lang(self, context):
        """ OpenERP provides languages which may not make sense and/or may not
        be understood by the web client's libraries.
        Fix those here.
        :param dict context: context to fix
        """
        lang = context['lang']
        # inane OpenERP locale
        if lang == 'ar_AR':
            lang = 'ar'
        # lang to lang_REGION (datejs only handles lang_REGION, no bare langs)
        if lang in babel.core.LOCALE_ALIASES:
            lang = babel.core.LOCALE_ALIASES[lang]
        context['lang'] = lang or 'en_US'
| agpl-3.0 |
dynasticorpheus/domoticz | plugins/examples/Denon4306.py | 2 | 11823 | #
# Denon AVR 4306 Plugin
#
# Author: Dnpwwo, 2016
#
# Plugin parameter definition below will be parsed during startup and copied into Manifest.xml, this will then drive the user interface in the Hardware web page
#
"""
<plugin key="Denon4306" version="1.0.0" name="Denon 4306 Amplifier" author="dnpwwo" wikilink="http://www.domoticz.com/wiki/plugins/Denon.html" externallink="http://www.denon.co.uk/uk">
<params>
<param field="Address" label="IP Address" width="200px" required="true" default="127.0.0.1"/>
<param field="Port" label="Port" width="30px" required="true" default="23"/>
<param field="Mode6" label="Debug" width="75px">
<options>
<option label="True" value="Debug"/>
<option label="False" value="Normal" default="true" />
</options>
</param>
</params>
</plugin>
"""
import Domoticz
# Connection/protocol state shared by the Domoticz callback functions below.
isConnected = False
# True while the start-up query chain (PW -> ZM -> SI -> MV -> MU -> Z2 -> Z3)
# is still running; see onMessage.
isStarting = False
# Heartbeats remaining before the next reconnect attempt, and the number of
# keep-alive polls sent without a reply.
nextConnect = 3
oustandingPings = 0
# Device Status - When off set device values to negative
powerOn = False
mainSource = 0
mainVolume1 = 0
zone2Source = 0
zone2Volume = 0
zone3Source = 0
zone3Volume = 0
# Maps Domoticz selector levels (multiples of 10) to Denon source names.
selectorMap = {0:'OFF',10:'DVD',20:'VDP',30:'TV',40:'CD',50:'DBS',60:'Tuner',70:'Phono',80:'VCR-1',90:'VCR-2',100:'V.Aux',110:'CDR/Tape',120:'AuxNet',130:'AuxIPod'}
def onStart():
    # Plugin start-up: create the six devices on first run (three source
    # selectors, three volume dimmers), then open a line-based TCP
    # connection to the amplifier.
    if Parameters["Mode6"] == "Debug":
        Domoticz.Debugging(1)
    if (len(Devices) == 0):
        # Options carry base64-encoded selector level names matching
        # selectorMap (levels are multiples of 10).
        Domoticz.Device(Name="Main Zone", Unit=1, Type=244, Subtype=62, Switchtype=18, Image=5, Options="LevelActions:fHx8fHx8fHx8fHx8fA==;LevelNames:T2ZmfERWRHxWRFB8VFZ8Q0R8REJTfFR1bmVyfFBob25vfFZDUi0xfFZDUi0yfFYuQXV4fENEUi9UYXBlfEF1eE5ldHxBdXhJUG9k;LevelOffHidden:ZmFsc2U=;SelectorStyle:MQ==").Create()
        Domoticz.Device(Name="Main Volume", Unit=2, Type=244, Subtype=73, Switchtype=7, Image=8).Create()
        Domoticz.Device(Name="Zone 2", Unit=3, Type=244, Subtype=62, Switchtype=18, Image=5, Options="LevelActions:fHx8fHx8fHx8fHx8fA==;LevelNames:T2ZmfERWRHxWRFB8VFZ8Q0R8REJTfFR1bmVyfFBob25vfFZDUi0xfFZDUi0yfFYuQXV4fENEUi9UYXBlfEF1eE5ldHxBdXhJUG9k;LevelOffHidden:ZmFsc2U=;SelectorStyle:MQ==").Create()
        Domoticz.Device(Name="Volume 2", Unit=4, Type=244, Subtype=73, Switchtype=7, Image=8).Create()
        Domoticz.Device(Name="Zone 3", Unit=5, Type=244, Subtype=62, Switchtype=18, Image=5, Options="LevelActions:fHx8fHx8fHx8fHx8fA==;LevelNames:T2ZmfERWRHxWRFB8VFZ8Q0R8REJTfFR1bmVyfFBob25vfFZDUi0xfFZDUi0yfFYuQXV4fENEUi9UYXBlfEF1eE5ldHxBdXhJUG9k;LevelOffHidden:ZmFsc2U=;SelectorStyle:MQ==").Create()
        Domoticz.Device(Name="Volume 3", Unit=6, Type=244, Subtype=73, Switchtype=7, Image=8).Create()
        Domoticz.Log("Devices created.")
    DumpConfigToLog()
    Domoticz.Transport("TCP/IP", Parameters["Address"], Parameters["Port"])
    Domoticz.Protocol("Line")
    Domoticz.Connect()
    return
def onConnect(Status, Description):
    """Domoticz callback fired when the TCP connect attempt completes;
    Status 0 means success."""
    global isConnected
    endpoint = Parameters["Address"]+":"+Parameters["Port"]
    if Status == 0:
        isConnected = True
        Domoticz.Log("Connected successfully to: "+endpoint)
        # Query the power state to kick off the state-refresh chain.
        Domoticz.Send('PW?\r')
    else:
        isConnected = False
        Domoticz.Log("Failed to connect ("+str(Status)+") to: "+endpoint)
        Domoticz.Debug("Failed to connect ("+str(Status)+") to: "+endpoint+" with error: "+Description)
        # Turn devices off in Domoticz
        for unit in Devices:
            UpdateDevice(unit, 0, Devices[unit].sValue)
    return True
def onMessage(Data):
    """Parse one status line from the amplifier and update the cached state.

    While isStarting is set, each reply triggers the query for the next
    piece of state (PW -> ZM -> SI -> MV -> MU -> Z2 -> Z3).  By plugin
    convention a non-positive volume encodes "zone off / muted".
    """
    global oustandingPings, isStarting
    global selectorMap, powerOn, mainSource, mainVolume1, zone2Source, zone2Volume, zone3Source, zone3Volume
    oustandingPings = oustandingPings - 1
    Domoticz.Debug("onMessage ("+str(isStarting)+") called with Data: '"+str(Data)+"'")
    Data = Data.strip()
    action = Data[0:2]
    detail = Data[2:]
    if (action == "PW"): # Power Status
        if (detail == "STANDBY"):
            powerOn = False
        elif (detail == "ON"):
            if (powerOn == False):
                # First time we see power on: start the full state refresh.
                Domoticz.Send('ZM?\r')
                isStarting = True
            powerOn = True
        else: Domoticz.Debug("Unknown: Action "+action+", Detail '"+detail+"' ignored.")
    elif (action == "ZM"): # Main Zone on/off
        if (detail == "ON"):
            Domoticz.Send('SI?\r')
            mainVolume1 = abs(mainVolume1)
        elif (detail == "OFF"):
            mainSource = 0
            mainVolume1 = abs(mainVolume1)*-1
            if (isStarting == True): Domoticz.Send('MU?\r')
        else: Domoticz.Debug("Unknown: Action "+action+", Detail '"+detail+"' ignored.")
    elif (action == "SI"): # Main Zone Source Input
        for key, value in selectorMap.items():
            if (detail == value): mainSource = key
        if (isStarting == True): Domoticz.Send('MV?\r')
    elif (action == "MV"): # Master Volume
        if (detail.isdigit()):
            mainVolume1 = int(detail)
            Domoticz.Send('MU?\r')
        elif (detail[0:3] == "MAX"): Domoticz.Debug("Unknown: Action "+action+", Detail '"+detail+"' ignored.")
        else: Domoticz.Log("Unknown: Action "+action+", Detail '"+detail+"' ignored.")
    elif (action == "MU"): # Overall Mute
        if (detail == "ON"): mainVolume1 = abs(mainVolume1)*-1
        elif (detail == "OFF"): mainVolume1 = abs(mainVolume1)
        else: Domoticz.Debug("Unknown: Action "+action+", Detail '"+detail+"' ignored.")
        if (isStarting == True): Domoticz.Send('Z2?\r')
    elif (action == "Z2"): # Zone 2
        if (detail.isdigit()):
            zone2Volume = int(detail)
        else:
            for key, value in selectorMap.items():
                if (detail == value): zone2Source = key
            # BUG FIX: both branches previously negated the volume, so a
            # powered-on zone 2 could never show a positive volume again.
            # Mirror the ZM/MU handling: negative only when the zone is off.
            if (zone2Source == 0): zone2Volume = abs(zone2Volume)*-1
            else: zone2Volume = abs(zone2Volume)
        if (isStarting == True): Domoticz.Send('Z3?\r')
    elif (action == "Z3"): # Zone 3
        # Z3 is the last reply of the start-up chain.
        isStarting = False
        if (detail.isdigit()):
            zone3Volume = int(detail)
        else:
            for key, value in selectorMap.items():
                if (detail == value): zone3Source = key
            # BUG FIX: same as zone 2 above.
            if (zone3Source == 0): zone3Volume = abs(zone3Volume)*-1
            else: zone3Volume = abs(zone3Volume)
    elif (action == "SS"):
        Domoticz.Debug("Message '"+action+"' ignored.")
    else:
        Domoticz.Error("Unknown message '"+action+"' ignored.")
    SyncDevices()
    return
def onCommand(Unit, Command, Level, Hue):
    """Translate a Domoticz device command into a Denon protocol message.

    Units 1/3/5 are the zone source selectors, units 2/4/6 the zone
    volume/mute devices.  Expected commands are 'On', 'Off' and
    'Set Level <n>' / 'Set Volume <n>'.
    """
    global selectorMap, powerOn, mainSource, mainVolume1, zone2Source, zone2Volume, zone3Source, zone3Volume
    Domoticz.Log("onCommand called for Unit " + str(Unit) + ": Parameter '" + str(Command) + "', Level: " + str(Level))
    Command = Command.strip()
    action, sep, params = Command.partition(' ')
    if (powerOn == False):
        # Power the amplifier up first; the keep-alive poll will resync state.
        Domoticz.Send('PWON\r') # Any commands sent within 4 seconds of this will potentially be ignored
    else:
        if (action == "On"):
            if (Unit == 1): Domoticz.Send('ZMON\r')
            elif (Unit == 2): Domoticz.Send('MUOFF\r')
            elif (Unit == 3): Domoticz.Send('Z2ON\r')
            elif (Unit == 4): Domoticz.Send('Z2MUOFF\r')
            elif (Unit == 5): Domoticz.Send('Z3ON\r')
            elif (Unit == 6): Domoticz.Send('Z3MUOFF\r')
            else: Domoticz.Error( "Unknown Unit number in command "+str(Unit)+".")
        elif (action == "Set"):
            # BUG FIX: the second alternative used to read
            # `Command.lower() == 'Volume'`, which can never be true because
            # str.lower() cannot produce a capital 'V'; 'Set Volume' commands
            # were therefore silently dropped.  Test the parameter word
            # instead, mirroring the 'Level' check.
            if (params.capitalize() == 'Level') or (params.capitalize() == 'Volume'):
                if (Unit == 1): # Main selector
                    Domoticz.Send('SI'+selectorMap[Level]+'\r')
                elif (Unit == 2): # Volume control
                    Domoticz.Send('MV'+str(Level)+'\r')
                elif (Unit == 3): # Zone 2 selector
                    Domoticz.Send('Z2'+selectorMap[Level]+'\r')
                elif (Unit == 4): # Zone 2 Volume control
                    Domoticz.Send('Z2'+str(Level)+'\r')
                elif (Unit == 5): # Zone 3 selector
                    Domoticz.Send('Z3'+selectorMap[Level]+'\r')
                elif (Unit == 6): # Zone 3 Volume control
                    Domoticz.Send('Z3'+str(Level)+'\r')
                SyncDevices()
        elif (action == "Off"):
            if (Unit == 1): Domoticz.Send('ZMOFF\r')
            elif (Unit == 2): Domoticz.Send('MUON\r')
            elif (Unit == 3): Domoticz.Send('Z2OFF\r')
            elif (Unit == 4): Domoticz.Send('Z2MUON\r')
            elif (Unit == 5): Domoticz.Send('Z3OFF\r')
            elif (Unit == 6): Domoticz.Send('Z3MUON\r')
            else: Domoticz.Error( "Unknown Unit number in command "+str(Unit)+".")
        else:
            Domoticz.Error("Unhandled action '"+action+"' ignored, options are On/Set/Off")
    return
def onDisconnect():
    # Connection lost: we can no longer assume the amplifier is powered,
    # so reset the flags and reflect the "off" state in Domoticz.
    global isConnected, powerOn
    isConnected = False
    powerOn = False
    Domoticz.Log("Device has disconnected.")
    SyncDevices()
    return
def onStop():
    """Domoticz callback invoked when the plugin is stopped."""
    Domoticz.Log("onStop called")
    # The previous stray `return 8` looked like leftover debugging; the
    # plugin callbacks conventionally return no meaningful value.
    return
def onHeartbeat():
    """Periodic callback: keep-alive polling while connected, reconnect
    countdown while disconnected."""
    global isConnected, nextConnect, oustandingPings
    if isConnected:
        if oustandingPings > 5:
            # Too many unanswered polls: treat the link as dead.
            Domoticz.Disconnect()
            nextConnect = 0
        else:
            # Poll the power state as a keep-alive.
            Domoticz.Send('PW?\r')
            oustandingPings += 1
    else:
        # if not connected try and reconnected every 3 heartbeats
        oustandingPings = 0
        nextConnect -= 1
        if nextConnect <= 0:
            nextConnect = 3
            Domoticz.Connect()
    return
def SyncDevices():
    # Push the cached amplifier state into the Domoticz devices.  A
    # non-positive volume value encodes "off/muted" and is rendered with
    # nValue 0; positive volumes use nValue 2 ("level set").
    global powerOn, mainSource, mainVolume1, zone2Source, zone2Volume, zone3Source, zone3Volume
    if (powerOn == False):
        # Amplifier in standby: show every device off, keep last volumes.
        UpdateDevice(1, 0, "0")
        UpdateDevice(2, 0, str(mainVolume1))
        UpdateDevice(3, 0, "0")
        UpdateDevice(4, 0, str(zone2Volume))
        UpdateDevice(5, 0, "0")
        UpdateDevice(6, 0, str(zone3Volume))
    else:
        UpdateDevice(1, mainSource, str(mainSource))
        if (mainVolume1 <= 0): UpdateDevice(2, 0, str(abs(mainVolume1)))
        else: UpdateDevice(2, 2, str(mainVolume1))
        UpdateDevice(3, zone2Source, str(zone2Source))
        if (zone2Volume <= 0): UpdateDevice(4, 0, str(abs(zone2Volume)))
        else: UpdateDevice(4, 2, str(zone2Volume))
        UpdateDevice(5, zone3Source, str(zone3Source))
        if (zone3Volume <= 0): UpdateDevice(6, 0, str(abs(zone3Volume)))
        else: UpdateDevice(6, 2, str(zone3Volume))
    return
def UpdateDevice(Unit, nValue, sValue):
    """Write nValue/sValue to one Domoticz device, skipping no-op updates."""
    # The user may have deleted the device; ignore unknown unit numbers.
    if Unit not in Devices:
        return
    device = Devices[Unit]
    # Only touch the device when something actually changed, to avoid
    # needless log and database churn.
    if device.nValue != nValue or device.sValue != sValue:
        device.Update(nValue, str(sValue))
        Domoticz.Log("Update "+str(nValue)+":'"+str(sValue)+"' ("+device.Name+")")
    return
def DumpConfigToLog():
    """Dump the plugin parameters and the device table to the debug log."""
    for key in Parameters:
        if Parameters[key] != "":
            Domoticz.Debug( "'" + key + "':'" + str(Parameters[key]) + "'")
    Domoticz.Debug("Device count: " + str(len(Devices)))
    for unit in Devices:
        device = Devices[unit]
        Domoticz.Debug("Device: " + str(unit) + " - " + str(device))
        Domoticz.Debug("Device ID: '" + str(device.ID) + "'")
        Domoticz.Debug("Device Name: '" + device.Name + "'")
        Domoticz.Debug("Device nValue: " + str(device.nValue))
        Domoticz.Debug("Device sValue: '" + device.sValue + "'")
        Domoticz.Debug("Device LastLevel: " + str(device.LastLevel))
    return
| gpl-3.0 |
kkspeed/emacs.d | site-lisp/Pymacs/contrib/rebox/Pymacs/rebox.py | 10 | 40164 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 1991-1998, 2000, 2002, 2003 Progiciels Bourbeau-Pinard inc.
# François Pinard <pinard@iro.umontreal.ca>, 1991-04.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""\
Handling of boxed comments in various box styles.
The user documentation for this tool may be found at:
http://pymacs.progiciels-bpi.ca/rebox.html
"""
## Note: a double hash comment introduces a group of functions or methods.
__metatype__ = type
import re, sys
## Batch specific features.
def main(*arguments):
    # Batch entry point: read text from a named file or stdin, rebox it
    # according to the command-line options, and write to stdout.
    refill = True
    style = None
    tabify = False
    verbose = False
    width = 79
    import getopt
    options, arguments = getopt.getopt(arguments, 'ns:tvw:', ['help'])
    for option, value in options:
        if option == '--help':
            sys.stdout.write(__doc__)
            sys.exit(0)
        elif option == '-n':
            refill = False
        elif option == '-s':
            style = int(value)
        elif option == '-t':
            tabify = True
        elif option == '-v':
            verbose = True
        elif option == '-w':
            width = int(value)
    if len(arguments) == 0:
        text = sys.stdin.read()
    elif len(arguments) == 1:
        # NOTE(review): `file()` is Python 2 only; this module predates
        # Python 3.
        handle = file(arguments[0])
        text = handle.read()
        handle.close()
    else:
        sys.stderr.write("Invalid usage, try `rebox --help' for help.\n")
        sys.exit(1)
    old_style, new_style, text, position = engine(
        text, style=style, width=width, refill=refill, tabify=tabify)
    if text is None:
        # engine() signals an unknown target style by returning None text.
        sys.stderr.write("* Cannot rebox to style %d.\n" % new_style)
        sys.exit(1)
    sys.stdout.write(text)
    if verbose:
        if old_style == new_style:
            sys.stderr.write("Reboxed with style %d.\n" % old_style)
        else:
            sys.stderr.write("Reboxed from style %d to %d.\n"
                             % (old_style, new_style))
## Emacs specific features.
def pymacs_load_hook():
    # Called by Pymacs when this module is loaded from Emacs: expose the
    # interactive entry points, with 'P' (raw prefix argument) interaction
    # specs for the two commands.
    global interactions, lisp, Let, region, comment, set_default_style
    from Pymacs import lisp, Let
    emacs_rebox = Emacs_Rebox()
    # Declare functions for Emacs to import.
    interactions = {}
    region = emacs_rebox.region
    interactions[region] = 'P'
    comment = emacs_rebox.comment
    interactions[comment] = 'P'
    set_default_style = emacs_rebox.set_default_style
class Emacs_Rebox:
    """\
Emacs-side driver: glue between the Pymacs bridge and the reboxing engine.
"""
    def __init__(self):
        self.default_style = None
    def set_default_style(self, style):
        """\
Set the default style to STYLE.
"""
        self.default_style = style
    def region(self, flag):
        """\
Rebox the boxed comment in the current region, obeying FLAG.
"""
        self.emacs_engine(flag, self.find_region)
    def comment(self, flag):
        """\
Rebox the surrounding boxed comment, obeying FLAG.
"""
        self.emacs_engine(flag, self.find_comment)
    def emacs_engine(self, flag, find_limits):
        """\
Rebox text while obeying FLAG. Call FIND_LIMITS to discover the extent
of the boxed comment.
"""
        # `C-u -' means that box style is to be decided interactively.
        if flag == lisp['-']:
            flag = self.ask_for_style()
        # If FLAG is zero or negative, only change default box style.
        if isinstance(flag, int) and flag <= 0:
            self.default_style = -flag
            lisp.message("Default style set to %d" % -flag)
            return
        # Decide box style and refilling.
        if flag is None:
            style = self.default_style
            refill = True
        elif isinstance(flag, int):
            if self.default_style is None:
                style = flag
            else:
                style = merge_styles(self.default_style, flag)
            refill = True
        else:
            flag = flag.copy()
            if isinstance(flag, list):
                style = self.default_style
                refill = False
            else:
                lisp.error("Unexpected flag value %s" % flag)
        # Prepare for reboxing.
        lisp.message("Reboxing...")
        checkpoint = lisp.buffer_undo_list.value()
        start, end = find_limits()
        text = lisp.buffer_substring(start, end)
        width = lisp.fill_column.value()
        tabify = lisp.indent_tabs_mode.value() is not None
        point = lisp.point()
        if start <= point < end:
            # Remember where point was, relative to the comment text.
            position = point - start
        else:
            position = None
        # Rebox the text and replace it in Emacs buffer.
        old_style, new_style, text, position = engine(
            text, style=style, width=width,
            refill=refill, tabify=tabify, position=position)
        if text is None:
            lisp.error("Cannot rebox to style %d" % new_style)
        lisp.delete_region(start, end)
        lisp.insert(text)
        if position is not None:
            lisp.goto_char(start + position)
        # Collapse all operations into a single one, for Undo.
        self.clean_undo_after(checkpoint)
        # We are finished, tell the user.
        if old_style == new_style:
            lisp.message("Reboxed with style %d" % old_style)
        else:
            lisp.message("Reboxed from style %d to %d"
                         % (old_style, new_style))
    def ask_for_style(self):
        """\
Request the style interactively, using the minibuffer.
"""
        language = quality = type = None
        while language is None:
            lisp.message("\
Box language is 100-none, 200-/*, 300-//, 400-#, 500-;, 600-%%")
            key = lisp.read_char()
            if key >= ord('0') and key <= ord('6'):
                language = key - ord('0')
        while quality is None:
            lisp.message("\
Box quality/width is 10-simple/1, 20-rounded/2, 30-starred/3 or 40-starred/4")
            key = lisp.read_char()
            if key >= ord('0') and key <= ord('4'):
                quality = key - ord('0')
        while type is None:
            lisp.message("\
Box type is 1-opened, 2-half-single, 3-single, 4-half-double or 5-double")
            key = lisp.read_char()
            if key >= ord('0') and key <= ord('5'):
                type = key - ord('0')
        return 100*language + 10*quality + type
    def find_region(self):
        """\
Return the limits of the region.
"""
        return lisp.point(), lisp.mark(lisp.t)
    def find_comment(self):
        """\
Find and return the limits of the block of comments following or enclosing
the cursor, or return an error if the cursor is not within such a block
of comments. Extend it as far as possible in both directions.
"""
        let = Let().push_excursion()
        try:
            # Find the start of the current or immediately following comment.
            lisp.beginning_of_line()
            lisp.skip_chars_forward(' \t\n')
            lisp.beginning_of_line()
            if not language_matcher[0](self.remainder_of_line()):
                temp = lisp.point()
                if not lisp.re_search_forward('\\*/', None, lisp.t):
                    lisp.error("outside any comment block")
                lisp.re_search_backward('/\\*')
                if lisp.point() > temp:
                    lisp.error("outside any comment block")
                temp = lisp.point()
                lisp.beginning_of_line()
                lisp.skip_chars_forward(' \t')
                if lisp.point() != temp:
                    lisp.error("text before start of comment")
                lisp.beginning_of_line()
            start = lisp.point()
            language = guess_language(self.remainder_of_line())
            # Find the end of this comment.
            if language == 2:
                lisp.search_forward('*/')
                if not lisp.looking_at('[ \t]*$'):
                    lisp.error("text after end of comment")
            lisp.end_of_line()
            if lisp.eobp():
                lisp.insert('\n')
            else:
                lisp.forward_char(1)
            end = lisp.point()
            # Try to extend the comment block backwards.
            lisp.goto_char(start)
            while not lisp.bobp():
                if language == 2:
                    lisp.skip_chars_backward(' \t\n')
                    if not lisp.looking_at('[ \t]*\n[ \t]*/\\*'):
                        break
                    if lisp.point() < 2:
                        break
                    lisp.backward_char(2)
                    if not lisp.looking_at('\\*/'):
                        break
                    lisp.re_search_backward('/\\*')
                    temp = lisp.point()
                    lisp.beginning_of_line()
                    lisp.skip_chars_forward(' \t')
                    if lisp.point() != temp:
                        break
                    lisp.beginning_of_line()
                else:
                    lisp.previous_line(1)
                    if not language_matcher[language](self.remainder_of_line()):
                        break
                start = lisp.point()
            # Try to extend the comment block forward.
            lisp.goto_char(end)
            while language_matcher[language](self.remainder_of_line()):
                if language == 2:
                    lisp.re_search_forward('[ \t]*/\\*')
                    lisp.re_search_forward('\\*/')
                    if lisp.looking_at('[ \t]*$'):
                        lisp.beginning_of_line()
                        lisp.forward_line(1)
                        end = lisp.point()
                else:
                    lisp.forward_line(1)
                    end = lisp.point()
            return start, end
        finally:
            let.pops()
    def remainder_of_line(self):
        """\
Return all characters between point and end of line in Emacs buffer.
"""
        return lisp('''\
(buffer-substring (point) (save-excursion (skip-chars-forward "^\n") (point)))
''')
    def clean_undo_after_old(self, checkpoint):
        """\
Remove all intermediate boundaries from the Undo list since CHECKPOINT.
"""
        # Declare some Lisp functions.
        car = lisp.car
        cdr = lisp.cdr
        eq = lisp.eq
        setcdr = lisp.setcdr
        # Remove any `nil' delimiter recently added to the Undo list.
        cursor = lisp.buffer_undo_list.value()
        if not eq(cursor, checkpoint):
            tail = cdr(cursor)
            while not eq(tail, checkpoint):
                if car(tail):
                    cursor = tail
                    tail = cdr(cursor)
                else:
                    tail = cdr(tail)
                    setcdr(cursor, tail)
    def clean_undo_after(self, checkpoint):
        """\
Remove all intermediate boundaries from the Undo list since CHECKPOINT.
"""
        # Same job as clean_undo_after_old, but done on the Lisp side.
        lisp("""
(let ((undo-list %s))
  (if (not (eq buffer-undo-list undo-list))
      (let ((cursor buffer-undo-list))
        (while (not (eq (cdr cursor) undo-list))
          (if (car (cdr cursor))
              (setq cursor (cdr cursor))
            (setcdr cursor (cdr (cdr cursor)))))))
  nil)
"""
             % (checkpoint or 'nil'))
## Reboxing main control.
def engine(text, style=None, width=79, refill=True, tabify=False,
           position=None):
    """\
Add, delete or adjust a boxed comment held in TEXT, according to STYLE.
STYLE values are explained at beginning of this file. Any zero attribute
in STYLE indicates that the corresponding attribute should be recovered
from the currently existing box. Produced lines will not go over WIDTH
columns if possible, if refilling gets done. But if REFILL is false, WIDTH
is ignored. If TABIFY is true, the beginning of produced lines will have
spaces replace by TABs. POSITION is either None, or a character position
within TEXT. Returns four values: the old box style, the new box style,
the reformatted text, and either None or the adjusted value of POSITION in
the new text. The reformatted text is returned as None if the requested
style does not exist.
"""
    last_line_complete = text and text[-1] == '\n'
    if last_line_complete:
        text = text[:-1]
    lines = text.expandtabs().split('\n')
    # Decide about refilling and the box style to use.
    new_style = 111
    # NOTE(review): guess_template may return None when no template matches;
    # the attribute access below would then raise -- verify against the
    # template table, which is expected to include a catch-all style.
    old_template = guess_template(lines)
    new_style = merge_styles(new_style, old_template.style)
    if style is not None:
        new_style = merge_styles(new_style, style)
    new_template = template_registry.get(new_style)
    # Interrupt processing if STYLE does not exist.
    if not new_template:
        return old_template.style, new_style, None, None
    # Remove all previous comment marks, and left margin.
    if position is not None:
        marker = Marker()
        marker.save_position(text, position, old_template.characters())
    lines, margin = old_template.unbuild(lines)
    # Ensure only one white line between paragraphs.
    counter = 1
    while counter < len(lines) - 1:
        if lines[counter] == '' and lines[counter-1] == '':
            del lines[counter]
        else:
            counter = counter + 1
    # Rebuild the boxed comment.
    lines = new_template.build(lines, width, refill, margin)
    # Retabify to the left only.
    if tabify:
        for counter in range(len(lines)):
            tabs = len(re.match(' *', lines[counter]).group()) / 8
            lines[counter] = '\t' * tabs + lines[counter][8*tabs:]
    # Restore the point position.
    text = '\n'.join(lines)
    if last_line_complete:
        text = text + '\n'
    if position is not None:
        position = marker.get_position(text, new_template.characters())
    return old_template.style, new_style, text, position
def guess_language(line):
    """\
Guess the language in use for LINE, trying the most specific matchers
first and defaulting to 1 (plain text).
"""
    candidates = range(len(language_matcher) - 1, 1, -1)
    hits = (index for index in candidates if language_matcher[index](line))
    return next(hits, 1)
def guess_template(lines):
    """\
Find the heaviest box template matching LINES.
"""
    best = None
    for candidate in template_registry.values():
        # Ignore candidates that do not outrank the best match so far.
        if best is not None and not candidate > best:
            continue
        if candidate.match(lines):
            best = candidate
    return best
def left_margin_size(lines):
    """\
Return the width of the left margin for all LINES. Ignore white lines.
"""
    indents = []
    for line in lines:
        body = line.lstrip(' ')
        # Lines made only of spaces carry no margin information.
        if body:
            indents.append(len(line) - len(body))
    if indents:
        return min(indents)
    return 0
def merge_styles(original, update):
    """\
Return style attributes as per ORIGINAL, in which attributes have been
overridden by non-zero corresponding style attributes from UPDATE.
"""
    # Split each three-digit style into its (language, quality, type)
    # digits.  Floor division keeps the digits integral: the previous
    # plain '/' behaves identically under Python 2 but yields floats
    # under Python 3, corrupting the recomposed style number.
    style = [original // 100, original // 10 % 10, original % 10]
    merge = update // 100, update // 10 % 10, update % 10
    for counter in range(3):
        # A zero digit in UPDATE means "keep the ORIGINAL attribute".
        if merge[counter]:
            style[counter] = merge[counter]
    return 100*style[0] + 10*style[1] + style[2]
## Refilling logic.
def refill_lines(lines, width,
                 cached_refiller=[]):
    """\
Refill LINES, trying to not produce lines having more than WIDTH columns.
"""
    # CACHED_REFILLER is a deliberately mutable default argument: it
    # memoises, across calls, the first refiller backend that proved
    # usable (fill() returning a list rather than None).
    if not cached_refiller:
        for Refiller in Refiller_Gnu_Fmt, Refiller_Textwrap, Refiller_Dumb:
            refiller = Refiller()
            new_lines = refiller.fill(lines, width)
            if new_lines is not None:
                cached_refiller.append(refiller)
                return new_lines
    return cached_refiller[0].fill(lines, width)
class Refiller:
    """\
Base class for refilling strategies: split LINES into paragraphs at blank
lines and hand each paragraph to `fill_paragraph' (supplied by the
subclass), rejoining paragraphs with a single blank line.  When the
backend is unusable (`available' false), `fill' returns None.
"""
    available = True
    def fill(self, lines, width):
        if not self.available:
            return None
        result = []
        total = len(lines)
        # Leading blank lines are skipped and not reproduced.
        cursor = 0
        while cursor < total and not lines[cursor]:
            cursor = cursor + 1
        while cursor < total:
            # Collect one paragraph: a maximal run of non-blank lines.
            begin = cursor
            while cursor < total and lines[cursor]:
                cursor = cursor + 1
            result.extend(self.fill_paragraph(lines[begin:cursor], width))
            # Swallow the blank run; emit one separator if text follows.
            while cursor < total and not lines[cursor]:
                cursor = cursor + 1
            if cursor < total:
                result.append('')
        return result
class Refiller_Gnu_Fmt(Refiller):
    """\
Use both Knuth algorithm and protection for full stops at end of sentences.
"""
    def fill(self, lines, width):
        # Shell out to GNU `fmt'; on any failure (non-zero exit) return
        # None so refill_lines falls back to the next strategy.
        if self.available:
            import tempfile, os
            # NOTE(review): tempfile.mktemp is race-prone (mkstemp would
            # be safer) and `file()` is Python 2 only -- kept as-is.
            name = tempfile.mktemp()
            handle = file(name, 'w')
            handle.write('\n'.join(lines) + '\n')
            handle.close()
            handle = os.popen('fmt -cuw %d %s' % (width, name))
            text = handle.read()
            os.remove(name)
            if handle.close() is None:
                return [line.expandtabs() for line in text.split('\n')[:-1]]
class Refiller_Textwrap(Refiller):
    """\
No Knuth algorithm, but protection for full stops at end of sentences.
"""
    def __init__(self):
        try:
            from textwrap import TextWrapper
        except ImportError:
            # Mark this strategy unusable; refill_lines will fall back.
            self.available = False
        else:
            self.wrapper = TextWrapper(fix_sentence_endings=1)
    def fill_paragraph(self, lines, width):
        # FIXME: This one fills indented lines more aggressively than the
        # dumb refiller. I'm not sure what it the best thing to do, but
        # ideally, all refillers should behave more or less the same way.
        self.wrapper.width = width
        # Refill the whole paragraph as one string, preserving its margin.
        prefix = ' ' * left_margin_size(lines)
        self.wrapper.initial_indent = prefix
        self.wrapper.subsequent_indent = prefix
        return self.wrapper.wrap(' '.join(lines))
class Refiller_Dumb(Refiller):
    """\
No Knuth algorithm, nor even protection for full stops at end of sentences.
"""
    def fill_paragraph(self, lines, width):
        # Greedy word wrap: accumulate words until WIDTH would be
        # exceeded, preserving the paragraph's common left margin and
        # honouring extra indentation at the start of an input line.
        margin = left_margin_size(lines)
        prefix = ' ' * margin
        new_lines = []
        new_line = ''
        for line in lines:
            counter = len(line) - len(line.lstrip())
            if counter > margin:
                # Extra indentation forces a break; the indent is carried
                # onto the first word of the new output line.
                if new_line:
                    new_lines.append(prefix + new_line)
                    new_line = ''
                indent = ' ' * (counter - margin)
            else:
                indent = ''
            for word in line.split():
                if new_line:
                    if len(new_line) + 1 + len(word) > width:
                        new_lines.append(prefix + new_line)
                        new_line = word
                    else:
                        new_line = new_line + ' ' + word
                else:
                    new_line = indent + word
                    indent = ''
        # Flush the final partial line, if any.
        if new_line:
            new_lines.append(prefix + new_line)
        return new_lines
## Marking logic.
class Marker:
    """\
Heuristics to simulate a marker while reformatting boxes.
"""
    def save_position(self, text, position, ignorable):
        """\
Remember POSITION within TEXT, counted as if all IGNORABLE (and white)
characters before it had been removed.
"""
        skippable = set(' \t\r\n' + ignorable)
        removed = 0
        for character in text[:position]:
            if character in skippable:
                removed = removed + 1
        self.position = position - removed
    def get_position(self, text, ignorable, latest=0):
        """\
Return the offset within TEXT that maps back to the previously saved
position, given the same IGNORABLE characters.  When the position falls
inside a run of ignorable characters, a true LATEST selects the largest
candidate offset instead of the smallest.
"""
        skippable = set(' \t\r\n' + ignorable)
        skipped = 0
        kept = 0
        if latest:
            for character in text:
                if character in skippable:
                    skipped = skipped + 1
                else:
                    if kept == self.position:
                        break
                    kept = kept + 1
        elif self.position > 0:
            for character in text:
                if character in skippable:
                    skipped = skipped + 1
                else:
                    kept = kept + 1
                    if kept == self.position:
                        break
        return kept + skipped
## Template processing.
class Template:
    def __init__(self, style, weight, lines):
        """\
        Digest and register a single template.  The template is numbered STYLE,
        has a parsing WEIGHT, and is described by one to three LINES.
        STYLE should be used only once through all `declare_template' calls.
        One of the lines should contain the substring `box' to represent the comment
        to be boxed, and if three lines are given, `box' should appear in the middle
        one.  Lines containing only spaces are implied as necessary before and after
        the `box' line, so we have three lines.
        Normally, all three template lines should be of the same length.  If the first
        line is shorter, it represents a start comment string to be bundled within the
        first line of the comment text.  If the third line is shorter, it represents
        an end comment string to be bundled at the end of the comment text, and
        refilled with it.
        """
        assert style not in template_registry, \
            "Style %d defined more than once" % style
        self.style = style
        self.weight = weight
        # Make it exactly three lines, with `box' in the middle.
        start = lines[0].find('box')
        if start >= 0:
            # `box' on the first given line: there is no top line.
            line1 = None
            line2 = lines[0]
            if len(lines) > 1:
                line3 = lines[1]
            else:
                line3 = None
        else:
            start = lines[1].find('box')
            if start >= 0:
                line1 = lines[0]
                line2 = lines[1]
                if len(lines) > 2:
                    line3 = lines[2]
                else:
                    line3 = None
            else:
                assert 0, "Erroneous template for %d style" % style
        end = start + len('box')
        # Define a few booleans.
        # A shorter top (resp. bottom) line is merged into the first
        # (resp. last) text line instead of standing on its own.
        self.merge_nw = line1 is not None and len(line1) < len(line2)
        self.merge_se = line3 is not None and len(line3) < len(line2)
        # Define strings at various cardinal directions.
        # nw/nn/ne: top-left corner, top ruler char, top-right corner;
        # ww/ee: left and right walls; sw/ss/se: bottom equivalents.
        if line1 is None:
            self.nw = self.nn = self.ne = None
        elif self.merge_nw:
            self.nw = line1
            self.nn = self.ne = None
        else:
            if start > 0:
                self.nw = line1[:start]
            else:
                self.nw = None
            if line1[start] != ' ':
                self.nn = line1[start]
            else:
                self.nn = None
            if end < len(line1):
                self.ne = line1[end:].rstrip()
            else:
                self.ne = None
        if start > 0:
            self.ww = line2[:start]
        else:
            self.ww = None
        if end < len(line2):
            self.ee = line2[end:]
        else:
            self.ee = None
        if line3 is None:
            self.sw = self.ss = self.se = None
        elif self.merge_se:
            self.sw = self.ss = None
            self.se = line3.rstrip()
        else:
            if start > 0:
                self.sw = line3[:start]
            else:
                self.sw = None
            if line3[start] != ' ':
                self.ss = line3[start]
            else:
                self.ss = None
            if end < len(line3):
                self.se = line3[end:].rstrip()
            else:
                self.se = None
        # Define parsing regexps.
        # regexp1/regexp2/regexp3 recognize the top, middle and bottom
        # lines of an existing box, respectively; None means "no such
        # line expected".
        if self.merge_nw:
            self.regexp1 = re.compile(' *' + regexp_quote(self.nw) + '.*$')
        elif self.nw and not self.nn and not self.ne:
            self.regexp1 = re.compile(' *' + regexp_quote(self.nw) + '$')
        elif self.nw or self.nn or self.ne:
            self.regexp1 = re.compile(
                ' *' + regexp_quote(self.nw) + regexp_ruler(self.nn)
                + regexp_quote(self.ne) + '$')
        else:
            self.regexp1 = None
        if self.ww or self.ee:
            self.regexp2 = re.compile(
                ' *' + regexp_quote(self.ww) + '.*'
                + regexp_quote(self.ee) + '$')
        else:
            self.regexp2 = None
        if self.merge_se:
            self.regexp3 = re.compile('.*' + regexp_quote(self.se) + '$')
        elif self.sw and not self.ss and not self.se:
            self.regexp3 = re.compile(' *' + regexp_quote(self.sw) + '$')
        elif self.sw or self.ss or self.se:
            self.regexp3 = re.compile(
                ' *' + regexp_quote(self.sw) + regexp_ruler(self.ss)
                + regexp_quote(self.se) + '$')
        else:
            self.regexp3 = None
        # Save results.
        template_registry[style] = self
    def __cmp__(self, other):
        # Python 2 ordering hook: templates sort by parsing weight.
        # NOTE(review): ignored under Python 3 (and `cmp' is gone there) --
        # confirm the intended interpreter version.
        return cmp(self.weight, other.weight)
    def characters(self):
        """\
        Return a string of characters which may be used to draw the box.
        """
        characters = ''
        for text in (self.nw, self.nn, self.ne,
                     self.ww, self.ee,
                     self.sw, self.ss, self.se):
            if text:
                for character in text:
                    if character not in characters:
                        characters = characters + character
        return characters
    def match(self, lines):
        """\
        Returns true if LINES exactly match this template.
        """
        start = 0
        end = len(lines)
        # The top, bottom, then all remaining lines must each match the
        # corresponding regexp, when that regexp exists.
        if self.regexp1 is not None:
            if start == end or not self.regexp1.match(lines[start]):
                return 0
            start = start + 1
        if self.regexp3 is not None:
            if end == 0 or not self.regexp3.match(lines[end-1]):
                return 0
            end = end - 1
        if self.regexp2 is not None:
            for line in lines[start:end]:
                if not self.regexp2.match(line):
                    return 0
        return 1
    def unbuild(self, lines):
        """\
        Remove all comment marks from LINES, as hinted by this template.  Returns the
        cleaned up set of lines, and the size of the left margin.
        """
        margin = left_margin_size(lines)
        # Remove box style marks.
        start = 0
        end = len(lines)
        if self.regexp1 is not None:
            lines[start] = unbuild_clean(lines[start], self.regexp1)
            start = start + 1
        if self.regexp3 is not None:
            lines[end-1] = unbuild_clean(lines[end-1], self.regexp3)
            end = end - 1
        if self.regexp2 is not None:
            for counter in range(start, end):
                lines[counter] = unbuild_clean(lines[counter], self.regexp2)
        # Remove the left side of the box after it turned into spaces.
        delta = left_margin_size(lines) - margin
        for counter in range(len(lines)):
            lines[counter] = lines[counter][delta:]
        # Remove leading and trailing white lines.
        start = 0
        end = len(lines)
        while start < end and lines[start] == '':
            start = start + 1
        while end > start and lines[end-1] == '':
            end = end - 1
        return lines[start:end], margin
    def build(self, lines, width, refill, margin):
        """\
        Put LINES back into a boxed comment according to this template, after
        having refilled them if REFILL.  The box should start at column MARGIN,
        and the total size of each line should ideally not go over WIDTH.
        """
        # Merge a short end delimiter now, so it gets refilled with text.
        if self.merge_se:
            if lines:
                lines[-1] = lines[-1] + ' ' + self.se
            else:
                lines = [self.se]
        # Reduce WIDTH according to left and right inserts, then refill.
        if self.ww:
            width = width - len(self.ww)
        if self.ee:
            width = width - len(self.ee)
        if refill:
            lines = refill_lines(lines, width)
        # Reduce WIDTH further according to the current right margin,
        # and excluding the left margin.
        maximum = 0
        for line in lines:
            if line:
                # Leave room for one extra space after a sentence end.
                if line[-1] in '.!?':
                    length = len(line) + 1
                else:
                    length = len(line)
                if length > maximum:
                    maximum = length
        width = maximum - margin
        # Construct the top line.
        if self.merge_nw:
            lines[0] = ' ' * margin + self.nw + lines[0][margin:]
            start = 1
        elif self.nw or self.nn or self.ne:
            if self.nn:
                line = self.nn * width
            else:
                line = ' ' * width
            if self.nw:
                line = self.nw + line
            if self.ne:
                line = line + self.ne
            lines.insert(0, (' ' * margin + line).rstrip())
            start = 1
        else:
            start = 0
        # Construct all middle lines.
        for counter in range(start, len(lines)):
            line = lines[counter][margin:]
            line = line + ' ' * (width - len(line))
            if self.ww:
                line = self.ww + line
            if self.ee:
                line = line + self.ee
            lines[counter] = (' ' * margin + line).rstrip()
        # Construct the bottom line.
        # NOTE: by operator precedence this condition reads
        # `sw or ss or (se and not merge_se)'.
        if self.sw or self.ss or self.se and not self.merge_se:
            if self.ss:
                line = self.ss * width
            else:
                line = ' ' * width
            if self.sw:
                line = self.sw + line
            if self.se and not self.merge_se:
                line = line + self.se
            lines.append((' ' * margin + line).rstrip())
        return lines
def regexp_quote(text):
    """\
    Return a regexp fragment matching TEXT stripped of its surrounding
    spaces, possibly followed by spaces.  None yields the empty regexp,
    and an all-space TEXT yields a regexp for a run of spaces.  Otherwise
    the stripped text is nested within a regexp parenthetical group.
    """
    if text is None:
        return ''
    core = text.strip()
    if not core:
        return ' *'
    return '(' + re.escape(core) + ') *'
def regexp_ruler(character):
    """\
    Return a regexp fragment matching two or more repetitions of
    CHARACTER, possibly followed by spaces.  None yields the empty
    regexp, and a space yields a regexp for a run of spaces.  Otherwise
    the ruler is nested within a regexp parenthetical group.
    """
    if character is None:
        return ''
    if character == ' ':
        return ' +'
    return '(' + re.escape(character * 2) + '+) *'
def unbuild_clean(line, regexp):
    """\
    Return LINE with the text captured by every parenthetical group of
    REGEXP overwritten by an equivalent number of spaces, then stripped
    of trailing whitespace.
    """
    match = re.match(regexp, line)
    for index, captured in enumerate(match.groups()):
        if captured is not None:
            start, end = match.span(index + 1)
            line = line[:start] + ' ' * (end - start) + line[end:]
    return line.rstrip()
## Template data.
# Matcher functions for a comment start, indexed by numeric LANGUAGE.
# Index 0 recognizes any known comment leader; index 1 matches anything;
# indexes 2-6 each pin a single comment leader family.
language_matcher = []
for pattern in (r' *(/\*|//+|#+|;+|%+)',
                r'', # 1
                r' */\*', # 2
                r' *//+', # 3
                r' *#+', # 4
                r' *;+', # 5
                r' *%+'): # 6
    language_matcher.append(re.compile(pattern).match)
# Template objects, indexed by numeric style.
template_registry = {}
def make_generic(style, weight, lines):
    """\
    Derive one template per supported language family from STYLE, all
    sharing the same WEIGHT.  Each language family contributes a base
    style number and a comment character which replaces `?' in LINES.
    """
    for base, character in ((300, '/'),   # C++ style comments
                            (400, '#'),   # scripting languages
                            (500, ';'),   # Lisp and assembler
                            (600, '%')):  # TeX and PostScript
        new_style = base + style
        if 310 < new_style <= 319:
            # A single `/' is not a valid C++ comment leader; skip these.
            continue
        Template(new_style, weight,
                 [line.replace('?', character) for line in lines])
# Generic programming language templates.
# Style numbers encode the language family in the hundreds digit (added
# by make_generic above); the second argument is the parsing weight used
# to order templates (see Template.__cmp__).
make_generic(11, 115, ('? box',))
make_generic(12, 215, ('? box ?',
                       '? --- ?'))
make_generic(13, 315, ('? --- ?',
                       '? box ?',
                       '? --- ?'))
make_generic(14, 415, ('? box ?',
                       '???????'))
make_generic(15, 515, ('???????',
                       '? box ?',
                       '???????'))
make_generic(16, 615, ('?????',
                       '? box',
                       '?????'))
make_generic(17, 715, ('?????',
                       '? box',
                       '?????'))
make_generic(21, 125, ('?? box',))
make_generic(22, 225, ('?? box ??',
                       '?? --- ??'))
make_generic(23, 325, ('?? --- ??',
                       '?? box ??',
                       '?? --- ??'))
make_generic(24, 425, ('?? box ??',
                       '?????????'))
make_generic(25, 525, ('?????????',
                       '?? box ??',
                       '?????????'))
make_generic(26, 526, ('??????',
                       '?? box',
                       '??????'))
make_generic(27, 527, ('??????',
                       '?? box',
                       '??????'))
make_generic(31, 135, ('??? box',))
make_generic(32, 235, ('??? box ???',
                       '??? --- ???'))
make_generic(33, 335, ('??? --- ???',
                       '??? box ???',
                       '??? --- ???'))
make_generic(34, 435, ('??? box ???',
                       '???????????'))
make_generic(35, 535, ('???????????',
                       '??? box ???',
                       '???????????'))
make_generic(36, 536, ('???????',
                       '??? box',
                       '???????'))
make_generic(37, 537, ('???????',
                       '??? box',
                       '???????'))
make_generic(41, 145, ('???? box',))
make_generic(42, 245, ('???? box ????',
                       '???? --- ????'))
make_generic(43, 345, ('???? --- ????',
                       '???? box ????',
                       '???? --- ????'))
make_generic(44, 445, ('???? box ????',
                       '?????????????'))
make_generic(45, 545, ('?????????????',
                       '???? box ????',
                       '?????????????'))
make_generic(46, 546, ('????????',
                       '???? box',
                       '????????'))
make_generic(47, 547, ('????????',
                       '???? box',
                       '????????'))
# Textual (non programming) templates.
Template(111, 113, ('box',))
Template(112, 213, ('| box |',
                    '+-----+'))
Template(113, 313, ('+-----+',
                    '| box |',
                    '+-----+'))
Template(114, 413, ('| box |',
                    '*=====*'))
Template(115, 513, ('*=====*',
                    '| box |',
                    '*=====*'))
Template(116, 613, ('+----',
                    '| box',
                    '+----'))
Template(117, 713, ('*====',
                    '| box',
                    '*===='))
Template(121, 123, ('| box |',))
Template(122, 223, ('| box |',
                    '`-----\''))
Template(123, 323, ('.-----.',
                    '| box |',
                    '`-----\''))
Template(124, 423, ('| box |',
                    '\\=====/'))
Template(125, 523, ('/=====\\',
                    '| box |',
                    '\\=====/'))
Template(126, 623, ('.----',
                    '| box',
                    '`----'))
Template(127, 723, ('/====',
                    '| box',
                    '\\===='))
Template(141, 143, ('| box ',))
Template(142, 243, ('* box *',
                    '*******'))
Template(143, 343, ('*******',
                    '* box *',
                    '*******'))
Template(144, 443, ('X box X',
                    'XXXXXXX'))
Template(145, 543, ('XXXXXXX',
                    'X box X',
                    'XXXXXXX'))
Template(146, 643, ('*****',
                    '* box',
                    '*****'))
Template(147, 743, ('XXXXX',
                    'X box',
                    'XXXXX'))
# C language templates.
Template(211, 118, ('/* box */',))
Template(212, 218, ('/* box */',
                    '/* --- */'))
Template(213, 318, ('/* --- */',
                    '/* box */',
                    '/* --- */'))
Template(214, 418, ('/* box */',
                    '/* === */'))
Template(215, 518, ('/* === */',
                    '/* box */',
                    '/* === */'))
Template(216, 618, ('/* ---',
                    ' box',
                    ' ---*/'))
Template(217, 718, ('/* ===',
                    ' box',
                    ' ===*/'))
Template(221, 128, ('/* ',
                    ' box',
                    '*/'))
Template(222, 228, ('/* .',
                    '| box |',
                    '`----*/'))
Template(223, 328, ('/*----.',
                    '| box |',
                    '`----*/'))
Template(224, 428, ('/* \\',
                    '| box |',
                    '\\====*/'))
Template(225, 528, ('/*====\\',
                    '| box |',
                    '\\====*/'))
Template(226, 628, ('/*---',
                    '| box',
                    '`----*/'))
Template(227, 728, ('/*===',
                    '| box',
                    '\\====*/'))
Template(231, 138, ('/* ',
                    ' | box',
                    ' */ '))
Template(232, 238, ('/* ',
                    ' | box | ',
                    ' *-----*/'))
Template(233, 338, ('/*-----* ',
                    ' | box | ',
                    ' *-----*/'))
Template(234, 438, ('/* box */',
                    '/*-----*/'))
Template(235, 538, ('/*-----*/',
                    '/* box */',
                    '/*-----*/'))
Template(236, 638, ('/*---- ',
                    ' | box ',
                    ' *----*/'))
Template(237, 738, ('/*----',
                    ' box',
                    ' ----*/'))
Template(241, 148, ('/* ',
                    ' * box',
                    ' */ '))
Template(242, 248, ('/* * ',
                    ' * box * ',
                    ' *******/'))
Template(243, 348, ('/******* ',
                    ' * box * ',
                    ' *******/'))
Template(244, 448, ('/* box */',
                    '/*******/'))
Template(245, 548, ('/*******/',
                    '/* box */',
                    '/*******/'))
Template(246, 648, ('/******* ',
                    ' * box * ',
                    ' *******/'))
Template(247, 748, ('/****',
                    ' box',
                    ' *****/'))
Template(251, 158, ('/* ',
                    ' * box',
                    ' */ '))
if __name__ == '__main__':
    # Command-line entry point; `main' and `sys' are defined earlier in
    # this file, outside this excerpt.
    main(*sys.argv[1:])
"""
Abstract base class for the various polynomial Classes.
The ABCPolyBase class provides the methods needed to implement the common API
for the various polynomial classes. It operates as a mixin, but uses the
abc module from the stdlib, hence it is only available for Python >= 2.6.
"""
from __future__ import division, absolute_import, print_function
from abc import ABCMeta, abstractmethod, abstractproperty
from numbers import Number
import numpy as np
from . import polyutils as pu
__all__ = ['ABCPolyBase']
class ABCPolyBase(object):
"""An abstract base class for series classes.
ABCPolyBase provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the
methods listed below.
.. versionadded:: 1.9.0
Parameters
----------
coef : array_like
Series coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where
``P_i`` is the basis polynomials of degree ``i``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is the derived class domain.
window : (2,) array_like, optional
Window, see domain for its use. The default value is the
derived class window.
Attributes
----------
coef : (N,) ndarray
Series coefficients in order of increasing degree.
domain : (2,) ndarray
Domain that is mapped to window.
window : (2,) ndarray
Window that domain is mapped to.
Class Attributes
----------------
maxpower : int
Maximum power allowed, i.e., the largest number ``n`` such that
``p(x)**n`` is allowed. This is to limit runaway polynomial size.
domain : (2,) ndarray
Default domain of the class.
window : (2,) ndarray
Default window of the class.
"""
    # Under Python 2, `__metaclass__' applies ABCMeta and makes the class
    # abstract.  NOTE(review): Python 3 ignores `__metaclass__' -- confirm
    # the intended interpreter versions for this file.
    __metaclass__ = ABCMeta
    # Not hashable
    __hash__ = None
    # Don't let participate in array operations. Value doesn't matter.
    __array_priority__ = 1000
    # Limit runaway size. T_n^m has degree n*m
    maxpower = 100
    # Concrete subclasses must supply the default domain/window/nickname
    # and the static implementations of the basic algebra, evaluation,
    # calculus, fitting, and root handling used by the methods below.
    @abstractproperty
    def domain(self):
        pass
    @abstractproperty
    def window(self):
        pass
    @abstractproperty
    def nickname(self):
        pass
    @abstractmethod
    def _add(self):
        pass
    @abstractmethod
    def _sub(self):
        pass
    @abstractmethod
    def _mul(self):
        pass
    @abstractmethod
    def _div(self):
        pass
    @abstractmethod
    def _pow(self):
        pass
    @abstractmethod
    def _val(self):
        pass
    @abstractmethod
    def _int(self):
        pass
    @abstractmethod
    def _der(self):
        pass
    @abstractmethod
    def _fit(self):
        pass
    @abstractmethod
    def _line(self):
        pass
    @abstractmethod
    def _roots(self):
        pass
    @abstractmethod
    def _fromroots(self):
        pass
def has_samecoef(self, other):
"""Check if coefficients match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``coef`` attribute.
Returns
-------
bool : boolean
True if the coefficients are the same, False otherwise.
"""
if len(self.coef) != len(other.coef):
return False
elif not np.all(self.coef == other.coef):
return False
else:
return True
def has_samedomain(self, other):
"""Check if domains match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``domain`` attribute.
Returns
-------
bool : boolean
True if the domains are the same, False otherwise.
"""
return np.all(self.domain == other.domain)
def has_samewindow(self, other):
"""Check if windows match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``window`` attribute.
Returns
-------
bool : boolean
True if the windows are the same, False otherwise.
"""
return np.all(self.window == other.window)
def has_sametype(self, other):
"""Check if types match.
.. versionadded:: 1.7.0
Parameters
----------
other : object
Class instance.
Returns
-------
bool : boolean
True if other is same class as self
"""
return isinstance(other, self.__class__)
def _get_coefficients(self, other):
"""Interpret other as polynomial coefficients.
The `other` argument is checked to see if it is of the same
class as self with identical domain and window. If so,
return its coefficients, otherwise return `other`.
.. versionadded:: 1.9.0
Parameters
----------
other : anything
Object to be checked.
Returns
-------
coef:
The coefficients of`other` if it is a compatible instance,
of ABCPolyBase, otherwise `other`.
Raises
------
TypeError:
When `other` is an incompatible instance of ABCPolyBase.
"""
if isinstance(other, ABCPolyBase):
if not isinstance(other, self.__class__):
raise TypeError("Polynomial types differ")
elif not np.all(self.domain == other.domain):
raise TypeError("Domains differ")
elif not np.all(self.window == other.window):
raise TypeError("Windows differ")
return other.coef
return other
def __init__(self, coef, domain=None, window=None):
[coef] = pu.as_series([coef], trim=False)
self.coef = coef
if domain is not None:
[domain] = pu.as_series([domain], trim=False)
if len(domain) != 2:
raise ValueError("Domain has wrong number of elements.")
self.domain = domain
if window is not None:
[window] = pu.as_series([window], trim=False)
if len(window) != 2:
raise ValueError("Window has wrong number of elements.")
self.window = window
def __repr__(self):
format = "%s(%s, %s, %s)"
coef = repr(self.coef)[6:-1]
domain = repr(self.domain)[6:-1]
window = repr(self.window)[6:-1]
name = self.__class__.__name__
return format % (name, coef, domain, window)
def __str__(self):
format = "%s(%s)"
coef = str(self.coef)
name = self.nickname
return format % (name, coef)
    # Pickle and copy
    def __getstate__(self):
        # Copy the arrays so an unpickled/copied instance does not share
        # mutable state with the original.
        ret = self.__dict__.copy()
        ret['coef'] = self.coef.copy()
        ret['domain'] = self.domain.copy()
        ret['window'] = self.window.copy()
        return ret
    def __setstate__(self, dict):
        self.__dict__ = dict
    # Call
    def __call__(self, arg):
        # Map the argument from the domain into the window, then evaluate.
        off, scl = pu.mapparms(self.domain, self.window)
        arg = off + scl*arg
        return self._val(arg, self.coef)
    def __iter__(self):
        # Iterating a series yields its coefficients.
        return iter(self.coef)
    def __len__(self):
        return len(self.coef)
    # Numeric properties.
    def __neg__(self):
        return self.__class__(-self.coef, self.domain, self.window)
    def __pos__(self):
        return self
    def __add__(self, other):
        # Coerce `other' to coefficients; TypeError (incompatible
        # polynomial type/domain/window) is re-raised, while any other
        # failure returns NotImplemented so Python can try the reflected
        # operation.  The same pattern applies to the operators below.
        try:
            othercoef = self._get_coefficients(other)
            coef = self._add(self.coef, othercoef)
        except TypeError as e:
            raise e
        except:
            return NotImplemented
        return self.__class__(coef, self.domain, self.window)
    def __sub__(self, other):
        try:
            othercoef = self._get_coefficients(other)
            coef = self._sub(self.coef, othercoef)
        except TypeError as e:
            raise e
        except:
            return NotImplemented
        return self.__class__(coef, self.domain, self.window)
    def __mul__(self, other):
        try:
            othercoef = self._get_coefficients(other)
            coef = self._mul(self.coef, othercoef)
        except TypeError as e:
            raise e
        except:
            return NotImplemented
        return self.__class__(coef, self.domain, self.window)
    def __div__(self, other):
        # set to __floordiv__, /, for now.
        return self.__floordiv__(other)
    def __truediv__(self, other):
        # there is no true divide if the rhs is not a Number, although it
        # could return the first n elements of an infinite series.
        # It is hard to see where n would come from, though.
        if not isinstance(other, Number) or isinstance(other, bool):
            form = "unsupported types for true division: '%s', '%s'"
            raise TypeError(form % (type(self), type(other)))
        return self.__floordiv__(other)
    def __floordiv__(self, other):
        # Quotient part of divmod.
        res = self.__divmod__(other)
        if res is NotImplemented:
            return res
        return res[0]
    def __mod__(self, other):
        # Remainder part of divmod.
        res = self.__divmod__(other)
        if res is NotImplemented:
            return res
        return res[1]
    def __divmod__(self, other):
        try:
            othercoef = self._get_coefficients(other)
            quo, rem = self._div(self.coef, othercoef)
        except (TypeError, ZeroDivisionError) as e:
            raise e
        except:
            return NotImplemented
        quo = self.__class__(quo, self.domain, self.window)
        rem = self.__class__(rem, self.domain, self.window)
        return quo, rem
    def __pow__(self, other):
        # maxpower caps the result size to avoid runaway growth.
        coef = self._pow(self.coef, other, maxpower=self.maxpower)
        res = self.__class__(coef, self.domain, self.window)
        return res
    # Reflected operations: `other' is a plain scalar/array here, so no
    # compatibility check is needed; any failure in the static helper
    # returns NotImplemented.
    def __radd__(self, other):
        try:
            coef = self._add(other, self.coef)
        except:
            return NotImplemented
        return self.__class__(coef, self.domain, self.window)
    def __rsub__(self, other):
        try:
            coef = self._sub(other, self.coef)
        except:
            return NotImplemented
        return self.__class__(coef, self.domain, self.window)
    def __rmul__(self, other):
        try:
            coef = self._mul(other, self.coef)
        except:
            return NotImplemented
        return self.__class__(coef, self.domain, self.window)
    def __rdiv__(self, other):
        # set to __floordiv__ /.
        return self.__rfloordiv__(other)
    def __rtruediv__(self, other):
        # An instance of ABCPolyBase is not considered a
        # Number.
        return NotImplemented
    def __rfloordiv__(self, other):
        res = self.__rdivmod__(other)
        if res is NotImplemented:
            return res
        return res[0]
    def __rmod__(self, other):
        res = self.__rdivmod__(other)
        if res is NotImplemented:
            return res
        return res[1]
    def __rdivmod__(self, other):
        try:
            quo, rem = self._div(other, self.coef)
        except ZeroDivisionError as e:
            raise e
        except:
            return NotImplemented
        quo = self.__class__(quo, self.domain, self.window)
        rem = self.__class__(rem, self.domain, self.window)
        return quo, rem
    # Enhance me
    # some augmented arithmetic operations could be added here
    def __eq__(self, other):
        # Equality requires identical type, domain, window, coefficient
        # shape and coefficient values; foreign types compare unequal
        # rather than raising.
        res = (isinstance(other, self.__class__) and
               np.all(self.domain == other.domain) and
               np.all(self.window == other.window) and
               (self.coef.shape == other.coef.shape) and
               np.all(self.coef == other.coef))
        return res
    def __ne__(self, other):
        return not self.__eq__(other)
    #
    # Extra methods.
    #
def copy(self):
"""Return a copy.
Returns
-------
new_series : series
Copy of self.
"""
return self.__class__(self.coef, self.domain, self.window)
def degree(self):
"""The degree of the series.
.. versionadded:: 1.5.0
Returns
-------
degree : int
Degree of the series, one less than the number of coefficients.
"""
return len(self) - 1
def cutdeg(self, deg):
"""Truncate series to the given degree.
Reduce the degree of the series to `deg` by discarding the
high order terms. If `deg` is greater than the current degree a
copy of the current series is returned. This can be useful in least
squares where the coefficients of the high degree terms may be very
small.
.. versionadded:: 1.5.0
Parameters
----------
deg : non-negative int
The series is reduced to degree `deg` by discarding the high
order terms. The value of `deg` must be a non-negative integer.
Returns
-------
new_series : series
New instance of series with reduced degree.
"""
return self.truncate(deg + 1)
def trim(self, tol=0):
"""Remove trailing coefficients
Remove trailing coefficients until a coefficient is reached whose
absolute value greater than `tol` or the beginning of the series is
reached. If all the coefficients would be removed the series is set
to ``[0]``. A new series instance is returned with the new
coefficients. The current instance remains unchanged.
Parameters
----------
tol : non-negative number.
All trailing coefficients less than `tol` will be removed.
Returns
-------
new_series : series
Contains the new set of coefficients.
"""
coef = pu.trimcoef(self.coef, tol)
return self.__class__(coef, self.domain, self.window)
def truncate(self, size):
"""Truncate series to length `size`.
Reduce the series to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer. This
can be useful in least squares where the coefficients of the
high degree terms may be very small.
Parameters
----------
size : positive int
The series is reduced to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer.
Returns
-------
new_series : series
New instance of series with truncated coefficients.
"""
isize = int(size)
if isize != size or isize < 1:
raise ValueError("size must be a positive integer")
if isize >= len(self.coef):
coef = self.coef
else:
coef = self.coef[:isize]
return self.__class__(coef, self.domain, self.window)
def convert(self, domain=None, kind=None, window=None):
"""Convert series to a different kind and/or domain and/or window.
Parameters
----------
domain : array_like, optional
The domain of the converted series. If the value is None,
the default domain of `kind` is used.
kind : class, optional
The polynomial series type class to which the current instance
should be converted. If kind is None, then the class of the
current instance is used.
window : array_like, optional
The window of the converted series. If the value is None,
the default window of `kind` is used.
Returns
-------
new_series : series
The returned class can be of different type than the current
instance and/or have a different domain and/or different
window.
Notes
-----
Conversion between domains and class types can result in
numerically ill defined series.
Examples
--------
"""
if kind is None:
kind = self.__class__
if domain is None:
domain = kind.domain
if window is None:
window = kind.window
return self(kind.identity(domain, window=window))
def mapparms(self):
"""Return the mapping parameters.
The returned values define a linear map ``off + scl*x`` that is
applied to the input arguments before the series is evaluated. The
map depends on the ``domain`` and ``window``; if the current
``domain`` is equal to the ``window`` the resulting map is the
identity. If the coefficients of the series instance are to be
used by themselves outside this class, then the linear function
must be substituted for the ``x`` in the standard representation of
the base polynomials.
Returns
-------
off, scl : float or complex
The mapping function is defined by ``off + scl*x``.
Notes
-----
If the current domain is the interval ``[l1, r1]`` and the window
is ``[l2, r2]``, then the linear mapping function ``L`` is
defined by the equations::
L(l1) = l2
L(r1) = r2
"""
return pu.mapparms(self.domain, self.window)
def integ(self, m=1, k=[], lbnd=None):
"""Integrate.
Return a series instance that is the definite integral of the
current series.
Parameters
----------
m : non-negative int
The number of integrations to perform.
k : array_like
Integration constants. The first constant is applied to the
first integration, the second to the second, and so on. The
list of values must less than or equal to `m` in length and any
missing values are set to zero.
lbnd : Scalar
The lower bound of the definite integral.
Returns
-------
new_series : series
A new series representing the integral. The domain is the same
as the domain of the integrated series.
"""
off, scl = self.mapparms()
if lbnd is None:
lbnd = 0
else:
lbnd = off + scl*lbnd
coef = self._int(self.coef, m, k, lbnd, 1./scl)
return self.__class__(coef, self.domain, self.window)
def deriv(self, m=1):
"""Differentiate.
Return a series instance of that is the derivative of the current
series.
Parameters
----------
m : non-negative int
Find the derivative of order `m`.
Returns
-------
new_series : series
A new series representing the derivative. The domain is the same
as the domain of the differentiated series.
"""
off, scl = self.mapparms()
coef = self._der(self.coef, m, scl)
return self.__class__(coef, self.domain, self.window)
def roots(self):
"""Return the roots of the series polynomial.
Compute the roots for the series. Note that the accuracy of the
roots decrease the further outside the domain they lie.
Returns
-------
roots : ndarray
Array containing the roots of the series.
"""
roots = self._roots(self.coef)
return pu.mapdomain(roots, self.window, self.domain)
def linspace(self, n=100, domain=None):
"""Return x, y values at equally spaced points in domain.
Returns the x, y values at `n` linearly spaced points across the
domain. Here y is the value of the polynomial at the points x. By
default the domain is the same as that of the series instance.
This method is intended mostly as a plotting aid.
.. versionadded:: 1.5.0
Parameters
----------
n : int, optional
Number of point pairs to return. The default value is 100.
domain : {None, array_like}, optional
If not None, the specified domain is used instead of that of
the calling instance. It should be of the form ``[beg,end]``.
The default is None which case the class domain is used.
Returns
-------
x, y : ndarray
x is equal to linspace(self.domain[0], self.domain[1], n) and
y is the series evaluated at element of x.
"""
if domain is None:
domain = self.domain
x = np.linspace(domain[0], domain[1], n)
y = self(x)
return x, y
    @classmethod
    def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None,
        window=None):
        """Least squares fit to data.

        Return a series instance that is the least squares fit to the data
        `y` sampled at `x`. The domain of the returned instance can be
        specified and this will often result in a superior fit with less
        chance of ill conditioning.

        Parameters
        ----------
        x : array_like, shape (M,)
            x-coordinates of the M sample points ``(x[i], y[i])``.
        y : array_like, shape (M,) or (M, K)
            y-coordinates of the sample points. Several data sets of sample
            points sharing the same x-coordinates can be fitted at once by
            passing in a 2D-array that contains one dataset per column.
        deg : int or 1-D array_like
            Degree(s) of the fitting polynomials. If `deg` is a single integer
            all terms up to and including the `deg`'th term are included in the
            fit. For Numpy versions >= 1.11 a list of integers specifying the
            degrees of the terms to include may be used instead.
        domain : {None, [beg, end], []}, optional
            Domain to use for the returned series. If ``None``,
            then a minimal domain that covers the points `x` is chosen. If
            ``[]`` the class domain is used. The default value was the
            class domain in NumPy 1.4 and ``None`` in later versions.
            The ``[]`` option was added in numpy 1.5.0.
        rcond : float, optional
            Relative condition number of the fit. Singular values smaller
            than this relative to the largest singular value will be
            ignored. The default value is len(x)*eps, where eps is the
            relative precision of the float type, about 2e-16 in most
            cases.
        full : bool, optional
            Switch determining nature of return value. When it is False
            (the default) just the coefficients are returned, when True
            diagnostic information from the singular value decomposition is
            also returned.
        w : array_like, shape (M,), optional
            Weights. If not None the contribution of each point
            ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
            weights are chosen so that the errors of the products
            ``w[i]*y[i]`` all have the same variance. The default value is
            None.

            .. versionadded:: 1.5.0
        window : {[beg, end]}, optional
            Window to use for the returned series. The default
            value is the default class domain

            .. versionadded:: 1.6.0

        Returns
        -------
        new_series : series
            A series that represents the least squares fit to the data and
            has the domain specified in the call.
        [resid, rank, sv, rcond] : list
            These values are only returned if `full` = True

            resid -- sum of squared residuals of the least squares fit
            rank -- the numerical rank of the scaled Vandermonde matrix
            sv -- singular values of the scaled Vandermonde matrix
            rcond -- value of `rcond`.

            For more details, see `linalg.lstsq`.

        """
        if domain is None:
            domain = pu.getdomain(x)
        elif type(domain) is list and len(domain) == 0:
            # The empty-list sentinel selects the class default domain.
            domain = cls.domain
        if window is None:
            window = cls.window
        # Fit in the window: map the sample points there first.
        xnew = pu.mapdomain(x, domain, window)
        res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full)
        if full:
            # res is (coef, [resid, rank, sv, rcond]).
            [coef, status] = res
            return cls(coef, domain=domain, window=window), status
        else:
            coef = res
            return cls(coef, domain=domain, window=window)
@classmethod
def fromroots(cls, roots, domain=[], window=None):
    """Return a series instance that has the specified roots.

    The result represents the product
    ``(x - r[0])*(x - r[1])*...*(x - r[n-1])`` for the given roots ``r``.

    Parameters
    ----------
    roots : array_like
        List of roots.
    domain : {[], None, array_like}, optional
        Domain of the result.  ``None`` spans the smallest to largest
        root; ``[]`` (the default) uses the class domain.
    window : {None, array_like}, optional
        Window of the result; ``None`` (the default) uses the class
        window.

    Returns
    -------
    new_series : series
        Series with the specified roots.
    """
    [roots] = pu.as_series([roots], trim=False)
    if domain is None:
        domain = pu.getdomain(roots)
    elif type(domain) is list and len(domain) == 0:
        domain = cls.domain
    if window is None:
        window = cls.window
    degree = len(roots)
    off, scl = pu.mapparms(domain, window)
    # Map the roots into the window, build the product there, then
    # rescale so the leading behaviour is preserved after mapping back.
    mapped_roots = off + scl * roots
    coef = cls._fromroots(mapped_roots) / scl**degree
    return cls(coef, domain=domain, window=window)
@classmethod
def identity(cls, domain=None, window=None):
    """Return the series representing the identity map ``p(x) == x``.

    Parameters
    ----------
    domain : {None, array_like}, optional
        ``[beg, end]`` domain of the result; ``None`` (the default)
        uses the class domain.
    window : {None, array_like}, optional
        ``[beg, end]`` window of the result; ``None`` (the default)
        uses the class window.

    Returns
    -------
    new_series : series
        Series representing the identity.
    """
    resolved_domain = cls.domain if domain is None else domain
    resolved_window = cls.window if window is None else window
    # The line that maps the window back onto the domain becomes the
    # identity once evaluated through the domain->window substitution.
    off, scl = pu.mapparms(resolved_window, resolved_domain)
    return cls(cls._line(off, scl), resolved_domain, resolved_window)
@classmethod
def basis(cls, deg, domain=None, window=None):
    """Return the basis polynomial of degree `deg` for this series kind.

    Parameters
    ----------
    deg : int
        Degree of the basis polynomial.  Must be >= 0.
    domain : {None, array_like}, optional
        ``[beg, end]`` domain of the result; ``None`` (the default)
        uses the class domain.
    window : {None, array_like}, optional
        ``[beg, end]`` window of the result; ``None`` (the default)
        uses the class window.

    Returns
    -------
    new_series : series
        Series whose `deg` coefficient is one and all others zero.

    Raises
    ------
    ValueError
        If `deg` is negative or not integral.
    """
    # Validate first; the resolved domain/window are irrelevant on error.
    ideg = int(deg)
    if ideg != deg or ideg < 0:
        raise ValueError("deg must be non-negative integer")
    if domain is None:
        domain = cls.domain
    if window is None:
        window = cls.window
    return cls([0]*ideg + [1], domain, window)
@classmethod
def cast(cls, series, domain=None, window=None):
    """Convert `series` to an equivalent series of this class.

    `series` may be an instance of any polynomial series type supported
    by the numpy.polynomial module, or any other object providing a
    compatible ``convert`` method.

    Parameters
    ----------
    series : series
        The series instance to be converted.
    domain : {None, array_like}, optional
        ``[beg, end]`` domain of the result; ``None`` (the default)
        uses the class domain.
    window : {None, array_like}, optional
        ``[beg, end]`` window of the result; ``None`` (the default)
        uses the class window.

    Returns
    -------
    new_series : series
        A series of this class, equal to `series` when evaluated.

    See Also
    --------
    convert : similar instance method
    """
    target_domain = cls.domain if domain is None else domain
    target_window = cls.window if window is None else window
    # Delegate: the source series knows how to re-express itself.
    return series.convert(target_domain, cls, target_window)
| mit |
alu0100207385/dsi_3Django | build/lib.linux-i686-2.7/django/db/models/aggregates.py | 114 | 2601 | """
Classes to represent the definitions of aggregate functions.
"""
from django.db.models.constants import LOOKUP_SEP
def refs_aggregate(lookup_parts, aggregates):
    """
    Check whether the lookup_parts contain a reference to one of the
    given aggregates.  Because LOOKUP_SEP appears inside the default
    annotation names, every prefix of lookup_parts must be tested, not
    just the full join.
    """
    joined_prefixes = (
        LOOKUP_SEP.join(lookup_parts[:i])
        for i in range(len(lookup_parts) + 1))
    return any(prefix in aggregates for prefix in joined_prefixes)
class Aggregate(object):
    """
    Default Aggregate definition.
    """
    def __init__(self, lookup, **extra):
        """Instantiate a new aggregate.

        * lookup is the field on which the aggregate operates.
        * extra is a dictionary of additional data to provide for the
          aggregate definition.

        Also utilizes the class variable:
        * name, the identifier for this aggregate function.
        """
        self.lookup = lookup
        self.extra = extra

    def _default_alias(self):
        # e.g. lookup 'price' with name 'Avg' -> 'price__avg'
        return '{0}__{1}'.format(self.lookup, self.name.lower())
    default_alias = property(_default_alias)

    def add_to_query(self, query, alias, col, source, is_summary):
        """Add the aggregate to the nominated query.

        Converts this generic Aggregate definition into a
        backend-specific one:

        * query is the backend-specific query instance the aggregate is
          added to.
        * col is a column reference describing the subject field of the
          aggregate (an alias, or a (table, column) tuple).
        * source is the underlying field or aggregate definition for the
          column reference, used to determine the coerced output type of
          non-ordinal, non-computed aggregates.
        * is_summary is True when the aggregate is a summary value
          rather than an annotation.
        """
        backend_class = getattr(query.aggregates_module, self.name)
        query.aggregates[alias] = backend_class(
            col, source=source, is_summary=is_summary, **self.extra)
# Concrete aggregates.  Each subclass only needs to set ``name``; it is
# resolved against the backend's aggregates module in
# Aggregate.add_to_query, and lower-cased to form the default alias.
class Avg(Aggregate):
    name = 'Avg'

class Count(Aggregate):
    name = 'Count'

class Max(Aggregate):
    name = 'Max'

class Min(Aggregate):
    name = 'Min'

class StdDev(Aggregate):
    name = 'StdDev'

class Sum(Aggregate):
    name = 'Sum'

class Variance(Aggregate):
    name = 'Variance'
| bsd-3-clause |
salilsub/tartarus | mlb/baseball/baseball/player_data.py | 2 | 5944 | from sqlalchemy import Column,Float,String,Date
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
Base = declarative_base()
class Batter(Base):
    """Per-game batting line for a single player.

    One row per (player_id, game_id).  Counting stats are stored as
    Float columns because the upstream box-score feed delivers strings
    that are coerced with float().
    """
    __tablename__ = 'batter'
    player_id = Column(Float, primary_key=True)
    game_id = Column(String, primary_key=True)
    name = Column(String(255))
    position = Column(String(255))
    gamedate = Column(Date)
    hit = Column(Float, default=0)
    single = Column(Float, default=0)
    double = Column(Float, default=0)
    triple = Column(Float, default=0)
    home_run = Column(Float, default=0)
    rbi = Column(Float, default=0)
    run = Column(Float, default=0)
    base_on_balls = Column(Float, default=0)
    hit_by_pinch = Column(Float, default=0)
    stolen_base = Column(Float, default=0)
    caught_stealing = Column(Float, default=0)
    sacrifice_bunts = Column(Float, default=0)
    left_on_base = Column(Float, default=0)
    error = Column(Float, default=0)
    strikeouts = Column(Float, default=0)
    assists = Column(Float, default=0)
    sacrifice_flies = Column(Float, default=0)
    fly_outs = Column(Float, default=0)
    put_outs = Column(Float, default=0)
    at_bats = Column(Float, default=0)
    ground_outs = Column(Float, default=0)

    # Fantasy points awarded per unit of each stat; stats not listed
    # contribute nothing to the score.
    fantasy_scoring = {
        'single': 3,
        'double': 5,
        'triple': 8,
        'home_run': 10,
        'rbi': 2,
        'run': 2,
        'base_on_balls': 2,
        'hit_by_pinch': 2,
        'stolen_base': 5,
        'caught_stealing': -2
    }

    def __init__(self, **kwargs):
        """Build a row from a box-score dict.

        Expects the identity fields (player_id, game_id, name, position,
        gamedate) plus any of the stat columns; every stat value must be
        convertible with float().
        """
        self.player_id = kwargs['player_id']
        self.game_id = kwargs['game_id']
        self.name = kwargs['name']
        self.position = kwargs['position']
        self.gamedate = kwargs['gamedate']
        # Singles are not reported directly by the feed; derive them
        # from total hits minus extra-base hits.
        self.single = float(kwargs['hit']) - float(kwargs['double']) - float(kwargs['triple'])
        excluded_fields = ('player_id', 'game_id', 'name', 'position', 'gamedate')
        for key, value in kwargs.items():
            if key not in excluded_fields:
                setattr(self, key, float(value))

    @hybrid_property
    def score(self):
        """Fantasy point total for this batting line."""
        return sum(weight * getattr(self, stat)
                   for stat, weight in self.fantasy_scoring.items())
class Pitcher(Base):
    """Per-game pitching line for a single player.

    One row per (player_id, game_id).  Counting stats are stored as
    Float columns because the upstream box-score feed delivers strings
    that are coerced with float().
    """
    __tablename__ = 'pitcher'
    player_id = Column(Float, primary_key=True)
    game_id = Column(String, primary_key=True)
    name = Column(String(255))
    position = Column(String(255))
    gamedate = Column(Date)
    inning_pitched = Column(Float, default=0)
    # NOTE: 'strikout' (sic) is kept misspelled for schema compatibility;
    # renaming the attribute would change the column name.
    strikout = Column(Float, default=0)
    win = Column(Float, default=0)
    earned_run_allowed = Column(Float, default=0)
    hit_against = Column(Float, default=0)
    base_on_balls_against = Column(Float, default=0)
    hit_batsman = Column(Float, default=0)
    complete_game = Column(Float, default=0)
    complete_game_shut_out = Column(Float, default=0)
    no_hitter = Column(Float, default=0)
    home_run = Column(Float, default=0)
    blown_save = Column(Float, default=0)
    batters_faced = Column(Float, default=0)
    saves = Column(Float, default=0)
    hold = Column(Float, default=0)
    loss = Column(Float, default=0)
    runs = Column(Float, default=0)
    out = Column(Float, default=0)

    # Fantasy points awarded per unit of each stat; stats not listed
    # contribute nothing to the score.
    fantasy_scoring = {
        'inning_pitched': 2.25,
        'strikout': 2,
        'win': 4,
        'earned_run_allowed': -2,
        'hit_against': -0.6,
        'base_on_balls_against': -0.6,
        'hit_batsman': -0.6,
        'complete_game': 2.5,
        'complete_game_shut_out': 2.5,
        'no_hitter': 5
    }

    def __init__(self, **kwargs):
        """Build a row from a box-score dict.

        Expects the identity fields (player_id, game_id, name, position,
        gamedate) plus any of the stat columns; stat values must be
        convertible with float().  The derived complete_game_shut_out
        and no_hitter flags are computed here.
        """
        self.player_id = kwargs['player_id']
        self.game_id = kwargs['game_id']
        self.name = kwargs['name']
        self.position = kwargs['position']
        self.gamedate = kwargs['gamedate']
        try:
            self.earned_run_allowed = float(kwargs['earned_run_allowed'])
        except (KeyError, TypeError, ValueError):
            # Missing or non-numeric (e.g. '-') box-score entry.
            self.earned_run_allowed = 0
        if float(kwargs['complete_game']) == 1 and float(kwargs['runs']) == 0:
            self.complete_game_shut_out = 1
        if float(kwargs['hit_against']) == 0:
            self.no_hitter = 1
        excluded_fields = ('player_id', 'game_id', 'name', 'position',
                           'gamedate', 'earned_run_allowed')
        for key, value in kwargs.items():
            if key not in excluded_fields:
                setattr(self, key, float(value))

    @hybrid_property
    def score(self):
        """Fantasy point total for this pitching line."""
        return sum(weight * getattr(self, stat)
                   for stat, weight in self.fantasy_scoring.items())
| mit |
rossburton/yocto-autobuilder | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/words/test/test_xmpproutertap.py | 40 | 2397 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.xmpproutertap}.
"""
from twisted.application import internet
from twisted.trial import unittest
from twisted.words import xmpproutertap as tap
from twisted.words.protocols.jabber import component
class XMPPRouterTapTest(unittest.TestCase):
    """Tests for the XMPP router twistd plugin's Options and makeService."""

    def _parse(self, *argv):
        """Return a fresh Options instance after parsing ``argv``."""
        options = tap.Options()
        options.parseOptions(list(argv))
        return options

    def test_port(self):
        """
        The port option is recognised as a parameter.
        """
        self.assertEqual(self._parse('--port', '7001')['port'], '7001')

    def test_portDefault(self):
        """
        The port option has '5347' as default value
        """
        self.assertEqual(self._parse()['port'], 'tcp:5347:interface=127.0.0.1')

    def test_secret(self):
        """
        The secret option is recognised as a parameter.
        """
        self.assertEqual(self._parse('--secret', 'hushhush')['secret'],
                         'hushhush')

    def test_secretDefault(self):
        """
        The secret option has 'secret' as default value
        """
        self.assertEqual(self._parse()['secret'], 'secret')

    def test_verbose(self):
        """
        The verbose option is recognised as a flag.
        """
        self.assertTrue(self._parse('--verbose')['verbose'])

    def test_makeService(self):
        """
        The service gets set up with a router and factory.
        """
        service = tap.makeService(self._parse())
        self.assertIsInstance(service, internet.StreamServerEndpointService)
        self.assertEqual('127.0.0.1', service.endpoint._interface)
        self.assertEqual(5347, service.endpoint._port)
        factory = service.factory
        self.assertIsInstance(factory, component.XMPPComponentServerFactory)
        self.assertIsInstance(factory.router, component.Router)
        self.assertEqual('secret', factory.secret)
        self.assertFalse(factory.logTraffic)

    def test_makeServiceVerbose(self):
        """
        The verbose flag enables traffic logging.
        """
        service = tap.makeService(self._parse('--verbose'))
        self.assertTrue(service.factory.logTraffic)
| gpl-2.0 |
cmsdaq/hltd | lib/python-inotify-0.5/inotify/watcher.py | 1 | 10565 | # watcher.py - high-level interfaces to the Linux inotify subsystem
# Copyright 2006 Bryan O'Sullivan <bos@serpentine.com>
# This library is free software; you can redistribute it and/or modify
# it under the terms of version 2.1 of the GNU Lesser General Public
# License, incorporated herein by reference.
'''High-level interfaces to the Linux inotify subsystem.
The inotify subsystem provides an efficient mechanism for file status
monitoring and change notification.
The Watcher class hides the low-level details of the inotify
interface, and provides a Pythonic wrapper around it. It generates
events that provide somewhat more information than raw inotify makes
available.
The AutoWatcher class is more useful, as it automatically watches
newly-created directories on your behalf.'''
__author__ = "Bryan O'Sullivan <bos@serpentine.com>"
import inotify._inotify as inotify
import array
import errno
import fcntl
import os
import termios
import six
class Event(object):
    """Derived inotify event.

    Fields:

    mask: event mask, indicating what kind of event this is
    cookie: rename cookie, if a rename-related event
    path: path of the directory in which the event occurred
    name: name of the directory entry the event occurred on
        (may be None if the event happened to a watched directory)
    fullpath: complete path at which the event occurred
    wd: watch descriptor that triggered this event
    raw: the underlying low-level event object"""

    __slots__ = (
        'cookie',
        'fullpath',
        'mask',
        'name',
        'path',
        'raw',
        'wd',
        )

    def __init__(self, raw, path):
        self.path = path
        self.raw = raw
        # A falsy name means the event hit the watched directory itself.
        self.fullpath = path + '/' + raw.name if raw.name else path
        self.wd = raw.wd
        self.mask = raw.mask
        self.cookie = raw.cookie
        self.name = raw.name

    def __repr__(self):
        raw_repr = repr(self.raw)
        inner = raw_repr[raw_repr.find('(') + 1:]
        return 'Event(path=%r, %s' % (self.path, inner)
# Human-readable descriptions for every event type; each key becomes a
# boolean-style property on Event below, then the dict is discarded.
_event_props = {
    'access': 'File was accessed',
    'modify': 'File was modified',
    'attrib': 'Attribute of a directory entry was changed',
    'close_write': 'File was closed after being written to',
    'close_nowrite': 'File was closed without being written to',
    'open': 'File was opened',
    'moved_from': 'Directory entry was renamed from this name',
    'moved_to': 'Directory entry was renamed to this name',
    'create': 'Directory entry was created',
    'delete': 'Directory entry was deleted',
    'delete_self': 'The watched directory entry was deleted',
    'move_self': 'The watched directory entry was renamed',
    'unmount': 'Directory was unmounted, and can no longer be watched',
    'q_overflow': 'Kernel dropped events due to queue overflow',
    'ignored': 'Directory entry is no longer being watched',
    'isdir': 'Event occurred on a directory',
    }

for k, v in six.iteritems(_event_props):
    mask = getattr(inotify, 'IN_' + k.upper())
    # Bind the mask as a default argument.  A plain closure over the
    # loop variable is evaluated late, which would leave every property
    # testing the mask from the *final* loop iteration.
    def getter(self, mask=mask):
        return self.mask & mask
    getter.__name__ = k
    getter.__doc__ = v
    setattr(Event, k, property(getter, doc=v))
del _event_props
class Watcher(object):
    '''Provide a Pythonic interface to the low-level inotify API.

    Also adds derived information to each event that is not available
    through the normal inotify API, such as directory name.'''

    __slots__ = (
        'fd',        # inotify file descriptor (None after close())
        '_paths',    # path -> (watch descriptor, event mask)
        '_wds',      # watch descriptor -> (path, event mask)
        )

    def __init__(self):
        '''Create a new inotify instance.'''
        self.fd = inotify.init()
        self._paths = {}
        self._wds = {}

    def fileno(self):
        '''Return the file descriptor this watcher uses.

        Useful for passing to select and poll.'''
        return self.fd

    def add(self, path, mask):
        '''Add or modify a watch.

        Return the watch descriptor added or modified.'''
        path = os.path.normpath(path)
        wd = inotify.add_watch(self.fd, path, mask)
        # Keep both lookup directions in sync; re-adding an existing
        # path refreshes its mask (the kernel reuses the same wd).
        self._paths[path] = wd, mask
        self._wds[wd] = path, mask
        return wd

    def remove(self, wd):
        '''Remove the given watch.'''
        inotify.remove_watch(self.fd, wd)
        self._remove(wd)

    def _remove(self, wd):
        # Drop our bookkeeping for wd; harmless if it is already gone.
        path_mask = self._wds.pop(wd, None)
        if path_mask is not None:
            self._paths.pop(path_mask[0])

    def path(self, path):
        '''Return a (watch descriptor, event mask) pair for the given path.

        If the path is not being watched, return None.'''
        return self._paths.get(path)

    def wd(self, wd):
        '''Return a (path, event mask) pair for the given watch descriptor.

        If the watch descriptor is not valid or not associated with
        this watcher, return None.'''
        return self._wds.get(wd)

    def read(self, bufsize=None):
        '''Read a list of queued inotify events.

        If bufsize is zero, only return those events that can be read
        immediately without blocking.  Otherwise, block until events are
        available.'''
        events = []
        for evt in inotify.read(self.fd, bufsize):
            events.append(Event(evt, self._wds[evt.wd][0]))
            if evt.mask & inotify.IN_IGNORED:
                # The kernel has stopped watching this entry.
                self._remove(evt.wd)
            elif evt.mask & inotify.IN_UNMOUNT:
                # The underlying filesystem went away; nothing left to watch.
                self.close()
        return events

    def close(self):
        '''Shut down this watcher.

        All subsequent method calls are likely to raise exceptions.'''
        os.close(self.fd)
        self.fd = None
        self._paths = None
        self._wds = None

    def __len__(self):
        '''Return the number of active watches.'''
        return len(self._paths)

    def __iter__(self):
        '''Yield a (path, watch descriptor, event mask) tuple for each
        entry being watched.'''
        for path, (wd, mask) in six.iteritems(self._paths):
            yield path, wd, mask

    def __del__(self):
        # close() sets fd to None, so finalization after an explicit
        # close does not double-close the descriptor.
        # NOTE(review): assumes __init__ ran far enough to set self.fd.
        if self.fd is not None:
            os.close(self.fd)

    # Errors that add_iter/add_all swallow even when onerror is given.
    ignored_errors = [errno.ENOENT, errno.EPERM, errno.ENOTDIR]

    def add_iter(self, path, mask, onerror=None):
        '''Add or modify watches over path and its subdirectories.

        Yield each added or modified watch descriptor.

        To ensure that this method runs to completion, you must
        iterate over all of its results, even if you do not care what
        they are.  For example:

            for wd in w.add_iter(path, mask):
                pass

        By default, errors are ignored.  If optional arg "onerror" is
        specified, it should be a function; it will be called with one
        argument, an OSError instance.  It can report the error to
        continue with the walk, or raise the exception to abort the
        walk.'''
        # Add the IN_ONLYDIR flag to the event mask, to avoid a possible
        # race when adding a subdirectory.  In the time between the
        # event being queued by the kernel and us processing it, the
        # directory may have been deleted, or replaced with a different
        # kind of entry with the same name.
        submask = mask | inotify.IN_ONLYDIR
        try:
            yield self.add(path, mask)
        except OSError as err:
            if onerror and err.errno not in self.ignored_errors:
                onerror(err)
        for root, dirs, names in os.walk(path, topdown=False, onerror=onerror):
            for d in dirs:
                try:
                    yield self.add(root + '/' + d, submask)
                except OSError as err:
                    if onerror and err.errno not in self.ignored_errors:
                        onerror(err)

    def add_all(self, path, mask, onerror=None):
        '''Add or modify watches over path and its subdirectories.

        Return a list of added or modified watch descriptors.

        By default, errors are ignored.  If optional arg "onerror" is
        specified, it should be a function; it will be called with one
        argument, an OSError instance.  It can report the error to
        continue with the walk, or raise the exception to abort the
        walk.'''
        return [w for w in self.add_iter(path, mask, onerror)]
class AutoWatcher(Watcher):
    '''Watcher class that automatically watches newly created directories.'''

    __slots__ = (
        'addfilter',   # optional callable(Event) -> bool; gates auto-watching
        )

    def __init__(self, addfilter=None):
        '''Create a new inotify instance.

        This instance will automatically watch newly created
        directories.

        If the optional addfilter parameter is not None, it must be a
        callable that takes one parameter.  It will be called each time
        a directory is about to be automatically watched.  If it returns
        True, the directory will be watched if it still exists,
        otherwise, it will be skipped.'''
        super(AutoWatcher, self).__init__()
        self.addfilter = addfilter

    # Bits that together identify a "subdirectory was created" event.
    _dir_create_mask = inotify.IN_ISDIR | inotify.IN_CREATE

    def read(self, bufsize=None):
        '''Read queued events (see Watcher.read), additionally watching
        any newly created directory (and its subtree) with its parent's
        event mask.'''
        events = super(AutoWatcher, self).read(bufsize)
        for evt in events:
            # Require both IN_ISDIR and IN_CREATE, not just either bit.
            if evt.mask & self._dir_create_mask == self._dir_create_mask:
                if self.addfilter is None or self.addfilter(evt):
                    parentmask = self._wds[evt.wd][1]
                    # See note about race avoidance via IN_ONLYDIR above.
                    mask = parentmask | inotify.IN_ONLYDIR
                    try:
                        self.add_all(evt.fullpath, mask)
                    except OSError as err:
                        if err.errno not in self.ignored_errors:
                            raise
        return events
class Threshold(object):
    """Callable that indicates whether a file descriptor has reached a
    threshold of readable bytes available.

    This class is not thread-safe."""

    __slots__ = (
        'fd',
        'threshold',
        '_sizebuf',   # one-element int array reused for the FIONREAD ioctl
        )

    def __init__(self, fd, threshold=1024):
        self.fd = fd
        self.threshold = threshold
        self._sizebuf = array.array('i', [0])

    def readable(self):
        """Return the number of bytes readable on this file descriptor."""
        buf = self._sizebuf
        # FIONREAD fills buf in place with the pending byte count.
        fcntl.ioctl(self.fd, termios.FIONREAD, buf, True)
        return buf[0]

    def __call__(self):
        """Return True when the number of readable bytes has met or
        exceeded the threshold."""
        return self.readable() >= self.threshold
| lgpl-3.0 |
teamfx/openjfx-9-dev-rt | modules/javafx.web/src/main/native/Tools/Scripts/webkitpy/port/xvfbdriver.py | 1 | 5142 | # Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2014 Igalia S.L.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import re
import time
from webkitpy.port.server_process import ServerProcess
from webkitpy.port.driver import Driver
_log = logging.getLogger(__name__)
class XvfbDriver(Driver):
    """Driver that runs each worker's test tool under a private Xvfb X
    server, so layout tests do not require a real display."""

    @staticmethod
    def check_driver(port):
        """Return True if an Xvfb executable is available for this port."""
        xvfb_findcmd = ['which', 'Xvfb']
        if port._should_use_jhbuild():
            xvfb_findcmd = port._jhbuild_wrapper + xvfb_findcmd
        # run_command returns the exit code here; 'which' exits 0 on
        # success.  Compare with ==, not 'is': int identity is a CPython
        # small-int caching detail and must not be relied on.
        xvfb_found = port.host.executive.run_command(xvfb_findcmd, return_exit_code=True) == 0
        if not xvfb_found:
            _log.error("No Xvfb found. Cannot run layout tests.")
        return xvfb_found

    def _xvfb_pipe(self):
        # (read_fd, write_fd) pair used with Xvfb's -displayfd option.
        return os.pipe()

    def _xvfb_read_display_id(self, read_fd):
        """Block until Xvfb writes its display number to the pipe and
        return it as an int."""
        import errno
        import select
        fd_set = [read_fd]
        while fd_set:
            try:
                fd_list = select.select(fd_set, [], [])[0]
            except select.error as e:
                # Retry when interrupted by a signal; re-raise otherwise.
                if e.args[0] == errno.EINTR:
                    continue
                raise
            if read_fd in fd_list:
                # We only expect a number, so first read should be enough.
                display_id = os.read(read_fd, 256).strip('\n')
                fd_set = []
        return int(display_id)

    def _xvfb_close_pipe(self, pipe_fds):
        os.close(pipe_fds[0])
        os.close(pipe_fds[1])

    def _xvfb_run(self, environment):
        """Start Xvfb and return the display number it allocated."""
        read_fd, write_fd = self._xvfb_pipe()
        run_xvfb = ["Xvfb", "-displayfd", str(write_fd), "-screen", "0", "1024x768x%s" % self._xvfb_screen_depth(), "-nolisten", "tcp"]
        if self._port._should_use_jhbuild():
            run_xvfb = self._port._jhbuild_wrapper + run_xvfb
        with open(os.devnull, 'w') as devnull:
            self._xvfb_process = self._port.host.executive.popen(run_xvfb, stderr=devnull, env=environment)
            display_id = self._xvfb_read_display_id(read_fd)
        self._xvfb_close_pipe((read_fd, write_fd))
        return display_id

    def _xvfb_screen_depth(self):
        # Overridable via the environment; 24-bit colour by default.
        return os.environ.get('XVFB_SCREEN_DEPTH', '24')

    def _start(self, pixel_tests, per_test_args):
        # Tear down any previous server process and Xvfb instance first.
        self.stop()
        server_name = self._port.driver_name()
        environment = self._port.setup_environ_for_server(server_name)
        display_id = self._xvfb_run(environment)
        # We must do this here because the DISPLAY number depends on _worker_number
        environment['DISPLAY'] = ":%d" % display_id
        self._driver_tempdir = self._port.host.filesystem.mkdtemp(prefix='%s-' % self._port.driver_name())
        environment['DUMPRENDERTREE_TEMP'] = str(self._driver_tempdir)
        environment['LOCAL_RESOURCE_ROOT'] = self._port.layout_tests_dir()
        # Currently on WebKit2, there is no API for setting the application
        # cache directory. Each worker should have it's own and it should be
        # cleaned afterwards, so we set it to inside the temporary folder by
        # prepending XDG_CACHE_HOME with DUMPRENDERTREE_TEMP.
        environment['XDG_CACHE_HOME'] = self._port.host.filesystem.join(str(self._driver_tempdir), 'appcache')
        self._crashed_process_name = None
        self._crashed_pid = None
        self._server_process = self._port._server_process_constructor(self._port, server_name, self.cmd_line(pixel_tests, per_test_args), environment)
        self._server_process.start()

    def stop(self):
        """Stop the server process, then kill the Xvfb instance if one
        is still running."""
        super(XvfbDriver, self).stop()
        if getattr(self, '_xvfb_process', None):
            self._port.host.executive.kill_process(self._xvfb_process.pid)
            self._xvfb_process = None
| gpl-2.0 |
saturngod/pyWebTest-gitbook | book/js/Lib/sre_constants.py | 692 | 7172 | #
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017

#MAXREPEAT = 2147483648
#from _sre import MAXREPEAT

# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
    """Raised for invalid regular expressions and engine errors."""
    pass
# operators
# Symbolic names for the engine's opcodes; their numeric values come
# from their position in the OPCODES list below (see makedict()).
FAILURE = "failure"
SUCCESS = "success"

ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"

# positions
# Zero-width position assertions (^, $, \b, \B, \A, \Z and their
# locale/unicode variants); numbered via ATCODES below.
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"

# categories
# Character-class categories (\d, \s, \w and negations, with
# locale/unicode variants); numbered via CHCODES below.
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
# Ordered name lists; an item's index is its numeric code (assigned via
# makedict() below).  The ordering must match the C engine (_sre) — the
# module docstring explains how the include files are regenerated.
OPCODES = [

    # failure=0 success=1 (just because it looks better that way :-)
    FAILURE, SUCCESS,

    ANY, ANY_ALL,
    ASSERT, ASSERT_NOT,
    AT,
    BRANCH,
    CALL,
    CATEGORY,
    CHARSET, BIGCHARSET,
    GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
    IN, IN_IGNORE,
    INFO,
    JUMP,
    LITERAL, LITERAL_IGNORE,
    MARK,
    MAX_UNTIL,
    MIN_UNTIL,
    NOT_LITERAL, NOT_LITERAL_IGNORE,
    NEGATE,
    RANGE,
    REPEAT,
    REPEAT_ONE,
    SUBPATTERN,
    MIN_REPEAT_ONE

]

# Position assertion codes, same index-is-code scheme as OPCODES.
ATCODES = [
    AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
    AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
    AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
    AT_UNI_NON_BOUNDARY
]

# Character category codes, same index-is-code scheme as OPCODES.
CHCODES = [
    CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
    CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
    CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
    CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
    CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
    CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
    CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
    # Map every code name to its position in the sequence, yielding the
    # stable integer value used by the matching engine for that code.
    return {item: index for index, item in enumerate(list)}
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode "locale"
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
SRE_FLAG_ASCII = 256 # use ascii "locale"
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
if __name__ == "__main__":
    # Regenerate sre_constants.h so the C matching engine (_sre.c) and this
    # module agree on opcode / at-code / category / flag numbering.
    def dump(f, d, prefix):
        # Emit one #define per entry, ordered by numeric value.
        items = sorted(d.items(), key=lambda a: a[1])
        for k, v in items:
            f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
    f = open("sre_constants.h", "w")
    f.write("""\
/*
 * Secret Labs' Regular Expression Engine
 *
 * regular expression matching engine
 *
 * NOTE: This file is generated by sre_constants.py. If you need
 * to change anything in here, edit sre_constants.py and run it.
 *
 * Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
 *
 * See the _sre.c file for information on usage and redistribution.
 */
""")
    # MAGIC is presumably defined earlier in this module (not visible in this
    # chunk) -- TODO confirm it is in scope here.
    f.write("#define SRE_MAGIC %d\n" % MAGIC)
    dump(f, OPCODES, "SRE_OP")
    dump(f, ATCODES, "SRE")
    dump(f, CHCODES, "SRE")
    # NOTE(review): SRE_FLAG_DEBUG and SRE_FLAG_ASCII are defined above but
    # deliberately not emitted -- confirm the C engine does not need them.
    f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
    f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
    f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
    f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
    f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
    f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
    f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
    f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
    f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
    f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
    f.close()
    print("done")
| mit |
fjacob21/pycon2015 | elpiwear/proximity_warning.py | 1 | 1278 | import Edison.i2c as I2C
import sharp2y0a21
import ads1015
import time
import thread
class proximity_warning:
    """Poll a Sharp 2Y0A21 IR distance sensor (via an ADS1015 ADC on I2C)
    in a background thread and fire a warning callback when the measured
    distance drops below a registered threshold."""

    def __init__(self, sensorid, calibration, sensing_freq):
        # sensing_freq: delay in seconds between two distance readings.
        self.sensing_freq = sensing_freq
        # Sorted list of {'distance': threshold, 'warning': callback} dicts;
        # kept sorted ascending so the tightest matching threshold wins.
        self.warning = []
        adc = ads1015.ads1015(I2C.i2c(1, 0x48))
        adc.setchannel(sensorid, True)
        adc.setdatarate(ads1015.ADS1015_DR_128SPS)
        self.sensor = sharp2y0a21.sharp2y0a21(adc)
        self.sensor.loadcalibration(calibration)

    def add_warning(self, distance, warning):
        """Register *warning* (a callable taking the measured distance) to be
        fired whenever a reading falls below *distance*."""
        self.warning.append({'distance': distance, 'warning': warning})
        # Re-sort so detect_warning() returns the smallest matching threshold.
        self.warning = sorted(self.warning, key=lambda k: k['distance'])

    def start(self):
        """Start the background sensing loop."""
        self.stop_flag = False
        self.thread = thread.start_new_thread(self.sensing_thread, ())

    def stop(self):
        """Ask the background sensing loop to terminate."""
        self.stop_flag = True

    def detect_warning(self, distance):
        """Return the callback for the smallest threshold greater than
        *distance*, or None when no threshold matches."""
        # BUG FIX: the original iterated self.warnings (an attribute that is
        # never defined), raising AttributeError on every reading; the
        # registered list lives in self.warning.
        for warn in self.warning:
            if distance < warn['distance']:
                return warn['warning']
        return None

    def sensing_thread(self):
        # Background loop: read distance, fire the matching warning (if any),
        # then sleep until the next sampling period.
        while not self.stop_flag:
            dist = self.sensor.distance()
            warn = self.detect_warning(dist)
            if warn is not None:
                warn(dist)
            time.sleep(self.sensing_freq)
| mit |
kouaw/CouchPotatoServer | libs/suds/xsd/sxbase.py | 193 | 19777 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{sxbase} module provides I{base} classes that represent
schema objects.
"""
from logging import getLogger
from suds import *
from suds.xsd import *
from suds.sax.element import Element
from suds.sax import Namespace
log = getLogger(__name__)
class SchemaObject(object):
"""
A schema object is an extension to object object with
with schema awareness.
@ivar root: The XML root element.
@type root: L{Element}
@ivar schema: The schema containing this object.
@type schema: L{schema.Schema}
@ivar form_qualified: A flag that inidcates that @elementFormDefault
has a value of I{qualified}.
@type form_qualified: boolean
@ivar nillable: A flag that inidcates that @nillable
has a value of I{true}.
@type nillable: boolean
@ivar default: The default value.
@type default: object
@ivar rawchildren: A list raw of all children.
@type rawchildren: [L{SchemaObject},...]
"""
@classmethod
def prepend(cls, d, s, filter=Filter()):
"""
Prepend schema object's from B{s}ource list to
the B{d}estination list while applying the filter.
@param d: The destination list.
@type d: list
@param s: The source list.
@type s: list
@param filter: A filter that allows items to be prepended.
@type filter: L{Filter}
"""
i = 0
for x in s:
if x in filter:
d.insert(i, x)
i += 1
@classmethod
def append(cls, d, s, filter=Filter()):
"""
Append schema object's from B{s}ource list to
the B{d}estination list while applying the filter.
@param d: The destination list.
@type d: list
@param s: The source list.
@type s: list
@param filter: A filter that allows items to be appended.
@type filter: L{Filter}
"""
for item in s:
if item in filter:
d.append(item)
def __init__(self, schema, root):
"""
@param schema: The containing schema.
@type schema: L{schema.Schema}
@param root: The xml root node.
@type root: L{Element}
"""
self.schema = schema
self.root = root
self.id = objid(self)
self.name = root.get('name')
self.qname = (self.name, schema.tns[1])
self.min = root.get('minOccurs')
self.max = root.get('maxOccurs')
self.type = root.get('type')
self.ref = root.get('ref')
self.form_qualified = schema.form_qualified
self.nillable = False
self.default = root.get('default')
self.rawchildren = []
self.cache = {}
def attributes(self, filter=Filter()):
"""
Get only the attribute content.
@param filter: A filter to constrain the result.
@type filter: L{Filter}
@return: A list of tuples (attr, ancestry)
@rtype: [(L{SchemaObject}, [L{SchemaObject},..]),..]
"""
result = []
for child, ancestry in self:
if child.isattr() and child in filter:
result.append((child, ancestry))
return result
def children(self, filter=Filter()):
"""
Get only the I{direct} or non-attribute content.
@param filter: A filter to constrain the result.
@type filter: L{Filter}
@return: A list tuples: (child, ancestry)
@rtype: [(L{SchemaObject}, [L{SchemaObject},..]),..]
"""
result = []
for child, ancestry in self:
if not child.isattr() and child in filter:
result.append((child, ancestry))
return result
def get_attribute(self, name):
"""
Get (find) a I{non-attribute} attribute by name.
@param name: A attribute name.
@type name: str
@return: A tuple: the requested (attribute, ancestry).
@rtype: (L{SchemaObject}, [L{SchemaObject},..])
"""
for child, ancestry in self.attributes():
if child.name == name:
return (child, ancestry)
return (None, [])
def get_child(self, name):
"""
Get (find) a I{non-attribute} child by name.
@param name: A child name.
@type name: str
@return: A tuple: the requested (child, ancestry).
@rtype: (L{SchemaObject}, [L{SchemaObject},..])
"""
for child, ancestry in self.children():
if child.any() or child.name == name:
return (child, ancestry)
return (None, [])
def namespace(self, prefix=None):
"""
Get this properties namespace
@param prefix: The default prefix.
@type prefix: str
@return: The schema's target namespace
@rtype: (I{prefix},I{URI})
"""
ns = self.schema.tns
if ns[0] is None:
ns = (prefix, ns[1])
return ns
def default_namespace(self):
return self.root.defaultNamespace()
def unbounded(self):
"""
Get whether this node is unbounded I{(a collection)}
@return: True if unbounded, else False.
@rtype: boolean
"""
max = self.max
if max is None:
max = '1'
if max.isdigit():
return (int(max) > 1)
else:
return ( max == 'unbounded' )
def optional(self):
"""
Get whether this type is optional.
@return: True if optional, else False
@rtype: boolean
"""
min = self.min
if min is None:
min = '1'
return ( min == '0' )
def required(self):
"""
Get whether this type is required.
@return: True if required, else False
@rtype: boolean
"""
return ( not self.optional() )
def resolve(self, nobuiltin=False):
"""
Resolve and return the nodes true self.
@param nobuiltin: Flag indicates that resolution must
not continue to include xsd builtins.
@return: The resolved (true) type.
@rtype: L{SchemaObject}
"""
return self.cache.get(nobuiltin, self)
def sequence(self):
"""
Get whether this is an <xs:sequence/>
@return: True if <xs:sequence/>, else False
@rtype: boolean
"""
return False
def xslist(self):
"""
Get whether this is an <xs:list/>
@return: True if any, else False
@rtype: boolean
"""
return False
def all(self):
"""
Get whether this is an <xs:all/>
@return: True if any, else False
@rtype: boolean
"""
return False
def choice(self):
"""
Get whether this is n <xs:choice/>
@return: True if any, else False
@rtype: boolean
"""
return False
def any(self):
"""
Get whether this is an <xs:any/>
@return: True if any, else False
@rtype: boolean
"""
return False
def builtin(self):
"""
Get whether this is a schema-instance (xs) type.
@return: True if any, else False
@rtype: boolean
"""
return False
def enum(self):
"""
Get whether this is a simple-type containing an enumeration.
@return: True if any, else False
@rtype: boolean
"""
return False
def isattr(self):
"""
Get whether the object is a schema I{attribute} definition.
@return: True if an attribute, else False.
@rtype: boolean
"""
return False
def extension(self):
"""
Get whether the object is an extension of another type.
@return: True if an extension, else False.
@rtype: boolean
"""
return False
def restriction(self):
"""
Get whether the object is an restriction of another type.
@return: True if an restriction, else False.
@rtype: boolean
"""
return False
def mixed(self):
"""
Get whether this I{mixed} content.
"""
return False
def find(self, qref, classes=()):
"""
Find a referenced type in self or children.
@param qref: A qualified reference.
@type qref: qref
@param classes: A list of classes used to qualify the match.
@type classes: [I{class},...]
@return: The referenced type.
@rtype: L{SchemaObject}
@see: L{qualify()}
"""
if not len(classes):
classes = (self.__class__,)
if self.qname == qref and self.__class__ in classes:
return self
for c in self.rawchildren:
p = c.find(qref, classes)
if p is not None:
return p
return None
def translate(self, value, topython=True):
"""
Translate a value (type) to/from a python type.
@param value: A value to translate.
@return: The converted I{language} type.
"""
return value
def childtags(self):
"""
Get a list of valid child tag names.
@return: A list of child tag names.
@rtype: [str,...]
"""
return ()
def dependencies(self):
"""
Get a list of dependancies for dereferencing.
@return: A merge dependancy index and a list of dependancies.
@rtype: (int, [L{SchemaObject},...])
"""
return (None, [])
def autoqualified(self):
"""
The list of I{auto} qualified attribute values.
Qualification means to convert values into I{qref}.
@return: A list of attibute names.
@rtype: list
"""
return ['type', 'ref']
def qualify(self):
"""
Convert attribute values, that are references to other
objects, into I{qref}. Qualfied using default document namespace.
Since many wsdls are written improperly: when the document does
not define a default namespace, the schema target namespace is used
to qualify references.
"""
defns = self.root.defaultNamespace()
if Namespace.none(defns):
defns = self.schema.tns
for a in self.autoqualified():
ref = getattr(self, a)
if ref is None:
continue
if isqref(ref):
continue
qref = qualify(ref, self.root, defns)
log.debug('%s, convert %s="%s" to %s', self.id, a, ref, qref)
setattr(self, a, qref)
def merge(self, other):
"""
Merge another object as needed.
"""
other.qualify()
for n in ('name',
'qname',
'min',
'max',
'default',
'type',
'nillable',
'form_qualified',):
if getattr(self, n) is not None:
continue
v = getattr(other, n)
if v is None:
continue
setattr(self, n, v)
def content(self, collection=None, filter=Filter(), history=None):
"""
Get a I{flattened} list of this nodes contents.
@param collection: A list to fill.
@type collection: list
@param filter: A filter used to constrain the result.
@type filter: L{Filter}
@param history: The history list used to prevent cyclic dependency.
@type history: list
@return: The filled list.
@rtype: list
"""
if collection is None:
collection = []
if history is None:
history = []
if self in history:
return collection
history.append(self)
if self in filter:
collection.append(self)
for c in self.rawchildren:
c.content(collection, filter, history[:])
return collection
def str(self, indent=0, history=None):
"""
Get a string representation of this object.
@param indent: The indent.
@type indent: int
@return: A string.
@rtype: str
"""
if history is None:
history = []
if self in history:
return '%s ...' % Repr(self)
history.append(self)
tab = '%*s'%(indent*3, '')
result = []
result.append('%s<%s' % (tab, self.id))
for n in self.description():
if not hasattr(self, n):
continue
v = getattr(self, n)
if v is None:
continue
result.append(' %s="%s"' % (n, v))
if len(self):
result.append('>')
for c in self.rawchildren:
result.append('\n')
result.append(c.str(indent+1, history[:]))
if c.isattr():
result.append('@')
result.append('\n%s' % tab)
result.append('</%s>' % self.__class__.__name__)
else:
result.append(' />')
return ''.join(result)
def description(self):
"""
Get the names used for str() and repr() description.
@return: A dictionary of relavent attributes.
@rtype: [str,...]
"""
return ()
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return unicode(self.str())
def __repr__(self):
s = []
s.append('<%s' % self.id)
for n in self.description():
if not hasattr(self, n):
continue
v = getattr(self, n)
if v is None:
continue
s.append(' %s="%s"' % (n, v))
s.append(' />')
myrep = ''.join(s)
return myrep.encode('utf-8')
def __len__(self):
n = 0
for x in self: n += 1
return n
def __iter__(self):
return Iter(self)
def __getitem__(self, index):
i = 0
for c in self:
if i == index:
return c
class Iter:
"""
The content iterator - used to iterate the L{Content} children. The iterator
provides a I{view} of the children that is free of container elements
such as <sequence/> and <choice/>.
@ivar stack: A stack used to control nesting.
@type stack: list
"""
class Frame:
""" A content iterator frame. """
def __init__(self, sx):
"""
@param sx: A schema object.
@type sx: L{SchemaObject}
"""
self.sx = sx
self.items = sx.rawchildren
self.index = 0
def next(self):
"""
Get the I{next} item in the frame's collection.
@return: The next item or None
@rtype: L{SchemaObject}
"""
if self.index < len(self.items):
result = self.items[self.index]
self.index += 1
return result
def __init__(self, sx):
"""
@param sx: A schema object.
@type sx: L{SchemaObject}
"""
self.stack = []
self.push(sx)
def push(self, sx):
"""
Create a frame and push the specified object.
@param sx: A schema object to push.
@type sx: L{SchemaObject}
"""
self.stack.append(Iter.Frame(sx))
def pop(self):
"""
Pop the I{top} frame.
@return: The popped frame.
@rtype: L{Frame}
@raise StopIteration: when stack is empty.
"""
if len(self.stack):
return self.stack.pop()
else:
raise StopIteration()
def top(self):
"""
Get the I{top} frame.
@return: The top frame.
@rtype: L{Frame}
@raise StopIteration: when stack is empty.
"""
if len(self.stack):
return self.stack[-1]
else:
raise StopIteration()
def next(self):
"""
Get the next item.
@return: A tuple: the next (child, ancestry).
@rtype: (L{SchemaObject}, [L{SchemaObject},..])
@raise StopIteration: A the end.
"""
frame = self.top()
while True:
result = frame.next()
if result is None:
self.pop()
return self.next()
if isinstance(result, Content):
ancestry = [f.sx for f in self.stack]
return (result, ancestry)
self.push(result)
return self.next()
def __iter__(self):
return self
class XBuiltin(SchemaObject):
"""
Represents an (xsd) schema <xs:*/> node
"""
def __init__(self, schema, name):
"""
@param schema: The containing schema.
@type schema: L{schema.Schema}
"""
root = Element(name)
SchemaObject.__init__(self, schema, root)
self.name = name
self.nillable = True
def namespace(self, prefix=None):
return Namespace.xsdns
def builtin(self):
return True
def resolve(self, nobuiltin=False):
return self
class Content(SchemaObject):
"""
This class represents those schema objects that represent
real XML document content.
"""
pass
class NodeFinder:
"""
Find nodes based on flexable criteria. The I{matcher} is
may be any object that implements a match(n) method.
@ivar matcher: An object used as criteria for match.
@type matcher: I{any}.match(n)
@ivar limit: Limit the number of matches. 0=unlimited.
@type limit: int
"""
def __init__(self, matcher, limit=0):
"""
@param matcher: An object used as criteria for match.
@type matcher: I{any}.match(n)
@param limit: Limit the number of matches. 0=unlimited.
@type limit: int
"""
self.matcher = matcher
self.limit = limit
def find(self, node, list):
"""
Traverse the tree looking for matches.
@param node: A node to match on.
@type node: L{SchemaObject}
@param list: A list to fill.
@type list: list
"""
if self.matcher.match(node):
list.append(node)
self.limit -= 1
if self.limit == 0:
return
for c in node.rawchildren:
self.find(c, list)
return self | gpl-3.0 |
hackerbot/DjangoDev | tests/gis_tests/geo3d/models.py | 302 | 1294 | from django.utils.encoding import python_2_unicode_compatible
from ..models import models
@python_2_unicode_compatible
class NamedModel(models.Model):
    # Abstract base for the 3D GIS test models: a name plus a GeoManager.
    name = models.CharField(max_length=30)
    objects = models.GeoManager()

    class Meta:
        # Abstract: concrete subclasses add their own geometry field.
        abstract = True
        # Tests using these models are skipped on non-GIS backends.
        required_db_features = ['gis_enabled']

    def __str__(self):
        return self.name
class City3D(NamedModel):
point = models.PointField(dim=3)
class Interstate2D(NamedModel):
line = models.LineStringField(srid=4269)
class Interstate3D(NamedModel):
line = models.LineStringField(dim=3, srid=4269)
class InterstateProj2D(NamedModel):
line = models.LineStringField(srid=32140)
class InterstateProj3D(NamedModel):
line = models.LineStringField(dim=3, srid=32140)
class Polygon2D(NamedModel):
poly = models.PolygonField(srid=32140)
class Polygon3D(NamedModel):
poly = models.PolygonField(dim=3, srid=32140)
class SimpleModel(models.Model):
objects = models.GeoManager()
class Meta:
abstract = True
required_db_features = ['gis_enabled']
class Point2D(SimpleModel):
point = models.PointField()
class Point3D(SimpleModel):
point = models.PointField(dim=3)
class MultiPoint3D(SimpleModel):
mpoint = models.MultiPointField(dim=3)
| bsd-3-clause |
0111001101111010/cs595-f13 | assignment5/Q2/q2.py | 1 | 2515 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import requests
from requests_oauthlib import OAuth1
from urlparse import parse_qs
import json
REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize?oauth_token="
ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
CONSUMER_KEY = "k9gkxQR2WFpxhrCyQ8pzpw"
CONSUMER_SECRET = "HIIRWNeuIMTKkkgZObFb6DA97PD4zwXJOby51st69Bc"
OAUTH_TOKEN = "299932017-u8jkuCzc4pPlLHt4qzA0KnbbFnJdzANm4S3wfDf4"
OAUTH_TOKEN_SECRET = "rSBprorXEmpHzQoqBjYxqDS7fIEP2HOMDr7L9ASo"
def setup_oauth():
    """Run the interactive three-legged OAuth 1.0a dance against Twitter
    and return the resulting (access token, access token secret) pair."""
    # Step 1: obtain a temporary request token using only the consumer keys.
    oauth = OAuth1(CONSUMER_KEY, client_secret=CONSUMER_SECRET)
    r = requests.post(url=REQUEST_TOKEN_URL, auth=oauth)
    credentials = parse_qs(r.content)
    resource_owner_key = credentials.get('oauth_token')[0]
    resource_owner_secret = credentials.get('oauth_token_secret')[0]
    # Step 2: send the user to Twitter to authorize, and read back the
    # PIN/verifier they are shown (Python 2 raw_input).
    authorize_url = AUTHORIZE_URL + resource_owner_key
    print 'Please go here and authorize: ' + authorize_url
    verifier = raw_input('Please input the verifier: ')
    oauth = OAuth1(CONSUMER_KEY,
                   client_secret=CONSUMER_SECRET,
                   resource_owner_key=resource_owner_key,
                   resource_owner_secret=resource_owner_secret,
                   verifier=verifier)
    # Step 3: exchange the verified request token for a long-lived access token.
    r = requests.post(url=ACCESS_TOKEN_URL, auth=oauth)
    credentials = parse_qs(r.content)
    token = credentials.get('oauth_token')[0]
    secret = credentials.get('oauth_token_secret')[0]
    return token, secret
def get_oauth():
    """Build an OAuth1 auth object from the credentials hardcoded above.

    NOTE(review): consumer and access secrets are committed in this file;
    they should be moved to environment variables or a config file and the
    committed keys revoked.
    """
    oauth = OAuth1(CONSUMER_KEY,
                   client_secret=CONSUMER_SECRET,
                   resource_owner_key=OAUTH_TOKEN,
                   resource_owner_secret=OAUTH_TOKEN_SECRET)
    return oauth
if __name__ == "__main__":
if not OAUTH_TOKEN:
token, secret = setup_oauth()
print "OAUTH_TOKEN: " + token
print "OAUTH_TOKEN_SECRET: " + secret
print
else:
oauth = get_oauth()
urls = list()
f = open('followers.csv', 'w')
r = requests.get(url="https://api.twitter.com/1.1/followers/list.json?cursor=-1&screen_name=stanzheng&skip_status=true&include_user_entities=false&count=350", auth=oauth)
r = r.json()
for followers in r["users"]:
bob =f.write(str(followers['screen_name']) + ',' + str(followers['followers_count']) + '\n') | mit |
gnu-sandhi/sandhi | modules/gr36/gnuradio-core/src/python/gnuradio/gr/qa_vector_sink_source.py | 18 | 2014 | #!/usr/bin/env python
#
# Copyright 2008,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import math
class test_vector_sink_source(gr_unittest.TestCase):
    """QA tests: data pushed through gr.vector_source_f must arrive at
    gr.vector_sink_f unchanged, for scalar and vector item sizes."""

    def setUp (self):
        # Fresh flow graph for every test case.
        self.tb = gr.top_block ()

    def tearDown (self):
        # Drop the flow graph so blocks are torn down between tests.
        self.tb = None

    def test_001(self):
        # Scalar stream: 16 floats pass source -> sink unchanged.
        src_data = [float(x) for x in range(16)]
        expected_result = tuple(src_data)
        src = gr.vector_source_f(src_data)
        dst = gr.vector_sink_f()
        self.tb.connect(src, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def test_002(self):
        # Vector length 2: 16 items divide evenly into 2-wide vectors, and
        # the flattened output must equal the input.
        src_data = [float(x) for x in range(16)]
        expected_result = tuple(src_data)
        src = gr.vector_source_f(src_data, False, 2)
        dst = gr.vector_sink_f(2)
        self.tb.connect(src, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def test_003(self):
        # Vector length 3 does not divide 16 items: constructing the source
        # must raise ValueError. (expected_result is unused here.)
        src_data = [float(x) for x in range(16)]
        expected_result = tuple(src_data)
        self.assertRaises(ValueError, lambda : gr.vector_source_f(src_data, False, 3))
if __name__ == '__main__':
gr_unittest.run(test_vector_sink_source, "test_vector_sink_source.xml")
| gpl-3.0 |
h0tw1r3/kernel_samsung_sghi717 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
    """Called once by perf at trace start: spawn the periodic display thread.

    The refresh loop runs in the background while the main thread keeps
    receiving events through raw_syscalls__sys_enter(). The dead trailing
    ``pass`` from the original has been removed.
    """
    thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    # Per-event hook invoked by perf for every raw sys_enter tracepoint.
    # When a [comm] filter was given, count only events from that command.
    if for_comm is not None:
        if common_comm != for_comm:
            return
    # syscalls is an autodict: reading a missing id yields a nested autodict,
    # so "+= 1" raises TypeError on first sight of a syscall; that exception
    # is used to initialise the counter to 1.
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1
def print_syscall_totals(interval):
    # Background loop: clear the terminal, print the per-syscall totals
    # sorted by count (descending), reset the counters, then sleep
    # *interval* seconds. Python 2 only: print statements with trailing
    # commas and a tuple-parameter lambda below.
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",
        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
            "----------"),
        # Sort by (count, id) descending; lambda(k, v) unpacking is Py2-only.
        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
            reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                # Entry may still be an empty autodict placeholder; skip it.
                pass
        syscalls.clear()
        time.sleep(interval)
| gpl-2.0 |
blrm/openshift-tools | ansible/roles/lib_ops_utils/library/yum_repo_exclude.py | 8 | 4559 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
'''
See Ansible Module Documentation (Below)
'''
import re
import iniparse
DOCUMENTATION = '''
---
module: yum_repo_exclude
short_description: manage packages on a YUM repo's exclude line
description:
- Add package names or patterns to a YUM repository configuration's exclude line
options:
name=dict(required=True),
repo=dict(required=True),
patterns=dict(required=True, type='list'),
state=dict(required=False, default='present', choices=['present', 'absent']),
name:
description:
- Filename where the repository configuration exists
required: true
state:
description:
- One of 'present', 'absent'. If 'present', patterns are added (if necessary). If 'absent', patterns are removed (if necessary).
required: true
default: present
repo:
description:
- The name of the repository
required: true
patterns:
description:
- A list of package names and/or package patterns
required: true
author:
- "Joel Smith (joelsmith@redhat.com)"
'''
EXAMPLES = '''
tasks:
- name: Don't install foo from repo bar
yum_repo_exclude:
name: /etc/yum.repos.d/bar.repo
repo: bar
patterns: [ foo ]
- name: Stop excluding baz and qux-* from repo bar
yum_repo_exclude:
name: /etc/yum.repos.d/bar.repo
repo: bar
patterns: [ baz, qux-* ]
state: absent
'''
class YumRepoExcludeError(Exception):
    """Raised by YumRepoExclude methods whenever repository handling fails."""

    def __init__(self, msg):
        """Store *msg* as both the exception text and a ``.msg`` attribute."""
        super(YumRepoExcludeError, self).__init__(msg)
        self.msg = msg
class YumRepoExclude(object):
    '''Read/write access to the ``exclude=`` option of one repository
    section inside a YUM .repo configuration file (parsed with iniparse).'''

    def __init__(self, filename, repo):
        '''Bind to repository section *repo* inside file *filename*.
        No I/O happens here; the file is opened by get()/set().'''
        self.filename = filename
        self.repo = repo

    def get(self):
        '''Return the current exclude patterns as a list (empty when the
        option is absent). Raises YumRepoExcludeError if the repo section
        is missing.'''
        ini = None
        with open(self.filename) as repofile:
            ini = iniparse.INIConfig(repofile)
        repoobj = ini[self.repo]
        # iniparse returns a placeholder object (without __getitem__) for a
        # missing section -- presumably; the getattr probe detects that.
        # TODO confirm against iniparse docs.
        if not getattr(repoobj, "__getitem__", None):
            raise YumRepoExcludeError("Repository {} not found in file {}".format(self.repo, self.filename))
        current = repoobj["exclude"]
        # Same probe: a real value is a string (has __getitem__); a missing
        # option yields the placeholder and we report no patterns.
        if getattr(current, "__getitem__", None):
            return re.split(r'\s+', current)
        return list()

    def set(self, patterns):
        '''Replace the exclude option with the space-joined *patterns* and
        rewrite the file in place. Raises YumRepoExcludeError if the repo
        section is missing.'''
        with open(self.filename, 'r+') as repofile:
            ini = iniparse.INIConfig(repofile)
            repoobj = ini[self.repo]
            if not getattr(repoobj, "__getitem__", None):
                raise YumRepoExcludeError("Repository {} not found in file {}".format(self.repo, self.filename))
            repoobj["exclude"] = " ".join(patterns)
            repofile.seek(0)
            # Normalise iniparse's "exclude = " spelling back to "exclude="
            # so the file diff stays minimal; truncate in case it shrank.
            repofile.write(re.sub(r'^exclude += +', 'exclude=', str(ini), flags=re.M))
            repofile.truncate()
def main():
    '''Ansible module entry point: ensure the given patterns are present on
    (or absent from) a YUM repository's exclude line and report whether the
    file changed.'''
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            repo=dict(required=True),
            patterns=dict(required=True, type='list'),
            state=dict(required=False, default='present', choices=['present', 'absent']),
        ),
        supports_check_mode=True
    )
    name = module.params['name']
    repo = module.params['repo']
    patterns = module.params['patterns']
    state = module.params['state']
    changed = False
    yumrepo = YumRepoExclude(name, repo)
    try:
        current = set(yumrepo.get())
        if state == 'absent':
            expected = current - set(patterns)
        else:
            # state == 'present' is the only other value AnsibleModule
            # allows; using else (not elif) guarantees `expected` is bound.
            expected = current | set(patterns)
        if current != expected:
            if module.check_mode:
                # BUG FIX: check mode was declared as supported but the repo
                # file was still rewritten; just report the pending change.
                changed = True
            else:
                yumrepo.set(expected)
                # Re-read to verify the write actually took effect.
                current = set(yumrepo.get())
                if current == expected:
                    changed = True
                else:
                    module.fail_json(msg="Update to repo {} from {} failed. Expected {}, got {}".format(repo, name,
                                                                                                        expected, current))
    except YumRepoExcludeError as ex:
        module.fail_json(msg=ex.msg)
    return module.exit_json(changed=changed)
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-position
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| apache-2.0 |
angad/libjingle-mac | scons-2.2.0/build/lib/SCons/Tool/c++.py | 14 | 3416 | """SCons.Tool.c++
Tool-specific initialization for generic Posix C++ compilers.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/c++.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import os.path
import SCons.Tool
import SCons.Defaults
import SCons.Util
compilers = ['CC', 'c++']
CXXSuffixes = ['.cpp', '.cc', '.cxx', '.c++', '.C++', '.mm']
if SCons.Util.case_sensitive_suffixes('.c', '.C'):
CXXSuffixes.append('.C')
def iscplusplus(source):
    """Return 1 if any node in *source* is built from a C++ source file, else 0."""
    # Source might be None for unusual cases like SConf.
    if not source:
        return 0
    for node in source:
        if not node.sources:
            continue
        suffix = os.path.splitext(str(node.sources[0]))[1]
        if suffix in CXXSuffixes:
            return 1
    return 0
def generate(env):
    """
    Add Builders and construction variables for generic POSIX C++
    compilers to an Environment.
    """
    import SCons.Tool
    import SCons.Tool.cc
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)

    # Wire the C++ compile actions and object emitters up for every
    # recognized C++ suffix, for both static and shared objects.
    for suffix in CXXSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.CXXAction)
        shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)
        static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
        shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)

    SCons.Tool.cc.add_common_cc_variables(env)

    # Default construction variables for a POSIX-style C++ toolchain.
    settings = (
        ('CXX', 'c++'),
        ('CXXFLAGS', SCons.Util.CLVar('')),
        ('CXXCOM', '$CXX -o $TARGET -c $CXXFLAGS $CCFLAGS $_CCCOMCOM $SOURCES'),
        ('SHCXX', '$CXX'),
        ('SHCXXFLAGS', SCons.Util.CLVar('$CXXFLAGS')),
        ('SHCXXCOM', '$SHCXX -o $TARGET -c $SHCXXFLAGS $SHCCFLAGS $_CCCOMCOM $SOURCES'),
        ('CPPDEFPREFIX', '-D'),
        ('CPPDEFSUFFIX', ''),
        ('INCPREFIX', '-I'),
        ('INCSUFFIX', ''),
        ('SHOBJSUFFIX', '.os'),
        ('OBJSUFFIX', '.o'),
        ('STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME', 0),
        ('CXXFILESUFFIX', '.cc'),
    )
    for key, value in settings:
        env[key] = value
def exists(env):
    """Return a truthy path if one of the known C++ compilers is detectable."""
    return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| bsd-3-clause |
cheekiatng/titanium_mobile | support/common/simplejson/tests/test_unicode.py | 123 | 2327 | from unittest import TestCase
import simplejson as json
class TestUnicode(TestCase):
    """Unicode round-trip tests for simplejson encoding/decoding.

    NOTE: this suite targets Python 2 — it relies on the ``unichr`` and
    ``unicode`` builtins and on simplejson's ``encoding=`` keyword.
    """

    def test_encoding1(self):
        # Encoding a unicode string and its UTF-8 byte form must agree.
        encoder = json.JSONEncoder(encoding='utf-8')
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        s = u.encode('utf-8')
        ju = encoder.encode(u)
        js = encoder.encode(s)
        self.assertEquals(ju, js)

    def test_encoding2(self):
        # Same as above via the module-level dumps() convenience.
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        s = u.encode('utf-8')
        ju = json.dumps(u, encoding='utf-8')
        js = json.dumps(s, encoding='utf-8')
        self.assertEquals(ju, js)

    def test_encoding3(self):
        # Default output is ASCII-safe: non-ASCII escapes to \\uXXXX.
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = json.dumps(u)
        self.assertEquals(j, '"\\u03b1\\u03a9"')

    def test_encoding4(self):
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = json.dumps([u])
        self.assertEquals(j, '["\\u03b1\\u03a9"]')

    def test_encoding5(self):
        # ensure_ascii=False emits the raw unicode characters instead.
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = json.dumps(u, ensure_ascii=False)
        self.assertEquals(j, u'"%s"' % (u,))

    def test_encoding6(self):
        u = u'\N{GREEK SMALL LETTER ALPHA}\N{GREEK CAPITAL LETTER OMEGA}'
        j = json.dumps([u], ensure_ascii=False)
        self.assertEquals(j, u'["%s"]' % (u,))

    def test_big_unicode_encode(self):
        # Astral-plane characters are escaped as a UTF-16 surrogate pair.
        u = u'\U0001d120'
        self.assertEquals(json.dumps(u), '"\\ud834\\udd20"')
        self.assertEquals(json.dumps(u, ensure_ascii=False), u'"\U0001d120"')

    def test_big_unicode_decode(self):
        # Decoding must accept the literal char and the surrogate-pair form.
        u = u'z\U0001d120x'
        self.assertEquals(json.loads('"' + u + '"'), u)
        self.assertEquals(json.loads('"z\\ud834\\udd20x"'), u)

    def test_unicode_decode(self):
        # Every BMP codepoint below the surrogate range round-trips.
        for i in range(0, 0xd7ff):
            u = unichr(i)
            s = '"\\u%04x"' % (i,)
            self.assertEquals(json.loads(s), u)

    def test_default_encoding(self):
        # UTF-8 byte input decodes to unicode values by default.
        self.assertEquals(json.loads(u'{"a": "\xe9"}'.encode('utf-8')),
                          {'a': u'\xe9'})

    def test_unicode_preservation(self):
        # Decoded strings are always unicode, never str.
        self.assertEquals(type(json.loads(u'""')), unicode)
        self.assertEquals(type(json.loads(u'"a"')), unicode)
        self.assertEquals(type(json.loads(u'["a"]')[0]), unicode)
hujiajie/chromium-crosswalk | testing/scripts/webkit_python_tests.py | 57 | 1128 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import common
def main_run(args):
    """Run the webkitpy test suite and write a JSON summary to args.output.

    Returns the test runner's exit code.
    """
    script = os.path.join(common.SRC_DIR, 'third_party', 'WebKit',
                          'Tools', 'Scripts', 'test-webkitpy')
    with common.temporary_file() as results_path:
        rc = common.run_command([
            sys.executable,
            script,
            '--write-full-results-to', results_path,
        ])
        with open(results_path) as results_file:
            results = json.load(results_file)

    failures = common.parse_common_test_results(results)['unexpected_failures']
    # The run is "valid" when the runner exited within the failure-count
    # range and either succeeded outright or reported concrete failures.
    valid = bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                 ((rc == 0) or failures))
    json.dump({
        'valid': valid,
        'failures': failures.keys(),
    }, args.output)
    return rc
def main_compile_targets(args):
    # The webkitpy tests are pure Python: no compiled targets are needed.
    json.dump([], args.output)
if __name__ == '__main__':
    # Dispatch table mapping the script sub-command to its handler.
    funcs = {
        'run': main_run,
        'compile_targets': main_compile_targets,
    }
    sys.exit(common.run_script(sys.argv[1:], funcs))
| bsd-3-clause |
hustodemon/spacewalk | client/rhel/rhn-client-tools/src/up2date_client/rpcServer.py | 2 | 10239 | #
import os
import sys
import config
import socket
import time
import httplib
import urllib2
import clientCaps
import up2dateLog
import up2dateErrors
import up2dateUtils
import up2dateAuth
import urlparse
import xmlrpclib
from rhn import rpclib
import gettext
t = gettext.translation('rhn-client-tools', fallback=True)
_ = t.ugettext
def stdoutMsgCallback(msg):
    # Default user-visible message callback: echo to stdout.
    # (Python 2 print statement — this module is Python 2 only.)
    print msg
class RetryServer(rpclib.Server):
    """An rpclib.Server that fails over to the next configured server URL.

    Any unexpected error during an XML-RPC request advances to the next
    URL in the attached ServerList and retries; known-fatal errors
    (redirection problems, XML-RPC faults, bad status lines) propagate
    or exit immediately.
    """

    def addServerList(self, serverList):
        # Attach the failover list; must be called before any request.
        self.serverList = serverList

    def _request1(self, methodname, params):
        # Wrapper around rpclib's _request that retries across serverList.
        self.log = up2dateLog.initLog()
        while 1:
            try:
                ret = self._request(methodname, params)
            except rpclib.InvalidRedirectionError:
                raise
            except xmlrpclib.Fault:
                # Server-side faults are real answers; do not fail over.
                raise
            except httplib.BadStatusLine:
                # NOTE(review): exits the whole process rather than raising —
                # presumably intentional for this CLI tool; confirm for
                # library callers.
                self.log.log_me("Error: Server Unavailable. Please try later.")
                stdoutMsgCallback(
                    _("Error: Server Unavailable. Please try later."))
                sys.exit(-1)
            except:
                # Any other failure: move to the next server URL, if any.
                server = self.serverList.next()
                if server == None:
                    # since just because we failed, the server list could
                    # change (aka, firstboot, they get an option to reset the
                    # the server configuration) so reset the serverList
                    self.serverList.resetServerIndex()
                    raise

                msg = "An error occurred talking to %s:\n" % self._host
                msg = msg + "%s\n%s\n" % (sys.exc_type, sys.exc_value)
                msg = msg + "Trying the next serverURL: %s\n" % self.serverList.server()
                self.log.log_me(msg)
                # try a different url
                # use the next serverURL
                import urllib
                typ, uri = urllib.splittype(self.serverList.server())
                typ = typ.lower()
                if typ not in ("http", "https"):
                    raise rpclib.InvalidRedirectionError(
                        "Redirected to unsupported protocol %s" % typ), None, sys.exc_info()[2]

                # Re-point the underlying rpclib.Server state at the new URL.
                self._host, self._handler = urllib.splithost(uri)
                self._orig_handler = self._handler
                self._type = typ
                self._uri = self.serverList.server()
                if not self._handler:
                    self._handler = "/RPC2"
                self._allow_redirect = 1
                continue
            # if we get this far, we succedded
            break
        return ret

    def __getattr__(self, name):
        # magic method dispatcher: every unknown attribute becomes a
        # remote method routed through the retrying _request1 above.
        return rpclib.xmlrpclib._Method(self._request1, name)
# uh, yeah, this could be an iterator, but we need it to work on 1.5 as well
class ServerList:
    """Cursor over an ordered list of server URLs, used for failover.

    server() returns the URL at the current position; next() advances and
    returns the new URL or None when the list is exhausted;
    resetServerIndex() rewinds to the first URL.
    """

    def __init__(self, serverlist=None):
        # Bug fix: the default used to be a mutable [] literal, which is
        # evaluated once at definition time and therefore shared by every
        # ServerList() created without arguments — appending to one
        # instance's list mutated all of them. Use the None sentinel.
        if serverlist is None:
            serverlist = []
        self.serverList = serverlist
        self.index = 0

    def server(self):
        """Return the URL at the current cursor position."""
        self.serverurl = self.serverList[self.index]
        return self.serverurl

    def next(self):
        """Advance the cursor; return the new URL, or None when exhausted."""
        self.index = self.index + 1
        if self.index >= len(self.serverList):
            return None
        return self.server()

    def resetServerIndex(self):
        """Rewind the cursor back to the first server URL."""
        self.index = 0
def getServer(refreshCallback=None, serverOverride=None, timeout=None):
    """Build and return a configured RetryServer for talking to RHN.

    Proxy settings, SSL CA certificates, language headers and client
    capability headers are all read from the up2date configuration.
    Raises SSLCertificateFileNotFound if an https URL is configured but
    a CA certificate file is unreadable.
    """
    log = up2dateLog.initLog()
    cfg = config.initUp2dateConfig()

    # Where do we keep the CA certificate for RHNS?
    # The servers we're talking to need to have their certs
    # signed by one of these CA.
    ca = cfg["sslCACert"]
    if isinstance(ca, basestring):
        ca = [ca]

    rhns_ca_certs = ca or ["/usr/share/rhn/RHNS-CA-CERT"]
    if cfg["enableProxy"]:
        proxyHost = config.getProxySetting()
    else:
        proxyHost = None

    # Explicit override takes precedence over the configured URL list.
    if not serverOverride:
        serverUrls = config.getServerlURL()
    else:
        serverUrls = serverOverride
    serverList = ServerList(serverUrls)

    proxyUser = None
    proxyPassword = None
    if cfg["enableProxyAuth"]:
        proxyUser = cfg["proxyUser"] or None
        proxyPassword = cfg["proxyPassword"] or None

    # Pick the user's language from the usual locale variables, first hit
    # wins; "en_US.UTF-8:de" style values are trimmed to "en_US".
    lang = None
    for env in 'LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG':
        if os.environ.has_key(env):
            if not os.environ[env]:
                # sometimes unset
                continue
            lang = os.environ[env].split(':')[0]
            lang = lang.split('.')[0]
            break

    s = RetryServer(serverList.server(),
                    refreshCallback=refreshCallback,
                    proxy=proxyHost,
                    username=proxyUser,
                    password=proxyPassword,
                    timeout=timeout)
    s.addServerList(serverList)

    s.add_header("X-Up2date-Version", up2dateUtils.version())

    if lang:
        s.setlang(lang)

    # require RHNS-CA-CERT file to be able to authenticate the SSL connections
    need_ca = [ True for i in s.serverList.serverList
                if urlparse.urlparse(i)[0] == 'https']
    if need_ca:
        for rhns_ca_cert in rhns_ca_certs:
            if not os.access(rhns_ca_cert, os.R_OK):
                msg = "%s: %s" % (_("ERROR: can not find RHNS CA file"),
                                  rhns_ca_cert)
                log.log_me("%s" % msg)
                raise up2dateErrors.SSLCertificateFileNotFound(msg)

            # force the validation of the SSL cert
            s.add_trusted_cert(rhns_ca_cert)

    clientCaps.loadLocalCaps()

    # send up the capabality info
    headerlist = clientCaps.caps.headerFormat()
    for (headerName, value) in headerlist:
        s.add_header(headerName, value)
    return s
def doCall(method, *args, **kwargs):
    """Invoke an XML-RPC method with retry and error translation.

    Retries up to cfg["networkRetries"] times on socket errors and on
    retryable protocol errors (sleeping 5 seconds between attempts), and
    translates transport-level exceptions into up2dateErrors types.
    Returns the method's result.
    """
    log = up2dateLog.initLog()
    log.log_debug("rpcServer: Calling XMLRPC %s" % method.__dict__['_Method__name'])
    cfg = config.initUp2dateConfig()
    ret = None

    attempt_count = 1
    # Retry budget from config; fall back to a single attempt on bad values.
    try:
        attempts = int(cfg["networkRetries"])
    except ValueError:
        attempts = 1
    if attempts <= 0:
        attempts = 1

    while 1:
        failure = 0
        ret = None

        try:
            ret = method(*args, **kwargs)
        except KeyboardInterrupt:
            raise up2dateErrors.CommunicationError(_(
                "Connection aborted by the user")), None, sys.exc_info()[2]
        # if we get a socket error, keep tryingx2
        except (socket.error, socket.sslerror), e:
            log.log_me("A socket error occurred: %s, attempt #%s" % (
                e, attempt_count))
            if attempt_count >= attempts:
                if len(e.args) > 1:
                    raise up2dateErrors.CommunicationError(e.args[1]), None, sys.exc_info()[2]
                else:
                    raise up2dateErrors.CommunicationError(e.args[0]), None, sys.exc_info()[2]
            else:
                failure = 1
        except httplib.IncompleteRead:
            print "httplib.IncompleteRead"
            raise up2dateErrors.CommunicationError("httplib.IncompleteRead"), None, sys.exc_info()[2]

        except urllib2.HTTPError, e:
            msg = "\nAn HTTP error occurred:\n"
            msg = msg + "URL: %s\n" % e.filename
            msg = msg + "Status Code: %s\n" % e.code
            msg = msg + "Error Message: %s\n" % e.msg
            log.log_me(msg)
            raise up2dateErrors.CommunicationError(msg), None, sys.exc_info()[2]

        except xmlrpclib.ProtocolError, e:
            log.log_me("A protocol error occurred: %s , attempt #%s," % (
                e.errmsg, attempt_count))
            if e.errcode == 404:
                log.log_me("Could not find URL, %s" % (e.url))
                log.log_me("Check server name and/or URL, then retry\n");

            # Server-specific error code is carried in the response headers.
            (errCode, errMsg) = rpclib.reportError(e.headers)
            reset = 0
            if abs(errCode) == 34:
                log.log_me("Auth token timeout occurred\n errmsg: %s" % errMsg)
                # this calls login, which in tern calls doCall (ie,
                # this function) but login should never get a 34, so
                # should be safe from recursion
                up2dateAuth.updateLoginInfo()

            # the servers are being throttle to pay users only, catch the
            # exceptions and display a nice error message
            if abs(errCode) == 51:
                log.log_me(_("Server has refused connection due to high load"))
                raise up2dateErrors.CommunicationError(e.errmsg), None, sys.exc_info()[2]

            # if we get a 404 from our server, thats pretty
            # fatal... no point in retrying over and over. Note that
            # errCode == 17 is specific to our servers, if the
            # serverURL is just pointing somewhere random they will
            # get a 0 for errcode and will raise a CommunicationError
            if abs(errCode) == 17:
                #in this case, the args are the package string, so lets try to
                # build a useful error message
                if type(args[0]) == type([]):
                    pkg = args[0]
                else:
                    pkg=args[1]

                if type(pkg) == type([]):
                    pkgName = "%s-%s-%s.%s" % (pkg[0], pkg[1], pkg[2], pkg[4])
                else:
                    pkgName = pkg
                msg = "File Not Found: %s\n%s" % (pkgName, errMsg)
                log.log_me(msg)
                raise up2dateErrors.FileNotFoundError(msg), None, sys.exc_info()[2]

            if not reset:
                if attempt_count >= attempts:
                    raise up2dateErrors.CommunicationError(e.errmsg), None, sys.exc_info()[2]
                else:
                    failure = 1

        except xmlrpclib.ResponseError:
            raise up2dateErrors.CommunicationError(
                "Broken response from the server."), None, sys.exc_info()[2]

        # NOTE(review): a legitimate falsy-but-not-None result would be
        # treated as a failure here only if ret were exactly None; a method
        # that genuinely returns None will be retried — confirm no caller
        # relies on a None result.
        if ret != None:
            break
        else:
            failure = 1

        if failure:
            # rest for five seconds before trying again
            time.sleep(5)
            attempt_count = attempt_count + 1

        if attempt_count > attempts:
            raise up2dateErrors.CommunicationError("The data returned from the server was incomplete")

    return ret
| gpl-2.0 |
benpetty/Code-Katas | katas/cafe_order_checker/test_cafe_order_checker.py | 1 | 1249 | import unittest
from .cafe_order_checker import is_first_come_first_served
class Test(unittest.TestCase):
    """Tests for is_first_come_first_served(register1, register2, served).

    The function must be truthy exactly when `served` is a valid
    interleaving of the two register order lists, with no orders missing,
    duplicated, or invented.
    """

    def test_both_registers_have_same_number_of_orders(self):
        result = is_first_come_first_served([1, 4, 5], [2, 3, 6], [1, 2, 3, 4, 5, 6])
        self.assertTrue(result)

    def test_registers_have_different_lengths(self):
        result = is_first_come_first_served([1, 5], [2, 3, 6], [1, 2, 6, 3, 5])
        self.assertFalse(result)

    def test_one_register_is_empty(self):
        result = is_first_come_first_served([], [2, 3, 6], [2, 3, 6])
        self.assertTrue(result)

    def test_served_orders_is_missing_orders(self):
        result = is_first_come_first_served([1, 5], [2, 3, 6], [1, 6, 3, 5])
        self.assertFalse(result)

    def test_served_orders_has_extra_orders(self):
        result = is_first_come_first_served([1, 5], [2, 3, 6], [1, 2, 3, 5, 6, 8])
        self.assertFalse(result)

    def test_one_register_has_extra_orders(self):
        result = is_first_come_first_served([1, 9], [7, 8], [1, 7, 8])
        self.assertFalse(result)

    def test_one_register_has_unserved_orders(self):
        result = is_first_come_first_served([55, 9], [7, 8], [1, 7, 8, 9])
        self.assertFalse(result)
| mit |
danieljaouen/ansible | lib/ansible/modules/network/f5/bigip_selfip.py | 8 | 23297 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_selfip
short_description: Manage Self-IPs on a BIG-IP system
description:
- Manage Self-IPs on a BIG-IP system.
version_added: 2.2
options:
address:
description:
- The IP addresses for the new self IP. This value is ignored upon update
as addresses themselves cannot be changed after they are created.
- This value is required when creating new self IPs.
allow_service:
description:
- Configure port lockdown for the Self IP. By default, the Self IP has a
"default deny" policy. This can be changed to allow TCP and UDP ports
as well as specific protocols. This list should contain C(protocol):C(port)
values.
name:
description:
- The self IP to create.
- If this parameter is not specified, then it will default to the value supplied
in the C(address) parameter.
required: True
netmask:
description:
- The netmask for the self IP. When creating a new Self IP, this value
is required.
state:
description:
- When C(present), guarantees that the Self-IP exists with the provided
attributes.
- When C(absent), removes the Self-IP from the system.
default: present
choices:
- absent
- present
traffic_group:
description:
- The traffic group for the Self IP addresses in an active-active,
redundant load balancer configuration. When creating a new Self IP, if
this value is not specified, the default of C(/Common/traffic-group-local-only)
will be used.
vlan:
description:
- The VLAN that the new self IPs will be on. When creating a new Self
IP, this value is required.
route_domain:
description:
- The route domain id of the system. When creating a new Self IP, if
this value is not specified, a default value of C(0) will be used.
- This value cannot be changed after it is set.
version_added: 2.3
partition:
description:
- Device partition to manage resources on. You can set different partitions
for Self IPs, but the address used may not match any other address used
by a Self IP. In that sense, Self IPs are not isolated by partitions as
other resources on a BIG-IP are.
default: Common
version_added: 2.5
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create Self IP
bigip_selfip:
address: 10.10.10.10
name: self1
netmask: 255.255.255.0
password: secret
server: lb.mydomain.com
user: admin
validate_certs: no
vlan: vlan1
delegate_to: localhost
- name: Create Self IP with a Route Domain
bigip_selfip:
server: lb.mydomain.com
user: admin
password: secret
validate_certs: no
name: self1
address: 10.10.10.10
netmask: 255.255.255.0
vlan: vlan1
route_domain: 10
allow_service: default
delegate_to: localhost
- name: Delete Self IP
bigip_selfip:
name: self1
password: secret
server: lb.mydomain.com
state: absent
user: admin
validate_certs: no
delegate_to: localhost
- name: Allow management web UI to be accessed on this Self IP
bigip_selfip:
name: self1
password: secret
server: lb.mydomain.com
state: absent
user: admin
validate_certs: no
allow_service:
- tcp:443
delegate_to: localhost
- name: Allow HTTPS and SSH access to this Self IP
bigip_selfip:
name: self1
password: secret
server: lb.mydomain.com
state: absent
user: admin
validate_certs: no
allow_service:
- tcp:443
- tcp:22
delegate_to: localhost
- name: Allow all services access to this Self IP
bigip_selfip:
name: self1
password: secret
server: lb.mydomain.com
state: absent
user: admin
validate_certs: no
allow_service:
- all
delegate_to: localhost
- name: Allow only GRE and IGMP protocols access to this Self IP
bigip_selfip:
name: self1
password: secret
server: lb.mydomain.com
state: absent
user: admin
validate_certs: no
allow_service:
- gre:0
- igmp:0
delegate_to: localhost
- name: Allow all TCP, but no other protocols access to this Self IP
bigip_selfip:
name: self1
password: secret
server: lb.mydomain.com
state: absent
user: admin
validate_certs: no
allow_service:
- tcp:0
delegate_to: localhost
'''
RETURN = r'''
allow_service:
description: Services that allowed via this Self IP
returned: changed
type: list
sample: ['igmp:0','tcp:22','udp:53']
address:
description: The address for the Self IP
returned: changed
type: string
sample: 192.0.2.10
name:
description: The name of the Self IP
returned: created
type: string
sample: self1
netmask:
description: The netmask of the Self IP
returned: changed
type: string
sample: 255.255.255.0
traffic_group:
description: The traffic group that the Self IP is a member of
returned: changed
type: string
sample: traffic-group-local-only
vlan:
description: The VLAN set on the Self IP
returned: changed
type: string
sample: vlan1
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.ipaddress import is_valid_ip
from library.module_utils.network.f5.ipaddress import ipv6_netmask_to_cidr
from library.module_utils.compat.ipaddress import ip_address
from library.module_utils.compat.ipaddress import ip_network
from library.module_utils.compat.ipaddress import ip_interface
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
from ansible.module_utils.network.f5.ipaddress import ipv6_netmask_to_cidr
from ansible.module_utils.compat.ipaddress import ip_address
from ansible.module_utils.compat.ipaddress import ip_network
from ansible.module_utils.compat.ipaddress import ip_interface
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
    """Shared parameter mapping between Ansible options and the F5 API."""

    # API attribute name -> module option name.
    api_map = {
        'trafficGroup': 'traffic_group',
        'allowService': 'allow_service'
    }

    # Options whose changes are applied on update.
    updatables = [
        'traffic_group', 'allow_service', 'vlan', 'netmask', 'address'
    ]

    # Options reported back to the user in the module result.
    returnables = [
        'traffic_group', 'allow_service', 'vlan', 'route_domain', 'netmask', 'address'
    ]

    # Attribute names sent to the REST API.
    api_attributes = [
        'trafficGroup', 'allowService', 'vlan', 'address'
    ]

    def to_return(self):
        """Collect the returnable attributes, dropping unset entries."""
        gathered = {key: getattr(self, key) for key in self.returnables}
        return self._filter_params(gathered)

    @property
    def vlan(self):
        # VLAN names are always reported fully-qualified with the partition.
        raw = self._values['vlan']
        if raw is None:
            return None
        return fq_name(self.partition, raw)
class ModuleParameters(Parameters):
    """Parameters as supplied by the user, validated and normalized."""

    @property
    def address(self):
        # Full address in BIG-IP "ip%route_domain/cidr" form.
        address = "{0}%{1}/{2}".format(
            self.ip, self.route_domain, self.netmask
        )
        return address

    @property
    def ip(self):
        # Bare IP portion of the user-supplied address; raises on junk.
        if self._values['address'] is None:
            return None
        if is_valid_ip(self._values['address']):
            return self._values['address']
        else:
            raise F5ModuleError(
                'The provided address is not a valid IP address'
            )

    @property
    def traffic_group(self):
        if self._values['traffic_group'] is None:
            return None
        return fq_name(self.partition, self._values['traffic_group'])

    @property
    def route_domain(self):
        if self._values['route_domain'] is None:
            return None
        result = int(self._values['route_domain'])
        return result

    @property
    def netmask(self):
        # Accepts a CIDR prefix length ("24"), a dotted IPv4 netmask
        # ("255.255.255.0"), or an IPv6 netmask; returns the prefix length.
        if self._values['netmask'] is None:
            return None
        result = -1
        try:
            result = int(self._values['netmask'])
            # NOTE(review): this range check is a no-op — an out-of-range
            # integer (e.g. 300) falls through and is returned as-is, since
            # only negative values hit the raise below. Confirm intended.
            if 0 < result < 256:
                pass
        except ValueError:
            # Not an integer: try interpreting it as a netmask address.
            if is_valid_ip(self._values['netmask']):
                addr = ip_address(u'{0}'.format(str(self._values['netmask'])))
                if addr.version == 4:
                    ip = ip_network(u'0.0.0.0/%s' % str(self._values['netmask']))
                    result = ip.prefixlen
                else:
                    result = ipv6_netmask_to_cidr(self._values['netmask'])
        if result < 0:
            raise F5ModuleError(
                'The provided netmask {0} is neither in IP or CIDR format'.format(result)
            )
        return result

    @property
    def allow_service(self):
        """Verifies that a supplied service string has correct format

        The string format for port lockdown is PROTOCOL:PORT. This method
        will verify that the provided input matches the allowed protocols
        and the port ranges before submitting to BIG-IP.

        The only allowed exceptions to this rule are the following values

          * all
          * default
          * none

        These are special cases that are handled differently in the API.
        "all" is set as a string, "default" is set as a one item list, and
        "none" removes the key entirely from the REST API.

        :raises F5ModuleError:
        """
        if self._values['allow_service'] is None:
            return None
        result = []
        allowed_protocols = [
            'eigrp', 'egp', 'gre', 'icmp', 'igmp', 'igp', 'ipip',
            'l2tp', 'ospf', 'pim', 'tcp', 'udp'
        ]
        special_protocols = [
            'all', 'none', 'default'
        ]
        for svc in self._values['allow_service']:
            # A special value anywhere in the list overrides everything else.
            if svc in special_protocols:
                result = [svc]
                break
            elif svc in allowed_protocols:
                # Bare protocol name expands to "<proto>:0" (all ports).
                full_service = '{0}:0'.format(svc)
                result.append(full_service)
            else:
                # Expect explicit "proto:port"; validate both halves.
                tmp = svc.split(':')
                if tmp[0] not in allowed_protocols:
                    raise F5ModuleError(
                        "The provided protocol '%s' is invalid" % (tmp[0])
                    )
                try:
                    port = int(tmp[1])
                except Exception:
                    raise F5ModuleError(
                        "The provided port '%s' is not a number" % (tmp[1])
                    )

                if port < 0 or port > 65535:
                    raise F5ModuleError(
                        "The provided port '{0}' must be between 0 and 65535".format(port)
                    )
                else:
                    result.append(svc)
        # De-duplicate and return in a stable order.
        result = sorted(list(set(result)))
        return result
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API."""

    @property
    def allow_service(self):
        if self._values['allow_service'] is None:
            return None
        # The API reports the bare string 'all'; normalize to a list so it
        # can be compared against the user-supplied form.
        if self._values['allow_service'] == 'all':
            self._values['allow_service'] = ['all']
        return sorted(self._values['allow_service'])

    @property
    def destination_ip(self):
        # Address with any "%<route-domain>" marker stripped, normalized
        # to "ip/prefixlen" form.
        if self._values['address'] is None:
            return None
        try:
            pattern = r'(?P<rd>%[0-9]+)'
            addr = re.sub(pattern, '', self._values['address'])
            ip = ip_interface(u'{0}'.format(addr))
            return ip.with_prefixlen
        except ValueError:
            raise F5ModuleError(
                "The provided destination is not an IP address"
            )

    @property
    def netmask(self):
        # Prefix length of the device-reported address.
        ip = ip_interface(self.destination_ip)
        return int(ip.network.prefixlen)

    @property
    def ip(self):
        # Bare IP of the device-reported address (no route domain, no mask).
        result = ip_interface(self.destination_ip)
        return str(result.ip)
class Changes(Parameters):
    """Base class for the parameter changes computed during a module run."""
    pass
class Difference(object):
    """Computes per-parameter differences between desired and current state.

    compare(param) returns the value to apply when the parameter differs,
    or None when no change is needed. Parameters with special comparison
    semantics are implemented as properties; everything else falls back to
    a plain inequality check.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        # Prefer a dedicated property for this parameter; fall back to
        # the generic comparison when none is defined.
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Generic diff: report the wanted value when it differs from, or
        # is absent on, the current state.
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            return attr1

    @property
    def address(self):
        # Addresses cannot change after creation; never report a diff.
        return None

    @property
    def allow_service(self):
        """Returns services formatted for consumption by f5-sdk update

        The BIG-IP endpoint for services takes different values depending on
        what you want the "allowed services" to be. It can be any of the
        following

            - a list containing "protocol:port" values
            - the string "all"
            - a null value, or None

        This is a convenience function to massage the values the user has
        supplied so that they are formatted in such a way that BIG-IP will
        accept them and apply the specified policy.
        """
        if self.want.allow_service is None:
            return None
        result = self.want.allow_service
        if result[0] == 'none' and self.have.allow_service is None:
            return None
        elif self.have.allow_service is None:
            return result
        elif result[0] == 'all' and self.have.allow_service[0] != 'all':
            return ['all']
        elif result[0] == 'none':
            return []
        elif set(self.want.allow_service) != set(self.have.allow_service):
            return result

    @property
    def netmask(self):
        # Netmask changes are expressed by re-sending the full address in
        # "ip[%rd]/prefix" form, keeping the existing IP.
        if self.want.netmask is None:
            return None
        ip = self.have.ip
        if is_valid_ip(ip):
            if self.want.route_domain is not None:
                want = "{0}%{1}/{2}".format(ip, self.want.route_domain, self.want.netmask)
                have = "{0}%{1}/{2}".format(ip, self.want.route_domain, self.have.netmask)
            elif self.have.route_domain is not None:
                want = "{0}%{1}/{2}".format(ip, self.have.route_domain, self.want.netmask)
                have = "{0}%{1}/{2}".format(ip, self.have.route_domain, self.have.netmask)
            else:
                want = "{0}/{1}".format(ip, self.want.netmask)
                have = "{0}/{1}".format(ip, self.have.netmask)
            if want != have:
                return want
        else:
            raise F5ModuleError(
                'The provided address/netmask value "{0}" was invalid'.format(self.have.ip)
            )

    @property
    def traffic_group(self):
        if self.want.traffic_group != self.have.traffic_group:
            return self.want.traffic_group
class UsableChanges(Changes):
    """Changes massaged into the exact form the BIG-IP REST API accepts."""

    @property
    def allow_service(self):
        services = self._values['allow_service']
        if services is None:
            return None
        # The API expects the bare string 'all' rather than a one-item list.
        return 'all' if services == ['all'] else sorted(services)
class ReportableChanges(Changes):
    """Changes formatted for reporting back to the Ansible user."""
    pass
class ModuleManager(object):
    """Drives the module: reads current state, diffs it against the desired
    state, and creates/updates/removes the Self IP via the F5 SDK client.
    """

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.have = None
        self.want = ModuleParameters(params=self.module.params)
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # On create: every supplied option counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # On update: diff wanted vs. current state; returns True if any
        # parameter actually changed.
        diff = Difference(self.want, self.have)
        updatables = ApiParameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # Netmask changes are applied through the 'address' field.
                if k in ['netmask']:
                    changed['address'] = change
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def exec_module(self):
        """Entry point: apply the desired state and return the result dict."""
        changed = False
        result = dict()
        state = self.want.state

        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        return result

    def present(self):
        if self.exists():
            changed = self.update()
        else:
            changed = self.create()
        return changed

    def absent(self):
        changed = False
        if self.exists():
            changed = self.remove()
        return changed

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def read_current_from_device(self):
        # Load the existing Self IP and wrap its attributes for comparison.
        resource = self.client.api.tm.net.selfips.selfip.load(
            name=self.want.name,
            partition=self.want.partition
        )
        result = resource.attrs
        params = ApiParameters(params=result)
        return params

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True

    def update_on_device(self):
        params = self.changes.api_params()
        resource = self.client.api.tm.net.selfips.selfip.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.modify(**params)

    def read_partition_default_route_domain_from_device(self):
        resource = self.client.api.tm.auth.partitions.partition.load(name=self.want.partition)
        return int(resource.defaultRouteDomain)

    def create(self):
        # Validate the minimum required options for creation.
        if self.want.address is None or self.want.netmask is None:
            raise F5ModuleError(
                'An address and a netmask must be specified'
            )
        if self.want.vlan is None:
            raise F5ModuleError(
                'A VLAN name must be specified'
            )
        if self.want.route_domain is None:
            rd = self.read_partition_default_route_domain_from_device()
            self.want.update({'route_domain': rd})

        if self.want.traffic_group is None:
            self.want.update({'traffic_group': '/Common/traffic-group-local-only'})
        # NOTE(review): route_domain was already defaulted above, so this
        # second default-to-0 branch appears unreachable — confirm.
        if self.want.route_domain is None:
            self.want.update({'route_domain': 0})
        if self.want.allow_service:
            if 'all' in self.want.allow_service:
                self.want.update(dict(allow_service=['all']))
            elif 'none' in self.want.allow_service:
                self.want.update(dict(allow_service=[]))
            elif 'default' in self.want.allow_service:
                self.want.update(dict(allow_service=['default']))
        self._set_changed_options()
        # NOTE(review): update()/remove() consult self.module.check_mode,
        # but this reads self.want.check_mode — verify this is intended.
        if self.want.check_mode:
            return True
        self.create_on_device()
        if self.exists():
            return True
        else:
            raise F5ModuleError("Failed to create the Self IP")

    def create_on_device(self):
        params = self.changes.api_params()
        self.client.api.tm.net.selfips.selfip.create(
            name=self.want.name,
            partition=self.want.partition,
            **params
        )

    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Deletion is verified by re-checking existence.
        if self.exists():
            raise F5ModuleError("Failed to delete the Self IP")
        return True

    def remove_from_device(self):
        resource = self.client.api.tm.net.selfips.selfip.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.delete()

    def exists(self):
        result = self.client.api.tm.net.selfips.selfip.exists(
            name=self.want.name,
            partition=self.want.partition
        )
        return result
class ArgumentSpec(object):
    """Builds the AnsibleModule argument specification for this module."""

    def __init__(self):
        self.supports_check_mode = True
        argument_spec = {
            'address': dict(),
            'allow_service': dict(type='list'),
            'name': dict(required=True),
            'netmask': dict(),
            'traffic_group': dict(),
            'vlan': dict(),
            'route_domain': dict(type='int'),
            'state': dict(
                default='present',
                choices=['present', 'absent']
            ),
            'partition': dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
        }
        # Start from the shared F5 connection arguments and layer the
        # module-specific options on top.
        self.argument_spec = dict(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()

    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    if not HAS_F5SDK:
        module.fail_json(msg="The python f5-sdk module is required")

    # Bug fix: 'client' used to be referenced in the except handler even
    # when F5Client(...) itself raised, producing a NameError on the
    # unbound name that masked the real failure. Initialize it first and
    # only clean up tokens when a client was actually created.
    client = None
    try:
        client = F5Client(**module.params)
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        module.exit_json(**results)
    except F5ModuleError as ex:
        if client is not None:
            cleanup_tokens(client)
        module.fail_json(msg=str(ex))


if __name__ == '__main__':
    main()
| gpl-3.0 |
bigswitch/nova | nova/tests/fixtures.py | 1 | 23692 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures for Nova tests."""
from __future__ import absolute_import
import logging as std_logging
import os
import warnings
import fixtures
import mock
from oslo_config import cfg
from oslo_db.sqlalchemy import enginefacade
from oslo_messaging import conffixture as messaging_conffixture
import six
from nova.compute import rpcapi as compute_rpcapi
from nova import context
from nova.db import migration
from nova.db.sqlalchemy import api as session
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import service as service_obj
from nova import rpc
from nova import service
from nova.tests.functional.api import client
# Environment-variable values treated as "true" by the OS_* test switches.
_TRUE_VALUES = ('True', 'true', '1', 'yes')

CONF = cfg.CONF

# Cached SQL dumps of the freshly migrated schemas, keyed by database type,
# so later tests can restore the schema without re-running migrations.
DB_SCHEMA = {'main': "", 'api': ""}

# Guards one-time oslo.db enginefacade configuration (see Database fixture).
SESSION_CONFIGURED = False
class ServiceFixture(fixtures.Fixture):
    """Run a nova service for the duration of a test."""

    def __init__(self, name, host=None, **kwargs):
        # If not otherwise specified, the host defaults to the name of
        # the service. Some things like aggregates care that this is
        # stable.
        kwargs.setdefault('host', host or name)
        kwargs.setdefault('binary', 'nova-%s' % name)
        self.kwargs = kwargs

    def setUp(self):
        super(ServiceFixture, self).setUp()
        self.service = service.Service.create(**self.kwargs)
        self.service.start()
        self.addCleanup(self.service.kill)
class NullHandler(std_logging.Handler):
    """Logging handler that formats records but never emits them.

    Used in conjunction with
    log_fixture.get_logging_handle_error_fixture to detect formatting
    errors in debug level logs without saving the logs.
    """

    def handle(self, record):
        # Formatting is the whole point: a broken format string raises here.
        self.format(record)

    def emit(self, record):
        # Deliberately discard the record.
        pass

    def createLock(self):
        # No output is ever produced, so no lock is required.
        self.lock = None
class StandardLogging(fixtures.Fixture):
    """Setup Logging redirection for tests.

    There are a number of things we want to handle with logging in tests:

    * Redirect the logging to somewhere that we can test or dump it later.

    * Ensure that as many DEBUG messages as possible are actually
      executed, to ensure they are actually syntactically valid (they
      often have not been).

    * Ensure that we create useful output for tests that doesn't
      overwhelm the testing system (which means we can't capture the
      100 MB of debug logging on every run).

    To do this we create a logger fixture at the root level, which
    defaults to INFO and create a Null Logger at DEBUG which lets
    us execute log messages at DEBUG but not keep the output.

    To support local debugging OS_DEBUG=True can be set in the
    environment, which will print out the full debug logging.

    There are also a set of overrides for particularly verbose
    modules to be even less than INFO.
    """

    def setUp(self):
        super(StandardLogging, self).setUp()

        # set root logger to debug
        root = std_logging.getLogger()
        root.setLevel(std_logging.DEBUG)

        # supports collecting debug level for local runs
        if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
            level = std_logging.DEBUG
        else:
            level = std_logging.INFO

        # Collect logs
        fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s'
        self.logger = self.useFixture(
            fixtures.FakeLogger(format=fs, level=None))
        # TODO(sdague): why can't we send level through the fake
        # logger? Tests prove that it breaks, but it's worth getting
        # to the bottom of.
        # NOTE(review): assumes FakeLogger installed its handler at index 0
        # on the root logger -- confirm against the fixtures library.
        root.handlers[0].setLevel(level)

        if level > std_logging.DEBUG:
            # Just attempt to format debug level logs, but don't save them
            handler = NullHandler()
            self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False))
            handler.setLevel(std_logging.DEBUG)

        # Don't log every single DB migration step
        std_logging.getLogger(
            'migrate.versioning.api').setLevel(std_logging.WARNING)
class OutputStreamCapture(fixtures.Fixture):
    """Capture output streams during tests.

    Errant printing to stderr / stdout during a test run is captured
    here so the streams can be inspected after the run instead of
    interleaving with the test output.  Capture of each stream is
    opt-in via the OS_STDOUT_CAPTURE / OS_STDERR_CAPTURE environment
    variables.
    """

    def setUp(self):
        super(OutputStreamCapture, self).setUp()
        env = os.environ
        if env.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
            self.out = self.useFixture(fixtures.StringStream('stdout'))
            self.useFixture(
                fixtures.MonkeyPatch('sys.stdout', self.out.stream))
        if env.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
            self.err = self.useFixture(fixtures.StringStream('stderr'))
            self.useFixture(
                fixtures.MonkeyPatch('sys.stderr', self.err.stream))

    @property
    def stderr(self):
        # Everything written to the captured stderr stream so far.
        return self.err._details["stderr"].as_text()

    @property
    def stdout(self):
        # Everything written to the captured stdout stream so far.
        return self.out._details["stdout"].as_text()
class Timeout(fixtures.Fixture):
    """Setup per test timeouts.

    In order to avoid test deadlocks we support setting up a test
    timeout parameter read from the environment. In almost all
    cases where the timeout is reached this means a deadlock.

    A class level TIMEOUT_SCALING_FACTOR also exists, which allows
    extremely long tests to specify they need more time.
    """

    def __init__(self, timeout, scaling=1):
        super(Timeout, self).__init__()
        try:
            seconds = int(timeout)
        except ValueError:
            # An unparsable timeout value disables the timeout entirely.
            seconds = 0
        if scaling >= 1:
            self.test_timeout = seconds * scaling
        else:
            raise ValueError('scaling value must be >= 1')

    def setUp(self):
        super(Timeout, self).setUp()
        if self.test_timeout > 0:
            self.useFixture(fixtures.Timeout(self.test_timeout, gentle=True))
class DatabasePoisonFixture(fixtures.Fixture):
    """Fail loudly when a non-database test configures oslo.db state."""

    def setUp(self):
        super(DatabasePoisonFixture, self).setUp()
        target = ('oslo_db.sqlalchemy.enginefacade._TransactionFactory.'
                  '_create_session')
        self.useFixture(fixtures.MonkeyPatch(target, self._poison_configure))

    def _poison_configure(self, *a, **k):
        # Warn rather than raise so the offending test is visible but the
        # run continues; hand back a mock session.
        warnings.warn('This test uses methods that set internal oslo_db '
                      'state, but it does not claim to use the database. '
                      'This will conflict with the setup of tests that '
                      'do use the database and cause failures later.')
        return mock.MagicMock()
class Database(fixtures.Fixture):
    """Provide a clean 'main' or 'api' database for a test.

    Migrations run once per process; the resulting schema is dumped into
    the module-level DB_SCHEMA cache so later tests restore it cheaply.
    """

    def __init__(self, database='main', connection=None):
        """Create a database fixture.

        :param database: The type of database, 'main' or 'api'
        :param connection: The connection string to use
        """
        super(Database, self).__init__()
        # NOTE(pkholkin): oslo_db.enginefacade is configured in tests the same
        # way as it is done for any other service that uses db
        global SESSION_CONFIGURED
        if not SESSION_CONFIGURED:
            session.configure(CONF)
            SESSION_CONFIGURED = True
        self.database = database
        if database == 'main':
            if connection is not None:
                # Dedicated context manager when an explicit connection
                # string was supplied.
                ctxt_mgr = session.create_context_manager(
                    connection=connection)
                facade = ctxt_mgr.get_legacy_facade()
                self.get_engine = facade.get_engine
            else:
                self.get_engine = session.get_engine
        elif database == 'api':
            self.get_engine = session.get_api_engine

    def _cache_schema(self):
        # Run migrations only the first time, then dump the schema as SQL
        # text for fast restores. NOTE(review): iterdump()/executescript()
        # are sqlite3 connection APIs -- this assumes a sqlite backend.
        global DB_SCHEMA
        if not DB_SCHEMA[self.database]:
            engine = self.get_engine()
            conn = engine.connect()
            migration.db_sync(database=self.database)
            DB_SCHEMA[self.database] = "".join(line for line
                                               in conn.connection.iterdump())
            engine.dispose()

    def cleanup(self):
        engine = self.get_engine()
        engine.dispose()

    def reset(self):
        # Recreate the schema from the cached dump on a fresh connection.
        self._cache_schema()
        engine = self.get_engine()
        engine.dispose()
        conn = engine.connect()
        conn.connection.executescript(DB_SCHEMA[self.database])

    def setUp(self):
        super(Database, self).setUp()
        self.reset()
        self.addCleanup(self.cleanup)
class DatabaseAtVersion(fixtures.Fixture):
    """Migrate a database to a specific schema version for a test."""

    def __init__(self, version, database='main'):
        """Create a database fixture.

        :param version: Max version to sync to (or None for current)
        :param database: The type of database, 'main' or 'api'
        """
        super(DatabaseAtVersion, self).__init__()
        self.version = version
        self.database = database
        if database == 'main':
            self.get_engine = session.get_engine
        elif database == 'api':
            self.get_engine = session.get_api_engine

    def cleanup(self):
        self.get_engine().dispose()

    def reset(self):
        # Drop pooled connections, reconnect, then sync the schema up to
        # the requested version.
        engine = self.get_engine()
        engine.dispose()
        engine.connect()
        migration.db_sync(version=self.version, database=self.database)

    def setUp(self):
        super(DatabaseAtVersion, self).setUp()
        self.reset()
        self.addCleanup(self.cleanup)
class DefaultFlavorsFixture(fixtures.Fixture):
    """Create the classic m1.* default flavors in the database."""

    def setUp(self):
        super(DefaultFlavorsFixture, self).setUp()
        ctxt = context.get_admin_context()
        extra = {'rxtx_factor': 1.0, 'disabled': False, 'is_public': True,
                 'ephemeral_gb': 0, 'swap': 0}
        # (memory_mb, vcpus, root_gb, flavorid, name)
        flavor_specs = [
            (512, 1, 1, '1', 'm1.tiny'),
            (2048, 1, 20, '2', 'm1.small'),
            (4096, 2, 40, '3', 'm1.medium'),
            (8192, 4, 80, '4', 'm1.large'),
            (16384, 8, 160, '5', 'm1.xlarge'),
        ]
        for memory_mb, vcpus, root_gb, flavorid, name in flavor_specs:
            objects.Flavor(context=ctxt, memory_mb=memory_mb, vcpus=vcpus,
                           root_gb=root_gb, flavorid=flavorid, name=name,
                           **extra).create()
class RPCFixture(fixtures.Fixture):
    """Route RPC over the in-memory 'fake' messaging driver for tests."""

    def __init__(self, *exmods):
        super(RPCFixture, self).__init__()
        self.exmods = list(exmods)

    def setUp(self):
        super(RPCFixture, self).setUp()
        self.addCleanup(rpc.cleanup)
        rpc.add_extra_exmods(*self.exmods)
        self.addCleanup(rpc.clear_extra_exmods)
        # Point oslo.messaging at the in-memory fake transport.
        self.messaging_conf = messaging_conffixture.ConfFixture(CONF)
        self.messaging_conf.transport_driver = 'fake'
        self.useFixture(self.messaging_conf)
        rpc.init(CONF)
class WarningsFixture(fixtures.Fixture):
    """Filters out warnings during test runs."""

    def setUp(self):
        super(WarningsFixture, self).setUp()
        self.addCleanup(warnings.resetwarnings)
        # NOTE(sdague): Make deprecation warnings only happen once. Otherwise
        # this gets kind of crazy given the way that upstream python libs use
        # this.
        warnings.simplefilter("once", DeprecationWarning)
        warnings.filterwarnings('ignore',
                                message='With-statements now directly support'
                                        ' multiple context managers')
class ConfPatcher(fixtures.Fixture):
    """Fixture to patch and restore global CONF.

    Every override applied while the fixture is active is reset again
    during its teardown.
    """

    def __init__(self, **kwargs):
        """Constructor

        :params group: if specified all config options apply to that group.

        :params **kwargs: the rest of the kwargs are processed as a
        set of key/value pairs to be set as configuration override.
        """
        super(ConfPatcher, self).__init__()
        self.group = kwargs.pop('group', None)
        self.args = kwargs

    def setUp(self):
        super(ConfPatcher, self).setUp()
        group = self.group
        for opt, value in six.iteritems(self.args):
            self.addCleanup(CONF.clear_override, opt, group)
            CONF.set_override(opt, value, group)
class OSAPIFixture(fixtures.Fixture):
    """Create an OS API server as a fixture.

    This spawns an OS API server as a fixture in a new greenthread in
    the current test. The fixture has a .api parameter which is a
    simple rest client that can communicate with it.

    This fixture is extremely useful for testing REST responses
    through the WSGI stack easily in functional tests.

    Usage:

        api = self.useFixture(fixtures.OSAPIFixture()).api
        resp = api.api_request('/someurl')
        self.assertEqual(200, resp.status_code)
        resp = api.api_request('/otherurl', method='POST', body='{foo}')

    The resp is a requests library response. Common attributes that
    you'll want to use are:

    - resp.status_code - integer HTTP status code returned by the request
    - resp.content - the body of the response
    - resp.headers - dictionary of HTTP headers returned
    """

    def __init__(self, api_version='v2',
                 project_id='6f70656e737461636b20342065766572'):
        """Constructor

        :param api_version: the API version that we're interested in
        using. Currently this expects 'v2' or 'v2.1' as possible
        options.

        :param project_id: the project id to use on the API.
        """
        super(OSAPIFixture, self).__init__()
        self.api_version = api_version
        self.project_id = project_id

    def setUp(self):
        super(OSAPIFixture, self).setUp()
        # in order to run these in tests we need to bind only to local
        # host, and dynamically allocate ports
        conf_overrides = {
            'osapi_compute_listen': '127.0.0.1',
            'metadata_listen': '127.0.0.1',
            'osapi_compute_listen_port': 0,
            'metadata_listen_port': 0,
            'verbose': True,
            'debug': True
        }
        self.useFixture(ConfPatcher(**conf_overrides))
        self.osapi = service.WSGIService("osapi_compute")
        self.osapi.start()
        self.addCleanup(self.osapi.stop)
        # Port 0 above lets the OS pick a free port; read back the real
        # host/port the service bound to when building the auth URL.
        self.auth_url = 'http://%(host)s:%(port)s/%(api_version)s' % ({
            'host': self.osapi.host, 'port': self.osapi.port,
            'api_version': self.api_version})
        # Plain and admin REST clients pointed at the spawned server.
        self.api = client.TestOpenStackClient('fake', 'fake', self.auth_url,
                                              self.project_id)
        self.admin_api = client.TestOpenStackClient(
            'admin', 'admin', self.auth_url, self.project_id)
class PoisonFunctions(fixtures.Fixture):
    """Poison functions so they explode if we touch them.

    When running under a non full stack test harness there are parts
    of the code that you don't want to go anywhere near. These include
    things like code that spins up extra threads, which just
    introduces races.
    """

    def setUp(self):
        super(PoisonFunctions, self).setUp()

        # The nova libvirt driver starts an event thread which only
        # causes trouble in tests. Make sure that if tests don't
        # properly patch it the test explodes.
        def evloop(*args, **kwargs):
            import sys
            warnings.warn("Forgot to disable libvirt event thread")
            sys.exit(1)

        # explicit import because MonkeyPatch doesn't magic import
        # correctly if we are patching a method on a class in a
        # module.
        import nova.virt.libvirt.host  # noqa
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.host.Host._init_events',
            evloop))
class IndirectionAPIFixture(fixtures.Fixture):
    """Patch and restore the global NovaObject indirection api."""

    def __init__(self, indirection_api):
        """Constructor

        :param indirection_api: the indirection API to be used for tests.
        """
        super(IndirectionAPIFixture, self).__init__()
        self.indirection_api = indirection_api

    def setUp(self):
        super(IndirectionAPIFixture, self).setUp()
        # Remember the real API so cleanup can restore it afterwards.
        self.orig_indirection_api = obj_base.NovaObject.indirection_api
        obj_base.NovaObject.indirection_api = self.indirection_api
        self.addCleanup(self.cleanup)

    def cleanup(self):
        obj_base.NovaObject.indirection_api = self.orig_indirection_api
class _FakeGreenThread(object):
def __init__(self, func, *args, **kwargs):
self._result = func(*args, **kwargs)
def cancel(self, *args, **kwargs):
# This method doesn't make sense for a synchronous call, it's just
# defined to satisfy the interface.
pass
def kill(self, *args, **kwargs):
# This method doesn't make sense for a synchronous call, it's just
# defined to satisfy the interface.
pass
def link(self, func, *args, **kwargs):
func(self, *args, **kwargs)
def unlink(self, func, *args, **kwargs):
# This method doesn't make sense for a synchronous call, it's just
# defined to satisfy the interface.
pass
def wait(self):
return self._result
class SpawnIsSynchronousFixture(fixtures.Fixture):
    """Patch and restore the spawn_n utility method to be synchronous"""

    def setUp(self):
        super(SpawnIsSynchronousFixture, self).setUp()
        # Both spawn helpers get the same synchronous stand-in.
        for target in ('nova.utils.spawn_n', 'nova.utils.spawn'):
            self.useFixture(
                fixtures.MonkeyPatch(target, _FakeGreenThread))
class BannedDBSchemaOperations(fixtures.Fixture):
    """Ban some operations for migrations.

    Patches ``drop``/``alter`` on the named sqlalchemy resources so that
    a migration touching them raises ``exception.DBNotAllowed``.

    Fix: the patched lambdas previously closed over the loop variable
    ``thing``, so with more than one banned resource every raised error
    reported the *last* resource's name (classic late-binding closure
    bug).  The name and operation are now bound eagerly per patch via
    ``_make_trap``.
    """

    def __init__(self, banned_resources=None):
        super(BannedDBSchemaOperations, self).__init__()
        self._banned_resources = banned_resources or []

    @staticmethod
    def _explode(resource, op):
        # Raised in place of performing the banned schema operation.
        raise exception.DBNotAllowed(
            'Operation %s.%s() is not allowed in a database migration' % (
                resource, op))

    def _make_trap(self, resource, op):
        # Bind resource/op eagerly so each patched callable reports the
        # correct resource instead of the loop's final value.
        def trap(*a, **k):
            self._explode(resource, op)
        return trap

    def setUp(self):
        super(BannedDBSchemaOperations, self).setUp()
        for thing in self._banned_resources:
            self.useFixture(fixtures.MonkeyPatch(
                'sqlalchemy.%s.drop' % thing,
                self._make_trap(thing, 'drop')))
            self.useFixture(fixtures.MonkeyPatch(
                'sqlalchemy.%s.alter' % thing,
                self._make_trap(thing, 'alter')))
class StableObjectJsonFixture(fixtures.Fixture):
    """Fixture that makes sure we get stable JSON object representations.

    Since objects contain things like set(), which can't be converted to
    JSON, we have some situations where the representation isn't fully
    deterministic. This doesn't matter at all at runtime, but does to
    unit tests that try to assert things at a low level.

    This fixture mocks the obj_to_primitive() call and makes sure to
    sort the list of changed fields (which came from a set) before
    returning it to the caller.
    """

    def __init__(self):
        self._original_otp = obj_base.NovaObject.obj_to_primitive

    def setUp(self):
        super(StableObjectJsonFixture, self).setUp()

        def _sorted_otp(obj, *args, **kwargs):
            # Delegate to the real implementation, then canonicalize the
            # set-derived changes list so the output is deterministic.
            primitive = self._original_otp(obj, *args, **kwargs)
            if 'nova_object.changes' in primitive:
                primitive['nova_object.changes'].sort()
            return primitive

        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.base.NovaObject.obj_to_primitive', _sorted_otp))
class EngineFacadeFixture(fixtures.Fixture):
    """Isolate the global EngineFacade during tests.

    Because many elements of EngineFacade are based on globals, once
    an engine facade has been initialized, all future code goes
    through it. This means that the initialization of sqlite
    databases in our Database fixture will drive all connections to
    sqlite. While that's fine in a production environment, during
    testing this means we can't test against multiple backends in the
    same test run.

    oslo.db does not yet support a reset mechanism here. This builds a
    custom in tree engine facade fixture to handle this. Eventually
    this will be added to oslo.db and this can be removed. Tracked by
    https://bugs.launchpad.net/oslo.db/+bug/1548960
    """

    def __init__(self, ctx_manager, engine, sessionmaker):
        super(EngineFacadeFixture, self).__init__()
        self._ctx_manager = ctx_manager
        self._engine = engine
        self._sessionmaker = sessionmaker

    def setUp(self):
        super(EngineFacadeFixture, self).setUp()
        # Swap in a test-only transaction factory, remembering the real
        # one so cleanup can restore it.
        self._existing_factory = self._ctx_manager._root_factory
        test_factory = enginefacade._TestTransactionFactory(
            self._engine, self._sessionmaker, apply_global=False,
            synchronous_reader=True)
        self._ctx_manager._root_factory = test_factory
        self.addCleanup(self.cleanup)

    def cleanup(self):
        self._ctx_manager._root_factory = self._existing_factory
class ForbidNewLegacyNotificationFixture(fixtures.Fixture):
    """Make sure the test fails if new legacy notification is added"""

    def __init__(self):
        super(ForbidNewLegacyNotificationFixture, self).__init__()
        self.notifier = rpc.LegacyValidatingNotifier

    def setUp(self):
        super(ForbidNewLegacyNotificationFixture, self).setUp()
        notifier = self.notifier
        notifier.fatal = True

        # allow the special test value used in
        # nova.tests.unit.test_notifications.NotificationsTestCase
        notifier.allowed_legacy_notification_event_types.append(
            '_decorated_function')

        self.addCleanup(self.cleanup)

    def cleanup(self):
        notifier = self.notifier
        notifier.fatal = False
        notifier.allowed_legacy_notification_event_types.remove(
            '_decorated_function')
class AllServicesCurrent(fixtures.Fixture):
    """Pretend every service runs the current service version."""

    def setUp(self):
        super(AllServicesCurrent, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'nova.objects.Service.get_minimum_version_multi',
            self._fake_minimum))
        # Force the compute RPC API to re-evaluate its pinned version.
        compute_rpcapi.LAST_VERSION = None

    def _fake_minimum(self, *args, **kwargs):
        return service_obj.SERVICE_VERSION
| apache-2.0 |
dixon13/CBA_A3 | tools/make.py | 5 | 55615 | #!/usr/bin/env python3
# vim: set fileencoding=utf-8 :
# make.py
# An Arma 3 addon build system
###############################################################################
# The MIT License (MIT)
# Copyright (c) 2013-2014 Ryan Schultz
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###############################################################################
__version__ = "0.7"
import sys
if sys.version_info[0] == 2:
print("Python 3 is required.")
sys.exit(1)
import os
import os.path
import shutil
import platform
import glob
import subprocess
import hashlib
import configparser
import json
import traceback
import time
import timeit
import re
from tempfile import mkstemp
if sys.platform == "win32":
import winreg
######## GLOBALS #########
project = "@CBA_A3"  # release mod folder name
project_version = "2.0.0"
# Paths and key material below are populated at runtime by the build:
arma3tools_path = ""
work_drive = ""
module_root = ""
make_root = ""
release_dir = ""
module_root_parent = ""
optionals_root = ""
key_name = "cba"  # basename used for the .biprivatekey / .bisign files
key = ""
dssignfile = ""
prefix = "cba"  # source folder under the work drive
pbo_name_prefix = "cba_"  # prepended to every built PBO file name
signature_blacklist = []  # PBO file names that must not be signed
# Files copied verbatim into the release alongside the addons.
importantFiles = ["mod.cpp", "meta.cpp", "README.md", "LICENSE.md", "logo_cba_ca.paa"]
versionFiles = ["mod.cpp"]
###############################################################################
# http://akiscode.com/articles/sha-1directoryhash.shtml
# Copyright (c) 2009 Stephen Akiki
# MIT License (Means you can do whatever you want with this)
# See http://www.opensource.org/licenses/mit-license.php
# Error Codes:
# -1 -> Directory does not exist
# -2 -> General error (see stack traceback)
def get_directory_hash(directory):
    """Return a SHA-1 hex digest over the contents of *directory*.

    For every file (in os.walk order) the SHA-1 digest of each 4 KiB
    chunk is folded into the running hash -- matching the original
    akiscode.com algorithm so hashes stay comparable.

    Fixes: the per-file ``except`` previously called ``f.close()`` when
    ``open()`` itself failed, referencing an unbound (or stale) handle;
    files are now opened via a context manager and unreadable files are
    simply skipped.  The dead ``retVal`` variable was removed.

    :param directory: path of the directory tree to hash
    :returns: hex digest string, ``-1`` if *directory* does not exist,
        or ``-2`` on an unexpected error (traceback is printed).
    """
    directory_hash = hashlib.sha1()
    if not os.path.exists(directory):
        return -1

    try:
        for root, dirs, files in os.walk(directory):
            for names in files:
                path = os.path.join(root, names)
                try:
                    f = open(path, 'rb')
                except OSError:
                    # You can't open the file for some reason -- skip it.
                    continue
                with f:
                    while 1:
                        # Read file in as little chunks
                        buf = f.read(4096)
                        if not buf:
                            break
                        directory_hash.update(hashlib.sha1(buf).digest())
    except Exception:
        # Print the stack traceback
        traceback.print_exc()
        return -2

    return directory_hash.hexdigest()
def Fract_Sec(s):
    """Split *s* seconds into a (days, hours, minutes, seconds) tuple.

    Days, hours and minutes are ints; the seconds component keeps its
    fractional part.
    """
    total_days = float(s) / (60 * 60 * 24)
    d = int(total_days)
    total_hours = (total_days - d) * 24
    h = int(total_hours)
    total_minutes = (total_hours - h) * 60
    m = int(total_minutes)
    sec = (total_minutes - m) * 60
    return d, h, m, sec
#endef Fract_Sec
# Copyright (c) André Burgaud
# http://www.burgaud.com/bring-colors-to-the-windows-console-with-python/
if sys.platform == "win32":
    from ctypes import windll, Structure, c_short, c_ushort, byref

    # ctypes aliases matching the Win32 typedefs used in the structs below.
    SHORT = c_short
    WORD = c_ushort

    class COORD(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("X", SHORT),
            ("Y", SHORT)]

    class SMALL_RECT(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("Left", SHORT),
            ("Top", SHORT),
            ("Right", SHORT),
            ("Bottom", SHORT)]

    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", WORD),
            ("srWindow", SMALL_RECT),
            ("dwMaximumWindowSize", COORD)]

    # winbase.h -- standard handle identifiers for GetStdHandle().
    STD_INPUT_HANDLE = -10
    STD_OUTPUT_HANDLE = -11
    STD_ERROR_HANDLE = -12

    # wincon.h -- console character attribute flags (low nibble foreground,
    # next nibble background).
    FOREGROUND_BLACK = 0x0000
    FOREGROUND_BLUE = 0x0001
    FOREGROUND_GREEN = 0x0002
    FOREGROUND_CYAN = 0x0003
    FOREGROUND_RED = 0x0004
    FOREGROUND_MAGENTA = 0x0005
    FOREGROUND_YELLOW = 0x0006
    FOREGROUND_GREY = 0x0007
    FOREGROUND_INTENSITY = 0x0008 # foreground color is intensified.

    BACKGROUND_BLACK = 0x0000
    BACKGROUND_BLUE = 0x0010
    BACKGROUND_GREEN = 0x0020
    BACKGROUND_CYAN = 0x0030
    BACKGROUND_RED = 0x0040
    BACKGROUND_MAGENTA = 0x0050
    BACKGROUND_YELLOW = 0x0060
    BACKGROUND_GREY = 0x0070
    BACKGROUND_INTENSITY = 0x0080 # background color is intensified.

    # Cache the stdout handle and the two kernel32 entry points used by
    # get_text_attr()/set_text_attr() below.
    stdout_handle = windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
    SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
    GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo

    def get_text_attr():
        """Returns the character attributes (colors) of the console screen
        buffer."""
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        GetConsoleScreenBufferInfo(stdout_handle, byref(csbi))
        return csbi.wAttributes

    def set_text_attr(color):
        """Sets the character attributes (colors) of the console screen
        buffer. Color is a combination of foreground and background color,
        foreground and background intensity."""
        SetConsoleTextAttribute(stdout_handle, color)
###############################################################################
def find_bi_tools(work_drive):
    """Find BI tools.

    Reads the Arma 3 Tools install path from the registry (Windows only)
    and returns the [AddonBuilder, DSSignFile, DSCreateKey, CfgConvert]
    executable paths.

    :raises Exception: ("BadTools", ...) if the registry key or any of
        the four executables is missing.
    """
    # NOTE(review): the work_drive parameter is unused here -- apparently
    # kept for call-site compatibility.
    reg = winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER)
    try:
        k = winreg.OpenKey(reg, r"Software\bohemia interactive\arma 3 tools")
        arma3tools_path = winreg.QueryValueEx(k, "path")[0]
        winreg.CloseKey(k)
    except:
        raise Exception("BadTools","Arma 3 Tools are not installed correctly or the P: drive needs to be created.")

    addonbuilder_path = os.path.join(arma3tools_path, "AddonBuilder", "AddonBuilder.exe")
    dssignfile_path = os.path.join(arma3tools_path, "DSSignFile", "DSSignFile.exe")
    dscreatekey_path = os.path.join(arma3tools_path, "DSSignFile", "DSCreateKey.exe")
    cfgconvert_path = os.path.join(arma3tools_path, "CfgConvert", "CfgConvert.exe")

    if os.path.isfile(addonbuilder_path) and os.path.isfile(dssignfile_path) and os.path.isfile(dscreatekey_path) and os.path.isfile(cfgconvert_path):
        return [addonbuilder_path, dssignfile_path, dscreatekey_path, cfgconvert_path]
    else:
        raise Exception("BadTools","Arma 3 Tools are not installed correctly or the P: drive needs to be created.")
def find_depbo_tools(regKey):
    """Use registry entries to find DePBO-based tools.

    :param regKey: "HKCU" to search the current-user hive (failures are
        then fatal), anything else searches HKLM (failures return -1).
    :returns: [pboProject, rapify, MakePbo] exe paths, or -1 when not
        found in HKLM.
    :raises Exception: ("BadDePBO", ...) when the tools are missing from
        HKCU.
    """
    stop = False
    if regKey == "HKCU":
        reg = winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER)
        stop = True
    else:
        reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)

    try:
        try:
            # 64-bit Windows stores 32-bit tool keys under Wow6432Node.
            k = winreg.OpenKey(reg, r"Software\Wow6432Node\Mikero\pboProject")
        except FileNotFoundError:
            k = winreg.OpenKey(reg, r"Software\Mikero\pboProject")
        try:
            pboproject_path = winreg.QueryValueEx(k, "exe")[0]
            winreg.CloseKey(k)
            print("Found pboproject.")
        except:
            print_error("ERROR: Could not find pboProject.")

        try:
            k = winreg.OpenKey(reg, r"Software\Wow6432Node\Mikero\rapify")
        except FileNotFoundError:
            k = winreg.OpenKey(reg, r"Software\Mikero\rapify")
        try:
            rapify_path = winreg.QueryValueEx(k, "exe")[0]
            winreg.CloseKey(k)
            print("Found rapify.")
        except:
            print_error("Could not find rapify.")

        try:
            k = winreg.OpenKey(reg, r"Software\Wow6432Node\Mikero\MakePbo")
        except FileNotFoundError:
            k = winreg.OpenKey(reg, r"Software\Mikero\MakePbo")
        try:
            makepbo_path = winreg.QueryValueEx(k, "exe")[0]
            winreg.CloseKey(k)
            print("Found makepbo.")
        except:
            print_error("Could not find makepbo.")
    except:
        if stop == True:
            raise Exception("BadDePBO", "DePBO tools not installed correctly")
        return -1

    # NOTE(review): if any of the per-tool lookups above failed without
    # raising, the corresponding *_path local is unbound and the return
    # below raises NameError -- presumably acceptable to the caller.
    #Strip any quotations from the path due to a MikeRo tool bug which leaves a trailing space in some of its registry paths.
    return [pboproject_path.strip('"'),rapify_path.strip('"'),makepbo_path.strip('"')]
def color(color):
    """Set the console text color. Works on Win32 and ANSI terminals.

    Fix: the POSIX branch previously had no handler for "yellow", so
    print_yellow() produced uncolored text on non-Windows systems; an
    ANSI bright-yellow escape is now emitted.  ("grey" remains a
    Windows-only attribute.)
    """
    if sys.platform == "win32":
        if color == "green":
            set_text_attr(FOREGROUND_GREEN | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)
        elif color == "yellow":
            set_text_attr(FOREGROUND_YELLOW | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)
        elif color == "red":
            set_text_attr(FOREGROUND_RED | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)
        elif color == "blue":
            set_text_attr(FOREGROUND_BLUE | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)
        elif color == "reset":
            set_text_attr(FOREGROUND_GREY | get_text_attr() & 0x0070)
        elif color == "grey":
            set_text_attr(FOREGROUND_GREY | get_text_attr() & 0x0070)
    else:
        if color == "green":
            sys.stdout.write('\033[92m')
        elif color == "yellow":
            sys.stdout.write('\033[93m')
        elif color == "red":
            sys.stdout.write('\033[91m')
        elif color == "blue":
            sys.stdout.write('\033[94m')
        elif color == "reset":
            sys.stdout.write('\033[0m')
def print_error(msg):
    """Print *msg* in red with an 'ERROR:' prefix, then reset the color."""
    color("red")
    text = "ERROR: {}".format(msg)
    print(text)
    color("reset")
def print_green(msg):
    """Print *msg* in green, then reset the terminal color."""
    color("green")
    print(msg)
    color("reset")
def print_blue(msg):
    """Print *msg* in blue, then reset the terminal color."""
    color("blue")
    print(msg)
    color("reset")
def print_yellow(msg):
    """Print *msg* in yellow, then reset the terminal color."""
    color("yellow")
    print(msg)
    color("reset")
def copy_important_files(source_dir, destination_dir):
    """Copy the release files (importantFiles) and any extension DLLs
    from *source_dir* into *destination_dir*.

    Restores the original working directory afterwards and re-raises on
    any copy failure.
    """
    previous_cwd = os.getcwd()

    # Copy the fixed list of release files.
    try:
        print_blue("\nSearching for important files in {}".format(source_dir))
        print("Source_dir: {}".format(source_dir))
        print("Destination_dir: {}".format(destination_dir))

        for name in importantFiles:
            src = os.path.join(source_dir, name)
            print_green("Copying file => {}".format(src))
            shutil.copyfile(src, os.path.join(destination_dir, name))
    except:
        print_error("COPYING IMPORTANT FILES.")
        raise

    # Copy every extension DLL found directly in the source directory.
    try:
        os.chdir(os.path.join(source_dir))
        print_blue("\nSearching for DLLs in {}".format(os.getcwd()))
        filenames = glob.glob("*.dll")

        if not filenames:
            print("Empty SET")
        for dll in filenames:
            print_green("Copying dll => {}".format(os.path.join(source_dir, dll)))
            if os.path.isfile(dll):
                shutil.copyfile(os.path.join(source_dir, dll),
                                os.path.join(destination_dir, dll))
    except:
        print_error("COPYING DLL FILES.")
        raise
    finally:
        os.chdir(previous_cwd)
def copy_optionals_for_building(mod,pbos):
    """Stage optional components so they are built like regular addons.

    Side effects (mutates both arguments and the filesystem):

    * Moves already-released optional PBOs (and their .bisign files)
      from release/optionals into release/addons for processing; moved
      PBO names are appended to *pbos*.
    * Temporarily copies every optional source folder into the module
      root (or the work drive for "userconfig"); copied folder names are
      appended to *mod* so cleanup_optionals() can undo this later.
    """
    src_directories = os.listdir(optionals_root)
    current_dir = os.getcwd()

    print_blue("\nChecking Optionals folder...")
    try:
        #special server.pbo processing
        files = glob.glob(os.path.join(release_dir, project, "optionals", "*.pbo"))
        for file in files:
            file_name = os.path.basename(file)
            #print ("Adding the following file: {}".format(file_name))
            pbos.append(file_name)
            pbo_path = os.path.join(release_dir, project, "optionals", file_name)
            sigFile_name = file_name +"."+ key_name + ".bisign"
            sig_path = os.path.join(release_dir, project, "optionals", sigFile_name)
            if (os.path.isfile(pbo_path)):
                print("Moving {} for processing.".format(pbo_path))
                shutil.move(pbo_path, os.path.join(release_dir, project, "addons", file_name))

            if (os.path.isfile(sig_path)):
                #print("Moving {} for processing.".format(sig_path))
                shutil.move(sig_path, os.path.join(release_dir, project, "addons", sigFile_name))
    except:
        print_error("Error in moving")
        raise
    finally:
        os.chdir(current_dir)

    print("")
    try:
        for dir_name in src_directories:
            mod.append(dir_name)
            #userconfig requires special handling since it is not a PBO source folder.
            #CfgConvert fails to build server.pbo if userconfig is not found in P:\
            if (dir_name == "userconfig"):
                if (os.path.exists(os.path.join(release_dir, project, "optionals", dir_name))):
                    shutil.rmtree(os.path.join(release_dir, project, "optionals", dir_name), True)
                shutil.copytree(os.path.join(optionals_root,dir_name), os.path.join(release_dir, project, "optionals", dir_name))
                destination = os.path.join(work_drive,dir_name)
            else:
                destination = os.path.join(module_root,dir_name)
            print("Temporarily copying {} => {} for building.".format(os.path.join(optionals_root,dir_name),destination))
            if (os.path.exists(destination)):
                shutil.rmtree(destination, True)
            shutil.copytree(os.path.join(optionals_root,dir_name), destination)
    except:
        print_error("Copy Optionals Failed")
        raise
    finally:
        os.chdir(current_dir)
def cleanup_optionals(mod):
    """Undo copy_optionals_for_building(): move built optional PBOs and
    signatures back to release/optionals and delete the temporary source
    folder copies listed in *mod*.
    """
    print("")
    try:
        for dir_name in mod:
            #userconfig requires special handling since it is not a PBO source folder.
            if (dir_name == "userconfig"):
                destination = os.path.join(work_drive,dir_name)
            else:
                destination = os.path.join(module_root,dir_name)
            print("Cleaning {}".format(destination))

            try:
                file_name = "{}{}.pbo".format(pbo_name_prefix,dir_name)
                src_file_path = os.path.join(release_dir, project, "addons", file_name)
                dst_file_path = os.path.join(release_dir, project, "optionals", file_name)

                sigFile_name = "{}.{}.bisign".format(file_name,key_name)
                src_sig_path = os.path.join(release_dir, project, "addons", sigFile_name)
                dst_sig_path = os.path.join(release_dir, project, "optionals", sigFile_name)

                if (os.path.isfile(src_file_path)):
                    #print("Preserving {}".format(file_name))
                    os.renames(src_file_path,dst_file_path)
                if (os.path.isfile(src_sig_path)):
                    #print("Preserving {}".format(sigFile_name))
                    os.renames(src_sig_path,dst_sig_path)
            except FileExistsError:
                print_error("{} already exists".format(file_name))
                continue
            shutil.rmtree(destination)
    except FileNotFoundError:
        # NOTE(review): if the FileNotFoundError is raised before the inner
        # try assigns file_name (e.g. by shutil.rmtree on the very first
        # iteration path), this message would hit an unbound local --
        # confirm the intended scope of this handler.
        print_yellow("{} file not found".format(file_name))
    except:
        print_error("Cleaning Optionals Failed")
        raise
def purge(dir, pattern, friendlyPattern="files"):
    """Delete every entry in *dir* whose name matches the regex *pattern*.

    friendlyPattern is only used for the progress message. (The parameter
    name ``dir`` shadows the builtin but is kept for caller compatibility.)
    """
    print_green("Deleting {} files from directory: {}".format(friendlyPattern,dir))
    matcher = re.compile(pattern)
    for entry in os.listdir(dir):
        if matcher.search(entry):
            os.remove(os.path.join(dir, entry))
def build_signature_file(file_name):
    """Sign *file_name* with the configured private key via dssignfile.

    Files whose base name appears in signature_blacklist are skipped and
    treated as success. Returns True when signing succeeded (or was
    skipped), False when dssignfile reported a non-zero exit code.
    """
    global key
    global dssignfile
    global signature_blacklist
    ret = 0
    #print_yellow("Sig_fileName: {}".format(os.path.basename(file_name)))
    if os.path.basename(file_name) not in signature_blacklist:
        print("Signing with {}.".format(key))
        ret = subprocess.call([dssignfile, key, file_name])
    return ret == 0
def check_for_obsolete_pbos(addonspath, file):
    """Return True when built file *file* no longer has a source folder.

    The module name is recovered by stripping the pbo_name_prefix global
    from the front and the 4-character extension (".pbo"/".dll") from the
    end; the file is obsolete when addonspath/<module> does not exist.
    """
    module = file[len(pbo_name_prefix):-4]
    return not os.path.exists(os.path.join(addonspath, module))
def backup_config(module):
    """Back up the module's $PBOPREFIX$ file before it is version-stamped.

    Copies work_drive/prefix/module/$PBOPREFIX$ to $PBOPREFIX$.backup.
    Problems are reported but never abort the build; always returns True.
    Relies on module-level globals work_drive and prefix.
    """
    #backup original $PBOPREFIX$
    global work_drive
    global prefix

    try:
        configpath = os.path.join(work_drive, prefix, module, "$PBOPREFIX$")
        if os.path.isfile(configpath):
            shutil.copyfile(configpath, os.path.join(work_drive, prefix, module, "$PBOPREFIX$.backup"))
        else:
            print_error("$PBOPREFIX$ Does not exist for module: {}.".format(module))
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate instead of being swallowed as a "backup error".
        print_error("Error creating backup of $PBOPREFIX$ for module {}.".format(module))

    return True
def addon_restore(modulePath):
    """Restore the module's original $PBOPREFIX$ from its .backup copy.

    If modulePath/$PBOPREFIX$.backup exists, any stamped $PBOPREFIX$ is
    removed and the backup is renamed back into place. Errors are reported
    but never abort the build; always returns True.
    """
    #restore original $PBOPREFIX$
    try:
        backupPath = os.path.join(modulePath, "$PBOPREFIX$.backup")
        if os.path.isfile(backupPath):
            configPath = os.path.join(modulePath, "$PBOPREFIX$")
            if os.path.isfile(configPath):
                os.remove(configPath)
            os.rename(backupPath, configPath)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate.
        print_yellow("Some error occurred. Check your addon folder {} for integrity".format(modulePath))

    return True
def get_project_version():
    """Read MAJOR/MINOR/PATCHLVL/BUILD from main/script_mod.hpp.

    Builds the "MAJOR.MINOR.PATCHLVL.BUILD" stamp, stores it in the
    project_version global, and returns it. On any failure the previous
    project_version is kept (after prompting the user), so a broken
    header never aborts the build outright.
    """
    global project_version
    versionStamp = project_version
    #do the magic based on https://github.com/acemod/ACE3/issues/806#issuecomment-95639048
    # Build the path portably instead of embedding a backslash in the
    # literal; also hoisted out of the try so the except handler can
    # always reference it.
    scriptModPath = os.path.join(work_drive, prefix, "main", "script_mod.hpp")

    try:
        if os.path.isfile(scriptModPath):
            # with-statement guarantees the handle is closed even when a
            # regex below fails to match and raises.
            with open(scriptModPath, "r") as f:
                hpptext = f.read()

            if hpptext:
                majorText = re.search(r"#define MAJOR (.*\b)", hpptext).group(1)
                minorText = re.search(r"#define MINOR (.*\b)", hpptext).group(1)
                patchlvlText = re.search(r"#define PATCHLVL (.*\b)", hpptext).group(1)
                buildText = re.search(r"#define BUILD (.*\b)", hpptext).group(1)
                if majorText:
                    versionStamp = "{major}.{minor}.{patchlvl}.{build}".format(major=majorText,minor=minorText,patchlvl=patchlvlText,build=buildText)
        else:
            print_error("A Critical file seems to be missing or inaccessible: {}".format(scriptModPath))
            raise FileNotFoundError("File Not Found: {}".format(scriptModPath))

    except Exception as e:
        print_error("Get_project_version error: {}".format(e))
        print_error("Check the integrity of the file: {}".format(scriptModPath))
        versionStamp = project_version
        print_error("Resetting to the default version stamp: {}".format(versionStamp))
        input("Press Enter to continue...")
        print("Resuming build...")

    print_yellow("{} VERSION set to {}".format(project.lstrip("@").upper(),versionStamp))
    project_version = versionStamp
    return project_version
def replace_file(filePath, oldSubstring, newSubstring):
    """Replace every occurrence of oldSubstring with newSubstring in filePath.

    Streams the file line by line into a temporary file, then swaps the
    temporary file into place, so large files are never fully loaded.
    """
    #Create temp file
    fh, absPath = mkstemp()
    # mkstemp hands back an already-open OS-level descriptor; wrap it with
    # os.fdopen so it is actually closed. The original opened absPath a
    # second time and leaked fh, which on Windows kept the temp file "in
    # use" (see the WindowsError workaround in set_version_in_files).
    with os.fdopen(fh, 'w') as newFile:
        with open(filePath) as oldFile:
            for line in oldFile:
                newFile.write(line.replace(oldSubstring, newSubstring))
    #Remove original file
    os.remove(filePath)
    #Move new file
    shutil.move(absPath, filePath)
def set_version_in_files():
    """Rewrite the version stamp in every file listed in versionFiles.

    Looks for an existing MAJOR.MINOR.PATCH.BUILD (or MAJOR.MINOR.PATCH)
    stamp in each file and replaces it with the corresponding form of the
    module-level project_version. Returns True.
    """
    newVersion = project_version # MAJOR.MINOR.PATCH.BUILD
    newVersionShort = newVersion[:-2] # MAJOR.MINOR.PATCH

    # Regex patterns
    # NOTE(review): [0\.-9] is a character class of '0' plus the range
    # '.'-'9' (i.e. dot, slash and digits) — looser than intended but the
    # release files apparently rely on it; verify before tightening.
    pattern = re.compile(r"(\b[0\.-9]+\b\.[0\.-9]+\b\.[0\.-9]+\b\.[0\.-9]+)") # MAJOR.MINOR.PATCH.BUILD
    patternShort = re.compile(r"(\b[0\.-9]+\b\.[0\.-9]+\b\.[0\.-9]+)") # MAJOR.MINOR.PATCH

    # Change versions in files containing version
    for i in versionFiles:
        filePath = os.path.join(module_root_parent, i)
        try:
            # Save the file contents to a variable if the file exists
            if os.path.isfile(filePath):
                f = open(filePath, "r+")
                fileText = f.read()
                f.close()

                if fileText:
                    # Search and save version stamp, search short if long not found
                    versionFound = re.findall(pattern, fileText)
                    if not versionFound:
                        versionFound = re.findall(patternShort, fileText)

                    # Replace version stamp if any of the new version parts is higher than the one found
                    if versionFound:
                        # First item in the list findall returns
                        versionFound = versionFound[0]

                        # Use the same version length as the one found
                        if len(versionFound) == len(newVersion):
                            newVersionUsed = newVersion
                        if len(versionFound) == len(newVersionShort):
                            newVersionUsed = newVersionShort

                        # NOTE(review): newVersionUsed is unbound when the
                        # found stamp matches neither length; the resulting
                        # NameError is caught below and re-raised.
                        # Print change and modify the file if changed
                        if versionFound != newVersionUsed:
                            print_green("Changing version {} => {} in {}".format(versionFound, newVersionUsed, filePath))
                            replace_file(filePath, versionFound, newVersionUsed)
        except WindowsError as e:
            # Temporary file is still "in use" by Python, pass this exception
            pass
        except Exception as e:
            print_error("set_version_in_files error: {}".format(e))
            raise

    return True
def stash_version_files_for_building():
    """Stash pristine copies of the version-bearing files, then stamp them.

    Each file in versionFiles is copied to <release_dir>/<name>.bak so
    restore_version_files can put it back after the build, after which
    set_version_in_files rewrites the working copies. Returns True.
    """
    try:
        for verFile in versionFiles:
            srcPath = os.path.join(module_root_parent, verFile)
            bakBase = os.path.join(release_dir, verFile)
            print("Temporarily stashing {} => {}.bak for version update".format(srcPath, bakBase))
            shutil.copy(srcPath, "{}.bak".format(bakBase))
    except:
        print_error("Stashing version files failed")
        raise

    # Set version
    set_version_in_files()
    return True
def restore_version_files():
    """Move the stashed .bak copies back over the working version files.

    Inverse of stash_version_files_for_building; undoes the temporary
    version stamping after the build finishes. Returns True.
    """
    try:
        for verFile in versionFiles:
            targetPath = os.path.join(module_root_parent, verFile)
            stashedPath = os.path.join(release_dir, verFile)
            print("Restoring {}".format(targetPath))
            shutil.move("{}.bak".format(stashedPath), targetPath)
    except:
        print_error("Restoring version files failed")
        raise

    return True
def get_private_keyname(commitID,module="main"):
    """Build the private key name: <pbo_name_prefix><version>-<commitID>.

    The version comes from get_project_version() (which also refreshes the
    project_version global). The ``module`` parameter is unused but kept
    for caller compatibility.
    """
    global pbo_name_prefix
    return "{prefix}{version}-{commit_id}".format(
        prefix=pbo_name_prefix,
        version=get_project_version(),
        commit_id=commitID)
def get_commit_ID():
    """Return the current git HEAD commit id, truncated to 8 characters.

    Requires a .git directory next to make_root and git on PATH. On any
    failure a diagnostic is printed and the exception is re-raised, so the
    "NOGIT" assignment below is effectively dead (kept for parity with the
    original behavior).
    """
    # Get latest commit ID
    global make_root
    curDir = os.getcwd()
    try:
        gitpath = os.path.join(os.path.dirname(make_root), ".git")
        assert os.path.exists(gitpath)

        os.chdir(make_root)
        commit_id = subprocess.check_output(["git", "rev-parse", "HEAD"])
        commit_id = str(commit_id, "utf-8")[:8]
    except:
        print_error("FAILED TO DETERMINE COMMIT ID.")
        print_yellow("Verify that \GIT\BIN or \GIT\CMD is in your system path or user path.")
        commit_id = "NOGIT"
        raise
    finally:
        # Restore the caller's working directory even on the re-raise
        # above; the original only restored it after the try statement,
        # so a failure left the process stranded in make_root.
        os.chdir(curDir)

    print_yellow("COMMIT ID set to {}".format(commit_id))
    return commit_id
def version_stamp_pboprefix(module,commitID):
    """Stamp the module's $PBOPREFIX$ with "version=<commitID>".

    Rewrites an existing version= line, or appends one when none is
    present. Returns True on success, False when the file could not be
    updated. Relies on the work_drive and prefix globals.
    """
    ### Update pboPrefix with the correct version stamp. Use commit_id as the build number.
    #This function will not handle any $PBOPREFIX$ backup or cleanup.
    global work_drive
    global prefix

    configpath = os.path.join(work_drive, prefix, module, "$PBOPREFIX$")
    try:
        f = open(configpath, "r")
        configtext = f.read()
        f.close()

        if configtext:
            if re.search(r"version=(.*?)$", configtext, re.DOTALL):
                # A version= line already exists -> rewrite it in place.
                if configtext:
                    configtext = re.sub(r"version=(.*?)$", "version={}\n".format(commitID), configtext, flags=re.DOTALL)
                    f = open(configpath, "w")
                    f.write(configtext)
                    f.close()
                else:
                    # NOTE(review): unreachable — configtext was truthy in the
                    # enclosing if; restores the backup as a safety net.
                    os.remove(os.path.join(work_drive, prefix, module, "$PBOPREFIX$"))
                    os.rename(os.path.join(work_drive, prefix, module, "$PBOPREFIX$.backup"), os.path.join(work_drive, prefix, module, "$PBOPREFIX$"))
            else:
                if configtext:
                    #append version info
                    f = open(configpath, "a")
                    f.write("\nversion = {}".format(commitID))
                    f.close()
                else:
                    # NOTE(review): unreachable — configtext was truthy in the
                    # enclosing if; restores the backup as a safety net.
                    os.remove(os.path.join(work_drive, prefix, module, "$PBOPREFIX$"))
                    os.rename(os.path.join(work_drive, prefix, module, "$PBOPREFIX$.backup"), os.path.join(work_drive, prefix, module, "$PBOPREFIX$"))
    except Exception as e:
        print_error("Failed to include build number: {}".format(e))
        return False

    return True
###############################################################################

def main(argv):
    """Build an Arma addon suite in a directory from rules in a make.cfg file.

    argv is the raw command line (sys.argv); the recognized flags are listed
    in the "help" text below. Windows-only: drives the BI Arma 3 Tools (and
    optionally Mikero's pboProject) and relies on helpers defined elsewhere
    in this file (print_* helpers, find_bi_tools, find_depbo_tools,
    get_directory_hash, copy_optionals_for_building, ...).
    """
    print_blue("\nmake.py for Arma, modified for Advanced Combat Environment v{}".format(__version__))

    global project_version
    global arma3tools_path
    global work_drive
    global module_root
    global make_root
    global release_dir
    global module_root_parent
    global optionals_root
    global key_name
    global key
    global dssignfile
    global prefix
    global pbo_name_prefix

    # The BI tool chain is Windows-only; bail out early everywhere else.
    if sys.platform != "win32":
        print_error("Non-Windows platform (Cygwin?). Please re-run from cmd.")
        sys.exit(1)

    # Locate the Arma 3 Tools install through the per-user registry key.
    reg = winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER)
    try:
        k = winreg.OpenKey(reg, r"Software\bohemia interactive\arma 3 tools")
        arma3tools_path = winreg.QueryValueEx(k, "path")[0]
        winreg.CloseKey(k)
    except:
        raise Exception("BadTools","Arma 3 Tools are not installed correctly or the P: drive needs to be created.")

    # Default behaviors
    test = False # Copy to Arma 3 directory?
    arg_modules = False # Only build modules on command line?
    use_pboproject = True # Default to pboProject build tool
    make_target = "DEFAULT" # Which section in make.cfg to use for the build
    new_key = True # Make a new key and use it to sign?
    quiet = False # Suppress output from build tool?

    # Parse arguments
    if "help" in argv or "-h" in argv or "--help" in argv:
        print ("""
make.py [help] [test] [force] [key <name>] [target <name>] [release <version>]
[module name] [module name] [...]
test -- Copy result to Arma 3.
release <version> -- Make archive with <version>.
force -- Ignore cache and build all.
checkexternal -- Check External Files
target <name> -- Use rules in make.cfg under heading [<name>] rather than
default [Make]
key <name> -- Use key in working directory with <name> to sign. If it does not
exist, create key.
quiet -- Suppress command line output from build tool.
If module names are specified, only those modules will be built.
Examples:
make.py force test
Build all modules (ignoring cache) and copy the mod folder to the Arma 3
directory.
make.py mymodule_gun
Only build the module named 'mymodule_gun'.
make.py force key MyNewKey release 1.0
Build all modules (ignoring cache), sign them with NewKey, and pack them
into a zip file for release with version 1.0.
If a file called $NOBIN$ is found in the module directory, that module will not be binarized.
See the make.cfg file for additional build options.
""")
        sys.exit(0)

    if "force" in argv:
        argv.remove("force")
        force_build = True
    else:
        force_build = False

    if "test" in argv:
        test = True
        argv.remove("test")

    if "release" in argv:
        make_release_zip = True
        release_version = argv[argv.index("release") + 1]
        argv.remove(release_version)
        argv.remove("release")
    else:
        make_release_zip = False
        release_version = project_version

    if "target" in argv:
        make_target = argv[argv.index("target") + 1]
        argv.remove("target")
        argv.remove(make_target)
        force_build = True

    if "key" in argv:
        new_key = True
        key_name = argv[argv.index("key") + 1]
        argv.remove("key")
        argv.remove(key_name)

    if "quiet" in argv:
        quiet = True
        argv.remove("quiet")

    if "checkexternal" in argv:
        argv.remove("checkexternal")
        check_external = True
    else:
        check_external = False

    if "version" in argv:
        argv.remove("version")
        version_update = True
    else:
        version_update = False

    print_yellow("\nCheck external references is set to {}".format(str(check_external)))

    # Get the directory the make script is in.
    make_root = os.path.dirname(os.path.realpath(__file__))
    make_root_parent = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
    os.chdir(make_root)

    # Read the build configuration; every setting below has a fallback so a
    # minimal make.cfg still works.
    cfg = configparser.ConfigParser();
    try:
        cfg.read(os.path.join(make_root, "make.cfg"))

        # Project name (with @ symbol)
        project = cfg.get(make_target, "project", fallback="@"+os.path.basename(os.getcwd()))

        # BI Tools work drive on Windows
        work_drive = cfg.get(make_target, "work_drive", fallback="P:\\")

        # Private key path
        key = cfg.get(make_target, "key", fallback=None)

        # Private key creation directory
        private_key_path = cfg.get(make_target, "private_key_path", fallback=os.path.join(work_drive, "private_keys"))

        # Project prefix (folder path)
        prefix = cfg.get(make_target, "prefix", fallback="")

        # Release archive prefix
        zipPrefix = cfg.get(make_target, "zipPrefix", fallback=project.lstrip("@").lower())

        # Should we autodetect modules on a complete build?
        module_autodetect = cfg.getboolean(make_target, "module_autodetect", fallback=True)

        # Manual list of modules to build for a complete build
        modules = cfg.get(make_target, "modules", fallback=None)
        # Parse it out
        if modules:
            modules = [x.strip() for x in modules.split(',')]
        else:
            modules = []

        # List of directories to ignore when detecting
        ignore = [x.strip() for x in cfg.get(make_target, "ignore", fallback="release").split(',')]

        # Which build tool should we use?
        build_tool = cfg.get(make_target, "build_tool", fallback="addonbuilder").lower()

        # Release/build directory, relative to script dir
        release_dir = cfg.get(make_target, "release_dir", fallback="release")

        #Directory to copy the final built PBO's for a test run.
        test_dir = cfg.get(make_target, "test_dir", fallback=os.path.join(os.environ["USERPROFILE"],r"documents\Arma 3"))

        # Project PBO file prefix (files are renamed to prefix_name.pbo)
        pbo_name_prefix = cfg.get(make_target, "pbo_name_prefix", fallback=None)

        # Project module Root
        module_root_parent = os.path.abspath(os.path.join(os.path.join(work_drive, prefix), os.pardir))
        module_root = cfg.get(make_target, "module_root", fallback=os.path.join(make_root_parent, "addons"))
        optionals_root = os.path.join(module_root_parent, "optionals")
        extensions_root = os.path.join(module_root_parent, "extensions")

        # Key name embeds the project version and the git commit id.
        commit_id = get_commit_ID()
        key_name = versionStamp = get_private_keyname(commit_id)
        print_green ("module_root: {}".format(module_root))

        if (os.path.isdir(module_root)):
            os.chdir(module_root)
        else:
            print_error ("Directory {} does not exist.".format(module_root))
            sys.exit()

        if (os.path.isdir(optionals_root)):
            print_green ("optionals_root: {}".format(optionals_root))
        else:
            print_error ("Directory {} does not exist.".format(optionals_root))
            sys.exit()

        print_green ("release_dir: {}".format(release_dir))

    except:
        raise
        # NOTE(review): unreachable after the re-raise above (kept as-is).
        print_error("Could not parse make.cfg.")
        sys.exit(1)

    # See if we have been given specific modules to build from command line.
    if len(argv) > 1 and not make_release_zip:
        arg_modules = True
        modules = argv[1:]

    # Find the tools we need.
    try:
        tools = find_bi_tools(work_drive)
        addonbuilder = tools[0]
        dssignfile = tools[1]
        dscreatekey = tools[2]
        cfgconvert = tools[3]

    except:
        print_error("Arma 3 Tools are not installed correctly or the P: drive has not been created.")
        sys.exit(1)

    if build_tool == "pboproject":
        try:
            depbo_tools = find_depbo_tools("HKLM")
            if depbo_tools == -1:
                depbo_tools = find_depbo_tools("HKCU")
            pboproject = depbo_tools[0]
            rapifyTool = depbo_tools[1]
            makepboTool = depbo_tools[2]
        except:
            raise
            # NOTE(review): unreachable after the re-raise above (kept as-is).
            print_error("Could not find dePBO tools. Download the needed tools from: https://dev.withsix.com/projects/mikero-pbodll/files")
            sys.exit(1)

    # Try to open and deserialize build cache file.
    try:
        cache = {}
        with open(os.path.join(make_root, "make.cache"), 'r') as f:
            cache_raw = f.read()

        cache = json.loads(cache_raw)

    except:
        print ("No cache found.")
        cache = {}

    # Check the build version (from main) with cached version - forces a full rebuild when version changes
    project_version = get_project_version()
    cacheVersion = "None";
    if 'cacheVersion' in cache:
        cacheVersion = cache['cacheVersion']

    if (project_version != cacheVersion):
        cache = {}
        print("Reseting Cache {0} to New Version {1}".format(cacheVersion, project_version))
        cache['cacheVersion'] = project_version

    if not os.path.isdir(os.path.join(release_dir, project, "addons")):
        try:
            os.makedirs(os.path.join(release_dir, project, "addons"))
        except:
            print_error("Cannot create release directory")
            raise

    if not os.path.isdir(os.path.join(release_dir, project, "keys")):
        try:
            os.makedirs(os.path.join(release_dir, project, "keys"))
        except:
            print_error("Cannot create release directory")
            raise

    # Update version stamp in all files that contain it
    # Update version only for release if full update not requested (backup and restore files)
    print_blue("\nChecking for obsolete version numbers...")
    if not version_update:
        stash_version_files_for_building()
    else:
        # Set version
        set_version_in_files();
        print("Version in files has been changed, make sure you commit and push the updates!")

    amountOfBuildsFailed = 0
    namesOfBuildsFailed = []

    try:
        # Temporarily copy optionals_root for building. They will be removed later.
        optionals_modules = []
        optional_files = []
        copy_optionals_for_building(optionals_modules,optional_files)

        # Get list of subdirs in make root.
        dirs = next(os.walk(module_root))[1]

        # Autodetect what directories to build.
        if module_autodetect and not arg_modules:
            modules = []
            for path in dirs:
                # Any dir that has a config.cpp in its root is an addon to build.
                config_path = os.path.join(path, 'config.cpp')
                if os.path.isfile(config_path) and not path in ignore:
                    modules.append(path)

        # Make the key specified from command line if necessary.
        if new_key:
            if not os.path.isfile(os.path.join(private_key_path, key_name + ".biprivatekey")):
                print_yellow("\nRequested key does not exist.")
                try:
                    os.makedirs(private_key_path)
                except:
                    pass
                curDir = os.getcwd()
                os.chdir(private_key_path)
                ret = subprocess.call([dscreatekey, key_name]) # Created in make_root
                os.chdir(curDir)
                if ret == 0:
                    print_green("Created: {}".format(os.path.join(private_key_path, key_name + ".biprivatekey")))
                    # A fresh key invalidates all previous signatures.
                    print("Removing any old signature keys...")
                    purge(os.path.join(module_root, release_dir, project, "addons"), "^.*\.bisign$","*.bisign")
                    purge(os.path.join(module_root, release_dir, project, "optionals"), "^.*\.bisign$","*.bisign")
                    purge(os.path.join(module_root, release_dir, project, "keys"), "^.*\.bikey$","*.bikey")
                else:
                    print_error("Failed to create key!")
            else:
                print_green("\nNOTE: Using key {}".format(os.path.join(private_key_path, key_name + ".biprivatekey")))

            try:
                print("Copying public key to release directory.")
                try:
                    os.makedirs(os.path.join(module_root, release_dir, project, "keys"))
                except:
                    pass
                # Use biKeyNameAbrev to attempt to minimize problems from this BI Bug REFERENCE: http://feedback.arma3.com/view.php?id=22133
                biKeyNameAbrev = key_name.split("-")[0]
                shutil.copyfile(os.path.join(private_key_path, key_name + ".bikey"), os.path.join(module_root, release_dir, project, "keys", "{}.bikey".format(biKeyNameAbrev)))
            except:
                print_error("Could not copy key to release directory.")
                raise

            key = os.path.join(private_key_path, "{}.biprivatekey".format(key_name))

        # Remove any obsolete files.
        print_blue("\nChecking for obsolete files...")
        obsolete_check_path = os.path.join(module_root, release_dir, project,"addons")
        for file in os.listdir(obsolete_check_path):
            if (file.endswith(".pbo") and os.path.isfile(os.path.join(obsolete_check_path,file))):
                if check_for_obsolete_pbos(module_root, file):
                    fileName = os.path.splitext(file)[0]
                    print_yellow("Removing obsolete file => {}".format(file))
                    purge(obsolete_check_path, "{}\..".format(fileName), "{}.*".format(fileName))

        # Same for built extension dlls whose source folder disappeared.
        obsolete_check_path = os.path.join(module_root, release_dir, project)
        for file in os.listdir(obsolete_check_path):
            if (file.endswith(".dll") and os.path.isfile(os.path.join(obsolete_check_path,file))):
                if check_for_obsolete_pbos(extensions_root, file):
                    fileName = os.path.splitext(file)[0]
                    print_yellow("Removing obsolete file => {}".format(file))
                    try:
                        os.remove(os.path.join(obsolete_check_path,file))
                    except:
                        print_error("\nFailed to delete {}".format(os.path.join(obsolete_check_path,file)))
                        pass

        # For each module, prep files and then build.
        print_blue("\nBuilding...")
        for module in modules:
            print_green("\nMaking {}".format(module + "-"*max(1, (60-len(module)))))
            missing = False
            sigMissing = False

            # Cache check
            if module in cache:
                old_sha = cache[module]
            else:
                old_sha = ""

            # Hash the module
            new_sha = get_directory_hash(os.path.join(module_root, module))

            # Is the pbo or sig file missing?
            missing = not os.path.isfile(os.path.join(release_dir, project, "addons", "{}{}.pbo".format(pbo_name_prefix,module)))
            sigFile = "{}{}.pbo.{}.bisign".format(pbo_name_prefix,module,key_name)
            sigMissing = not os.path.isfile(os.path.join(release_dir, project, "addons", sigFile))

            if missing:
                print_yellow("Missing PBO file {}{}.pbo. Building...".format(pbo_name_prefix,module))

            # Check if it needs rebuilt
            # print ("Hash:", new_sha)
            if old_sha == new_sha and not missing:
                if not force_build:
                    print("Module has not changed.")
                    # Unchanged module: only re-sign when the signature is gone.
                    if sigMissing:
                        if key:
                            print("Missing Signature key {}".format(sigFile))
                            build_signature_file(os.path.join(module_root, release_dir, project, "addons", "{}{}.pbo".format(pbo_name_prefix,module)))
                    # Skip everything else
                    continue

            # Only do this if the project isn't stored directly on the work drive.
            # Split the path at the drive name and see if they are on the same drive (usually P:)
            if os.path.splitdrive(module_root)[0] != os.path.splitdrive(work_drive)[0]:
                try:
                    # Remove old work drive version (ignore errors)
                    shutil.rmtree(os.path.join(work_drive, prefix, module), True)

                    # Copy module to the work drive
                    shutil.copytree(module, os.path.join(work_drive, prefix, module))
                except:
                    raise
                    # NOTE(review): unreachable after the re-raise above (kept as-is).
                    print_error("ERROR: Could not copy module to work drive. Does the module exist?")
                    input("Press Enter to continue...")
                    print("Resuming build...")
                    continue
            #else:
                #print("WARNING: Module is stored on work drive ({}).".format(work_drive))

            try:
                # Remove the old pbo, key, and log
                old = os.path.join(module_root, release_dir, project, "addons", "{}{}".format(pbo_name_prefix,module)) + "*"
                files = glob.glob(old)
                for f in files:
                    os.remove(f)

                # NOTE(review): this second pass repeats the glob above with
                # the identical pattern (kept as-is).
                if pbo_name_prefix:
                    old = os.path.join(module_root, release_dir, project, "addons", "{}{}".format(pbo_name_prefix,module)) + "*"
                    files = glob.glob(old)
                    for f in files:
                        os.remove(f)
            except:
                raise
                # NOTE(review): unreachable after the re-raise above (kept as-is).
                print_error("ERROR: Could not copy module to work drive. Does the module exist?")
                input("Press Enter to continue...")
                print("Resuming build...")
                continue

            # Build the module into a pbo
            print_blue("Building: {}".format(os.path.join(work_drive, prefix, module)))
            print_blue("Destination: {}".format(os.path.join(module_root, release_dir, project, "addons")))

            # Make destination folder (if needed)
            try:
                os.makedirs(os.path.join(module_root, release_dir, project, "addons"))
            except:
                pass

            # Run build tool
            build_successful = False
            if build_tool == "pboproject":
                try:
                    nobinFilePath = os.path.join(work_drive, prefix, module, "$NOBIN$")
                    nobinNotestFilePath = os.path.join(work_drive, prefix, module, "$NOBIN-NOTEST$")

                    # Stamp $PBOPREFIX$ with the commit id; the backup is
                    # restored in the finally clause via addon_restore.
                    backup_config(module)
                    version_stamp_pboprefix(module,commit_id)

                    if os.path.isfile(nobinFilePath):
                        print_green("$NOBIN$ Found. Proceeding with non-binarizing!")
                        cmd = [makepboTool, "-P","-A","-L","-G","-X=*.backup", os.path.join(work_drive, prefix, module),os.path.join(module_root, release_dir, project,"addons")]
                    elif os.path.isfile(nobinNotestFilePath):
                        print_green("$NOBIN-NOTEST$ Found. Proceeding with non-binarizing [what you see is what you get]!")
                        cmd = [makepboTool, "-P","-A","-N","-G","-X=*.backup", os.path.join(work_drive, prefix, module),os.path.join(module_root, release_dir, project,"addons")]
                    else:
                        if check_external:
                            cmd = [pboproject, "-P", os.path.join(work_drive, prefix, module), "+Engine=Arma3", "-S","+Noisy", "+X", "+Clean", "+Mod="+os.path.join(module_root, release_dir, project), "-Key"]
                        else:
                            cmd = [pboproject, "-P", os.path.join(work_drive, prefix, module), "+Engine=Arma3", "-S","+Noisy", "-X", "+Clean", "+Mod="+os.path.join(module_root, release_dir, project), "-Key"]

                    color("grey")
                    if quiet:
                        devnull = open(os.devnull, 'w')
                        ret = subprocess.call(cmd, stdout=devnull)
                        devnull.close()
                    else:
                        ret = subprocess.call(cmd)
                    color("reset")

                    if ret == 0:
                        print_green("pboProject return code == {}".format(str(ret)))
                        # Prettyprefix rename the PBO if requested.
                        if pbo_name_prefix:
                            try:
                                os.rename(os.path.join(module_root, release_dir, project, "addons", "{}.pbo".format(module)), os.path.join(module_root, release_dir, project, "addons", "{}{}.pbo".format(pbo_name_prefix,module)))
                            except:
                                raise
                                # NOTE(review): unreachable after the re-raise above (kept as-is).
                                print_error("Could not rename built PBO with prefix.")
                        # Sign result
                        if (key and not "{}{}.pbo".format(pbo_name_prefix,module) in signature_blacklist):
                            print("Signing with {}.".format(key))
                            if pbo_name_prefix:
                                ret = subprocess.call([dssignfile, key, os.path.join(module_root, release_dir, project, "addons", "{}{}.pbo".format(pbo_name_prefix,module))])
                            else:
                                ret = subprocess.call([dssignfile, key, os.path.join(module_root, release_dir, project, "addons", "{}.pbo".format(module))])
                            if ret == 0:
                                build_successful = True
                        else:
                            build_successful = True

                    if not build_successful:
                        print_error("pboProject return code == {}".format(str(ret)))
                        print_error("Module not successfully built/signed. Check your {}temp\{}_packing.log for more info.".format(work_drive,module))
                        print ("Resuming build...")
                        amountOfBuildsFailed += 1
                        namesOfBuildsFailed.append("{}".format(module))
                        continue

                    # Back to the root
                    os.chdir(module_root)
                except:
                    raise
                    # NOTE(review): unreachable after the re-raise above (kept as-is).
                    print_error("Could not run Addon Builder.")
                    input("Press Enter to continue...")
                    print ("Resuming build...")
                    continue
                finally:
                    # Always restore the original $PBOPREFIX$ from backup.
                    addon_restore(os.path.join(work_drive, prefix, module))

            elif build_tool== "addonbuilder":
                # Detect $NOBIN$ and do not binarize if found.
                if os.path.isfile(os.path.join(work_drive, prefix, module, "$NOBIN$")):
                    do_binarize = False
                    print("$NOBIN$ file found in module, packing only.")
                else:
                    do_binarize = True
                try:
                    # Call AddonBuilder
                    os.chdir("P:\\")

                    cmd = [addonbuilder, os.path.join(work_drive, prefix, module), os.path.join(make_root, release_dir, project, "addons"), "-clear", "-project="+work_drive]
                    if not do_binarize:
                        cmd.append("-packonly")

                    if quiet:
                        previousDirectory = os.getcwd()
                        os.chdir(arma3tools_path)
                        devnull = open(os.devnull, 'w')
                        ret = subprocess.call(cmd, stdout=devnull)
                        devnull.close()
                        os.chdir(previousDirectory)
                    else:
                        previousDirectory = os.getcwd()
                        os.chdir(arma3tools_path)
                        print_error("Current directory - {}".format(os.getcwd()))
                        ret = subprocess.call(cmd)
                        os.chdir(previousDirectory)
                        print_error("Current directory - {}".format(os.getcwd()))
                    color("reset")
                    print_green("completed")

                    # Prettyprefix rename the PBO if requested.
                    if pbo_name_prefix:
                        try:
                            os.rename(os.path.join(make_root, release_dir, project, "addons", "{}.pbo".format(module)), os.path.join(make_root, release_dir, project, "addons", "{}{}.pbo".format(pbo_name_prefix,module)))
                        except:
                            raise
                            # NOTE(review): unreachable after the re-raise above (kept as-is).
                            print_error("Could not rename built PBO with prefix.")

                    if ret == 0:
                        # Sign result
                        #print_yellow("Sig_fileName: ace_{}.pbo".format(module))
                        if (key and not "{}{}.pbo".format(pbo_name_prefix,module) in signature_blacklist) :
                            print("Signing with {}.".format(key))
                            if pbo_name_prefix:
                                ret = subprocess.call([dssignfile, key, os.path.join(make_root, release_dir, project, "addons","{}{}.pbo".format(pbo_name_prefix,module))])
                            else:
                                ret = subprocess.call([dssignfile, key, os.path.join(make_root, release_dir, project, "addons", "{}.pbo".format(module))])
                            if ret == 0:
                                build_successful = True
                        else:
                            build_successful = True

                    if not build_successful:
                        print_error("Module not successfully built. Check your {}temp\{}_packing.log for more info.".format(work_drive,module))

                    # Back to the root
                    os.chdir(make_root)
                except:
                    raise
                    # NOTE(review): unreachable after the re-raise above (kept as-is).
                    print_error("Could not run Addon Builder.")
                    input("Press Enter to continue...")
                    print ("Resuming build...")
                    continue
            else:
                print_error("Unknown build_tool {}!".format(build_tool))

            # Update the hash for a successfully built module
            if build_successful:
                cache[module] = new_sha

    except Exception as e:
        print_yellow("Cancel or some error detected: {}".format(e))

    finally:
        # Always run post-build cleanup, even on cancel/error.
        copy_important_files(module_root_parent,os.path.join(release_dir, project))
        cleanup_optionals(optionals_modules)
        if not version_update:
            restore_version_files()

    # Done building all modules!

    # Write out the cache state
    cache_out = json.dumps(cache)
    with open(os.path.join(make_root, "make.cache"), 'w') as f:
        f.write(cache_out)

    # Delete the pboproject temp files if building a release.
    if make_release_zip and build_tool == "pboproject":
        try:
            shutil.rmtree(os.path.join(release_dir, project, "temp"), True)
        except:
            print_error("ERROR: Could not delete pboProject temp files.")

    # Make release
    if make_release_zip:
        release_name = "{}_{}".format(zipPrefix, release_version)
        print_blue("\nMaking release: {}.zip".format(release_name))

        try:
            # Delete all log files
            for root, dirs, files in os.walk(os.path.join(release_dir, project, "addons")):
                for currentFile in files:
                    if currentFile.lower().endswith("log"):
                        os.remove(os.path.join(root, currentFile))

            # Remove all zip files from release folder to prevent zipping the zip
            for file in os.listdir(release_dir):
                if file.endswith(".zip"):
                    os.remove(os.path.join(release_dir, file))

            # Create a zip with the contents of release folder in it
            release_zip = shutil.make_archive("{}".format(release_name), "zip", release_dir)

            # Move release zip to release folder
            shutil.copy(release_zip, release_dir)
            os.remove(release_zip)
        except:
            raise
            # NOTE(review): unreachable after the re-raise above (kept as-is).
            print_error("Could not make release.")

    # Copy to Arma 3 folder for testing
    if test:
        print_blue("\nCopying to Arma 3.")

        a3_path = os.path.join(os.environ['USERPROFILE'],"Documents\Arma 3")

        if sys.platform == "win32":
            print_yellow("\nTesting for Win32.")
            reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
            try:
                k = winreg.OpenKey(reg, r"SOFTWARE\Wow6432Node\Bohemia Interactive\Arma 3")
                a3_path = winreg.EnumValue(k, 1)[1]
                winreg.CloseKey(k)
            except:
                print_error("Could not find Arma 3's directory in the registry.")
        else:
            a3_path = cygwin_a3path

        print_yellow("Path from the registry => {}".format(a3_path))
        # NOTE(review): the registry result is immediately overridden by the
        # configured test_dir (kept as-is).
        a3_path = test_dir
        print_yellow("Copying build files to {}".format(a3_path))

        if os.path.exists(a3_path):
            try:
                print_yellow("...in progress...")
                shutil.rmtree(os.path.join(a3_path, "{}_DEV".format(project)), True)
                shutil.copytree(os.path.join(module_root, release_dir, project), os.path.join(a3_path, "{}_DEV".format(project)))
            except:
                print_error("Could not copy files. Is Arma 3 running?")

    # Final summary of the build run.
    if amountOfBuildsFailed > 0:
        print_error("Build failed. {} pbos failed.".format(amountOfBuildsFailed))

        for failedModuleName in namesOfBuildsFailed:
            print("- {} failed.".format(failedModuleName))
    else:
        print_green("\nCompleted with 0 errors.")
if __name__ == "__main__":
    # Time the whole build and report the elapsed time before exiting.
    start_time = timeit.default_timer()
    main(sys.argv)
    # Fract_Sec (defined elsewhere in this file) splits a second count
    # into days/hours/minutes/seconds; only h/m/s are printed.
    d,h,m,s = Fract_Sec(timeit.default_timer() - start_time)
    print("\nTotal Program time elapsed: {0:2}h {1:2}m {2:4.5f}s".format(h,m,s))
    input("Press Enter to continue...")
| gpl-2.0 |
marcosmodesto/django-testapp | django/core/management/commands/compilemessages.py | 373 | 2824 | import codecs
import os
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
def has_bom(fn):
    """Return True when the file at path *fn* starts with a byte order mark.

    Detects UTF-8, UTF-16 LE and UTF-16 BE BOMs. The file is opened in
    binary mode (BOMs are byte sequences, and Windows text-mode newline
    translation must not interfere) and is always closed — the original
    implementation leaked the file handle.
    """
    f = open(fn, 'rb')
    try:
        sample = f.read(4)
    finally:
        f.close()
    return sample.startswith((codecs.BOM_UTF8,
                              codecs.BOM_UTF16_LE,
                              codecs.BOM_UTF16_BE))
def compile_messages(stderr, locale=None):
    """Compile every .po translation file under the known locale dirs to .mo.

    stderr -- stream used for progress output.
    locale -- optional locale code; when given, only that locale's
              LC_MESSAGES subdirectory of each base dir is processed.

    Raises CommandError when no locale directory can be found, or when a
    .po file carries a BOM (msgfmt rejects files with a BOM).
    """
    basedirs = [os.path.join('conf', 'locale'), 'locale']
    if os.environ.get('DJANGO_SETTINGS_MODULE'):
        # Settings are only importable when the env var is set; imported
        # lazily here so the command also works outside a project.
        from django.conf import settings
        basedirs.extend(settings.LOCALE_PATHS)

    # Gather existing directories.
    basedirs = set(map(os.path.abspath, filter(os.path.isdir, basedirs)))

    if not basedirs:
        raise CommandError("This script should be run from the Django SVN tree or your project or app tree, or with the settings module specified.")

    for basedir in basedirs:
        if locale:
            basedir = os.path.join(basedir, locale, 'LC_MESSAGES')
        for dirpath, dirnames, filenames in os.walk(basedir):
            for f in filenames:
                if f.endswith('.po'):
                    stderr.write('processing file %s in %s\n' % (f, dirpath))
                    fn = os.path.join(dirpath, f)
                    if has_bom(fn):
                        raise CommandError("The %s file has a BOM (Byte Order Mark). Django only supports .po files encoded in UTF-8 and without any BOM." % fn)
                    # pf is the path without extension; msgfmt writes pf.mo.
                    pf = os.path.splitext(fn)[0]
                    # Store the names of the .mo and .po files in an environment
                    # variable, rather than doing a string replacement into the
                    # command, so that we can take advantage of shell quoting, to
                    # quote any malicious characters/escaping.
                    # See http://cyberelk.net/tim/articles/cmdline/ar01s02.html
                    os.environ['djangocompilemo'] = pf + '.mo'
                    os.environ['djangocompilepo'] = pf + '.po'
                    if sys.platform == 'win32': # Different shell-variable syntax
                        cmd = 'msgfmt --check-format -o "%djangocompilemo%" "%djangocompilepo%"'
                    else:
                        cmd = 'msgfmt --check-format -o "$djangocompilemo" "$djangocompilepo"'
                    os.system(cmd)
class Command(BaseCommand):
    """Management command wrapper around :func:`compile_messages`."""

    option_list = BaseCommand.option_list + (
        make_option('--locale', '-l', dest='locale',
            help='The locale to process. Default is to process all.'),
    )
    help = 'Compiles .po files to .mo files for use with builtin gettext support.'

    requires_model_validation = False
    can_import_settings = False

    def handle(self, **options):
        # Delegate to the module-level worker; a missing --locale option
        # yields None, which means "compile every locale".
        compile_messages(self.stderr, locale=options.get('locale'))
| bsd-3-clause |
betoesquivel/fil2014 | filenv/lib/python2.7/site-packages/pip/_vendor/html5lib/treebuilders/dom.py | 559 | 8469 | from __future__ import absolute_import, division, unicode_literals
from xml.dom import minidom, Node
import weakref
from . import _base
from .. import constants
from ..constants import namespaces
from ..utils import moduleFactoryFactory
def getDomBuilder(DomImplementation):
    """Return (as a locals() namespace) an html5lib tree-builder module bound
    to the given ``xml.dom`` *DomImplementation*.

    The namespace provides ``AttrList``, ``NodeBuilder``, ``TreeBuilder`` and
    ``testSerializer``; ``moduleFactoryFactory`` below turns this factory
    into a per-implementation cached module.
    """
    Dom = DomImplementation

    class AttrList(object):
        # Thin mutable-mapping-style wrapper over a DOM element's attributes.
        def __init__(self, element):
            self.element = element

        def __iter__(self):
            return list(self.element.attributes.items()).__iter__()

        def __setitem__(self, name, value):
            self.element.setAttribute(name, value)

        def __len__(self):
            return len(list(self.element.attributes.items()))

        def items(self):
            return [(item[0], item[1]) for item in
                    list(self.element.attributes.items())]

        def keys(self):
            return list(self.element.attributes.keys())

        def __getitem__(self, name):
            return self.element.getAttribute(name)

        def __contains__(self, name):
            # Namespaced (tuple) attribute lookup is not supported here.
            if isinstance(name, tuple):
                raise NotImplementedError
            else:
                return self.element.hasAttribute(name)

    class NodeBuilder(_base.Node):
        # Adapts a DOM node to the html5lib treebuilder Node interface.
        def __init__(self, element):
            _base.Node.__init__(self, element.nodeName)
            self.element = element

        namespace = property(lambda self: hasattr(self.element, "namespaceURI")
                             and self.element.namespaceURI or None)

        def appendChild(self, node):
            node.parent = self
            self.element.appendChild(node.element)

        def insertText(self, data, insertBefore=None):
            text = self.element.ownerDocument.createTextNode(data)
            if insertBefore:
                self.element.insertBefore(text, insertBefore.element)
            else:
                self.element.appendChild(text)

        def insertBefore(self, node, refNode):
            self.element.insertBefore(node.element, refNode.element)
            node.parent = self

        def removeChild(self, node):
            # Guard: only detach if the node is actually our DOM child.
            if node.element.parentNode == self.element:
                self.element.removeChild(node.element)
            node.parent = None

        def reparentChildren(self, newParent):
            # Move every DOM child to newParent, then clear our bookkeeping.
            while self.element.hasChildNodes():
                child = self.element.firstChild
                self.element.removeChild(child)
                newParent.element.appendChild(child)
            self.childNodes = []

        def getAttributes(self):
            return AttrList(self.element)

        def setAttributes(self, attributes):
            if attributes:
                for name, value in list(attributes.items()):
                    # Tuple keys are (prefix, localName, namespaceURI).
                    if isinstance(name, tuple):
                        if name[0] is not None:
                            qualifiedName = (name[0] + ":" + name[1])
                        else:
                            qualifiedName = name[1]
                        self.element.setAttributeNS(name[2], qualifiedName,
                                                    value)
                    else:
                        self.element.setAttribute(
                            name, value)
        attributes = property(getAttributes, setAttributes)

        def cloneNode(self):
            # Shallow clone (children are not copied).
            return NodeBuilder(self.element.cloneNode(False))

        def hasContent(self):
            return self.element.hasChildNodes()

        def getNameTuple(self):
            if self.namespace is None:
                return namespaces["html"], self.name
            else:
                return self.namespace, self.name

        nameTuple = property(getNameTuple)

    class TreeBuilder(_base.TreeBuilder):
        def documentClass(self):
            self.dom = Dom.getDOMImplementation().createDocument(None, None, None)
            # A weak proxy avoids a reference cycle with the parser.
            return weakref.proxy(self)

        def insertDoctype(self, token):
            name = token["name"]
            publicId = token["publicId"]
            systemId = token["systemId"]

            domimpl = Dom.getDOMImplementation()
            doctype = domimpl.createDocumentType(name, publicId, systemId)
            self.document.appendChild(NodeBuilder(doctype))
            if Dom == minidom:
                doctype.ownerDocument = self.dom

        def elementClass(self, name, namespace=None):
            if namespace is None and self.defaultNamespace is None:
                node = self.dom.createElement(name)
            else:
                node = self.dom.createElementNS(namespace, name)

            return NodeBuilder(node)

        def commentClass(self, data):
            return NodeBuilder(self.dom.createComment(data))

        def fragmentClass(self):
            return NodeBuilder(self.dom.createDocumentFragment())

        def appendChild(self, node):
            self.dom.appendChild(node.element)

        def testSerializer(self, element):
            return testSerializer(element)

        def getDocument(self):
            return self.dom

        def getFragment(self):
            return _base.TreeBuilder.getFragment(self).element

        def insertText(self, data, parent=None):
            data = data
            if parent != self:
                _base.TreeBuilder.insertText(self, data, parent)
            else:
                # HACK: allow text nodes as children of the document node
                if hasattr(self.dom, '_child_node_types'):
                    if not Node.TEXT_NODE in self.dom._child_node_types:
                        self.dom._child_node_types = list(self.dom._child_node_types)
                        self.dom._child_node_types.append(Node.TEXT_NODE)
                self.dom.appendChild(self.dom.createTextNode(data))

        implementation = DomImplementation
        name = None

    def testSerializer(element):
        # Render the tree in the html5lib test-suite "| <tag>" text format.
        element.normalize()
        rv = []

        def serializeElement(element, indent=0):
            if element.nodeType == Node.DOCUMENT_TYPE_NODE:
                if element.name:
                    if element.publicId or element.systemId:
                        publicId = element.publicId or ""
                        systemId = element.systemId or ""
                        rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
                                  (' ' * indent, element.name, publicId, systemId))
                    else:
                        rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, element.name))
                else:
                    rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
            elif element.nodeType == Node.DOCUMENT_NODE:
                rv.append("#document")
            elif element.nodeType == Node.DOCUMENT_FRAGMENT_NODE:
                rv.append("#document-fragment")
            elif element.nodeType == Node.COMMENT_NODE:
                rv.append("|%s<!-- %s -->" % (' ' * indent, element.nodeValue))
            elif element.nodeType == Node.TEXT_NODE:
                rv.append("|%s\"%s\"" % (' ' * indent, element.nodeValue))
            else:
                if (hasattr(element, "namespaceURI") and
                        element.namespaceURI is not None):
                    name = "%s %s" % (constants.prefixes[element.namespaceURI],
                                      element.nodeName)
                else:
                    name = element.nodeName
                rv.append("|%s<%s>" % (' ' * indent, name))
                if element.hasAttributes():
                    attributes = []
                    for i in range(len(element.attributes)):
                        attr = element.attributes.item(i)
                        name = attr.nodeName
                        value = attr.value
                        ns = attr.namespaceURI
                        if ns:
                            name = "%s %s" % (constants.prefixes[ns], attr.localName)
                        else:
                            name = attr.nodeName
                        attributes.append((name, value))

                    # Sorted for deterministic test output.
                    for name, value in sorted(attributes):
                        rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
            indent += 2
            for child in element.childNodes:
                serializeElement(child, indent)
        serializeElement(element, 0)

        return "\n".join(rv)

    return locals()
# Public entry point: getDomModule(DomImplementation) returns the (cached)
# treebuilder module generated by getDomBuilder for that implementation.
getDomModule = moduleFactoryFactory(getDomBuilder)
| mit |
n9code/pyerarchy | tests/core/test_modulenode.py | 1 | 1194 | import os
import shutil
from unittest import TestCase
from nose.tools import eq_, ok_
from pyerarchy.ex import BadValueError
from pyerarchy.modulenode import ModuleNode, ThisModuleNode
__author__ = 'bagrat'
class ModuleNodeTest(TestCase):
    """Exercise ModuleNode/ThisModuleNode against a scratch on-disk module."""

    test_module_name = 'testmodule'
    module_dir = os.path.join(os.path.dirname(__file__), '../../' + test_module_name)

    @classmethod
    def setUpClass(cls):
        super(ModuleNodeTest, cls).setUpClass()
        # Create a throwaway importable package next to the repository root.
        os.makedirs(cls.module_dir)
        with open(os.path.join(cls.module_dir, '__init__.py'), 'w') as fh:
            fh.write('# nothing')

    @classmethod
    def tearDownClass(cls):
        super(ModuleNodeTest, cls).tearDownClass()
        # Remove the scratch package created in setUpClass.
        shutil.rmtree(cls.module_dir)

    def test_module(self):
        listing = ModuleNode(self.test_module_name).ls()
        eq_(len(listing), 2)
        ok_('__init__.py' in listing)
        ok_('__init__.pyc' in listing)

        # A non-string argument must be rejected with BadValueError.
        caught = False
        try:
            ModuleNode(1)
        except BadValueError:
            caught = True
        ok_(caught)

    def test_this_module(self):
        # ThisModuleNode resolves to the file of its caller.
        eq_(ThisModuleNode()._pyerarchy_path, __file__)
| mit |
RDXT/django.js | djangojs/management/commands/js.py | 3 | 3209 | # -*- coding: utf-8 -*-
'''
Main access point for all JS commands
'''
import argparse
import logging
import sys
from django.core.management.base import BaseCommand, handle_default_options
try:
from django.core.management.base import OutputWrapper
except:
pass
from djangojs.management.commands.js_localize import LocalizeParser
from djangojs.management.commands.js_launcher import LauncherParser
from djangojs.management.commands.js_bower import BowerParser
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Dispatch ``manage.py js <subcommand>`` to the registered subparsers."""

    help = 'Handle javascript operations'
    args = 'command'

    requires_model_validation = False
    can_import_settings = True

    # Each entry registers itself on the argparse subparsers in create_parser.
    subparsers = (
        BowerParser,
        LauncherParser,
        LocalizeParser,
    )

    def usage(self, subcommand):
        """Return the formatted usage string for *subcommand*."""
        return self.create_parser('', subcommand).format_usage()

    def run_from_argv(self, argv):
        """
        Set up any environment changes requested (e.g., Python path
        and Django settings), then run this command. If the
        command raises a ``CommandError``, intercept it and print it sensibly
        to stderr.
        """
        parser = self.create_parser(argv[0], argv[1])
        args = parser.parse_args(argv[2:])
        handle_default_options(args)
        try:
            self.execute(args)
        except Exception as e:
            # self.stderr is not guaranteed to be set here
            try:
                fallback_stderr = OutputWrapper(sys.stderr, self.style.ERROR)
            except Exception:
                # OutputWrapper may be unavailable (older Django); fall back.
                fallback_stderr = self.stdout
            stderr = getattr(self, 'stderr', fallback_stderr)
            if args.traceback:
                # Imported locally: the module never imported traceback, so
                # --traceback previously raised a NameError instead of
                # printing the stack trace.
                import traceback
                stderr.write(traceback.format_exc())
            else:
                stderr.write('%s: %s' % (e.__class__.__name__, e))
            sys.exit(1)

    def create_parser(self, prog_name, subcommand):
        """
        Create and return the ``ArgumentParser`` which will be used to
        parse the arguments to this command.
        """
        parser = argparse.ArgumentParser(prog='%s %s' % (prog_name, subcommand), description=self.help)
        parser.add_argument('-v', '--verbosity', action='store', default=1, type=int, choices=range(4),
                            help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output')
        parser.add_argument('--settings',
                            help='The Python path to a settings module, e.g. "myproject.settings.main". '
                            'If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.')
        parser.add_argument('--pythonpath',
                            help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".')
        parser.add_argument('--traceback', action='store_true', help='Print traceback on exception')
        subparsers = parser.add_subparsers(description='JavaScript command to execute')
        # Let each subparser class attach itself (and its handler func).
        for subparser in self.subparsers:
            subparser(self, subparsers)
        return parser

    def print_help(self, prog_name, subcommand):
        """Print the full argparse help for this command."""
        parser = self.create_parser(prog_name, subcommand)
        parser.print_help()

    def handle(self, args):
        # argparse stored the chosen subcommand's handler on args.func.
        args.func(args)
| lgpl-3.0 |
maartenbreddels/vaex | packages/vaex-ui/vaex/ui/variables.py | 1 | 14562 | import sys
import vaex
import vaex.ui.qt as dialogs
from vaex.ui.qt import *
import logging
from vaex.ui.icons import iconfile
logger = logging.getLogger("vaex.ui.variables")
class VariablesTableModel(QtCore.QAbstractTableModel):
    """Qt table model exposing a dataset's variables as rows, with
    Name / Expression / Value columns plus a leading row-number column."""

    def __init__(self, dataset, parent=None, *args):
        """
        :type dataset: Dataset
        """
        QtCore.QAbstractTableModel.__init__(self, parent, *args)
        self.dataset = dataset
        self.row_count_start = 1
        # self.table_column_names = ["Type", "Name", "Value"]
        self.table_column_names = ["Name", "Expression", "Value"]
        # self.show_virtual = True

    def get_dataset_column_names(self):
        # Variable names, one per table row.
        return list(self.dataset.variables.keys())

    def rowCount(self, parent):
        column_names = self.get_dataset_column_names()
        return len(column_names)

    def columnCount(self, parent):
        # +1 for the leading row-number column.
        return len(self.table_column_names) + 1

    def setData(self, index, value, role=QtCore.Qt.EditRole):
        # Only the "Expression" column is editable; the new expression is
        # validated by evaluating it before being stored on the dataset.
        row = index.row()
        column_index = index.column() - 1
        variable_name = self.get_dataset_column_names()[row]
        property = self.table_column_names[column_index]
        # print index, value, role
        # if property == "Visible":
        #     logger.debug("set visibility to: %s", value == QtCore.Qt.Checked)
        # if property == "Description":
        #     self.dataset.descriptions[column_name] = value
        # if property == "UCD":
        #     self.dataset.ucds[column_name] = value
        #     # TODO: move to dataset class
        #     self.dataset.signal_column_changed.emit(self.dataset, column_name, "change")
        # if property == "Units":
        #     if value:
        #         try:
        #             unit = astropy.units.Unit(value)
        #             logger.debug("setting unit to: %s (%s)" % (value, unit))
        #             self.dataset.units[column_name] = unit
        #             # TODO: move to dataset class
        #             self.dataset.signal_column_changed.emit(self.dataset, column_name, "change")
        #         except Exception, e:
        #             dialogs.dialog_error(None, "Cannot parse unit", "Cannot parse unit:\n %s" % e)
        #     else:
        #         if column_name in self.dataset.units:
        #             del self.dataset.units[column_name]
        if property == "Expression":
            try:
                # NOTE(review): eval of a user-typed expression; presumably
                # acceptable for a local desktop UI — confirm.
                test = eval(value, vaex.dataset.expression_namespace, self.dataset.variables)
                self.dataset.add_variable(variable_name, value)
            except Exception as e:
                dialogs.dialog_error(None, "Invalid expression", "Invalid expression: %s" % e)
            # although it may not be a valid expression, still set it to the user can edit it
            # self.dataset.virtual_columns[column_name] = value
            self.dataset.write_meta()
        return True

    def data(self, index, role=QtCore.Qt.DisplayRole):
        """Return display/edit data for a cell; column 0 is the row number."""
        # row_offset = self.get_row_offset()
        # print index, role
        if not index.isValid():
            return None
        if 0:  # role == QtCore.Qt.CheckStateRole and index.column() == 0:
            return QtCore.Qt.Checked
        elif role not in [QtCore.Qt.DisplayRole, QtCore.Qt.EditRole]:
            return None
        if index.column() == 0:
            # return "{:,}".format(index.row()+self.row_count_start + row_offset)
            return str(index.row() + self.row_count_start)
        else:
            row = index.row()
            column_index = index.column() - 1
            variable_name = self.get_dataset_column_names()[row]
            property = self.table_column_names[column_index]
            # column = None
            # if column_name in self.dataset.get_column_names():
            #     column = self.dataset.columns[column_name]
            # if property == "Visible":
            #     return QtCore.Qt.Checked
            if property == "Name":
                return variable_name
            elif property == "Type":
                # NOTE(review): this branch (and Units/UCD/Description below)
                # references an undefined ``column_name``; "Type" is not in
                # table_column_names, so these branches look unreachable —
                # confirm before relying on them.
                if column_name in self.dataset.get_column_names():
                    return str(self.dataset.data_type(column_name))
                else:
                    return "virtual column"
            elif property == "Units":
                unit = self.dataset.unit(column_name)
                return str(unit) if unit else ""
            elif property == "UCD":
                return self.dataset.ucds.get(column_name, "")
            elif property == "Description":
                return self.dataset.descriptions.get(column_name, "")
            elif property == "Value":
                # return str(self.dataset.variables[variable_name])
                try:
                    return str(self.dataset.evaluate_variable(variable_name))
                except Exception as e:
                    # dialogs.dialog_error(None, "Invalid expression", "Invalid expression: %s" % e)
                    return "Error in expression: %s" % e
            elif property == "Expression":
                return str(self.dataset.variables[variable_name])

    def flags(self, index):
        """Only the Expression column is editable; e/pi rows stay selectable."""
        row = index.row()
        column_index = index.column() - 1
        if column_index == 0:
            return QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
        column_name = self.get_dataset_column_names()[row]
        property = self.table_column_names[column_index]
        column = None
        flags = QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled
        if property in ["Expression", "Description", "Units", "UCD"]:
            flags |= QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsSelectable
        # if column_name in self.dataset.virtual_columns:
        else:
            flags |= QtCore.Qt.ItemIsSelectable
        return flags

    def headerData(self, index, orientation, role):
        # Horizontal headers come from table_column_names; column 0 is blank.
        # row_offset = self.get_row_offset()
        if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
            if index == 0:
                return ""
            else:
                return self.table_column_names[index - 1]
        # if orientation == QtCore.Qt.Vertical and role == QtCore.Qt.DisplayRole:
        #     return str(index+self.row_count_start + row_offset)
        return None

    def insertRows(self, *args):
        # Rows are managed through the dataset; nothing to do here.
        return True
class VariablesTable(QtGui.QWidget):
    """Widget showing a dataset's variables in a table, with toolbar/menu
    actions to add and remove variables."""

    def set_dataset(self, dataset):
        """Attach *dataset*: swap the table model and (re)wire its
        variable-changed signal to refresh the view."""
        if self.event_handler:
            # Detach from the previous dataset's signal first.
            self.dataset.signal_variable_changed.disconnect(self.event_handler)
        self.dataset = dataset
        self.tableModel = VariablesTableModel(self.dataset, self)
        self.tableView.setModel(self.tableModel)
        self.tableView.selectionModel().currentChanged.connect(self.onCurrentChanged)
        self.tableView.resizeColumnsToContents()
        # self.tableView.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch);
        self.tableView.horizontalHeader().setStretchLastSection(True)
        self.event_handler = self.dataset.signal_variable_changed.connect(self.on_variable_change)

    def on_variable_change(self, *args):
        # Any variable change invalidates the whole model.
        self.reset()
        pass

    def __init__(self, parent, menu=None):
        """Build the table view, toolbar and add/remove actions; optionally
        mirror the actions onto *menu*."""
        super(VariablesTable, self).__init__(parent)
        # dataset.add_virtual_column("xp", "x")
        self.event_handler = None
        self.resize(700, 500)
        self.tableView = QtGui.QTableView()
        # self.tableView.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows);
        # self.header = self.dataset.column_names
        # self.tableView.pressed.connect(self.onSelectRow)
        if qt_mayor == 5:
            self.tableView.verticalHeader().setSectionResizeMode(QtGui.QHeaderView.Interactive)
        else:
            self.tableView.verticalHeader().setResizeMode(QtGui.QHeaderView.Interactive)
        # self.unit_delegate = vaex.ui.completer.UnitDelegate(self.tableView)
        # self.ucd_delegate = vaex.ui.completer.UCDDelegate(self.tableView)
        # self.tableView.setItemDelegateForColumn(4, self.unit_delegate)
        # self.tableView.setItemDelegateForColumn(5, self.ucd_delegate)

        self.toolbar = QtGui.QToolBar(self)
        # self.description = QtGui.QTextEdit(self.dataset.description, self)
        # self.description.setFixedHeight(100)
        # self.description.textChanged.connect(self.onTextChanged)

        # self.action_group_add = QtGui.QActionGroup(self)

        self.action_add = QtGui.QAction(QtGui.QIcon(iconfile('table-insert-column')), 'Add variable', self)
        self.action_remove = QtGui.QAction(QtGui.QIcon(iconfile('table-delete-column')), 'Remove variable', self)
        self.action_remove.setEnabled(False)
        self.action_add.setShortcut("Ctrl+Alt++")
        self.action_remove.setShortcut("Ctrl+Alt+-")
        self.toolbar.addAction(self.action_add)
        self.toolbar.addAction(self.action_remove)

        # Disabled legacy menu of coordinate-transform shortcuts.
        if 0:
            self.action_add_menu = QtGui.QMenu()
            self.action_add.setMenu(self.action_add_menu)

            self.action_celestial = QtGui.QAction(QtGui.QIcon(iconfile('table-insert-column')), 'Equatorial to galactic', self)
            self.action_celestial.setShortcut("Ctrl+G")
            self.action_add.menu().addAction(self.action_celestial)
            self.action_celestial.triggered.connect(lambda *args: add_celestial(self, self.dataset))

            self.action_car_to_gal = QtGui.QAction(QtGui.QIcon(iconfile('table-insert-column')), 'Cartesian to galactic', self)
            self.action_car_to_gal.setShortcut("Ctrl+S")
            self.action_add.menu().addAction(self.action_car_to_gal)
            self.action_car_to_gal.triggered.connect(lambda *args: add_sky(self, self.dataset, True))

            self.action_par_to_dis = QtGui.QAction(QtGui.QIcon(iconfile('table-insert-column')), 'Parallax to distance', self)
            self.action_par_to_dis.setShortcut("Ctrl+D")
            self.action_add.menu().addAction(self.action_par_to_dis)
            self.action_par_to_dis.triggered.connect(lambda *args: add_distance(self, self.dataset))

            self.action_gal_to_car = QtGui.QAction(QtGui.QIcon(iconfile('table-insert-column')), 'Galactic to cartesian', self)
            self.action_gal_to_car.setShortcut("Ctrl+C")
            self.action_add.menu().addAction(self.action_gal_to_car)
            self.action_gal_to_car.triggered.connect(lambda *args: add_cartesian(self, self.dataset, True))

            self.action_gal_to_aitoff = QtGui.QAction(QtGui.QIcon(iconfile('table-insert-column')), 'Galactic to Aitoff projection', self)
            self.action_gal_to_aitoff.setShortcut("Ctrl+A")
            self.action_add.menu().addAction(self.action_gal_to_aitoff)
            self.action_gal_to_aitoff.triggered.connect(lambda *args: add_aitoff(self, self.dataset, True))

        # action_group_add.add(self.action_add)

        self.action_add.triggered.connect(self.onAdd)
        self.action_remove.triggered.connect(self.onRemove)

        if menu:
            menu.addAction(self.action_add)
            menu.addAction(self.action_remove)
        # self.tableView.pressed.connect(self.onSelectRow)
        # self.tableView.activated.connect(self.onActivateRow)
        # self.tableView.selectionModel().currentChanged.connect(self.onCurrentChanged)

        self.boxlayout = QtGui.QVBoxLayout(self)
        self.boxlayout.addWidget(self.toolbar, 0)
        # self.boxlayout.addWidget(self.description, 0)
        self.boxlayout.addWidget(self.tableView, 1)
        self.setLayout(self.boxlayout)
        self.tableView.resizeColumnsToContents()
        # self.tableView.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch);
        self.tableView.horizontalHeader().setStretchLastSection(True)

    def onTextChanged(self, *args):
        # Persist the (currently disabled) description editor's contents.
        self.dataset.description = self.description.toPlainText()
        logger.debug("setting description to: %s", self.dataset.description)
        self.dataset.write_meta()

    def onSelectRow(self, model):
        row_index = model.row()
        logger.debug("row index selected %d" % row_index)

    def onCurrentChanged(self, model, previous):
        # row_index = model.row()
        # logger.debug("row index activated %d" % row_index)
        self.check_remove()

    def check_remove(self):
        # The built-in constants e and pi must never be removable.
        model = self.tableView.selectionModel().currentIndex()
        column_names = self.tableModel.get_dataset_column_names()
        column_name = column_names[model.row()]
        self.action_remove.setEnabled(column_name not in ["e", "pi"])

    def onRemove(self, _=None):
        """Delete the currently selected variable from the dataset."""
        model = self.tableView.selectionModel().currentIndex()
        column_names = self.tableModel.get_dataset_column_names()
        column_name = column_names[model.row()]
        logger.debug("removing %s", column_name)
        # del self.dataset.virtual_columns[column_name]
        self.dataset.delete_variable(column_name)
        # self.reset()
        self.check_remove()

    def reset(self):
        # Full model reset so the view repaints every cell.
        self.tableModel.beginResetModel()
        self.tableView.reset()
        self.tableModel.endResetModel()

    def onAdd(self, _=None):
        """Prompt for a new variable (name + expression) and add it."""
        dialog = dialogs.QuickDialog(self, title="Add variable")
        dialog.add_text("name", "Variable name", make_unique("var", self.dataset))
        dialog.add_variable_expression("expression", "Expression", "e**-1+sin(pi)", self.dataset)
        # dialog.add_unit("unit", "Expression", "sqrt(%s)" % self.dataset.get_column_names()[0], self.dataset)
        # dialog.add_ucd("ucd", "UCD", "")
        # dialog.add_text("description", "Description", placeholder="Enter a description")
        values = dialog.get()
        if values:
            # if values["description"]:
            #     self.dataset.descriptions[values["name"]] = values["description"]
            # if values["ucd"]:
            #     self.dataset.ucds[values["name"]] = values["ucd"]
            self.dataset.add_variable(values["name"], values["expression"])
def make_unique(name, dataset):
    """Return *name*, suffixed with "_2", "_3", ... as needed so it does not
    collide with any existing (virtual) column name of *dataset*.

    :param name: desired base name
    :param dataset: object providing ``get_column_names(virtual=True)``
    :return: a column name not currently used by *dataset*
    """
    original_name = name
    number = 2
    while name in dataset.get_column_names(virtual=True):
        name = original_name + "_" + str(number)
        number += 1
    return name
def main(argv=sys.argv):
    """Stand-alone entry point: open the dataset named on the command line
    and show its variables table in a Qt application."""
    ds = vaex.open(argv[1])
    app = QtGui.QApplication(argv)
    widget = VariablesTable(None)
    widget.set_dataset(ds)
    widget.show()
    widget.raise_()
    # Block until the Qt event loop exits, then propagate its exit code.
    sys.exit(app.exec_())
if __name__ == "__main__":
    vaex.set_log_level_debug()
    main()

# NOTE(review): the statements below reference names (``dataset``,
# ``expr_matrix``) that are not defined at this scope — as written they
# would raise NameError. This looks like leftover scratch code; confirm
# intent and remove.
for i in range(3):
    for j in range(3):
        dataset.add_virtual_column("bla_%s%s" % (i, j), expr_matrix[i, j])
dataset.add_virtual_columns_matrix3d("vx", "vy", "vz", "mu_alpha", "mu_delta", "vr", "bla")
| mit |
oniwan/GCI | sele_chrome.py | 1 | 2504 | # -*- coding: utf-8 -*-
# import selenium
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec
import time
from selenium.webdriver.support.ui import Select
"""
Usage:
python sele_chrome.py
"""
#Config Sele_Chrome at DAIWA
"""
Do not change this config.
If you use this code for NIKKO, please copy this file.
"""
url = "https://lzone.daiwa.co.jp/lzone/"
username = "shinichiro.ueno@gci.jp"
password = "gcigci"
ticker = 3382
period_from = "2011/04/01"
period_to = "2017/04/28"
file_type = 2
#Set WebDriver Chrome
driver = webdriver.Chrome(executable_path = '/home/gci/Downloads/chromedriver') #PATH
#wait = WebDriverWait(driver,10) #Prototype
#Run
driver.get(url)
print driver.current_url
time.sleep(5)
driver.find_element_by_id('ticker').send_keys(Keys.ENTER)
driver.find_element_by_id('input-text').send_keys(ticker)
driver.find_element_by_id('input-btn-se').send_keys(Keys.ENTER)
time.sleep(3)
driver.save_screenshot('DAIWA_test1.png')
print driver.current_url
driver.save_screenshot('DAIWA_test2.png')
driver.find_element_by_css_selector('input[name="memberId"]').send_keys(username)
driver.find_element_by_css_selector('input[name="passWord"]').send_keys(password)
driver.save_screenshot('DAIWA_test3.png')
driver.find_element_by_id('image-btn_ok').send_keys(Keys.ENTER)
driver.save_screenshot('DAIWA_test4.png')
time.sleep(3)
driver.find_element_by_id('ticker').send_keys(Keys.ENTER)
driver.find_element_by_id('input-text').send_keys(ticker)
driver.find_element_by_id('input-btn-ad').send_keys(Keys.ENTER)
driver.save_screenshot('DAIWA_test5.png')
time.sleep(3)
driver.save_screenshot('DAIWA_test6.png')
elements = driver.find_elements_by_css_selector("input[type ='radio'][value='equity']")
for element in elements:
element.click()
driver.find_element_by_name('model.tickerCd').send_keys(ticker)
#driver.find_element_by_id('model_periodFrom').send_keys(Keys.CONTROL,"a")
driver.find_element_by_id('date1').send_keys(10*Keys.BACKSPACE)
driver.find_element_by_id('date1').send_keys(period_from)
select = Select(driver.find_element_by_name('model.docType'))
select.select_by_value("2")
time.sleep(2)
#all_options = element.find_elements_by_tag_name("option")
#all_options[1].click()
driver.find_element_by_id('image-btn_search').send_keys(Keys.ENTER)
time.sleep(10)
driver.save_screenshot('DAIWA_test7.png')
driver.close()
| mit |
Sancus/bedrock | bedrock/utils/git.py | 8 | 5281 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function, unicode_literals
import os
from hashlib import sha256
from shutil import rmtree
from subprocess import check_output, STDOUT
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from pathlib2 import Path
from bedrock.utils.models import GitRepoState
GIT = getattr(settings, 'GIT_BIN', 'git')
class GitRepo(object):
    """Thin wrapper around the ``git`` CLI for one local checkout, with the
    latest-seen commit tracked in the database (GitRepoState)."""

    def __init__(self, path, remote_url=None, remote_name=None, branch_name='master'):
        # Local checkout directory; remote_url/remote_name/branch identify
        # what we track. The DB key is a hash of all four so distinct
        # (path, remote, branch) combinations get distinct state rows.
        self.path = Path(path)
        self.path_str = str(self.path)
        self.remote_url = remote_url
        self.branch_name = branch_name
        if not remote_name:
            remote_name = 'bedrock-dev' if settings.DEV else 'bedrock-prod'
        self.remote_name = remote_name
        db_latest_key = '%s:%s:%s:%s' % (self.path_str, remote_url or '',
                                         remote_name, branch_name)
        self.db_latest_key = sha256(db_latest_key).hexdigest()
    def git(self, *args):
        """Run a git command against the current repo"""
        # chdir into the repo for the duration of the call; always restore
        # the previous working directory, even on failure.
        curdir = os.getcwd()
        try:
            os.chdir(self.path_str)
            output = check_output((GIT,) + args, stderr=STDOUT)
        finally:
            os.chdir(curdir)
        return output.strip()
    @property
    def full_branch_name(self):
        """Full branch name with remote (e.g. origin/master)"""
        return '{}/{}'.format(self.remote_name, self.branch_name)
    @property
    def current_hash(self):
        """The git revision ID (hash) of the current HEAD or None if no repo"""
        try:
            return self.git('rev-parse', 'HEAD')
        except OSError:
            return None
    @property
    def remote_names(self):
        """Return a list of the remote names in the repo or None if no repo"""
        try:
            return self.git('remote').split()
        except OSError:
            return None
    def has_remote(self):
        """Return True if the repo has a remote by the correct name"""
        # NOTE(review): remote_names may be None when there is no repo,
        # in which case the ``in`` test would raise TypeError — confirm
        # callers only use this on an existing checkout.
        return self.remote_name in self.remote_names
    def add_remote(self):
        """Add the remote to the git repo from the init args"""
        if not self.remote_url:
            raise RuntimeError('remote_url required to add a remote')
        self.git('remote', 'add', self.remote_name, self.remote_url)
    def diff(self, start_hash, end_hash):
        """Return a 2 tuple: (modified files, deleted files)"""
        diff_out = StringIO(self.git('diff', '--name-status', start_hash, end_hash))
        modified = set()
        removed = set()
        for line in diff_out:
            parts = line.split()
            # delete
            if parts[0] == 'D':
                removed.add(parts[1])
            # rename
            elif parts[0][0] == 'R':
                # renames are reported as "old -> new": old is removed,
                # new is modified
                removed.add(parts[1])
                modified.add(parts[2])
            # everything else
            else:
                # some types (like copy) have two file entries
                for part in parts[1:]:
                    modified.add(part)
        return modified, removed
    def clone(self):
        """Clone the repo specified in the initial arguments"""
        if not self.remote_url:
            raise RuntimeError('remote_url required to clone')
        self.path.mkdir(parents=True, exist_ok=True)
        # Shallow clone of just the tracked branch, into the existing dir.
        self.git('clone', '--origin', self.remote_name, '--depth', '1',
                 '--branch', self.branch_name, self.remote_url, '.')
    def pull(self):
        """Update the repo to the latest of the remote and branch
        Return the previous hash and the new hash."""
        if not self.has_remote():
            self.add_remote()
        old_hash = self.current_hash
        self.git('fetch', self.remote_name)
        # Force checkout discards any local modifications.
        self.git('checkout', '-f', self.full_branch_name)
        return old_hash, self.current_hash
    def update(self):
        """Updates a repo, cloning if necessary.
        :return a tuple of lists of modified and deleted files if updated, None if cloned
        """
        if self.path.is_dir():
            if not self.path.joinpath('.git').is_dir():
                # Directory exists but is not a repo: wipe and re-clone.
                rmtree(self.path_str, ignore_errors=True)
                self.clone()
            else:
                return self.pull()
        else:
            self.clone()

        return None, None
    def reset(self, new_head):
        """Hard-reset the working tree to *new_head* (discards local changes)."""
        self.git('reset', '--hard', new_head)
    def get_db_latest(self):
        """Return the last recorded ref for this repo, or None if unrecorded."""
        try:
            return GitRepoState.objects.get(repo_id=self.db_latest_key).latest_ref
        except GitRepoState.DoesNotExist:
            return None
    def has_changes(self):
        """True when the checkout's HEAD differs from the recorded DB ref."""
        return self.current_hash != self.get_db_latest()
    def set_db_latest(self, latest_ref=None):
        """Record *latest_ref* (defaults to current HEAD) as the DB state."""
        latest_ref = latest_ref or self.current_hash
        rs, created = GitRepoState.objects.get_or_create(repo_id=self.db_latest_key,
                                                         defaults={'latest_ref': latest_ref})
        if not created:
            rs.latest_ref = latest_ref
            rs.save()
| mpl-2.0 |
ZTH1970/alcide | scripts/check_double.py | 1 | 4980 | # -*- coding: utf-8 -*-
import sys
import alcide.settings
import django.core.management
from datetime import datetime
import csv
django.core.management.setup_environ(alcide.settings)
from alcide.dossiers.models import PatientRecord
from alcide.actes.models import Act
from django.db import transaction
@transaction.commit_manually
def main():
    """Find and merge duplicate Act rows (same patient/date/time/type).

    Python 2 / Django 1.x maintenance script. Two passes:
      1. scan all patients and collect querysets of duplicated acts;
      2. for each duplicate group, pick one act to keep (preferring
         locked/validated ones), log acts whose flags should be copied
         to a CSV, and delete the rest.

    NOTE(review): the final transaction.rollback() means this run is a
    dry run; switch to commit() to apply deletions — confirm intent.
    """
    print datetime.now()
    f = open('./scripts/actes_to_modify.csv', 'wb')
    writer = csv.writer(f, delimiter=';', quotechar='|', quoting=csv.QUOTE_MINIMAL)
    writer.writerow(['id_act_keep', 'locked', 'billed', 'lost', 'switch', 'pause', 'comment'])
    same_acts_set = []
    # seen is a list of (patient, date, time, type) keys already examined.
    # NOTE(review): linear membership test makes this pass O(n^2).
    seen = []
    i = 0
    total = PatientRecord.objects.all().count()
    for patient in PatientRecord.objects.all():
        i += 1
        acts = Act.objects.filter(patient=patient)
        for act in acts:
            if not (patient.id, act.date, act.time, act.act_type) in seen:
                seen.append((patient.id, act.date, act.time, act.act_type))
                same_acts = Act.objects.filter(patient=patient, date=act.date, time=act.time, act_type=act.act_type)
                nb = same_acts.count()
                if nb > 1:
                    # More than one act shares the key: a duplicate group.
                    same_acts_set.append(same_acts)
        if not i % 100:
            # Progress bar for the scan pass.
            percent = int(round((float(i) / float(total)) * 100))
            out = '\r %20s [%s%s] %3d %%' % ("Recherche des doublons : ", '=' * percent, ' ' * (100 - percent), percent)
            sys.stdout.write(out)
            sys.stdout.flush()
        # Ad-hoc debug output for one specific patient.
        if patient.last_name == 'Mazoyer' and patient.first_name == 'Manon':
            for a in same_acts_set[len(same_acts_set)-1]:
                print a
                print a.get_state()
                print a.is_billed
    total = len(same_acts_set)
    i = 0
    for same_acts in same_acts_set:
        i += 1
        # Look for a parent event among the duplicates (keeps the last one
        # found if several acts have different parents).
        parent_event_id = None
        for a in same_acts:
            if a.parent_event:
                # if parent_event_id and parent_event_id != a.parent_event.id:
                #     print "Il y a plusieurs evenement parent, bizarre"
                parent_event_id = a.parent_event.id
        keep = None
        should = None
        # First preference: locked act in state VALIDE ("should"), then any
        # locked act not in NON_VALIDE ("keep").
        for a in same_acts:
            state = a.get_state()
            if state and state.state_name == 'VALIDE' and a.validation_locked == True:
                should = a
                break
        for a in same_acts:
            state = a.get_state()
            if state and state.state_name != 'NON_VALIDE' and a.validation_locked == True:
                keep = a
                break
        if should and keep and should != keep:
            # The kept act differs from the one that should have been
            # validated: record the flags to copy over.
            writer.writerow([str(keep.id), str(should.validation_locked), str(should.is_billed), str(should.is_lost), str(should.switch_billable), str(should.pause), str(should.comment)])
            print "%s aurait du etre valide, facture: %s" % (keep, str(keep.is_billed))
        if not keep:
            # Fallback: any locked act, then the same state-based search
            # without the locked requirement, then simply the first act.
            lockeds = same_acts.filter(validation_locked=True)
            if lockeds.count() >= 1:
                keep = lockeds[0]
            else:
                for a in same_acts:
                    state = a.get_state()
                    if state and state.state_name == 'VALIDE':
                        should = a
                        break
                for a in same_acts:
                    state = a.get_state()
                    if state and state.state_name != 'NON_VALIDE':
                        keep = a
                        break
                if should and keep and should != keep:
                    writer.writerow([str(keep.id), str(should.validation_locked), str(should.is_billed), str(should.is_lost), str(should.switch_billable), str(should.pause), str(should.comment)])
                    print "Non verr, %s aurait du etre valide, facture: %s" % (keep, str(keep.is_billed))
                if not keep:
                    keep = same_acts[0]
        # Re-attach the surviving act to the group's parent event if it
        # does not have one.
        if parent_event_id and not keep.parent_event:
            keep.parent_event_id = parent_event_id
            keep.save()
        # Delete every duplicate except the kept act, cleaning up their
        # non-recurring parent events along the way.
        acts_to_remove = same_acts.exclude(pk=keep.pk)
        for act in acts_to_remove:
            if act.parent_event:
                if act.parent_event.recurrence_periodicity:
                    pass#print "attention, le parent de %d est un event periodique." % act.id
                elif act.parent_event != keep.parent_event:
                    act.parent_event.delete()
            if act.is_billed:
                print "Suppresion de l'acte facture %s" % act
            act.delete()
        if not i % 100:
            # Progress bar for the cleanup pass.
            percent = int(round((float(i) / float(total)) * 100))
            out = '\r %20s [%s%s] %3d %%' % ("Traitement des doublons : ", '=' * percent, ' ' * (100 - percent), percent)
            sys.stdout.write(out)
            sys.stdout.flush()
    transaction.rollback()#commit()
    print "Nb de doublons traites: %d" % total
    print datetime.now()


if __name__ == "__main__":
    main()
| agpl-3.0 |
ksh/gpitrainingv2 | controllers/sites.py | 4 | 49498 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: psimakov@google.com (Pavel Simakov)
"""Enables hosting of multiple courses in one application instance.
We used to allow hosting of only one course in one Google App Engine instance.
Now we allow hosting of many courses simultaneously. To configure multiple
courses one must set an environment variable in app.yaml file, for example:
...
env_variables:
GCB_COURSES_CONFIG: 'course:/coursea:/courses/a, course:/courseb:/courses/b'
...
This variable holds a ',' or newline separated list of course entries. Each
course entry has four ':' separated parts: the word 'course', the URL prefix,
and the file system location for the site files. If the third part is empty,
the course assets are stored in a datastore instead of the file system. The
fourth, optional part, is the name of the course namespace.
The URL prefix specifies, how will the course URL appear in the browser. In the
example above, the courses will be mapped to http://www.example.com[/coursea]
and http://www.example.com[/courseb].
The file system location of the files specifies, which files to serve for the
course. For each course we expect three sub-folders: 'assets', 'views', and
'data'. The 'data' folder must contain the CSV files that define the course
layout, the 'assets' and 'views' should contain the course specific files and
jinja2 templates respectively. In the example above, the course files are
expected to be placed into folders '/courses/a' and '/courses/b' of your Google
App Engine installation respectively. If this value is absent a datastore is
used to store course assets, not the file system.
By default Course Builder handles static '/assets' files using a custom
handler. You may choose to handle '/assets' files of your course as 'static'
files using Google App Engine handler. You can do so by creating a new static
file handler entry in your app.yaml and placing it before our main course
handler.
If you have an existing course developed using Course Builder and do NOT want
to host multiple courses, there is nothing for you to do. A following default
rule is silently created for you:
...
env_variables:
GCB_COURSES_CONFIG: 'course:/:/'
...
It sets the '/' as the base URL for the course, uses root folder of your Google
App Engine installation to look for course /assets/..., /data/..., and
/views/... and uses blank datastore and memcache namespace. All in all,
everything behaves just as it did in the prior version of Course Builder when
only one course was supported.
If you have existing course developed using Course Builder and DO want to start
hosting multiple courses here are the steps. First, define the courses
configuration environment variable as described above. Second, copy existing
'assets', 'data' and 'views' folders of your course into the new location, for
example '/courses/mycourse'.
If you have an existing course built on a previous version of Course Builder
and you now decided to use new URL prefix, which is not '/', you will need
to update your old course html template and JavaScript files. You typically
would have to make two modifications. First, replace all absolute URLs with
the relative URLs. For example, if you had <a href='/forum'>..</a>, you will
need to replace it with <a href='forum'>..</a>. Second, you need to add <base>
tag at the top of you course 'base.html' and 'base_registration.html' files,
like this:
...
<head>
<base href="{{ gcb_course_base }}" />
...
Current Course Builder release already has all these modifications.
Note, that each 'course' runs in a separate Google App Engine namespace. The
name of the namespace is derived from the course files location. In the example
above, the course files are stored in the folder '/courses/a', which will be
mapped to the namespace name 'gcb-courses-a'. The namespaces can't contain '/',
replace them with '-' and prefix the namespace with the project abbreviation
'gcb'. Remember these namespace names, you will need to use them if/when
accessing server administration panel, viewing objects in the datastore, etc.
Don't move the files to another folder after your course starts as a new folder
name will create a new namespace name and old data will no longer be used. You
are free to rename the course URL prefix at any time. Once again, if you are
not hosting multiple courses, your course will run in a default namespace
(None).
Good luck!
"""
import logging
import mimetypes
import os
import posixpath
import re
import threading
import urlparse
import zipfile
import appengine_config
from common import jinja_filters
from common import safe_dom
from models import transforms
from models.config import ConfigProperty
from models.config import ConfigPropertyEntity
from models.config import Registry
from models.counters import PerfCounter
from models.courses import Course
from models.roles import Roles
from models.vfs import AbstractFileSystem
from models.vfs import DatastoreBackedFileSystem
from models.vfs import LocalReadOnlyFileSystem
import webapp2
from webapp2_extras import i18n
import utils
from google.appengine.api import namespace_manager
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import zipserve
# base name for all course namespaces
GCB_BASE_COURSE_NAMESPACE = 'gcb-course'

# these folder and file names are reserved
GCB_ASSETS_FOLDER_NAME = os.path.normpath('/assets/')
GCB_VIEWS_FOLDER_NAME = os.path.normpath('/views/')
GCB_DATA_FOLDER_NAME = os.path.normpath('/data/')
GCB_CONFIG_FILENAME = os.path.normpath('/course.yaml')

# modules do have files that must be inheritable, like oeditor.html
GCB_MODULES_FOLDER_NAME = os.path.normpath('/modules/')

# Files in these folders are inheritable between file systems.
GCB_INHERITABLE_FOLDER_NAMES = [
    os.path.join(GCB_ASSETS_FOLDER_NAME, 'css/'),
    os.path.join(GCB_ASSETS_FOLDER_NAME, 'img/'),
    os.path.join(GCB_ASSETS_FOLDER_NAME, 'lib/'),
    GCB_VIEWS_FOLDER_NAME,
    GCB_MODULES_FOLDER_NAME]

# supported site types
SITE_TYPE_COURSE = 'course'

# default 'Cache-Control' HTTP header for static files (seconds)
DEFAULT_CACHE_CONTROL_MAX_AGE = 600
DEFAULT_CACHE_CONTROL_PUBLIC = 'public'

# default HTTP headers for dynamic responses (a date far in the past plus
# 'no-cache' pragma disables caching)
DEFAULT_EXPIRY_DATE = 'Mon, 01 Jan 1990 00:00:00 GMT'
DEFAULT_PRAGMA = 'no-cache'

# enable debug output
DEBUG_INFO = False

# thread local storage for current request PATH_INFO
PATH_INFO_THREAD_LOCAL = threading.local()

# performance counters
STATIC_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-static',
    'A number of times request was served via static handler.')
DYNAMIC_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-dynamic',
    'A number of times request was served via dynamic handler.')
ZIP_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-zip',
    'A number of times request was served via zip handler.')
NO_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-none',
    'A number of times request was not matched to any handler.')
HTTP_BYTES_IN = PerfCounter(
    'gcb-sites-bytes-in',
    'A number of bytes received from clients by the handler.')
HTTP_BYTES_OUT = PerfCounter(
    'gcb-sites-bytes-out',
    'A number of bytes sent out from the handler to clients.')
HTTP_STATUS_200 = PerfCounter(
    'gcb-sites-http-20x',
    'A number of times HTTP status code 20x was returned.')
HTTP_STATUS_300 = PerfCounter(
    'gcb-sites-http-30x',
    'A number of times HTTP status code 30x was returned.')
HTTP_STATUS_400 = PerfCounter(
    'gcb-sites-http-40x',
    'A number of times HTTP status code 40x was returned.')
HTTP_STATUS_500 = PerfCounter(
    'gcb-sites-http-50x',
    'A number of times HTTP status code 50x was returned.')

# maps a status class (200/300/400/500) to its counter
COUNTER_BY_HTTP_CODE = {
    200: HTTP_STATUS_200, 300: HTTP_STATUS_300, 400: HTTP_STATUS_400,
    500: HTTP_STATUS_500}
def count_stats(handler):
    """Records statistics about the request and the response.

    Best-effort instrumentation: any error raised while counting is
    logged and swallowed so that stats can never break request handling.

    Args:
        handler: a webapp2-style request handler; its 'request' and
            'response' attributes are inspected and either may be unset
            (e.g. in unit tests).
    """
    try:
        # Record request bytes in.
        if handler.request and handler.request.content_length:
            HTTP_BYTES_IN.inc(handler.request.content_length)

        # Record response HTTP status code.
        if handler.response and handler.response.status_int:
            rounded_status_code = (handler.response.status_int / 100) * 100
            # Use .get(): with [] an unexpected status class (e.g. 1xx)
            # raised KeyError here, so the 'Unknown HTTP status code' log
            # below was unreachable.
            counter = COUNTER_BY_HTTP_CODE.get(rounded_status_code)
            if not counter:
                logging.error(
                    'Unknown HTTP status code: %s.',
                    handler.response.status_code)
            else:
                counter.inc()

        # Record response bytes out.
        if handler.response and handler.response.content_length:
            HTTP_BYTES_OUT.inc(handler.response.content_length)
    except Exception as e:  # pylint: disable-msg=broad-except
        logging.error('Failed to count_stats(): %s.', str(e))
def has_path_info():
    """Checks if PATH_INFO is defined for the thread local."""
    return hasattr(PATH_INFO_THREAD_LOCAL, 'path')
def set_path_info(path):
    """Stores PATH_INFO in thread local.

    Also remembers the current datastore namespace and switches to the
    namespace of the course matching 'path'. Must be balanced by a call
    to unset_path_info(); nesting is not allowed.
    """
    if not path:
        raise Exception('Use \'unset()\' instead.')
    if has_path_info():
        raise Exception('Expected no path set.')
    PATH_INFO_THREAD_LOCAL.path = path
    # Save the caller's namespace so unset_path_info() can restore it.
    PATH_INFO_THREAD_LOCAL.old_namespace = namespace_manager.get_namespace()
    namespace_manager.set_namespace(
        ApplicationContext.get_namespace_name_for_request())
def get_path_info():
    """Gets PATH_INFO from thread local; raises if set_path_info() not called."""
    return PATH_INFO_THREAD_LOCAL.path
def unset_path_info():
    """Removed PATH_INFO from thread local and restores the old namespace."""
    if not has_path_info():
        raise Exception('Expected valid path already set.')
    # Restore the namespace saved by set_path_info().
    namespace_manager.set_namespace(
        PATH_INFO_THREAD_LOCAL.old_namespace)
    del PATH_INFO_THREAD_LOCAL.old_namespace
    del PATH_INFO_THREAD_LOCAL.path
def debug(message):
    # Log only when the module-level DEBUG_INFO flag is on.
    if DEBUG_INFO:
        logging.info(message)
def _validate_appcontext_list(contexts, strict=False):
"""Validates a list of application contexts."""
# Check rule order is enforced. If we allowed any order and '/a' was before
# '/aa', the '/aa' would never match.
for i in range(len(contexts)):
for j in range(i + 1, len(contexts)):
above = contexts[i]
below = contexts[j]
if below.get_slug().startswith(above.get_slug()):
raise Exception(
'Please reorder course entries to have '
'\'%s\' before \'%s\'.' % (
below.get_slug(), above.get_slug()))
# Make sure '/' is mapped.
if strict:
is_root_mapped = False
for context in contexts:
if context.slug == '/':
is_root_mapped = True
break
if not is_root_mapped:
raise Exception(
'Please add an entry with \'/\' as course URL prefix.')
def get_all_courses(rules_text=None):
    """Reads all course rewrite rule definitions from environment variable.

    Each non-comment line of the config has the form
    'course:<slug>:<folder>[:<namespace>]'. Returns a validated list of
    ApplicationContext objects; the parse result is cached per exact
    rules text.
    """
    # Normalize text definition.
    if not rules_text:
        rules_text = GCB_COURSES_CONFIG.value
    rules_text = rules_text.replace(',', '\n')

    # Use cached value if exists.
    cached = ApplicationContext.ALL_COURSE_CONTEXTS_CACHE.get(rules_text)
    if cached:
        return cached

    # Compute the list of contexts.
    rules = rules_text.split('\n')
    slugs = {}
    namespaces = {}
    all_contexts = []
    for rule in rules:
        rule = rule.strip()
        if not rule or rule.startswith('#'):
            continue
        parts = rule.split(':')

        # validate length
        if len(parts) < 3:
            raise Exception('Expected rule definition of the form '
                            ' \'type:slug:folder[:ns]\', got %s: ' % rule)

        # validate type
        if parts[0] != SITE_TYPE_COURSE:
            raise Exception('Expected \'%s\', found: \'%s\'.'
                            % (SITE_TYPE_COURSE, parts[0]))
        site_type = parts[0]

        # validate slug: it must be a bare path fragment (urlparse puts
        # the path component at index 2)
        slug = parts[1]
        slug_parts = urlparse.urlparse(slug)
        if slug != slug_parts[2]:
            raise Exception(
                'Bad rule: \'%s\'. '
                'Course URL prefix \'%s\' must be a simple URL fragment.' % (
                    rule, slug))
        if slug in slugs:
            raise Exception(
                'Bad rule: \'%s\'. '
                'Course URL prefix \'%s\' is already defined.' % (rule, slug))
        slugs[slug] = True

        # validate folder name; a non-empty folder means read-only files on
        # local disk, an empty one means assets live in the datastore.
        # NOTE: the lambdas capture loop variables, which is safe only
        # because create_fs is invoked later in this same iteration.
        if parts[2]:
            folder = parts[2]
            # pylint: disable-msg=g-long-lambda
            create_fs = lambda unused_ns: LocalReadOnlyFileSystem(
                logical_home_folder=folder)
        else:
            folder = '/'
            # pylint: disable-msg=g-long-lambda
            create_fs = lambda ns: DatastoreBackedFileSystem(
                ns=ns,
                logical_home_folder=appengine_config.BUNDLE_ROOT,
                inherits_from=LocalReadOnlyFileSystem(logical_home_folder='/'),
                inheritable_folders=GCB_INHERITABLE_FOLDER_NAMES)

        # validate or derive namespace: explicit 4th part wins, otherwise
        # derived from the folder ('/courses/a' -> 'gcb-course-courses-a')
        namespace = appengine_config.DEFAULT_NAMESPACE_NAME
        if len(parts) == 4:
            namespace = parts[3]
        else:
            if folder and folder != '/':
                namespace = '%s%s' % (GCB_BASE_COURSE_NAMESPACE,
                                      folder.replace('/', '-'))
        try:
            namespace_manager.validate_namespace(namespace)
        except Exception as e:
            raise Exception(
                'Error validating namespace "%s" in rule "%s"; %s.' % (
                    namespace, rule, e))

        if namespace in namespaces:
            raise Exception(
                'Bad rule \'%s\'. '
                'Namespace \'%s\' is already defined.' % (rule, namespace))
        namespaces[namespace] = True

        all_contexts.append(ApplicationContext(
            site_type, slug, folder, namespace,
            AbstractFileSystem(create_fs(namespace)),
            raw=rule))

    _validate_appcontext_list(all_contexts)

    # Cache result to avoid re-parsing over and over. Note the cache holds
    # only the most recent rules text.
    ApplicationContext.ALL_COURSE_CONTEXTS_CACHE = {rules_text: all_contexts}

    return all_contexts
def get_course_for_current_request():
    """Chooses course that matches current request context path."""
    # Nothing to match when no path was bound to this thread.
    if not has_path_info():
        return None
    path = get_path_info()

    # Match the path against every configured course.
    # TODO(psimakov): linear search is unacceptable
    for candidate in get_all_courses():
        slug = candidate.get_slug()
        if slug == '/' or path == slug or path.startswith('%s/' % slug):
            return candidate

    debug('No mapping for: %s' % path)
    return None
def path_join(base, path):
    """Joins 'base' and 'path' ('path' is interpreted as a relative path).

    This method is like os.path.join(), but 'path' is interpreted relatively.
    E.g., os.path.join('/a/b', '/c') yields '/c', but this function yields
    '/a/b/c'.

    Args:
        base: The base path.
        path: The path to append to base; this is treated as a relative path.

    Returns:
        The path obtaining by appending 'path' to 'base'.
    """
    if os.path.isabs(path):
        # Drop the drive letter (a no-op outside Windows), then the
        # leading separator, so os.path.join treats 'path' as relative.
        path = os.path.splitdrive(path)[1][1:]
    return AbstractFileSystem.normpath(os.path.join(base, path))
def abspath(home_folder, filename):
    """Creates an absolute URL for a filename in a home folder.

    The result is rooted at appengine_config.BUNDLE_ROOT.
    """
    return path_join(appengine_config.BUNDLE_ROOT,
                     path_join(home_folder, filename))
def unprefix(path, prefix):
    """Remove the prefix from path. Append '/' if an empty string results."""
    if not path.startswith(prefix):
        raise Exception('Not prefixed.')
    if prefix == '/':
        # The root prefix leaves the path untouched.
        return path
    stripped = path[len(prefix):]
    return stripped if stripped else '/'
def set_static_resource_cache_control(handler):
    """Properly sets Cache-Control for a WebOb/webapp2 response.

    Static resources are marked publicly cacheable for
    DEFAULT_CACHE_CONTROL_MAX_AGE seconds.
    """
    handler.response.cache_control.no_cache = None
    handler.response.cache_control.public = DEFAULT_CACHE_CONTROL_PUBLIC
    handler.response.cache_control.max_age = DEFAULT_CACHE_CONTROL_MAX_AGE
def set_default_response_headers(handler):
    """Sets the default headers for outgoing responses.

    Dynamic responses get no-cache/must-revalidate headers plus an
    already-expired date, so browsers and proxies never cache them.
    """
    # This conditional is needed for the unit tests to pass, since their
    # handlers do not have a response attribute.
    if handler.response:
        # Only set the headers for dynamic responses. This happens precisely
        # when the handler is an instance of utils.ApplicationHandler.
        if isinstance(handler, utils.ApplicationHandler):
            handler.response.cache_control.no_cache = True
            handler.response.cache_control.must_revalidate = True
            handler.response.expires = DEFAULT_EXPIRY_DATE
            handler.response.pragma = DEFAULT_PRAGMA
def make_zip_handler(zipfilename):
    """Creates a handler that serves files from a zip file.

    Returns a zipserve.ZipHandler subclass bound to 'zipfilename' that
    serves individual members with static-resource caching headers.
    """

    class CustomZipHandler(zipserve.ZipHandler):
        """Custom ZipHandler that properly controls caching."""

        def get(self, *args):
            """Handles GET request."""
            path = None

            # try to use path passed explicitly
            if args and len(args) >= 1:
                path = args[0]

            # use path_translated if no name was passed explicitly
            if not path:
                path = self.path_translated

            # we need to remove leading slash and all filenames inside zip
            # file must be relative
            if path and path.startswith('/') and len(path) > 1:
                path = path[1:]

            if not path:
                self.error(404)
                return

            ZIP_HANDLER_COUNT.inc()
            self.ServeFromZipFile(zipfilename, path)
            count_stats(self)

        def SetCachingHeaders(self):  # pylint: disable=C6409
            """Properly controls caching."""
            set_static_resource_cache_control(self)

    return CustomZipHandler
class CssComboZipHandler(zipserve.ZipHandler):
"""A handler which combines a files served from a zip file.
The paths for the files within the zip file are presented
as query parameters.
"""
zipfile_cache = {}
def get(self):
raise NotImplementedError()
def SetCachingHeaders(self): # pylint: disable=C6409
"""Properly controls caching."""
set_static_resource_cache_control(self)
def serve_from_zip_file(self, zipfilename, static_file_handler):
"""Assemble the download by reading file from zip file."""
zipfile_object = self.zipfile_cache.get(zipfilename)
if zipfile_object is None:
try:
zipfile_object = zipfile.ZipFile(zipfilename)
except (IOError, RuntimeError, zipfile.BadZipfile), err:
# If the zipfile can't be opened, that's probably a
# configuration error in the app, so it's logged as an error.
logging.error('Can\'t open zipfile %s: %s', zipfilename, err)
zipfile_object = '' # Special value to cache negative results.
self.zipfile_cache[zipfilename] = zipfile_object
if not zipfile_object:
self.error(404)
return
all_content_types = set()
for name in self.request.GET:
all_content_types.add(mimetypes.guess_type(name))
if len(all_content_types) == 1:
content_type = all_content_types.pop()[0]
else:
content_type = 'text/plain'
self.response.headers['Content-Type'] = content_type
self.SetCachingHeaders()
for name in self.request.GET:
try:
content = zipfile_object.read(name)
if content_type == 'text/css':
content = self._fix_css_paths(
name, content, static_file_handler)
self.response.out.write(content)
except (KeyError, RuntimeError), err:
logging.error('Not found %s in %s', name, zipfilename)
def _fix_css_paths(self, path, css, static_file_handler):
"""Transform relative url() settings in CSS to absolute.
This is necessary because a url setting, e.g., url(foo.png), is
interpreted as relative to the location of the CSS file. However
in the case of a bundled CSS file, obtained from a URL such as
http://place.com/cb/combo?a/b/c/foo.css
the browser would believe that the location for foo.png was
http://place.com/cb/foo.png
and not
http://place.com/cb/a/b/c/foo.png
Thus we transform the url from
url(foo.png)
to
url(/static_file_service/a/b/c/foo.png)
Args:
path: the path to the CSS file within the ZIP file
css: the content of the CSS file
static_file_handler: the base handler to serve the referenced file
Returns:
The CSS with all relative URIs rewritten to absolute URIs.
"""
base = static_file_handler + posixpath.split(path)[0] + '/'
css = css.decode('utf-8')
css = re.sub(r'url\(([^http|^https]\S+)\)', r'url(%s\1)' % base, css)
return css
def make_css_combo_zip_handler(zipfilename, static_file_handler):
    """Binds CssComboZipHandler to a zip file and a static handler prefix."""

    class CustomCssComboZipHandler(CssComboZipHandler):

        def get(self):
            self.serve_from_zip_file(zipfilename, static_file_handler)

    return CustomCssComboZipHandler
class AssetHandler(webapp2.RequestHandler):
    """Handles serving of static resources located on the file system."""

    def __init__(self, app_context, filename):
        # NOTE: deliberately does not call the webapp2.RequestHandler
        # constructor; request/response are attached by the dispatcher.
        self.app_context = app_context
        self.filename = filename

    def get_mime_type(self, filename, default='application/octet-stream'):
        """Guess the MIME type from the filename, with a binary fallback."""
        guess = mimetypes.guess_type(filename)[0]
        if guess is None:
            return default
        return guess

    def _can_view(self, fs, stream):
        """Checks if current user can view stream."""
        # Draft files are visible only to course admins.
        public = not fs.is_draft(stream)
        return public or Roles.is_course_admin(self.app_context)

    def get(self):
        """Handles GET requests."""
        debug('File: %s' % self.filename)

        if not self.app_context.fs.isfile(self.filename):
            self.error(404)
            return

        stream = self.app_context.fs.open(self.filename)
        if not self._can_view(self.app_context.fs, stream):
            self.error(403)
            return

        set_static_resource_cache_control(self)
        self.response.headers['Content-Type'] = self.get_mime_type(
            self.filename)
        self.response.write(stream.read())
class ApplicationContext(object):
    """An application context for a request/response."""

    # Here we store a map of a text definition of the courses to be parsed, and
    # a fully validated array of ApplicationContext objects that they define.
    # This is cached in process and automatically recomputed when text
    # definition changes.
    ALL_COURSE_CONTEXTS_CACHE = {}

    @classmethod
    def get_namespace_name_for_request(cls):
        """Gets the name of the namespace to use for this request.

        (Examples of such namespaces are NDB and memcache.)

        Returns:
            The namespace for the current request, or None if no course matches
            the current request context path.
        """
        course = get_course_for_current_request()
        if course:
            return course.namespace
        return appengine_config.DEFAULT_NAMESPACE_NAME

    @classmethod
    def after_create(cls, instance):
        """Override this method to manipulate freshly created instance."""
        pass

    def __init__(self, site_type, slug, homefolder, namespace, fs, raw=None):
        """Creates new application context.

        Args:
            site_type: Specifies the type of context. Must be 'course' for now.
            slug: A common context path prefix for all URLs in the context.
            homefolder: A folder with the assets belonging to this context.
            namespace: A name of a datastore namespace for use by this context.
            fs: A file system object to be used for accessing homefolder.
            raw: A raw representation of this course rule (course:/:/).

        Returns:
            The new instance of namespace object.
        """
        self.type = site_type
        self.slug = slug
        self.homefolder = homefolder
        self.namespace = namespace
        self._fs = fs
        self._raw = raw
        # Hook for subclasses to post-process a freshly built context.
        self.after_create(self)

    @ property
    def raw(self):
        # The original rule text this context was parsed from.
        return self._raw

    @ property
    def fs(self):
        # AbstractFileSystem used for all asset/template access.
        return self._fs

    @property
    def now_available(self):
        # Whether the course is flagged available in its course.yaml.
        course = self.get_environ().get('course')
        return course and course.get('now_available')

    def get_title(self):
        return self.get_environ()['course']['title']

    def get_namespace_name(self):
        return self.namespace

    def get_home_folder(self):
        return self.homefolder

    def get_slug(self):
        return self.slug

    def get_config_filename(self):
        """Returns absolute location of a course configuration file."""
        filename = abspath(self.get_home_folder(), GCB_CONFIG_FILENAME)
        debug('Config file: %s' % filename)
        return filename

    def get_environ(self):
        # Course settings dict parsed from course.yaml (see Course).
        return Course.get_environ(self)

    def get_home(self):
        """Returns absolute location of a course folder."""
        path = abspath(self.get_home_folder(), '')
        return path

    def get_template_home(self):
        """Returns absolute location of a course template folder."""
        path = abspath(self.get_home_folder(), GCB_VIEWS_FOLDER_NAME)
        return path

    def get_data_home(self):
        """Returns absolute location of a course data folder."""
        path = abspath(self.get_home_folder(), GCB_DATA_FOLDER_NAME)
        return path

    def get_template_environ(self, locale, additional_dirs):
        """Create and configure jinja template evaluation environment."""
        template_dir = self.get_template_home()
        dirs = [template_dir]
        if additional_dirs:
            dirs += additional_dirs
        jinja_environment = self.fs.get_jinja_environ(dirs)
        # Install gettext translations for the requested locale.
        i18n.get_i18n().set_locale(locale)
        jinja_environment.install_gettext_translations(i18n)
        jinja_environment.filters['gcb_tags'] = jinja_filters.gcb_tags
        return jinja_environment
def _courses_config_validator(rules_text, errors):
    """Validates a textual definition of courses entries.

    Appends a message to 'errors' instead of raising; used as the
    validator callback of the GCB_COURSES_CONFIG property.
    """
    try:
        _validate_appcontext_list(
            get_all_courses(rules_text=rules_text), strict=True)
    except Exception as e:  # pylint: disable-msg=broad-except
        errors.append(str(e))
def validate_new_course_entry_attributes(name, title, admin_email, errors):
    """Validates new course attributes, appending messages to 'errors'."""
    name_too_short = not name or len(name) < 3
    if name_too_short:
        errors.append(
            'The unique name associated with the course must be at least '
            'three characters long.')
    # NOTE(review): re.IGNORECASE also admits uppercase letters even though
    # the message promises lowercase only — confirm intended behavior.
    if not re.match('[_a-z0-9]+$', name, re.IGNORECASE):
        errors.append(
            'The unique name associated with the course should contain only '
            'lowercase letters, numbers, or underscores.')

    title_too_short = not title or len(title) < 3
    if title_too_short:
        errors.append('The course title is too short.')

    if not admin_email or '@' not in admin_email:
        errors.append('Please enter a valid email address.')
@db.transactional()
def _add_new_course_entry_to_persistent_configuration(raw):
    """Adds new raw course entry definition to the datastore settings.

    This loads all current datastore course entries and adds a new one. It
    also find the best place to add the new entry at the further down the list
    the better, because entries are applied in the order of declaration.

    Args:
        raw: The course entry rule: 'course:/foo::ns_foo'.

    Returns:
        True if added, False if not. False almost always means a duplicate rule.
    """
    # Get all current entries from a datastore.
    entity = ConfigPropertyEntity.get_by_key_name(GCB_COURSES_CONFIG.name)
    if not entity:
        entity = ConfigPropertyEntity(key_name=GCB_COURSES_CONFIG.name)
        entity.is_draft = False
    if not entity.value:
        # Fall back to the currently effective config value.
        entity.value = GCB_COURSES_CONFIG.value
    lines = entity.value.splitlines()

    # Add new entry to the rest of the entries. Since entries are matched
    # in the order of declaration, try to find insertion point further down.
    final_lines_text = None
    for index in reversed(range(0, len(lines) + 1)):
        # Create new rule list putting new item at index position.
        new_lines = lines[:]
        new_lines.insert(index, raw)
        new_lines_text = '\n'.join(new_lines)

        # Validate the rule list definition; the first position (scanning
        # from the end) that validates wins.
        errors = []
        _courses_config_validator(new_lines_text, errors)
        if not errors:
            final_lines_text = new_lines_text
            break

    # Save updated course entries.
    if final_lines_text:
        entity.value = final_lines_text
        entity.put()
        return True
    return False
def add_new_course_entry(unique_name, title, admin_email, errors):
    """Validates course attributes and adds the course.

    Returns the raw rule text on success, or None with messages
    appended to 'errors' on failure.
    """

    # Validate.
    validate_new_course_entry_attributes(
        unique_name, title, admin_email, errors)
    if errors:
        return

    # Create new entry and check it is valid.
    raw = 'course:/%s::ns_%s' % (unique_name, unique_name)
    try:
        get_all_courses(rules_text=raw)
    except Exception as e:  # pylint: disable-msg=broad-except
        errors.append('Failed to add entry: %s.\n%s' % (raw, e))
    if errors:
        return

    # Add new entry to persistence.
    if not _add_new_course_entry_to_persistent_configuration(raw):
        errors.append(
            'Unable to add new entry \'%s\'. Entry with the '
            'same name \'%s\' already exists.' % (raw, unique_name))
        return
    return raw
# Admin-editable config property holding the course rewrite rules; the HTML
# description below is built with safe_dom and validated by
# _courses_config_validator before being accepted.
GCB_COURSES_CONFIG = ConfigProperty(
    'gcb_courses_config', str,
    safe_dom.NodeList().append(
        safe_dom.Element('p').add_text("""
A newline separated list of course entries. Each course entry has
four parts, separated by colons (':'). The four parts are:""")
    ).append(
        safe_dom.Element('ol').add_child(
            safe_dom.Element('li').add_text(
                'The word \'course\', which is a required element.')
        ).add_child(
            safe_dom.Element('li').add_text("""
A unique course URL prefix. Examples could be '/cs101' or '/art'.
Default: '/'""")
        ).add_child(
            safe_dom.Element('li').add_text("""
A file system location of course asset files. If location is left empty,
the course assets are stored in a datastore instead of the file system. A course
with assets in a datastore can be edited online. A course with assets on file
system must be re-deployed to Google App Engine manually.""")
        ).add_child(
            safe_dom.Element('li').add_text("""
A course datastore namespace where course data is stored in App Engine.
Note: this value cannot be changed after the course is created."""))
    ).append(
        safe_dom.Text(
            'For example, consider the following two course entries:')
    ).append(safe_dom.Element('br')).append(
        safe_dom.Element('blockquote').add_text(
            'course:/cs101::/ns_cs101'
        ).add_child(
            safe_dom.Element('br')
        ).add_text('course:/:/')
    ).append(
        safe_dom.Element('p').add_text("""
Assuming you are hosting Course Builder on http:/www.example.com, the first
entry defines a course on a http://www.example.com/cs101 and both its assets
and student data are stored in the datastore namespace 'ns_cs101'. The second
entry defines a course hosted on http://www.example.com/, with its assets
stored in the '/' folder of the installation and its data stored in the default
empty datastore namespace.""")
    ).append(
        safe_dom.Element('p').add_text("""
A line that starts with '#' is ignored. Course entries are applied in the
order they are defined.""")
    ), 'course:/:/:', multiline=True, validator=_courses_config_validator)
class ApplicationRequestHandler(webapp2.RequestHandler):
    """Handles dispatching of all URL's to proper handlers."""

    # WARNING! never set this value to True, unless for the production load
    # tests; setting this value to True will allow any anonymous third party to
    # act as a Course Builder superuser
    CAN_IMPERSONATE = False

    # the name of the impersonation header
    IMPERSONATE_HEADER_NAME = 'Gcb-Impersonate'

    def dispatch(self):
        """Routes the request, impersonating the header's user if enabled."""
        if self.CAN_IMPERSONATE:
            self.impersonate_and_dispatch()
        else:
            super(ApplicationRequestHandler, self).dispatch()

    def impersonate_and_dispatch(self):
        """Dispatches request with user impersonation."""
        impersonate_info = self.request.headers.get(
            self.IMPERSONATE_HEADER_NAME)
        if not impersonate_info:
            super(ApplicationRequestHandler, self).dispatch()
            return

        impersonate_info = transforms.loads(impersonate_info)
        email = impersonate_info.get('email')
        user_id = impersonate_info.get('user_id')

        def get_impersonated_user():
            """A method that returns impersonated user."""
            try:
                return users.User(email=email, _user_id=user_id)
            except users.UserNotFoundError:
                return None

        # Temporarily replace users.get_current_user so all code executed
        # during dispatch sees the impersonated identity; always restore it.
        old_get_current_user = users.get_current_user
        try:
            logging.info('Impersonating %s.', email)
            users.get_current_user = get_impersonated_user
            super(ApplicationRequestHandler, self).dispatch()
            return
        finally:
            users.get_current_user = old_get_current_user

    @classmethod
    def bind_to(cls, urls, urls_map):
        """Recursively builds a map from a list of (URL, Handler) tuples."""
        for url in urls:
            path_prefix = url[0]
            handler = url[1]
            urls_map[path_prefix] = handler

            # add child handlers
            if hasattr(handler, 'get_child_routes'):
                cls.bind_to(handler.get_child_routes(), urls_map)

    @classmethod
    def bind(cls, urls):
        """Builds and installs the URL-to-handler map for this class."""
        urls_map = {}
        cls.bind_to(urls, urls_map)
        cls.urls_map = urls_map

    def get_handler(self):
        """Finds a course suitable for handling this request."""
        course = get_course_for_current_request()
        if not course:
            return None

        path = get_path_info()
        if not path:
            return None

        return self.get_handler_for_course_type(
            course, unprefix(path, course.get_slug()))

    def can_handle_course_requests(self, context):
        """Reject all, but authors requests, to an unpublished course."""
        return context.now_available or Roles.is_course_admin(context)

    def _get_handler_factory_for_path(self, path):
        """Picks a handler to handle the path."""

        # Checks if path maps in its entirety.
        if path in ApplicationRequestHandler.urls_map:
            return ApplicationRequestHandler.urls_map[path]

        # Check if partial path maps. For now, let only zipserve.ZipHandler
        # handle partial matches. We want to find the longest possible match.
        parts = path.split('/')
        candidate = None
        partial_path = ''
        for part in parts:
            if part:
                partial_path += '/' + part
                if partial_path in ApplicationRequestHandler.urls_map:
                    handler = ApplicationRequestHandler.urls_map[partial_path]
                    if (
                            isinstance(handler, zipserve.ZipHandler) or
                            issubclass(handler, zipserve.ZipHandler)):
                        candidate = handler
        return candidate

    def get_handler_for_course_type(self, context, path):
        """Gets the right handler for the given context and path."""
        if not self.can_handle_course_requests(context):
            return None

        # TODO(psimakov): Add docs (including args and returns).
        norm_path = os.path.normpath(path)

        # Handle static assets here.
        if norm_path.startswith(GCB_ASSETS_FOLDER_NAME):
            abs_file = abspath(context.get_home_folder(), norm_path)
            handler = AssetHandler(self, abs_file)
            handler.request = self.request
            handler.response = self.response
            handler.app_context = context

            debug('Course asset: %s' % abs_file)
            STATIC_HANDLER_COUNT.inc()
            return handler

        # Handle all dynamic handlers here.
        handler_factory = self._get_handler_factory_for_path(path)
        if handler_factory:
            handler = handler_factory()
            handler.app_context = context
            handler.request = self.request
            handler.response = self.response

            # This variable represents the path after the namespace prefix is
            # removed. The full path is still stored in self.request.path. For
            # example, if self.request.path is '/new_course/foo/bar/baz/...',
            # the path_translated would be '/foo/bar/baz/...'.
            handler.path_translated = path

            debug('Handler: %s > %s' % (path, handler.__class__.__name__))
            DYNAMIC_HANDLER_COUNT.inc()
            return handler

        NO_HANDLER_COUNT.inc()
        return None

    def _serve(self, path, verb):
        """Common request flow shared by get/post/put/delete.

        Args:
            path: the request path relative to the application root.
            verb: name of the handler method to invoke ('get', 'post', ...).
        """
        try:
            set_path_info(path)
            handler = self.get_handler()
            if not handler:
                self.error(404)
            else:
                set_default_response_headers(handler)
                getattr(handler, verb)()
        finally:
            count_stats(self)
            unset_path_info()

    def get(self, path):
        self._serve(path, 'get')

    def post(self, path):
        self._serve(path, 'post')

    def put(self, path):
        self._serve(path, 'put')

    def delete(self, path):
        self._serve(path, 'delete')
def assert_mapped(src, dest):
    """Asserts that URL src resolves to a course whose slug equals dest.

    A falsy dest asserts that no course matches src at all.
    """
    try:
        set_path_info(src)
        course = get_course_for_current_request()
        if dest:
            assert course.get_slug() == dest
        else:
            assert course is None
    finally:
        unset_path_info()
def assert_handled(src, target_handler):
    """Asserts that URL src is served by target_handler; returns the handler.

    Passing target_handler=None asserts that nothing handles src.
    """
    try:
        set_path_info(src)
        request_handler = ApplicationRequestHandler()
        # For unit tests to work we want all requests to be handled regardless
        # of course.now_available flag value. Here we patch for that.
        request_handler.can_handle_course_requests = lambda context: True
        handler = request_handler.get_handler()
        if handler is None:
            assert target_handler is None
            return None
        assert isinstance(handler, target_handler)
        return handler
    finally:
        unset_path_info()
def assert_fails(func):
    """Asserts that calling func() raises an exception of any kind."""
    try:
        func()
    except Exception:  # pylint: disable=W0703
        return
    raise Exception('Function \'%s\' was expected to fail.' % func)
def setup_courses(course_config):
    """Helper method that allows a test to setup courses on the fly."""
    # Overriding the registry entry makes get_all_courses() see this value.
    Registry.test_overrides.update(
        {GCB_COURSES_CONFIG.name: course_config})
def reset_courses():
    """Cleanup method to complement setup_courses()."""
    # Restoring the default value is equivalent to clearing the override.
    Registry.test_overrides[
        GCB_COURSES_CONFIG.name] = GCB_COURSES_CONFIG.default_value
def test_unprefix():
    """unprefix() strips a course slug prefix, always leaving a leading '/'."""
    cases = (
        ('/', '/', '/'),
        ('/a/b/c', '/a/b', '/c'),
        ('/a/b/index.html', '/a/b', '/index.html'),
        ('/a/b', '/a/b', '/'),
    )
    for path, prefix, expected in cases:
        assert unprefix(path, prefix) == expected
def test_rule_validations():
    """Test rules validator."""
    courses = get_all_courses(rules_text='course:/:/')
    assert 1 == len(courses)

    # Check comments: an entry starting with '#' is skipped.
    setup_courses('course:/a:/nsa, course:/b:/nsb')
    assert 2 == len(get_all_courses())
    setup_courses('course:/a:/nsa, # course:/a:/nsb')
    assert 1 == len(get_all_courses())

    # Check slug collisions are not allowed.
    setup_courses('course:/a:/nsa, course:/a:/nsb')
    assert_fails(get_all_courses)

    # Check namespace collisions are not allowed.
    setup_courses('course:/a:/nsx, course:/b:/nsx')
    assert_fails(get_all_courses)

    # Check rule order is enforced. If we allowed any order and '/a' was before
    # '/aa', the '/aa' would never match.
    setup_courses('course:/a:/nsa, course:/aa:/nsaa, course:/aaa:/nsaaa')
    assert_fails(get_all_courses)

    # Check namespace names.
    setup_courses('course:/a::/nsx')
    assert_fails(get_all_courses)

    # Check slug validity. NOTE(review): a space in the slug is apparently
    # tolerated (no assert_fails below) while '?' is rejected -- confirm this
    # asymmetry is intended.
    setup_courses('course:/a /b::nsa')
    get_all_courses()
    setup_courses('course:/a?/b::nsa')
    assert_fails(get_all_courses)

    # Cleanup.
    reset_courses()
def test_rule_definitions():
    """Test various rewrite rule definitions."""

    # Check that the default site is created when no rules are specified.
    assert len(get_all_courses()) == 1

    # Test one rule parsing.
    setup_courses('course:/google/pswg:/sites/pswg')
    rules = get_all_courses()
    # Fixed: assert on the captured list instead of calling get_all_courses()
    # a second time before indexing into 'rules'.
    assert len(rules) == 1
    rule = rules[0]
    assert rule.get_slug() == '/google/pswg'
    assert rule.get_home_folder() == '/sites/pswg'

    # Test two rule parsing.
    setup_courses('course:/a/b:/c/d, course:/e/f:/g/h')
    assert len(get_all_courses()) == 2

    # Test that two of the same slugs are not allowed.
    setup_courses('foo:/a/b:/c/d, bar:/a/b:/c/d')
    assert_fails(get_all_courses)

    # Test that only 'course' is supported.
    setup_courses('foo:/a/b:/c/d, bar:/e/f:/g/h')
    assert_fails(get_all_courses)

    # Cleanup.
    reset_courses()

    # Test namespaces: the namespace name is derived from the home folder.
    set_path_info('/')
    try:
        setup_courses('course:/:/c/d')
        assert ApplicationContext.get_namespace_name_for_request() == (
            'gcb-course-c-d')
    finally:
        unset_path_info()

    # Cleanup.
    reset_courses()
def test_url_to_rule_mapping():
    """Tests mapping of a URL to a rule."""

    # default mapping
    assert_mapped('/favicon.ico', '/')
    assert_mapped('/assets/img/foo.png', '/')

    # explicit mapping
    setup_courses('course:/a/b:/c/d, course:/e/f:/g/h')

    assert_mapped('/a/b', '/a/b')
    assert_mapped('/a/b/', '/a/b')
    # Fixed: this assertion was accidentally duplicated in the original.
    assert_mapped('/a/b/c', '/a/b')
    assert_mapped('/e/f', '/e/f')
    assert_mapped('/e/f/assets', '/e/f')
    assert_mapped('/e/f/views', '/e/f')
    # Paths that match no course prefix resolve to no course at all.
    assert_mapped('e/f', None)
    assert_mapped('foo', None)

    # Cleanup.
    reset_courses()
def test_url_to_handler_mapping_for_course_type():
    """Tests mapping of a URL to a handler for course type."""

    # setup rules
    setup_courses('course:/a/b:/c/d, course:/e/f:/g/h')

    # setup helper classes; only ZipHandler subclasses may match partially
    class FakeHandler0(object):
        def __init__(self):
            self.app_context = None

    class FakeHandler1(object):
        def __init__(self):
            self.app_context = None

    class FakeHandler2(zipserve.ZipHandler):
        def __init__(self):
            self.app_context = None

    class FakeHandler3(zipserve.ZipHandler):
        def __init__(self):
            self.app_context = None

    class FakeHandler4(zipserve.ZipHandler):
        def __init__(self):
            self.app_context = None

    # Setup handler.
    handler0 = FakeHandler0
    handler1 = FakeHandler1
    handler2 = FakeHandler2
    urls = [('/', handler0), ('/foo', handler1), ('/bar', handler2)]
    ApplicationRequestHandler.bind(urls)

    # Test proper handler mappings.
    assert_handled('/a/b', FakeHandler0)
    assert_handled('/a/b/', FakeHandler0)
    assert_handled('/a/b/foo', FakeHandler1)
    assert_handled('/a/b/bar', FakeHandler2)

    # Test partial path match: only ZipHandler subclasses handle it.
    assert_handled('/a/b/foo/bee', None)
    assert_handled('/a/b/bar/bee', FakeHandler2)

    # Test assets mapping.
    handler = assert_handled('/a/b/assets/img/foo.png', AssetHandler)
    assert AbstractFileSystem.normpath(
        handler.app_context.get_template_home()).endswith(
            AbstractFileSystem.normpath('/c/d/views'))

    # This is allowed as we don't go out of /assets/...
    handler = assert_handled(
        '/a/b/assets/foo/../models/models.py', AssetHandler)
    assert AbstractFileSystem.normpath(handler.filename).endswith(
        AbstractFileSystem.normpath('/c/d/assets/models/models.py'))

    # This is not allowed as we do go out of /assets/...
    assert_handled('/a/b/assets/foo/../../models/models.py', None)

    # Test negative cases
    assert_handled('/foo', None)
    assert_handled('/baz', None)

    # Site 'views' and 'data' are not accessible.
    # NOTE(review): the path below says 'view', not 'views' -- confirm this
    # exercises the intended folder.
    assert_handled('/a/b/view/base.html', None)
    assert_handled('/a/b/data/units.csv', None)

    # Default mapping
    reset_courses()
    handler3 = FakeHandler3
    handler4 = FakeHandler4
    urls = [
        ('/', handler0),
        ('/foo', handler1),
        ('/bar', handler2),
        ('/zip', handler3),
        ('/zip/a/b', handler4)]
    ApplicationRequestHandler.bind(urls)

    # Positive cases
    assert_handled('/', FakeHandler0)
    assert_handled('/foo', FakeHandler1)
    assert_handled('/bar', FakeHandler2)
    handler = assert_handled('/assets/js/main.js', AssetHandler)
    assert AbstractFileSystem.normpath(
        handler.app_context.get_template_home()).endswith(
            AbstractFileSystem.normpath('/views'))

    # Partial URL matching cases test that the most specific match is found.
    assert_handled('/zip', FakeHandler3)
    assert_handled('/zip/a', FakeHandler3)
    assert_handled('/zip/a/b', FakeHandler4)
    assert_handled('/zip/a/b/c', FakeHandler4)

    # Negative cases
    assert_handled('/baz', None)
    assert_handled('/favicon.ico', None)
    assert_handled('/e/f/index.html', None)
    assert_handled('/foo/foo.css', None)

    # Clean up.
    ApplicationRequestHandler.bind([])
def test_namespace_collisions_are_detected():
    """Test that namespace collisions are detected and are not allowed."""
    # Two distinct folders that presumably derive the same auto namespace.
    setup_courses('foo:/a/b:/c/d, bar:/a/b:/c-d')
    assert_fails(get_all_courses)
    reset_courses()
def test_path_construction():
    """Checks that path_join() works correctly."""

    # Test cases common to all platforms: every base/tail pairing of
    # '/a/b' and 'c', with and without separators, joins to '/a/b/c'.
    for base in ('/a/b', '/a/b/'):
        for tail in ('/c', 'c'):
            assert (os.path.normpath(path_join(base, tail)) ==
                    os.path.normpath('/a/b/c'))

    # Windows-specific test cases: a drive-qualified tail loses its drive.
    drive, unused_path = os.path.splitdrive('c:\\windows')
    if drive:
        for base in ('/a/b', '/a/b/'):
            assert (os.path.normpath(path_join(base, 'c:/d')) ==
                    os.path.normpath('/a/b/d'))
def run_all_unit_tests():
    """Executes every unit test in this module in a fixed order."""
    assert not ApplicationRequestHandler.CAN_IMPERSONATE
    ordered_tests = (
        test_namespace_collisions_are_detected,
        test_unprefix,
        test_rule_definitions,
        test_url_to_rule_mapping,
        test_url_to_handler_mapping_for_course_type,
        test_path_construction,
        test_rule_validations,
    )
    for unit_test in ordered_tests:
        unit_test()
# Allow running this module standalone to execute its unit tests with
# verbose debug output enabled.
if __name__ == '__main__':
    DEBUG_INFO = True
    run_all_unit_tests()
| apache-2.0 |
fullprogramaciondev/portal | djangocali-portal/users/tests/test_views.py | 367 | 1840 | from django.test import RequestFactory
from test_plus.test import TestCase
from ..views import (
UserRedirectView,
UserUpdateView
)
class BaseUserTestCase(TestCase):
    """Shared fixture: a default test user plus a Django request factory."""

    def setUp(self):
        self.factory = RequestFactory()
        self.user = self.make_user()
class TestUserRedirectView(BaseUserTestCase):

    def test_get_redirect_url(self):
        """The redirect URL points at the request user's detail page."""
        # Instantiate the view directly. Never do this outside a test!
        view = UserRedirectView()
        # Build a fake request, attach the test user, and hand it to the view.
        fake_request = self.factory.get('/fake-url')
        fake_request.user = self.user
        view.request = fake_request
        # '/users/testuser/' is the default username from self.make_user().
        self.assertEqual('/users/testuser/', view.get_redirect_url())
class TestUserUpdateView(BaseUserTestCase):

    def setUp(self):
        # Build on BaseUserTestCase.setUp() for the user and factory.
        super(TestUserUpdateView, self).setUp()
        # Instantiate the view directly. Never do this outside a test!
        self.view = UserUpdateView()
        # Build a fake request, attach the test user, and hand it to the view.
        fake_request = self.factory.get('/fake-url')
        fake_request.user = self.user
        self.view.request = fake_request

    def test_get_success_url(self):
        """Success URL is the default test user's detail page."""
        # '/users/testuser/' is the default username from self.make_user().
        self.assertEqual('/users/testuser/', self.view.get_success_url())

    def test_get_object(self):
        """The object being edited is the request's own user."""
        self.assertEqual(self.user, self.view.get_object())
| bsd-3-clause |
bzennn/blog_flask | python/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/__init__.py | 360 | 2852 | """
urllib3 - Thread-safe connection pooling and re-using.
"""
from __future__ import absolute_import
import warnings
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
    from logging import NullHandler
except ImportError:
    # Fallback for Python < 2.7, which lacks logging.NullHandler: a handler
    # that silently discards every record.
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = '1.16'

# Names re-exported as the package's public API.
__all__ = (
    'HTTPConnectionPool',
    'HTTPSConnectionPool',
    'PoolManager',
    'ProxyManager',
    'HTTPResponse',
    'Retry',
    'Timeout',
    'add_stderr_logger',
    'connection_from_url',
    'disable_warnings',
    'encode_multipart_formdata',
    'get_host',
    'make_headers',
    'proxy_from_url',
)

# Attach a no-op handler so library logging never emits "No handler found".
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.

    Returns the handler after adding it.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    logger = logging.getLogger(__name__)
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(stderr_handler)
    logger.setLevel(level)
    logger.debug('Added a stderr logging handler to logger: %s', __name__)
    return stderr_handler
# ... Clean up: NullHandler was only needed for the handler hookup above.
del NullHandler


# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
                      append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
def disable_warnings(category=exceptions.HTTPWarning):
    """
    Helper for quickly disabling all urllib3 warnings.

    :param category: warning class to silence; defaults to HTTPWarning,
        the root of urllib3's warning hierarchy, which silences them all.
    """
    warnings.simplefilter('ignore', category)
| gpl-3.0 |
songfj/scrapy | scrapy/utils/trackref.py | 120 | 2081 | """This module provides some functions and classes to record and report
references to live object instances.
If you want live objects for a particular class to be tracked, you only have to
subclass from object_ref (instead of object).
About performance: This library has a minimal performance impact when enabled,
and no performance penalty at all when disabled (as object_ref becomes just an
alias to object in that case).
"""
from __future__ import print_function
import weakref
from time import time
from operator import itemgetter
from collections import defaultdict
import six
# Sentinel default for format_live_refs(ignore=...): ignore nothing real.
NoneType = type(None)

# Registry of tracked instances: class -> WeakKeyDictionary mapping each
# live instance to its creation timestamp. Weak keys mean entries vanish
# automatically when instances are garbage collected.
live_refs = defaultdict(weakref.WeakKeyDictionary)
class object_ref(object):
    """Inherit from this class (instead of object) to keep a record of live
    instances."""

    __slots__ = ()

    def __new__(cls, *args, **kwargs):
        # Register the freshly created instance with its creation time.
        instance = object.__new__(cls)
        live_refs[cls][instance] = time()
        return instance
def format_live_refs(ignore=NoneType):
    """Return a tabular representation of tracked objects"""
    s = "Live References\n\n"
    now = time()
    # Sort classes by name for stable output.
    for cls, wdict in sorted(six.iteritems(live_refs),
                             key=lambda x: x[0].__name__):
        if not wdict:
            continue
        if issubclass(cls, ignore):
            continue
        # Smallest timestamp == the longest-lived tracked instance.
        # NOTE(review): WeakKeyDictionary contents can shrink while being
        # inspected if a GC runs concurrently -- counts are best-effort.
        oldest = min(six.itervalues(wdict))
        s += "%-30s %6d oldest: %ds ago\n" % (
            cls.__name__, len(wdict), now - oldest
        )
    return s
def print_live_refs(*a, **kw):
    """Print tracked objects"""
    # Thin wrapper: all arguments are forwarded to format_live_refs().
    print(format_live_refs(*a, **kw))
def get_oldest(class_name):
    """Get the oldest object for a specific class name"""
    for cls, wdict in six.iteritems(live_refs):
        if cls.__name__ == class_name:
            if not wdict:
                break
            # Entry with the smallest creation timestamp.
            return min(six.iteritems(wdict), key=itemgetter(1))[0]
    # Implicitly returns None when the class is unknown or has no live
    # instances.
def iter_all(class_name):
    """Iterate over all objects of the same class by its class name"""
    for cls, wdict in six.iteritems(live_refs):
        if cls.__name__ == class_name:
            return six.iterkeys(wdict)
    # NOTE(review): falls through to an implicit None (not an empty
    # iterator) when no tracked class matches -- callers that loop over the
    # result will fail on an unknown class name; confirm this is intended.
| bsd-3-clause |
Coelhon/MasterRepo.repository | plugin.video.RabbitMovies/resources/lib/indexers/navigator.py | 2 | 14559 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,sys,urlparse
import time
from resources.lib.libraries import control
# Base folder for skin artwork (may be None when the skin has no art pack).
artPath = control.artPath()
addonFanart = control.addonFanart()

# Parse the 'action' query parameter from the plugin invocation URL;
# default to None when the parameter is absent or unparsable.
try: action = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))['action']
except: action = None

# True when the user configured a Trakt / IMDb account in the settings.
traktMode = False if control.setting('trakt.user') == '' else True
imdbMode = False if control.setting('imdb_user') == '' else True

# Plugin base URL, e.g. 'plugin://plugin.video.RabbitMovies/'.
sysaddon = sys.argv[0]
class navigator:
def __init__(self):
movie_library = os.path.join(control.transPath(control.setting('movie_library')),'')
tv_library = os.path.join(control.transPath(control.setting('tv_library')),'')
tv_downloads = os.path.join(control.transPath(control.setting('tv_downloads')),'')
movie_downloads = os.path.join(control.transPath(control.setting('movie_downloads')),'')
try:
if not os.path.exists(movie_library): os.makedirs(movie_library)
except:
pass
try:
if not os.path.exists(tv_library): os.makedirs(tv_library)
except:
pass
#if not os.path.exists(tv_downloads) and tv_downloads!='' : os.makedirs(tv_downloads)
#if not os.path.exists(movie_downloads) and movie_downloads != '': os.makedirs(movie_downloads)
#if not control.TOKEN:
# last_reminder = control.setting('last_reminder')
# if last_reminder !='':
# last_reminder = int(last_reminder)
# else:
# last_reminder = 0
# print("--- TRAKT ---, last reminder",last_reminder)
# now = int(time.time())
# if last_reminder >= 0 and last_reminder < now - (24 * 60 * 60):
# gui_utils.get_pin()
#else:
# profile = control.traktapi.get_user_profile()
# control.set_setting('trakt_user', '%s (%s)' % (profile['username'], profile['name']))
#def trakt_pin_auth(self):
# gui_utils.get_pin()
def root(self):
self.addDirectoryItem(30001, 'movieNavigator', 'movies.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30002, 'tvNavigator', 'tvshows.jpg', 'DefaultTVShows.png')
self.addDirectoryItem(30003, 'channels', 'channels.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30004, 'myNavigator', 'myspecto.jpg', 'DefaultVideoPlaylists.png')
if not control.setting('movie_widget') == '0':
self.addDirectoryItem(30005, 'movieWidget', 'moviesAdded.jpg', 'DefaultRecentlyAddedMovies.png')
if (traktMode == True and not control.setting('tv_alt_widget') == '0') or (traktMode == False and not control.setting('tv_widget') == '0'):
self.addDirectoryItem(30006, 'tvWidget', 'calendarsAdded.jpg', 'DefaultRecentlyAddedEpisodes.png')
if not control.setting('calendar_widget') == '0':
self.addDirectoryItem(30007, 'calendars', 'calendar.jpg', 'DefaultRecentlyAddedEpisodes.png')
self.addDirectoryItem(30008, 'toolNavigator', 'tools.jpg', 'DefaultAddonProgram.png')
self.addDirectoryItem(30009, 'searchNavigator', 'search.jpg', 'DefaultFolder.png')
self.endDirectory()
from resources.lib.libraries import cache
from resources.lib.libraries import changelog
cache.get(changelog.get, 600000000, control.addonInfo('version'), table='changelog')
def movies(self):
self.addDirectoryItem(30021, 'movieGenres', 'movieGenres.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30022, 'movieYears', 'movieYears.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30023, 'moviePersons', 'movies.jpg', 'DefaultMovies.png')
#self.addDirectoryItem(30024, 'movieCertificates', 'movieCertificates.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30025, 'movies&url=featured', 'movies.jpg', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(30026, 'movies&url=trending', 'moviesTrending.jpg', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(30027, 'movies&url=popular', 'moviesPopular.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30028, 'movies&url=views', 'moviesViews.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30029, 'movies&url=boxoffice', 'moviesBoxoffice.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30030, 'movies&url=oscars', 'moviesOscars.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30031, 'movies&url=theaters', 'moviesTheaters.jpg', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(30032, 'movies&url=added', 'moviesAdded.jpg', 'DefaultRecentlyAddedMovies.png')
self.addDirectoryItem(30033, 'movieFavourites', 'movieFavourites.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30034, 'moviePerson', 'moviePerson.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30035, 'movieSearch', 'movieSearch.jpg', 'DefaultMovies.png')
self.endDirectory()
def tvshows(self):
self.addDirectoryItem(30051, 'tvGenres', 'tvGenres.jpg', 'DefaultTVShows.png')
self.addDirectoryItem(30052, 'tvYears', 'tvshows.jpg', 'DefaultTVShows.png')
self.addDirectoryItem(30053, 'tvNetworks', 'tvshows.jpg', 'DefaultTVShows.png')
self.addDirectoryItem(30054, 'tvshows&url=trending', 'tvshowsTrending.jpg', 'DefaultRecentlyAddedEpisodes.png')
self.addDirectoryItem(30055, 'tvshows&url=popular', 'tvshowsPopular.jpg', 'DefaultTVShows.png')
#self.addDirectoryItem(30056, 'tvshows&url=airing', 'tvshows.jpg', 'DefaultTVShows.png')
#self.addDirectoryItem(30057, 'tvshows&url=active', 'tvshowsActive.jpg', 'DefaultTVShows.png')
#self.addDirectoryItem(30058, 'tvshows&url=premiere', 'tvshows.jpg', 'DefaultTVShows.png')
self.addDirectoryItem(30059, 'tvshows&url=rating', 'tvshowsRating.jpg', 'DefaultTVShows.png')
self.addDirectoryItem(30060, 'tvshows&url=views', 'tvshowsViews.jpg', 'DefaultTVShows.png')
self.addDirectoryItem(30061, 'calendars', 'calendar.jpg', 'DefaultRecentlyAddedEpisodes.png')
self.addDirectoryItem(30062, 'calendar&url=added', 'calendarsAdded.jpg', 'DefaultRecentlyAddedEpisodes.png')
self.addDirectoryItem(30063, 'episodeFavourites', 'tvFavourites.jpg', 'DefaultRecentlyAddedEpisodes.png')
self.addDirectoryItem(30064, 'tvFavourites', 'tvFavourites.jpg', 'DefaultTVShows.png')
#self.addDirectoryItem(30065, 'tvPerson', 'tvPerson.jpg', 'DefaultTVShows.png')
self.addDirectoryItem(30066, 'tvSearch', 'tvSearch.jpg', 'DefaultTVShows.png')
self.endDirectory()
def specto(self):
if traktMode == True:
self.addDirectoryItem(30081, 'movies&url=traktcollection', 'moviesTraktcollection.jpg', 'DefaultMovies.png', context=(30191, 'moviesToLibrary&url=traktcollection'))
self.addDirectoryItem(30082, 'movies&url=traktwatchlist', 'moviesTraktwatchlist.jpg', 'DefaultMovies.png', context=(30191, 'moviesToLibrary&url=traktwatchlist'))
self.addDirectoryItem(30083, 'movies&url=traktfeatured', 'movies.jpg', 'DefaultMovies.png')
#self.addDirectoryItem(30084, 'movies&url=traktratings', 'movies.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30085, 'tvshows&url=traktcollection', 'tvshowsTraktcollection.jpg', 'DefaultTVShows.png', context=(30191, 'tvshowsToLibrary&url=traktcollection'))
self.addDirectoryItem(30086, 'tvshows&url=traktwatchlist', 'tvshowsTraktwatchlist.jpg', 'DefaultTVShows.png', context=(30191, 'tvshowsToLibrary&url=traktwatchlist'))
self.addDirectoryItem(30087, 'tvshows&url=traktfeatured', 'tvshows.jpg', 'DefaultTVShows.png')
#self.addDirectoryItem(30088, 'tvshows&url=traktratings', 'tvshows.jpg', 'DefaultTVShows.png')
self.addDirectoryItem(30089, 'calendar&url=progress', 'calendarsProgress.jpg', 'DefaultRecentlyAddedEpisodes.png')
self.addDirectoryItem(30090, 'calendar&url=mycalendar', 'calendarsMycalendar.jpg', 'DefaultRecentlyAddedEpisodes.png')
if imdbMode == True:
self.addDirectoryItem(30091, 'movies&url=imdbwatchlist', 'moviesImdbwatchlist.jpg', 'DefaultMovies.png', context=(30191, 'moviesToLibrary&url=imdbwatchlist'))
self.addDirectoryItem(30092, 'tvshows&url=imdbwatchlist', 'tvshowsImdbwatchlist.jpg', 'DefaultTVShows.png', context=(30191, 'tvshowsToLibrary&url=imdbwatchlist'))
if traktMode == True or imdbMode == True:
self.addDirectoryItem(30093, 'movieUserlists', 'movieUserlists.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30094, 'tvUserlists', 'tvUserlists.jpg', 'DefaultTVShows.png')
self.addDirectoryItem(30095, 'movieFavourites', 'movieFavourites.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30096, 'episodeFavourites', 'tvFavourites.jpg', 'DefaultTVShows.png')
self.addDirectoryItem(30097, 'tvFavourites', 'tvFavourites.jpg', 'DefaultTVShows.png')
movie_downloads = control.setting('movie_downloads')
tv_downloads = control.setting('tv_downloads')
if len(control.listDir(movie_downloads)[0]) > 0 or len(control.listDir(tv_downloads)[0]) > 0:
self.addDirectoryItem(30098, 'downloadNavigator', 'downloads.jpg', 'DefaultFolder.png')
self.endDirectory()
def downloads(self):
movie_downloads = control.setting('movie_downloads')
tv_downloads = control.setting('tv_downloads')
if len(control.listDir(movie_downloads)[0]) > 0:
self.addDirectoryItem(30099, movie_downloads, 'movies.jpg', 'DefaultMovies.png', isAction=False)
if len(control.listDir(tv_downloads)[0]) > 0:
self.addDirectoryItem(30100, tv_downloads, 'tvshows.jpg', 'DefaultTVShows.png', isAction=False)
self.endDirectory()
def tools(self):
self.addDirectoryItem(30111, 'openSettings&query=0.0', 'settings.jpg', 'DefaultAddonProgram.png')
self.addDirectoryItem(30112, 'openSettings&query=6.1', 'settings.jpg', 'DefaultAddonProgram.png')
self.addDirectoryItem(30113, 'openSettings&query=1.0', 'settings.jpg', 'DefaultAddonProgram.png')
self.addDirectoryItem(30114, 'openSettings&query=9.0', 'settings.jpg', 'DefaultAddonProgram.png')
self.addDirectoryItem(30115, 'openSettings&query=2.0', 'settings.jpg', 'DefaultAddonProgram.png')
self.addDirectoryItem(30116, 'openSettings&query=3.0', 'settings.jpg', 'DefaultAddonProgram.png')
self.addDirectoryItem(30117, 'openSettings&query=4.0', 'settings.jpg', 'DefaultAddonProgram.png')
self.addDirectoryItem(30118, 'openSettings&query=5.0', 'settings.jpg', 'DefaultAddonProgram.png')
self.addDirectoryItem(30119, 'clearSources', 'cache.jpg', 'DefaultAddonProgram.png')
self.addDirectoryItem(30120, 'clearCache', 'cache.jpg', 'DefaultAddonProgram.png')
self.addDirectoryItem(30122, 'openSettings&query=8.0', 'settings.jpg', 'DefaultAddonProgram.png')
self.addDirectoryItem(30121, 'libtoolNavigator', 'tools.jpg', 'DefaultAddonProgram.png')
#self.addDirectoryItem(30141, 'openSettings&query=10.0', 'tools.jpg', 'DefaultAddonProgram.png')
self.endDirectory()
def library(self):
self.addDirectoryItem(30131, 'openSettings&query=7.0', 'settings.jpg', 'DefaultAddonProgram.png')
self.addDirectoryItem(30132, 'updateLibrary&query=tool', 'update.jpg', 'DefaultAddonProgram.png')
self.addDirectoryItem(30133, control.setting('movie_library'), 'movies.jpg', 'DefaultMovies.png', isAction=False)
self.addDirectoryItem(30134, control.setting('tv_library'), 'tvshows.jpg', 'DefaultTVShows.png', isAction=False)
if traktMode == True:
self.addDirectoryItem(30135, 'moviesToLibrary&url=traktcollection', 'moviesTraktcollection.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30136, 'moviesToLibrary&url=traktwatchlist', 'moviesTraktwatchlist.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30137, 'tvshowsToLibrary&url=traktcollection', 'tvshowsTraktcollection.jpg', 'DefaultTVShows.png')
self.addDirectoryItem(30138, 'tvshowsToLibrary&url=traktwatchlist', 'tvshowsTraktwatchlist.jpg', 'DefaultTVShows.png')
if imdbMode == True:
self.addDirectoryItem(30139, 'moviesToLibrary&url=imdbwatchlist', 'moviesImdbwatchlist.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30140, 'tvshowsToLibrary&url=imdbwatchlist', 'tvshowsImdbwatchlist.jpg', 'DefaultTVShows.png')
self.endDirectory()
def search(self):
self.addDirectoryItem(30151, 'movieSearch', 'movieSearch.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30152, 'tvSearch', 'tvSearch.jpg', 'DefaultTVShows.png')
self.addDirectoryItem(30153, 'moviePerson', 'moviePerson.jpg', 'DefaultMovies.png')
self.addDirectoryItem(30154, 'tvPerson', 'tvPerson.jpg', 'DefaultTVShows.png')
self.endDirectory()
def addDirectoryItem(self, name, query, thumb, icon, context=None, isAction=True, isFolder=True):
    """Add a single entry to the Kodi directory listing.

    name: label string, or an integer language-string id resolved via control.lang.
    query: plugin action name (isAction=True) or a complete URL (isAction=False).
    thumb: artwork file name, resolved against artPath when an art pack is set.
    icon: Kodi default icon used when no art pack is available.
    context: optional (label_id, action) pair added as a context-menu entry.
    isFolder: whether the entry opens a sub-listing.
    """
    try:
        # Integer ids are localized; anything control.lang cannot handle
        # (e.g. an already-formatted string) is passed through unchanged.
        name = control.lang(name).encode('utf-8')
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
        pass
    url = '%s?action=%s' % (sysaddon, query) if isAction else query
    thumb = os.path.join(artPath, thumb) if artPath is not None else icon
    cm = []
    if context is not None:
        cm.append((control.lang(context[0]).encode('utf-8'),
                   'RunPlugin(%s?action=%s)' % (sysaddon, context[1])))
    item = control.item(label=name, iconImage=thumb, thumbnailImage=thumb)
    item.addContextMenuItems(cm, replaceItems=False)
    if addonFanart is not None:
        item.setProperty('Fanart_Image', addonFanart)
    control.addItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=isFolder)
def endDirectory(self, cacheToDisc=True):
    """Finalize the directory listing for the current plugin handle.

    cacheToDisc: forwarded to Kodi; allows the listing to be cached on disk.
    """
    control.directory(int(sys.argv[1]), cacheToDisc=cacheToDisc)
| gpl-2.0 |
stanta/darfchain | darfchain_docker_vagrant/tests/assets/test_digital_assets.py | 3 | 2956 | import pytest
import random
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_asset_transfer(b, user_pk, user_sk):
    """A signed TRANSFER of an owned input validates and carries the asset id
    of the originating CREATE transaction."""
    from bigchaindb.models import Transaction

    tx_input = b.get_owned_ids(user_pk).pop()
    tx_create = b.get_transaction(tx_input.txid)
    tx_transfer = Transaction.transfer(tx_create.to_inputs(), [([user_pk], 1)],
                                       tx_create.id)
    tx_transfer_signed = tx_transfer.sign([user_sk])

    assert tx_transfer_signed.validate(b) == tx_transfer_signed
    assert tx_transfer_signed.asset['id'] == tx_create.id
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_validate_transfer_asset_id_mismatch(b, user_pk, user_sk):
    """Tampering with the asset id of a TRANSFER must fail validation with
    AssetIdMismatch."""
    from bigchaindb.common.exceptions import AssetIdMismatch
    from bigchaindb.models import Transaction

    tx_create = b.get_owned_ids(user_pk).pop()
    tx_create = b.get_transaction(tx_create.txid)
    tx_transfer = Transaction.transfer(tx_create.to_inputs(), [([user_pk], 1)],
                                       tx_create.id)
    # Corrupt the asset id after construction to simulate tampering.
    tx_transfer.asset['id'] = 'aaa'
    tx_transfer_signed = tx_transfer.sign([user_sk])
    with pytest.raises(AssetIdMismatch):
        tx_transfer_signed.validate(b)
def test_get_asset_id_create_transaction(b, user_pk):
    """For a CREATE transaction, the asset id is the transaction's own id."""
    from bigchaindb.models import Transaction
    tx_create = Transaction.create([b.me], [([user_pk], 1)])
    asset_id = Transaction.get_asset_id(tx_create)

    assert asset_id == tx_create.id
@pytest.mark.bdb
@pytest.mark.usefixtures('inputs')
def test_get_asset_id_transfer_transaction(b, user_pk, user_sk):
    """For a TRANSFER transaction, get_asset_id returns the id recorded in
    the transaction's asset, not the transaction's own id."""
    from bigchaindb.models import Transaction

    tx_create = b.get_owned_ids(user_pk).pop()
    tx_create = b.get_transaction(tx_create.txid)
    # create a transfer transaction
    tx_transfer = Transaction.transfer(tx_create.to_inputs(), [([user_pk], 1)],
                                       tx_create.id)
    tx_transfer_signed = tx_transfer.sign([user_sk])
    # create a block
    block = b.create_block([tx_transfer_signed])
    b.write_block(block)
    # vote the block valid
    vote = b.vote(block.id, b.get_last_voted_block().id, True)
    b.write_vote(vote)
    asset_id = Transaction.get_asset_id(tx_transfer)

    assert asset_id == tx_transfer.asset['id']
def test_asset_id_mismatch(b, user_pk):
    """get_asset_id over transactions with different asset ids must raise
    AssetIdMismatch; random metadata guarantees the two CREATEs differ."""
    from bigchaindb.models import Transaction
    from bigchaindb.common.exceptions import AssetIdMismatch

    tx1 = Transaction.create([b.me], [([user_pk], 1)],
                             metadata={'msg': random.random()})
    tx2 = Transaction.create([b.me], [([user_pk], 1)],
                             metadata={'msg': random.random()})

    with pytest.raises(AssetIdMismatch):
        Transaction.get_asset_id([tx1, tx2])
def test_create_valid_divisible_asset(b, user_pk, user_sk):
    """A CREATE with an output amount greater than 1 (divisible asset)
    validates successfully."""
    from bigchaindb.models import Transaction

    tx = Transaction.create([user_pk], [([user_pk], 2)])
    tx_signed = tx.sign([user_sk])
    tx_signed.validate(b)
| gpl-3.0 |
cfobel/sconspiracy | Python/racy/plugins/graphlibext/__init__.py | 1 | 2002 | # -*- coding: UTF8 -*-
# ***** BEGIN LICENSE BLOCK *****
# Sconspiracy - Copyright (C) IRCAD, 2004-2010.
# Distributed under the terms of the BSD Licence as
# published by the Open Source Initiative.
# ****** END LICENSE BLOCK ******
import os
import racy
from racy import renv
from generategraph import generate_graph
class GraphLibExtError(racy.RacyPluginError):
    """Error type raised/reported by the graphlibext plugin."""
    pass
# Command-line option keyword under which this plugin registers itself.
KEYWORD = 'GRAPHLIBEXT'
# Destination directory for the generated Graphviz .dot files.
GRAPHLIBEXT_INSTALL_PATH = os.path.join(racy.renv.dirs.install,"graphlibext")
class Plugin(racy.rplugins.Plugin):
    """racy plugin that generates a Graphviz .dot dependency graph for one,
    several (comma-separated) or all libext packages, written under
    GRAPHLIBEXT_INSTALL_PATH."""

    name = "graphlibext"
    options = { KEYWORD : None }
    #allowed_values = { KEYWORD : ['no', 'yes'] }
    commandline_opts = [ KEYWORD ]
    #commandline_prj_opts = [ KEYWORD ]
    descriptions_opts = {
        KEYWORD : 'Generate libext graph for "all" or '
                  'specified libext.'
        }

    def init(self):
        """Generate and write the graph if the option was supplied.

        Always returns True; I/O and generation errors are reported through
        racy.print_error rather than raised, so plugin loading never aborts.
        """
        libext = renv.options.get_option( KEYWORD )
        if libext:
            if libext == "all":
                # generate_graph treats an empty list as "all libext".
                libext = []
                dot_filename = "all_libext"
            else:
                libext = libext.split(',')
                dot_filename = "_".join(libext)

            graph = generate_graph(libext)
            if graph:
                # Renamed from 'file' to avoid shadowing the builtin.
                dot_path = os.path.join(
                        GRAPHLIBEXT_INSTALL_PATH,
                        "{0}.dot".format(dot_filename)
                        )
                fp = None
                try:
                    if not os.path.exists(GRAPHLIBEXT_INSTALL_PATH):
                        os.mkdir(GRAPHLIBEXT_INSTALL_PATH)
                    fp = open(dot_path, 'w')
                    fp.write(graph)
                    fp.flush()
                    racy.print_msg("Written: {0}".format(dot_path))
                except Exception as e:
                    # 'except ... as e' is valid on Python 2.6+ and Python 3,
                    # unlike the original 'except Exception,e'.
                    racy.print_error( 'GraphLibExt error', GraphLibExtError(e) )
                finally:
                    if fp:
                        fp.close()
        return True
| bsd-3-clause |
dapangmao/minitwit | lib/werkzeug/contrib/securecookie.py | 318 | 12204 | # -*- coding: utf-8 -*-
r"""
werkzeug.contrib.securecookie
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements a cookie that is not alterable from the client
because it adds a checksum the server checks for. You can use it as
session replacement if all you have is a user id or something to mark
a logged in user.
Keep in mind that the data is still readable from the client as a
normal cookie is. However you don't have to store and flush the
sessions you have at the server.
Example usage:
>>> from werkzeug.contrib.securecookie import SecureCookie
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
Dumping into a string so that one can store it in a cookie:
>>> value = x.serialize()
Loading from that string again:
>>> x = SecureCookie.unserialize(value, "deadbeef")
>>> x["baz"]
(1, 2, 3)
If someone modifies the cookie and the checksum is wrong the unserialize
method will fail silently and return a new empty `SecureCookie` object.
Keep in mind that the values will be visible in the cookie so do not
store data in a cookie you don't want the user to see.
Application Integration
=======================
If you are using the werkzeug request objects you could integrate the
secure cookie into your application like this::
from werkzeug.utils import cached_property
from werkzeug.wrappers import BaseRequest
from werkzeug.contrib.securecookie import SecureCookie
# don't use this key but a different one; you could just use
# os.urandom(20) to get something random
SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
class Request(BaseRequest):
@cached_property
def client_session(self):
data = self.cookies.get('session_data')
if not data:
return SecureCookie(secret_key=SECRET_KEY)
return SecureCookie.unserialize(data, SECRET_KEY)
def application(environ, start_response):
request = Request(environ, start_response)
# get a response object here
response = ...
if request.client_session.should_save:
session_data = request.client_session.serialize()
response.set_cookie('session_data', session_data,
httponly=True)
return response(environ, start_response)
A less verbose integration can be achieved by using shorthand methods::
class Request(BaseRequest):
@cached_property
def client_session(self):
return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)
def application(environ, start_response):
request = Request(environ, start_response)
# get a response object here
response = ...
request.client_session.save_cookie(response)
return response(environ, start_response)
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import pickle
import base64
from hmac import new as hmac
from time import time
from hashlib import sha1 as _default_hash
from werkzeug._compat import iteritems, text_type
from werkzeug.urls import url_quote_plus, url_unquote_plus
from werkzeug._internal import _date_to_unix
from werkzeug.contrib.sessions import ModificationTrackingDict
from werkzeug.security import safe_str_cmp
from werkzeug._compat import to_native
class UnquoteError(Exception):
    """Internal exception used to signal failures on quoting."""
class SecureCookie(ModificationTrackingDict):
    """Represents a secure cookie.  You can subclass this class and provide
    an alternative mac method.  The important thing is that the mac method
    is a function with a similar interface to the hashlib.  Required
    methods are update() and digest().

    Example usage:

    >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
    >>> x["foo"]
    42
    >>> x["baz"]
    (1, 2, 3)
    >>> x["blafasel"] = 23
    >>> x.should_save
    True

    :param data: the initial data.  Either a dict, list of tuples or `None`.
    :param secret_key: the secret key.  If not set `None` or not specified
                       it has to be set before :meth:`serialize` is called.
    :param new: The initial value of the `new` flag.
    """

    #: The hash method to use.  This has to be a module with a new function
    #: or a function that creates a hashlib object.  Such as `hashlib.md5`
    #: Subclasses can override this attribute.  The default hash is sha1.
    #: Make sure to wrap this in staticmethod() if you store an arbitrary
    #: function there such as hashlib.sha1 which might be implemented
    #: as a function.
    hash_method = staticmethod(_default_hash)

    #: the module used for serialization.  Unless overridden by subclasses
    #: the standard pickle module is used.
    serialization_method = pickle

    #: if the contents should be base64 quoted.  This can be disabled if the
    #: serialization process returns cookie safe strings only.
    quote_base64 = True

    def __init__(self, data=None, secret_key=None, new=True):
        ModificationTrackingDict.__init__(self, data or ())
        # explicitly convert it into a bytestring because python 2.6
        # no longer performs an implicit string conversion on hmac
        if secret_key is not None:
            secret_key = bytes(secret_key)
        self.secret_key = secret_key
        self.new = new

    def __repr__(self):
        # Trailing '*' marks a modified (unsaved) cookie.
        return '<%s %s%s>' % (
            self.__class__.__name__,
            dict.__repr__(self),
            self.should_save and '*' or ''
        )

    @property
    def should_save(self):
        """True if the session should be saved.  By default this is only true
        for :attr:`modified` cookies, not :attr:`new`.
        """
        return self.modified

    @classmethod
    def quote(cls, value):
        """Quote the value for the cookie.  This can be any object supported
        by :attr:`serialization_method`.

        :param value: the value to quote.
        """
        if cls.serialization_method is not None:
            value = cls.serialization_method.dumps(value)
        if cls.quote_base64:
            value = b''.join(base64.b64encode(value).splitlines()).strip()
        return value

    @classmethod
    def unquote(cls, value):
        """Unquote the value for the cookie.  If unquoting does not work a
        :exc:`UnquoteError` is raised.

        :param value: the value to unquote.
        """
        try:
            if cls.quote_base64:
                value = base64.b64decode(value)
            if cls.serialization_method is not None:
                value = cls.serialization_method.loads(value)
            return value
        except Exception:
            # unfortunately pickle and other serialization modules can
            # cause pretty every error here.  if we get one we catch it
            # and convert it into an UnquoteError
            raise UnquoteError()

    def serialize(self, expires=None):
        """Serialize the secure cookie into a string.

        If expires is provided, the session will be automatically invalidated
        after expiration when you unserialize it.  This provides better
        protection against session cookie theft.

        :param expires: an optional expiration date for the cookie (a
                        :class:`datetime.datetime` object)
        """
        if self.secret_key is None:
            raise RuntimeError('no secret key defined')
        if expires:
            self['_expires'] = _date_to_unix(expires)
        result = []
        mac = hmac(self.secret_key, None, self.hash_method)
        # Sort keys so the MAC is independent of dict iteration order.
        for key, value in sorted(self.items()):
            result.append(('%s=%s' % (
                url_quote_plus(key),
                self.quote(value).decode('ascii')
            )).encode('ascii'))
            mac.update(b'|' + result[-1])
        return b'?'.join([
            base64.b64encode(mac.digest()).strip(),
            b'&'.join(result)
        ])

    @classmethod
    def unserialize(cls, string, secret_key):
        """Load the secure cookie from a serialized string.

        A bad MAC, malformed payload or expired '_expires' timestamp all fail
        silently and yield an empty cookie rather than raising.

        :param string: the cookie value to unserialize.
        :param secret_key: the secret key used to serialize the cookie.
        :return: a new :class:`SecureCookie`.
        """
        if isinstance(string, text_type):
            string = string.encode('utf-8', 'replace')
        if isinstance(secret_key, text_type):
            secret_key = secret_key.encode('utf-8', 'replace')
        try:
            base64_hash, data = string.split(b'?', 1)
        except (ValueError, IndexError):
            items = ()
        else:
            items = {}
            mac = hmac(secret_key, None, cls.hash_method)
            for item in data.split(b'&'):
                mac.update(b'|' + item)
                if not b'=' in item:
                    items = None
                    break
                key, value = item.split(b'=', 1)
                # try to make the key a string
                key = url_unquote_plus(key.decode('ascii'))
                try:
                    key = to_native(key)
                except UnicodeError:
                    pass
                items[key] = value

            # no parsing error and the mac looks okay, we can now
            # securely unpickle our cookie.
            try:
                client_hash = base64.b64decode(base64_hash)
            except TypeError:
                items = client_hash = None
            if items is not None and safe_str_cmp(client_hash, mac.digest()):
                try:
                    for key, value in iteritems(items):
                        items[key] = cls.unquote(value)
                except UnquoteError:
                    items = ()
                else:
                    if '_expires' in items:
                        if time() > items['_expires']:
                            items = ()
                        else:
                            del items['_expires']
            else:
                items = ()
        return cls(items, secret_key, False)

    @classmethod
    def load_cookie(cls, request, key='session', secret_key=None):
        """Loads a :class:`SecureCookie` from a cookie in request.  If the
        cookie is not set, a new :class:`SecureCookie` instance is
        returned.

        :param request: a request object that has a `cookies` attribute
                        which is a dict of all cookie values.
        :param key: the name of the cookie.
        :param secret_key: the secret key used to unquote the cookie.
                           Always provide the value even though it has
                           no default!
        """
        data = request.cookies.get(key)
        if not data:
            return cls(secret_key=secret_key)
        return cls.unserialize(data, secret_key)

    def save_cookie(self, response, key='session', expires=None,
                    session_expires=None, max_age=None, path='/', domain=None,
                    secure=None, httponly=False, force=False):
        """Saves the SecureCookie in a cookie on response object.  All
        parameters that are not described here are forwarded directly
        to :meth:`~BaseResponse.set_cookie`.

        :param response: a response object that has a
                         :meth:`~BaseResponse.set_cookie` method.
        :param key: the name of the cookie.
        :param session_expires: the expiration date of the secure cookie
                                stored information.  If this is not provided
                                the cookie `expires` date is used instead.
        """
        if force or self.should_save:
            data = self.serialize(session_expires or expires)
            response.set_cookie(key, data, expires=expires, max_age=max_age,
                                path=path, domain=domain, secure=secure,
                                httponly=httponly)
| apache-2.0 |
robocomp/robocomp-robolab | components/detection/test/emotionrecognitionclient/src/specificworker.py | 1 | 1855 | #
# Copyright (C) 2018 by YOUR NAME HERE
#
# This file is part of RoboComp
#
# RoboComp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RoboComp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RoboComp. If not, see <http://www.gnu.org/licenses/>.
#
import sys, os, traceback, time
import numpy as np
import cv2
from PySide import QtGui, QtCore
from genericworker import *
class SpecificWorker(GenericWorker):
    """Worker that periodically grabs a frame from the CameraSimple proxy,
    queries the EmotionRecognition proxy, and displays the frame with one
    labelled bounding box per detected emotion."""

    def __init__(self, proxy_map):
        super(SpecificWorker, self).__init__(proxy_map)
        self.timer.timeout.connect(self.compute)
        self.Period = 100  # milliseconds between compute() calls
        self.timer.start(self.Period)

    def setParams(self, params):
        # No configurable parameters for this worker.
        return True

    @QtCore.Slot()
    def compute(self):
        print('SpecificWorker.compute...')
        # Get image from camera.  np.frombuffer replaces the deprecated
        # np.fromstring; the .copy() is required because frombuffer returns a
        # read-only view and the cv2 drawing calls below write into the frame.
        data = self.camerasimple_proxy.getImage()
        arr = np.frombuffer(data.image, np.uint8)
        frame = np.reshape(arr, (data.height, data.width, data.depth)).copy()

        # Get emotion list
        emotionL = self.emotionrecognition_proxy.getEmotionList()
        print(emotionL)

        # Showing data on the frame
        for emotionData in emotionL:
            x = emotionData.x
            y = emotionData.y
            w = emotionData.w
            h = emotionData.h
            emotion = emotionData.emotion
            cv2.rectangle(frame, (x,y), (x+w,y+h), (255,0,0),2)
            cv2.putText(frame, emotion, (x,y-2), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255,255,255) ,2 , cv2.LINE_AA)
        cv2.imshow('Emotion', frame)
        return True
| gpl-3.0 |
ropik/chromium | net/tools/testserver/testserver.py | 6 | 78374 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a simple HTTP/FTP/SYNC/TCP/UDP/ server used for testing Chrome.
It supports several test URLs, as specified by the handlers in TestPageHandler.
By default, it listens on an ephemeral port and sends the port number back to
the originating process over a pipe. The originating process can specify an
explicit port if necessary.
It can use https if you specify the flag --https=CERT where CERT is the path
to a pem file containing the certificate and private key that should be used.
"""
import asyncore
import base64
import BaseHTTPServer
import cgi
import errno
import httplib
import minica
import optparse
import os
import random
import re
import select
import socket
import SocketServer
import struct
import sys
import threading
import time
import urllib
import urlparse
import warnings
import zlib
# Ignore deprecation warnings, they make our output more cluttered.
warnings.filterwarnings("ignore", category=DeprecationWarning)
import echo_message
import pyftpdlib.ftpserver
import tlslite
import tlslite.api
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
try:
import json
except ImportError:
import simplejson as json
if sys.platform == 'win32':
import msvcrt
# Server type selectors; the chosen value decides which of the HTTP/FTP/sync/
# TCP-echo/UDP-echo servers this script starts.
SERVER_HTTP = 0
SERVER_FTP = 1
SERVER_SYNC = 2
SERVER_TCP_ECHO = 3
SERVER_UDP_ECHO = 4
# Using debug() seems to cause hangs on XP: see http://crbug.com/64515 .
debug_output = sys.stderr
def debug(str):
  # Write one line to the debug stream and flush immediately so output is not
  # lost if the process dies.
  # NOTE(review): the parameter shadows the builtin 'str'; renaming it would
  # change the signature for keyword callers, so it is left as-is.
  debug_output.write(str + "\n")
  debug_output.flush()
class RecordingSSLSessionCache(object):
  """A TLS session-cache stand-in that stores nothing but records every
  lookup and insert it sees, so tests can inspect session-cache behaviour.

  Lookups always miss (KeyError), forcing a full handshake each time."""

  def __init__(self):
    self.log = []

  def __getitem__(self, sessionID):
    entry = ('lookup', sessionID)
    self.log.append(entry)
    # Never report a cached session.
    raise KeyError()

  def __setitem__(self, sessionID, session):
    entry = ('insert', sessionID)
    self.log.append(entry)
class ClientRestrictingServerMixIn:
  """Mix-in that accepts connections only from the address the server itself
  is bound to (i.e. the local test client)."""

  def verify_request(self, request, client_address):
    expected_host = self.server_address[0]
    return client_address[0] == expected_host
class StoppableHTTPServer(BaseHTTPServer.HTTPServer):
  """This is a specialization of BaseHTTPServer to allow it
  to be exited cleanly (by setting its "stop" member to True)."""

  def serve_forever(self):
    self.stop = False
    # NOTE(review): nonce_time is presumably consumed by the digest-auth
    # handler for nonce expiry -- confirm against the handler code.
    self.nonce_time = None
    while not self.stop:
      self.handle_request()
    self.socket.close()
class HTTPServer(ClientRestrictingServerMixIn, StoppableHTTPServer):
  """This is a specialization of StoppableHTTPServer that adds client
  verification."""
  pass
class OCSPServer(ClientRestrictingServerMixIn, BaseHTTPServer.HTTPServer):
  """This is a specialization of HTTPServer that serves an
  OCSP response"""

  def serve_forever_on_thread(self):
    # Run the blocking serve_forever() on a background thread so the main
    # test server can keep servicing its own requests.
    self.thread = threading.Thread(target = self.serve_forever,
                                   name = "OCSPServerThread")
    self.thread.start()

  def stop_serving(self):
    # shutdown() unblocks serve_forever(); join() waits for the thread to end.
    self.shutdown()
    self.thread.join()
class HTTPSServer(tlslite.api.TLSSocketServerMixIn,
                  ClientRestrictingServerMixIn,
                  StoppableHTTPServer):
  """This is a specialization of StoppableHTTPServer that add https support and
  client verification."""

  def __init__(self, server_address, request_hander_class, pem_cert_and_key,
               ssl_client_auth, ssl_client_cas, ssl_bulk_ciphers,
               record_resume_info, tls_intolerant):
    # pem_cert_and_key: path/contents of a PEM file holding both the server
    # certificate chain and its private key.
    # NOTE(review): 'request_hander_class' is a pre-existing typo in the
    # parameter name; it cannot be fixed without changing the signature.
    self.cert_chain = tlslite.api.X509CertChain().parseChain(pem_cert_and_key)
    self.private_key = tlslite.api.parsePEMKey(pem_cert_and_key, private=True)
    self.ssl_client_auth = ssl_client_auth
    self.ssl_client_cas = []
    self.tls_intolerant = tls_intolerant

    # Collect the subjects of the client CA certificates; tlslite sends these
    # to the client during the certificate request.
    for ca_file in ssl_client_cas:
      s = open(ca_file).read()
      x509 = tlslite.api.X509()
      x509.parse(s)
      self.ssl_client_cas.append(x509.subject)
    self.ssl_handshake_settings = tlslite.api.HandshakeSettings()
    if ssl_bulk_ciphers is not None:
      self.ssl_handshake_settings.cipherNames = ssl_bulk_ciphers

    if record_resume_info:
      # If record_resume_info is true then we'll replace the session cache with
      # an object that records the lookups and inserts that it sees.
      self.session_cache = RecordingSSLSessionCache()
    else:
      self.session_cache = tlslite.api.SessionCache()
    StoppableHTTPServer.__init__(self, server_address, request_hander_class)

  def handshake(self, tlsConnection):
    """Creates the SSL connection."""

    try:
      tlsConnection.handshakeServer(certChain=self.cert_chain,
                                    privateKey=self.private_key,
                                    sessionCache=self.session_cache,
                                    reqCert=self.ssl_client_auth,
                                    settings=self.ssl_handshake_settings,
                                    reqCAs=self.ssl_client_cas,
                                    tlsIntolerant=self.tls_intolerant)
      tlsConnection.ignoreAbruptClose = True
      return True
    except tlslite.api.TLSAbruptCloseError:
      # Ignore abrupt close.
      return True
    except tlslite.api.TLSError, error:
      print "Handshake failure:", str(error)
      return False
class SyncHTTPServer(ClientRestrictingServerMixIn, StoppableHTTPServer):
  """An HTTP server that handles sync commands."""

  def __init__(self, server_address, request_handler_class):
    # We import here to avoid pulling in chromiumsync's dependencies
    # unless strictly necessary.
    import chromiumsync
    import xmppserver
    StoppableHTTPServer.__init__(self, server_address, request_handler_class)
    self._sync_handler = chromiumsync.TestServer()
    self._xmpp_socket_map = {}
    # Port 0 asks the OS for an ephemeral port; the actual port is read back.
    self._xmpp_server = xmppserver.XmppServer(
      self._xmpp_socket_map, ('localhost', 0))
    self.xmpp_port = self._xmpp_server.getsockname()[1]
    self.authenticated = True

  def GetXmppServer(self):
    return self._xmpp_server

  def HandleCommand(self, query, raw_request):
    # Delegates a sync protocol command to the chromiumsync test server.
    return self._sync_handler.HandleCommand(query, raw_request)

  def HandleRequestNoBlock(self):
    """Handles a single request.

    Copied from SocketServer._handle_request_noblock().
    """

    try:
      request, client_address = self.get_request()
    except socket.error:
      return
    if self.verify_request(request, client_address):
      try:
        self.process_request(request, client_address)
      except:
        self.handle_error(request, client_address)
        self.close_request(request)

  def SetAuthenticated(self, auth_valid):
    self.authenticated = auth_valid

  def GetAuthenticated(self):
    return self.authenticated

  def serve_forever(self):
    """This is a merge of asyncore.loop() and SocketServer.serve_forever().
    """

    def HandleXmppSocket(fd, socket_map, handler):
      """Runs the handler for the xmpp connection for fd.

      Adapted from asyncore.read() et al.
      """

      xmpp_connection = socket_map.get(fd)
      # This could happen if a previous handler call caused fd to get
      # removed from socket_map.
      if xmpp_connection is None:
        return
      try:
        handler(xmpp_connection)
      except (asyncore.ExitNow, KeyboardInterrupt, SystemExit):
        raise
      except:
        xmpp_connection.handle_error()

    while True:
      # Select over our own listening socket plus every active XMPP socket.
      read_fds = [ self.fileno() ]
      write_fds = []
      exceptional_fds = []

      for fd, xmpp_connection in self._xmpp_socket_map.items():
        is_r = xmpp_connection.readable()
        is_w = xmpp_connection.writable()
        if is_r:
          read_fds.append(fd)
        if is_w:
          write_fds.append(fd)
        if is_r or is_w:
          exceptional_fds.append(fd)

      try:
        read_fds, write_fds, exceptional_fds = (
            select.select(read_fds, write_fds, exceptional_fds))
      except select.error, err:
        # EINTR just means the select was interrupted by a signal; retry.
        if err.args[0] != errno.EINTR:
          raise
        else:
          continue

      for fd in read_fds:
        if fd == self.fileno():
          self.HandleRequestNoBlock()
          continue
        HandleXmppSocket(fd, self._xmpp_socket_map,
                         asyncore.dispatcher.handle_read_event)

      for fd in write_fds:
        HandleXmppSocket(fd, self._xmpp_socket_map,
                         asyncore.dispatcher.handle_write_event)

      for fd in exceptional_fds:
        HandleXmppSocket(fd, self._xmpp_socket_map,
                         asyncore.dispatcher.handle_expt_event)
class FTPServer(ClientRestrictingServerMixIn, pyftpdlib.ftpserver.FTPServer):
  """This is a specialization of FTPServer that adds client verification."""
  pass
class TCPEchoServer(ClientRestrictingServerMixIn, SocketServer.TCPServer):
  """A TCP echo server that echoes back what it has received."""

  def server_bind(self):
    """Override server_bind to store the server name."""
    SocketServer.TCPServer.server_bind(self)
    host, port = self.socket.getsockname()[:2]
    self.server_name = socket.getfqdn(host)
    self.server_port = port

  def serve_forever(self):
    # Same stoppable loop as StoppableHTTPServer: handle requests until some
    # handler sets self.stop to True.
    self.stop = False
    self.nonce_time = None
    while not self.stop:
      self.handle_request()
    self.socket.close()
class UDPEchoServer(ClientRestrictingServerMixIn, SocketServer.UDPServer):
  """A UDP echo server that echoes back what it has received."""

  def server_bind(self):
    """Override server_bind to store the server name."""
    SocketServer.UDPServer.server_bind(self)
    host, port = self.socket.getsockname()[:2]
    self.server_name = socket.getfqdn(host)
    self.server_port = port

  def serve_forever(self):
    # Same stoppable loop as StoppableHTTPServer: handle requests until some
    # handler sets self.stop to True.
    self.stop = False
    self.nonce_time = None
    while not self.stop:
      self.handle_request()
    self.socket.close()
class BasePageHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """Request handler that dispatches each HTTP method to an ordered list of
  handler callables; the first handler to return True wins."""

  def __init__(self, request, client_address, socket_server,
               connect_handlers, get_handlers, head_handlers, post_handlers,
               put_handlers):
    self._connect_handlers = connect_handlers
    self._get_handlers = get_handlers
    self._head_handlers = head_handlers
    self._post_handlers = post_handlers
    self._put_handlers = put_handlers
    BaseHTTPServer.BaseHTTPRequestHandler.__init__(
      self, request, client_address, socket_server)

  def log_request(self, *args, **kwargs):
    # Disable request logging to declutter test log output.
    pass

  def _ShouldHandleRequest(self, handler_name):
    """Determines if the path can be handled by the handler.

    We consider a handler valid if the path begins with the
    handler name. It can optionally be followed by "?*", "/*".
    """

    pattern = re.compile('%s($|\?|/).*' % handler_name)
    return pattern.match(self.path)

  def do_CONNECT(self):
    for handler in self._connect_handlers:
      if handler():
        return

  def do_GET(self):
    for handler in self._get_handlers:
      if handler():
        return

  def do_HEAD(self):
    for handler in self._head_handlers:
      if handler():
        return

  def do_POST(self):
    for handler in self._post_handlers:
      if handler():
        return

  def do_PUT(self):
    for handler in self._put_handlers:
      if handler():
        return
class TestPageHandler(BasePageHandler):
def __init__(self, request, client_address, socket_server):
  # Handler lists are ordered: the first handler whose URL prefix matches
  # handles the request, so DefaultResponseHandler must stay last.
  connect_handlers = [
    self.RedirectConnectHandler,
    self.ServerAuthConnectHandler,
    self.DefaultConnectResponseHandler]
  get_handlers = [
    self.NoCacheMaxAgeTimeHandler,
    self.NoCacheTimeHandler,
    self.CacheTimeHandler,
    self.CacheExpiresHandler,
    self.CacheProxyRevalidateHandler,
    self.CachePrivateHandler,
    self.CachePublicHandler,
    self.CacheSMaxAgeHandler,
    self.CacheMustRevalidateHandler,
    self.CacheMustRevalidateMaxAgeHandler,
    self.CacheNoStoreHandler,
    self.CacheNoStoreMaxAgeHandler,
    self.CacheNoTransformHandler,
    self.DownloadHandler,
    self.DownloadFinishHandler,
    self.EchoHeader,
    self.EchoHeaderCache,
    self.EchoAllHandler,
    self.ZipFileHandler,
    self.GDataAuthHandler,
    self.GDataDocumentsFeedQueryHandler,
    self.FileHandler,
    self.SetCookieHandler,
    self.SetHeaderHandler,
    self.AuthBasicHandler,
    self.AuthDigestHandler,
    self.SlowServerHandler,
    self.ChunkedServerHandler,
    self.ContentTypeHandler,
    self.NoContentHandler,
    self.ServerRedirectHandler,
    self.ClientRedirectHandler,
    self.MultipartHandler,
    self.MultipartSlowHandler,
    self.GetSSLSessionCacheHandler,
    self.CloseSocketHandler,
    self.DefaultResponseHandler]
  # POST/PUT reuse the GET handlers after their method-specific ones.
  post_handlers = [
    self.EchoTitleHandler,
    self.EchoHandler,
    self.DeviceManagementHandler,
    self.PostOnlyFileHandler] + get_handlers
  put_handlers = [
    self.EchoTitleHandler,
    self.EchoHandler] + get_handlers
  head_handlers = [
    self.FileHandler,
    self.DefaultResponseHandler]

  # Extension -> MIME type map used by GetMIMETypeFromName; anything not
  # listed falls back to text/html.
  self._mime_types = {
    'crx' : 'application/x-chrome-extension',
    'exe' : 'application/octet-stream',
    'gif': 'image/gif',
    'jpeg' : 'image/jpeg',
    'jpg' : 'image/jpeg',
    'json': 'application/json',
    'pdf' : 'application/pdf',
    'xml' : 'text/xml'
  }
  self._default_mime_type = 'text/html'

  BasePageHandler.__init__(self, request, client_address, socket_server,
                           connect_handlers, get_handlers, head_handlers,
                           post_handlers, put_handlers)
def GetMIMETypeFromName(self, file_name):
  """Returns the MIME type for file_name, based solely on its extension.

  Any query string ("?...") is stripped before the extension is examined;
  unknown or missing extensions fall back to self._default_mime_type.
  """

  # os.path.splitext returns (root, ext); only the extension is needed.
  extension = os.path.splitext(file_name.split("?")[0])[1]
  if not extension:
    # No extension.
    return self._default_mime_type
  # The extension starts with a dot, so we need to remove it.
  return self._mime_types.get(extension[1:], self._default_mime_type)
def NoCacheMaxAgeTimeHandler(self):
  """This request handler yields a page with the title set to the current
  system time, and no caching requested."""

  if not self._ShouldHandleRequest("/nocachetime/maxage"):
    return False

  self.send_response(200)
  # max-age=0 forces revalidation on every use without forbidding storage.
  self.send_header('Cache-Control', 'max-age=0')
  self.send_header('Content-Type', 'text/html')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())
  return True
def NoCacheTimeHandler(self):
  """This request handler yields a page with the title set to the current
  system time, and no caching requested."""

  if not self._ShouldHandleRequest("/nocachetime"):
    return False

  self.send_response(200)
  # The time.time() title lets clients detect whether they got a cached copy.
  self.send_header('Cache-Control', 'no-cache')
  self.send_header('Content-Type', 'text/html')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())
  return True
def CacheTimeHandler(self):
  """This request handler yields a page with the title set to the current
  system time, and allows caching for one minute."""

  if not self._ShouldHandleRequest("/cachetime"):
    return False

  self.send_response(200)
  self.send_header('Cache-Control', 'max-age=60')
  self.send_header('Content-Type', 'text/html')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())
  return True
def CacheExpiresHandler(self):
  """This request handler yields a page with the title set to the current
  system time, and set the page to expire on 1 Jan 2099."""

  if not self._ShouldHandleRequest("/cache/expires"):
    return False

  self.send_response(200)
  # Far-future Expires makes the response cacheable essentially forever.
  self.send_header('Expires', 'Thu, 1 Jan 2099 00:00:00 GMT')
  self.send_header('Content-Type', 'text/html')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())
  return True
def CacheProxyRevalidateHandler(self):
  """This request handler yields a page with the title set to the current
  system time, allows caching for 60 seconds, and requires shared (proxy)
  caches to revalidate once stale."""

  if not self._ShouldHandleRequest("/cache/proxy-revalidate"):
    return False

  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Cache-Control', 'max-age=60, proxy-revalidate')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())
  return True
def CachePrivateHandler(self):
  """This request handler yields a page with the title set to the current
  system time, and allows private (browser) caching for 3 seconds."""

  if not self._ShouldHandleRequest("/cache/private"):
    return False

  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Cache-Control', 'max-age=3, private')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())
  return True
def CachePublicHandler(self):
  """This request handler yields a page with the title set to the current
  system time, and allows public caching for 3 seconds."""

  if not self._ShouldHandleRequest("/cache/public"):
    return False

  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Cache-Control', 'max-age=3, public')
  self.end_headers()

  self.wfile.write('<html><head><title>%s</title></head></html>' %
                   time.time())
  return True
def CacheSMaxAgeHandler(self):
  """Serves a time-titled page: shared caches may keep it 60s, clients 0s."""
  if not self._ShouldHandleRequest("/cache/s-maxage"):
    return False
  self.send_response(200)
  response_headers = (('Content-Type', 'text/html'),
                      ('Cache-Control', 'public, s-maxage = 60, max-age = 0'))
  for key, value in response_headers:
    self.send_header(key, value)
  self.end_headers()
  self.wfile.write('<html><head><title>%s</title></head></html>'
                   % time.time())
  return True
def CacheMustRevalidateHandler(self):
  """Serves a time-titled page that caches must revalidate before reuse."""
  if not self._ShouldHandleRequest("/cache/must-revalidate"):
    return False
  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Cache-Control', 'must-revalidate')
  self.end_headers()
  markup = '<html><head><title>%s</title></head></html>' % time.time()
  self.wfile.write(markup)
  return True
def CacheMustRevalidateMaxAgeHandler(self):
  """Serves a time-titled page; revalidation is required once the 60 second
  max-age lapses, despite the max-age being specified."""
  if not self._ShouldHandleRequest("/cache/must-revalidate/max-age"):
    return False
  self.send_response(200)
  for name, value in (('Content-Type', 'text/html'),
                      ('Cache-Control', 'max-age=60, must-revalidate')):
    self.send_header(name, value)
  self.end_headers()
  self.wfile.write('<html><head><title>%s</title></head></html>'
                   % time.time())
  return True
def CacheNoStoreHandler(self):
  """Serves a time-titled page that must never be stored by any cache."""
  if not self._ShouldHandleRequest("/cache/no-store"):
    return False
  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Cache-Control', 'no-store')
  self.end_headers()
  page = '<html><head><title>%s</title></head></html>' % time.time()
  self.wfile.write(page)
  return True
def CacheNoStoreMaxAgeHandler(self):
  """Serves a time-titled page that must not be stored, even though a
  max-age of 60 seconds is also present (no-store wins)."""
  if not self._ShouldHandleRequest("/cache/no-store/max-age"):
    return False
  self.send_response(200)
  for name, value in (('Content-Type', 'text/html'),
                      ('Cache-Control', 'max-age=60, no-store')):
    self.send_header(name, value)
  self.end_headers()
  self.wfile.write('<html><head><title>%s</title></head></html>'
                   % time.time())
  return True
def CacheNoTransformHandler(self):
  """Serves a time-titled page whose content intermediaries must not
  transform while caching."""
  if not self._ShouldHandleRequest("/cache/no-transform"):
    return False
  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.send_header('Cache-Control', 'no-transform')
  self.end_headers()
  document = '<html><head><title>%s</title></head></html>' % time.time()
  self.wfile.write(document)
  return True
def EchoHeader(self):
  """This handler echoes back the value of a specific request header."""
  # Fix: two bare string statements previously followed this return; they
  # were unreachable dead code (their text described EchoHeaderCache) and
  # have been removed.
  return self.EchoHeaderHelper("/echoheader")
def EchoHeaderCache(self):
  """Echoes back the value of a specific request header while allowing
  caching (the helper sends max-age=60000 for this path)."""
  return self.EchoHeaderHelper("/echoheadercache")
def EchoHeaderHelper(self, echo_header):
  """Echoes back the value of the request header named in the query string.

  Args:
    echo_header: the URL path this handler should respond to, e.g.
        '/echoheader' or '/echoheadercache' (the latter is served with a
        long max-age so cache behavior can be tested).

  Returns:
    True if the request was handled, False otherwise.
  """
  if not self._ShouldHandleRequest(echo_header):
    return False
  # Fix: default to the empty string so a request without '?name' no longer
  # raises NameError when header_name is referenced below.
  header_name = ''
  query_char = self.path.find('?')
  if query_char != -1:
    header_name = self.path[query_char+1:]
  self.send_response(200)
  self.send_header('Content-Type', 'text/plain')
  if echo_header == '/echoheadercache':
    self.send_header('Cache-control', 'max-age=60000')
  else:
    self.send_header('Cache-control', 'no-cache')
  # insert a vary header to properly indicate that the cachability of this
  # request is subject to value of the request header being echoed.
  if len(header_name) > 0:
    self.send_header('Vary', header_name)
  self.end_headers()
  if len(header_name) > 0:
    self.wfile.write(self.headers.getheader(header_name))
  return True
def ReadRequestBody(self):
  """Returns the body of the current request, decoding chunked
  transfer encoding when the request uses it."""
  if self.headers.getheader('transfer-encoding') != 'chunked':
    return self.rfile.read(int(self.headers.getheader('content-length')))
  # Chunked: each chunk is "<hex size>\r\n<payload>\r\n"; size 0 terminates.
  chunks = []
  while True:
    size = int(self.rfile.readline(), 16)
    if size == 0:
      self.rfile.readline()  # consume the CRLF after the final 0 chunk
      break
    chunks.append(self.rfile.read(size))
    self.rfile.read(2)  # consume the CRLF trailing each chunk's payload
  return ''.join(chunks)
def EchoHandler(self):
  """Echoes the request payload straight back, for form-submission tests."""
  if not self._ShouldHandleRequest("/echo"):
    return False
  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.end_headers()
  payload = self.ReadRequestBody()
  self.wfile.write(payload)
  return True
def EchoTitleHandler(self):
  """Like the /echo handler, but places the request body in the page
  title instead of the body."""
  if not self._ShouldHandleRequest("/echotitle"):
    return False
  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.end_headers()
  body = self.ReadRequestBody()
  write = self.wfile.write
  write('<html><head><title>')
  write(body)
  write('</title></head></html>')
  return True
def EchoAllHandler(self):
  """Renders a human-readable page showing the request's body parameters
  and headers."""
  if not self._ShouldHandleRequest("/echoall"):
    return False
  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.end_headers()
  write = self.wfile.write
  write('<html><head><style>'
        'pre { border: 1px solid black; margin: 5px; padding: 5px }'
        '</style></head><body>'
        '<div style="float: right">'
        '<a href="/echo">back to referring page</a></div>'
        '<h1>Request Body:</h1><pre>')
  if self.command in ('POST', 'PUT'):
    form = cgi.parse_qs(self.ReadRequestBody(), keep_blank_values=1)
    for name in form:
      write('%s=%s\n' % (name, form[name][0]))
  write('</pre>')
  write('<h1>Request Headers:</h1><pre>%s</pre>' % self.headers)
  write('</body></html>')
  return True
def DownloadHandler(self):
  """This handler sends a downloadable file with or without reporting
  the size (6K).

  Serves /download-unknown-size (no Content-Length) and
  /download-known-size (Content-Length sent). After the first chunk it
  blocks, re-entrantly serving other requests, until a
  /download-finish request clears server.waitForDownload.
  """
  if self.path.startswith("/download-unknown-size"):
    send_length = False
  elif self.path.startswith("/download-known-size"):
    send_length = True
  else:
    return False
  #
  # The test which uses this functionality is attempting to send
  # small chunks of data to the client.  Use a fairly large buffer
  # so that we'll fill chrome's IO buffer enough to force it to
  # actually write the data.
  # See also the comments in the client-side of this test in
  # download_uitest.cc
  #
  size_chunk1 = 35*1024
  size_chunk2 = 10*1024
  self.send_response(200)
  self.send_header('Content-Type', 'application/octet-stream')
  self.send_header('Cache-Control', 'max-age=0')
  if send_length:
    # Total length covers both chunks, even though the second is delayed.
    self.send_header('Content-Length', size_chunk1 + size_chunk2)
  self.end_headers()
  # First chunk of data:
  self.wfile.write("*" * size_chunk1)
  self.wfile.flush()
  # handle requests until one of them clears this flag.
  # NOTE: this re-enters the server loop from inside a handler;
  # DownloadFinishHandler is the request expected to clear the flag.
  self.server.waitForDownload = True
  while self.server.waitForDownload:
    self.server.handle_request()
  # Second chunk of data:
  self.wfile.write("*" * size_chunk2)
  return True
def DownloadFinishHandler(self):
  """Tells the server to release the download currently blocked in
  DownloadHandler."""
  if not self._ShouldHandleRequest("/download-finish"):
    return False
  # Clearing the flag lets DownloadHandler's inner request loop exit.
  self.server.waitForDownload = False
  self.send_response(200)
  for name, value in (('Content-Type', 'text/html'),
                      ('Cache-Control', 'max-age=0')):
    self.send_header(name, value)
  self.end_headers()
  return True
def _ReplaceFileData(self, data, query_parameters):
"""Replaces matching substrings in a file.
If the 'replace_text' URL query parameter is present, it is expected to be
of the form old_text:new_text, which indicates that any old_text strings in
the file are replaced with new_text. Multiple 'replace_text' parameters may
be specified.
If the parameters are not present, |data| is returned.
"""
query_dict = cgi.parse_qs(query_parameters)
replace_text_values = query_dict.get('replace_text', [])
for replace_text_value in replace_text_values:
replace_text_args = replace_text_value.split(':')
if len(replace_text_args) != 2:
raise ValueError(
'replace_text must be of form old_text:new_text. Actual value: %s' %
replace_text_value)
old_text_b64, new_text_b64 = replace_text_args
old_text = base64.urlsafe_b64decode(old_text_b64)
new_text = base64.urlsafe_b64decode(new_text_b64)
data = data.replace(old_text, new_text)
return data
def ZipFileHandler(self):
  """This handler sends the contents of the requested file in compressed form.
  Can pass in a parameter that specifies that the content length be
  C - the compressed size (OK),
  U - the uncompressed size (Non-standard, but handled),
  S - less than compressed (OK because we keep going),
  M - larger than compressed but less than uncompressed (an error),
  L - larger than uncompressed (an error)
  Example: compressedfiles/Picture_1.doc?C

  Returns True when the request was handled (including the 404 case),
  False when the path or query does not match this handler.
  """
  prefix = "/compressedfiles/"
  if not self.path.startswith(prefix):
    return False
  # Consume a request body if present.
  if self.command == 'POST' or self.command == 'PUT' :
    self.ReadRequestBody()
  _, _, url_path, _, query, _ = urlparse.urlparse(self.path)
  if not query in ('C', 'U', 'S', 'M', 'L'):
    return False
  # Map the URL path (after the prefix) onto the data directory.
  sub_path = url_path[len(prefix):]
  entries = sub_path.split('/')
  file_path = os.path.join(self.server.data_dir, *entries)
  if os.path.isdir(file_path):
    file_path = os.path.join(file_path, 'index.html')
  if not os.path.isfile(file_path):
    print "File not found " + sub_path + " full path:" + file_path
    self.send_error(404)
    return True
  f = open(file_path, "rb")
  data = f.read()
  uncompressed_len = len(data)
  f.close()
  # Compress the data.
  data = zlib.compress(data)
  compressed_len = len(data)
  # Choose the advertised Content-Length per the query letter; only 'C'
  # matches the bytes actually sent.
  content_length = compressed_len
  if query == 'U':
    content_length = uncompressed_len
  elif query == 'S':
    content_length = compressed_len / 2
  elif query == 'M':
    content_length = (compressed_len + uncompressed_len) / 2
  elif query == 'L':
    content_length = compressed_len + uncompressed_len
  self.send_response(200)
  self.send_header('Content-Type', 'application/msword')
  # zlib.compress output is served as Content-Encoding: deflate.
  self.send_header('Content-encoding', 'deflate')
  self.send_header('Connection', 'close')
  self.send_header('Content-Length', content_length)
  self.send_header('ETag', '\'' + file_path + '\'')
  self.end_headers()
  self.wfile.write(data)
  return True
def FileHandler(self):
  """Serves the requested file from the data directory, like a real
  webserver would."""
  root = self.server.file_root_url
  if not self.path.startswith(root):
    return False
  return self._FileHandlerHelper(root)
def PostOnlyFileHandler(self):
  """Serves files that live under the 'post/' subtree of the file root."""
  post_root = urlparse.urljoin(self.server.file_root_url, 'post/')
  if not self.path.startswith(post_root):
    return False
  return self._FileHandlerHelper(post_root)
def _FileHandlerHelper(self, prefix):
  """Serves a file from the data directory for a path under |prefix|.

  Supports optional query parameters:
    expected_body    - 404 unless the request body matches one of these.
    expected_headers - 'name:value' pairs that must be present; 404 if not.
    replace_text     - substitutions applied via _ReplaceFileData.
  If <file>.mock-http-headers exists next to the file, its status line and
  headers are replayed verbatim; otherwise Content-Type is guessed and
  simple 'bytes=start-end' Range requests are honored with a 206.
  """
  request_body = ''
  if self.command == 'POST' or self.command == 'PUT':
    # Consume a request body if present.
    request_body = self.ReadRequestBody()
  _, _, url_path, _, query, _ = urlparse.urlparse(self.path)
  query_dict = cgi.parse_qs(query)
  expected_body = query_dict.get('expected_body', [])
  if expected_body and request_body not in expected_body:
    self.send_response(404)
    self.end_headers()
    self.wfile.write('')
    return True
  expected_headers = query_dict.get('expected_headers', [])
  for expected_header in expected_headers:
    header_name, expected_value = expected_header.split(':')
    if self.headers.getheader(header_name) != expected_value:
      self.send_response(404)
      self.end_headers()
      self.wfile.write('')
      return True
  sub_path = url_path[len(prefix):]
  entries = sub_path.split('/')
  file_path = os.path.join(self.server.data_dir, *entries)
  if os.path.isdir(file_path):
    file_path = os.path.join(file_path, 'index.html')
  if not os.path.isfile(file_path):
    print "File not found " + sub_path + " full path:" + file_path
    self.send_error(404)
    return True
  f = open(file_path, "rb")
  data = f.read()
  f.close()
  data = self._ReplaceFileData(data, query)
  # Saved so the mock-headers branch can temporarily override it below.
  old_protocol_version = self.protocol_version
  # If file.mock-http-headers exists, it contains the headers we
  # should send.  Read them in and parse them.
  headers_path = file_path + '.mock-http-headers'
  if os.path.isfile(headers_path):
    f = open(headers_path, "r")
    # "HTTP/1.1 200 OK"
    response = f.readline()
    http_major, http_minor, status_code = re.findall(
        'HTTP/(\d+).(\d+) (\d+)', response)[0]
    self.protocol_version = "HTTP/%s.%s" % (http_major, http_minor)
    self.send_response(int(status_code))
    for line in f:
      header_values = re.findall('(\S+):\s*(.*)', line)
      if len(header_values) > 0:
        # "name: value"
        name, value = header_values[0]
        self.send_header(name, value)
    f.close()
  else:
    # Could be more generic once we support mime-type sniffing, but for
    # now we need to set it explicitly.
    range = self.headers.get('Range')
    if range and range.startswith('bytes='):
      # Note this doesn't handle all valid byte range values (i.e. left
      # open ended ones), just enough for what we needed so far.
      range = range[6:].split('-')
      start = int(range[0])
      if range[1]:
        end = int(range[1])
      else:
        end = len(data) - 1
      self.send_response(206)
      content_range = 'bytes ' + str(start) + '-' + str(end) + '/' + \
                      str(len(data))
      self.send_header('Content-Range', content_range)
      data = data[start: end + 1]
    else:
      self.send_response(200)
    self.send_header('Content-Type', self.GetMIMETypeFromName(file_path))
    self.send_header('Accept-Ranges', 'bytes')
    self.send_header('Content-Length', len(data))
    self.send_header('ETag', '\'' + file_path + '\'')
  self.end_headers()
  # HEAD requests get headers only, no body.
  if (self.command != 'HEAD'):
    self.wfile.write(data)
  self.protocol_version = old_protocol_version
  return True
def SetCookieHandler(self):
  """Sets one Set-Cookie header per '&'-separated query value and echoes
  each value into the body, for cookie handling tests."""
  if not self._ShouldHandleRequest("/set-cookie"):
    return False
  query_char = self.path.find('?')
  if query_char == -1:
    cookie_values = ("",)
  else:
    cookie_values = self.path[query_char + 1:].split('&')
  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  for cookie_value in cookie_values:
    self.send_header('Set-Cookie', '%s' % cookie_value)
  self.end_headers()
  for cookie_value in cookie_values:
    self.wfile.write('%s' % cookie_value)
  return True
def SetHeaderHandler(self):
  """Sets arbitrary response headers passed in the query string as
  URL-encoded 'key%3A%20value' pairs joined by '&'."""
  if not self._ShouldHandleRequest("/set-header"):
    return False
  query_char = self.path.find('?')
  if query_char == -1:
    headers_values = ("",)
  else:
    headers_values = self.path[query_char + 1:].split('&')
  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  for header_value in headers_values:
    header_value = urllib.unquote(header_value)
    (key, value) = header_value.split(': ', 1)
    self.send_header(key, value)
  self.end_headers()
  for header_value in headers_values:
    self.wfile.write('%s' % header_value)
  return True
def AuthBasicHandler(self):
  """This handler tests 'Basic' authentication.  It just sends a page with
  title 'user/pass' if you succeed.

  Query parameters:
    password                 - overrides the expected password ('secret').
    realm                    - overrides the challenge realm ('testrealm').
    set-cookie-if-challenged - sets a cookie on the 401 challenge response.
  On success the response is cacheable (max-age=60000, ETag 'abc') so that
  cached-pages-behind-auth can be tested; a .gif path serves a test image.
  """
  if not self._ShouldHandleRequest("/auth-basic"):
    return False
  username = userpass = password = b64str = ""
  expected_password = 'secret'
  realm = 'testrealm'
  set_cookie_if_challenged = False
  _, _, url_path, _, query, _ = urlparse.urlparse(self.path)
  query_params = cgi.parse_qs(query, True)
  if 'set-cookie-if-challenged' in query_params:
    set_cookie_if_challenged = True
  if 'password' in query_params:
    expected_password = query_params['password'][0]
  if 'realm' in query_params:
    realm = query_params['realm'][0]
  auth = self.headers.getheader('authorization')
  try:
    if not auth:
      raise Exception('no auth')
    # Decode "Authorization: Basic <base64(user:pass)>".
    b64str = re.findall(r'Basic (\S+)', auth)[0]
    userpass = base64.b64decode(b64str)
    username, password = re.findall(r'([^:]+):(\S+)', userpass)[0]
    if password != expected_password:
      raise Exception('wrong password')
  except Exception, e:
    # Authentication failed.
    self.send_response(401)
    self.send_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
    self.send_header('Content-Type', 'text/html')
    if set_cookie_if_challenged:
      self.send_header('Set-Cookie', 'got_challenged=true')
    self.end_headers()
    self.wfile.write('<html><head>')
    self.wfile.write('<title>Denied: %s</title>' % e)
    self.wfile.write('</head><body>')
    self.wfile.write('auth=%s<p>' % auth)
    self.wfile.write('b64str=%s<p>' % b64str)
    self.wfile.write('username: %s<p>' % username)
    self.wfile.write('userpass: %s<p>' % userpass)
    self.wfile.write('password: %s<p>' % password)
    self.wfile.write('You sent:<br>%s<p>' % self.headers)
    self.wfile.write('</body></html>')
    return True
  # Authentication successful.  (Return a cachable response to allow for
  # testing cached pages that require authentication.)
  old_protocol_version = self.protocol_version
  self.protocol_version = "HTTP/1.1"
  if_none_match = self.headers.getheader('if-none-match')
  if if_none_match == "abc":
    # Matches the Etag sent below, so the cached copy is still valid.
    self.send_response(304)
    self.end_headers()
  elif url_path.endswith(".gif"):
    # Using chrome/test/data/google/logo.gif as the test image
    test_image_path = ['google', 'logo.gif']
    gif_path = os.path.join(self.server.data_dir, *test_image_path)
    if not os.path.isfile(gif_path):
      self.send_error(404)
      self.protocol_version = old_protocol_version
      return True
    f = open(gif_path, "rb")
    data = f.read()
    f.close()
    self.send_response(200)
    self.send_header('Content-Type', 'image/gif')
    self.send_header('Cache-control', 'max-age=60000')
    self.send_header('Etag', 'abc')
    self.end_headers()
    self.wfile.write(data)
  else:
    self.send_response(200)
    self.send_header('Content-Type', 'text/html')
    self.send_header('Cache-control', 'max-age=60000')
    self.send_header('Etag', 'abc')
    self.end_headers()
    self.wfile.write('<html><head>')
    self.wfile.write('<title>%s/%s</title>' % (username, password))
    self.wfile.write('</head><body>')
    self.wfile.write('auth=%s<p>' % auth)
    self.wfile.write('You sent:<br>%s<p>' % self.headers)
    self.wfile.write('</body></html>')
  self.protocol_version = old_protocol_version
  return True
def GDataAuthHandler(self):
  """Validates GData-Version and Bearer-token headers on GData requests.

  Returns True only when a request was rejected; a fully valid request
  falls through (returns False) so later handlers can serve it.
  """
  if not self.server.gdata_auth_token:
    # --auth-token is not specified, not the test case for GData.
    return False
  if not self._ShouldHandleRequest('/files/chromeos/gdata'):
    return False
  if 'GData-Version' not in self.headers:
    self.send_error(httplib.BAD_REQUEST, 'GData-Version header is missing.')
    return True
  if 'Authorization' not in self.headers:
    self.send_error(httplib.UNAUTHORIZED)
    return True
  bearer_prefix = 'Bearer '
  auth_value = self.headers['Authorization']
  if not auth_value.startswith(bearer_prefix):
    self.send_error(httplib.UNAUTHORIZED)
    return True
  if auth_value[len(bearer_prefix):] != self.server.gdata_auth_token:
    self.send_error(httplib.UNAUTHORIZED)
    return True
  return False
def GDataDocumentsFeedQueryHandler(self):
  """Checks required query parameters on GData DocumentsFeed requests.

  Returns True only when a malformed request was rejected; valid requests
  fall through (return False) for the file handler to serve.
  """
  if not self.server.gdata_auth_token:
    # --auth-token is not specified, not the test case for GData.
    return False
  if not self._ShouldHandleRequest('/files/chromeos/gdata/root_feed.json'):
    return False
  (path, question, query_params) = self.path.partition('?')
  self.query_params = urlparse.parse_qs(query_params)
  if 'v' not in self.query_params:
    self.send_error(httplib.BAD_REQUEST, 'v is not specified.')
    return True
  elif 'alt' not in self.query_params or self.query_params['alt'] != ['json']:
    # currently our GData client only uses JSON format.
    self.send_error(httplib.BAD_REQUEST, 'alt parameter is wrong.')
    return True
  return False
def GetNonce(self, force_reset=False):
  """Returns a nonce stable per request path for the server's lifetime.

  This is a fake implementation; real servers use each nonce once (hence
  'n-once'), but nonce security is irrelevant for these unittests.

  Args:
    force_reset: if set, the nonce changes — used to exercise the 'stale'
        digest-auth response.
  """
  if force_reset or not self.server.nonce_time:
    self.server.nonce_time = time.time()
  seed = 'privatekey%s%d' % (self.path, self.server.nonce_time)
  return _new_md5(seed).hexdigest()
def AuthDigestHandler(self):
  """This handler tests 'Digest' authentication.

  It just sends a page with title 'user/pass' if you succeed.

  A stale response is sent iff "stale" is present in the request path.
  The response hash is validated per RFC 2617 (MD5, optional qop/nc/cnonce).
  """
  if not self._ShouldHandleRequest("/auth-digest"):
    return False
  stale = 'stale' in self.path
  nonce = self.GetNonce(force_reset=stale)
  opaque = _new_md5('opaque').hexdigest()
  password = 'secret'
  realm = 'testrealm'
  auth = self.headers.getheader('authorization')
  pairs = {}
  try:
    if not auth:
      raise Exception('no auth')
    if not auth.startswith('Digest'):
      raise Exception('not digest')
    # Pull out all the name="value" pairs as a dictionary.
    pairs = dict(re.findall(r'(\b[^ ,=]+)="?([^",]+)"?', auth))
    # Make sure it's all valid.
    if pairs['nonce'] != nonce:
      raise Exception('wrong nonce')
    if pairs['opaque'] != opaque:
      raise Exception('wrong opaque')
    # Check the 'response' value and make sure it matches our magic hash.
    # See http://www.ietf.org/rfc/rfc2617.txt
    hash_a1 = _new_md5(
        ':'.join([pairs['username'], realm, password])).hexdigest()
    hash_a2 = _new_md5(':'.join([self.command, pairs['uri']])).hexdigest()
    if 'qop' in pairs and 'nc' in pairs and 'cnonce' in pairs:
      # qop variant: response = MD5(A1:nonce:nc:cnonce:qop:A2).
      response = _new_md5(':'.join([hash_a1, nonce, pairs['nc'],
          pairs['cnonce'], pairs['qop'], hash_a2])).hexdigest()
    else:
      # Legacy variant: response = MD5(A1:nonce:A2).
      response = _new_md5(':'.join([hash_a1, nonce, hash_a2])).hexdigest()
    if pairs['response'] != response:
      raise Exception('wrong password')
  except Exception, e:
    # Authentication failed.
    self.send_response(401)
    hdr = ('Digest '
           'realm="%s", '
           'domain="/", '
           'qop="auth", '
           'algorithm=MD5, '
           'nonce="%s", '
           'opaque="%s"') % (realm, nonce, opaque)
    if stale:
      hdr += ', stale="TRUE"'
    self.send_header('WWW-Authenticate', hdr)
    self.send_header('Content-Type', 'text/html')
    self.end_headers()
    self.wfile.write('<html><head>')
    self.wfile.write('<title>Denied: %s</title>' % e)
    self.wfile.write('</head><body>')
    self.wfile.write('auth=%s<p>' % auth)
    self.wfile.write('pairs=%s<p>' % pairs)
    self.wfile.write('You sent:<br>%s<p>' % self.headers)
    self.wfile.write('We are replying:<br>%s<p>' % hdr)
    self.wfile.write('</body></html>')
    return True
  # Authentication successful.
  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.end_headers()
  self.wfile.write('<html><head>')
  self.wfile.write('<title>%s/%s</title>' % (pairs['username'], password))
  self.wfile.write('</head><body>')
  self.wfile.write('auth=%s<p>' % auth)
  self.wfile.write('pairs=%s<p>' % pairs)
  self.wfile.write('</body></html>')
  return True
def SlowServerHandler(self):
  """Wait for the user suggested time before responding. The syntax is
  /slow?0.5 to wait for half a second.

  Defaults to a one-second wait when the query is missing or unparsable.
  """
  if not self._ShouldHandleRequest("/slow"):
    return False
  query_char = self.path.find('?')
  wait_sec = 1.0
  if query_char >= 0:
    try:
      # Fix: was int(), which rejected fractional values like '0.5' even
      # though the docstring advertises them.
      wait_sec = float(self.path[query_char + 1:])
    except ValueError:
      pass
  time.sleep(wait_sec)
  self.send_response(200)
  self.send_header('Content-Type', 'text/plain')
  self.end_headers()
  self.wfile.write("waited %d seconds" % wait_sec)
  return True
def ChunkedServerHandler(self):
  """Streams a chunked response shaped by query parameters:
  - waitBeforeHeaders - ms to wait before sending headers
  - waitBetweenChunks - ms to wait between chunks
  - chunkSize - size of each chunk in bytes
  - chunksNumber - number of chunks
  Example: /chunked?waitBeforeHeaders=1000&chunkSize=5&chunksNumber=5
  waits one second, then sends headers and five chunks five bytes each."""
  if not self._ShouldHandleRequest("/chunked"):
    return False
  settings = {'waitBeforeHeaders' : 0,
              'waitBetweenChunks' : 0,
              'chunkSize' : 5,
              'chunksNumber' : 5}
  query_char = self.path.find('?')
  if query_char >= 0:
    for param in self.path[query_char + 1:].split('&'):
      pieces = param.split('=')
      if len(pieces) == 2:
        try:
          settings[pieces[0]] = int(pieces[1])
        except ValueError:
          pass
  time.sleep(0.001 * settings['waitBeforeHeaders'])
  self.protocol_version = 'HTTP/1.1' # Needed for chunked encoding
  self.send_response(200)
  self.send_header('Content-Type', 'text/plain')
  self.send_header('Connection', 'close')
  self.send_header('Transfer-Encoding', 'chunked')
  self.end_headers()
  # Emit every data chunk, then the terminating zero-length chunk.
  for index in range(settings['chunksNumber']):
    if index > 0:
      time.sleep(0.001 * settings['waitBetweenChunks'])
    self.sendChunkHelp('*' * settings['chunkSize'])
    self.wfile.flush() # Keep in mind that we start flushing only after 1kb.
  self.sendChunkHelp('')
  return True
def ContentTypeHandler(self):
  """Returns a string of html with the given content type.  E.g.,
  /contenttype?text/css returns an html file with the Content-Type
  header set to text/css.

  Defaults to text/html when the query is absent or empty.
  """
  if not self._ShouldHandleRequest("/contenttype"):
    return False
  query_char = self.path.find('?')
  # Fix: find() returns -1 when there is no '?', and self.path[-1+1:] is the
  # whole path, which was then sent as the Content-Type. Guard that case.
  if query_char == -1:
    content_type = ''
  else:
    content_type = self.path[query_char + 1:].strip()
  if not content_type:
    content_type = 'text/html'
  self.send_response(200)
  self.send_header('Content-Type', content_type)
  self.end_headers()
  self.wfile.write("<html>\n<body>\n<p>HTML text</p>\n</body>\n</html>\n")
  return True
def NoContentHandler(self):
  """Replies with an empty 204 No Content response."""
  if not self._ShouldHandleRequest("/nocontent"):
    return False
  self.send_response(204)
  self.end_headers()
  return True
def ServerRedirectHandler(self):
  """Issues a 301 redirect to the URL given after the '?'.
  '/server-redirect?http://foo.bar/asdf' redirects to
  'http://foo.bar/asdf'."""
  test_name = "/server-redirect"
  if not self._ShouldHandleRequest(test_name):
    return False
  query_char = self.path.find('?')
  if query_char < 0 or len(self.path) <= query_char + 1:
    # No destination supplied; explain the expected syntax instead.
    self.sendRedirectHelp(test_name)
    return True
  dest = self.path[query_char + 1:]
  self.send_response(301)  # moved permanently
  self.send_header('Location', dest)
  self.send_header('Content-Type', 'text/html')
  self.end_headers()
  write = self.wfile.write
  write('<html><head>')
  write('</head><body>Redirecting to %s</body></html>' % dest)
  return True
def ClientRedirectHandler(self):
  """Serves a meta-refresh page that client-redirects to the URL given
  after the '?'. '/client-redirect?http://foo.bar/asdf' redirects to
  'http://foo.bar/asdf'."""
  test_name = "/client-redirect"
  if not self._ShouldHandleRequest(test_name):
    return False
  query_char = self.path.find('?')
  if query_char < 0 or len(self.path) <= query_char + 1:
    # No destination supplied; explain the expected syntax instead.
    self.sendRedirectHelp(test_name)
    return True
  dest = self.path[query_char + 1:]
  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.end_headers()
  write = self.wfile.write
  write('<html><head>')
  write('<meta http-equiv="refresh" content="0;url=%s">' % dest)
  write('</head><body>Redirecting to %s</body></html>' % dest)
  return True
def MultipartHandler(self):
  """Streams ten text/html parts in one multipart/x-mixed-replace reply."""
  if not self._ShouldHandleRequest('/multipart'):
    return False
  bound = '12345'
  num_frames = 10
  self.send_response(200)
  self.send_header('Content-Type',
                   'multipart/x-mixed-replace;boundary=' + bound)
  self.end_headers()
  write = self.wfile.write
  for frame in xrange(num_frames):
    write('--' + bound + '\r\n')
    write('Content-Type: text/html\r\n\r\n')
    write('<title>page ' + str(frame) + '</title>')
    write('page ' + str(frame))
  write('--' + bound + '--')
  return True
def MultipartSlowHandler(self):
  """Streams three text/html parts with a small delay between each, the
  way some pages report progress via multipart; the last frame is titled
  PASS."""
  if not self._ShouldHandleRequest('/multipart-slow'):
    return False
  bound = '12345'
  num_frames = 3
  self.send_response(200)
  self.send_header('Content-Type',
                   'multipart/x-mixed-replace;boundary=' + bound)
  self.end_headers()
  write = self.wfile.write
  for frame in xrange(num_frames):
    write('--' + bound + '\r\n')
    write('Content-Type: text/html\r\n\r\n')
    time.sleep(0.25)
    if frame == 2:
      write('<title>PASS</title>')
    else:
      write('<title>page ' + str(frame) + '</title>')
    # Pad each frame past typical buffering thresholds with an HTML comment.
    write('page ' + str(frame) + '<!-- ' + ('x' * 2048) + '-->')
  write('--' + bound + '--')
  return True
def GetSSLSessionCacheHandler(self):
  """Send a reply containing a log of the session cache operations.

  Each line is '<action>\t<hex session id>'. The session_cache attribute
  only exists when the server was started with --https-record-resume, so
  its absence is reported in the body rather than as an error.
  """
  if not self._ShouldHandleRequest('/ssl-session-cache'):
    return False
  self.send_response(200)
  self.send_header('Content-Type', 'text/plain')
  self.end_headers()
  try:
    for (action, sessionID) in self.server.session_cache.log:
      self.wfile.write('%s\t%s\n' % (action, sessionID.encode('hex')))
  except AttributeError, e:
    # server.session_cache is absent without --https-record-resume.
    self.wfile.write('Pass --https-record-resume in order to use' +
                     ' this request')
  return True
def CloseSocketHandler(self):
  """Drops the connection without writing any response bytes."""
  if not self._ShouldHandleRequest('/close-socket'):
    return False
  self.wfile.close()
  return True
def DefaultResponseHandler(self):
  """Catch-all for requests no special handler claimed.

  Content-Length is required here: without it the https connection is not
  closed properly and the browser keeps expecting data."""
  contents = "Default response given for path: " + self.path
  self.send_response(200)
  for name, value in (('Content-Type', 'text/html'),
                      ('Content-Length', len(contents))):
    self.send_header(name, value)
  self.end_headers()
  if (self.command != 'HEAD'):
    self.wfile.write(contents)
  return True
def RedirectConnectHandler(self):
  """Answers the CONNECT for www.redirect.com with a 302. The RFC does not
  define this response, so the browser must not follow it."""
  if (self.path.find("www.redirect.com") < 0):
    return False
  dest = "http://www.destination.com/foo.js"
  self.send_response(302)  # moved temporarily
  for name, value in (('Location', dest), ('Connection', 'close')):
    self.send_header(name, value)
  self.end_headers()
  return True
def ServerAuthConnectHandler(self):
  """Answers the CONNECT for www.server-auth.com with a 401. This is a
  nonsensical response, since a proxy cannot demand server auth."""
  if (self.path.find("www.server-auth.com") < 0):
    return False
  challenge = 'Basic realm="WallyWorld"'
  self.send_response(401)  # unauthorized
  for name, value in (('WWW-Authenticate', challenge),
                      ('Connection', 'close')):
    self.send_header(name, value)
  self.end_headers()
  return True
def DefaultConnectResponseHandler(self):
  """Catch-all for CONNECT requests no special handler claimed; real web
  servers answer such requests with a 400."""
  contents = "Your client has issued a malformed or illegal request."
  self.send_response(400)  # bad request
  for name, value in (('Content-Type', 'text/html'),
                      ('Content-Length', len(contents))):
    self.send_header(name, value)
  self.end_headers()
  self.wfile.write(contents)
  return True
def DeviceManagementHandler(self):
  """Delegates to the device management service used for cloud policy.

  The device_management.TestServer is created lazily on first use and
  cached on the server object; subsequent requests reuse it.
  """
  if not self._ShouldHandleRequest("/device_management"):
    return False
  raw_request = self.ReadRequestBody()
  if not self.server._device_management_handler:
    # Project-local module; imported lazily so non-policy test runs never
    # need it.
    import device_management
    policy_path = os.path.join(self.server.data_dir, 'device_management')
    self.server._device_management_handler = (
        device_management.TestServer(policy_path,
                                     self.server.policy_keys,
                                     self.server.policy_user))
  http_response, raw_reply = (
      self.server._device_management_handler.HandleRequest(self.path,
                                                           self.headers,
                                                           raw_request))
  self.send_response(http_response)
  if (http_response == 200):
    # Only successful replies carry a protobuf payload type.
    self.send_header('Content-Type', 'application/x-protobuffer')
  self.end_headers()
  self.wfile.write(raw_reply)
  return True
# Used by the redirect handlers when the request carried no destination.
def sendRedirectHelp(self, redirect_name):
  """Replies with a short usage page explaining the redirect syntax."""
  self.send_response(200)
  self.send_header('Content-Type', 'text/html')
  self.end_headers()
  write = self.wfile.write
  write('<html><body><h1>Error: no redirect destination</h1>')
  write('Use <pre>%s?http://dest...</pre>' % redirect_name)
  write('</body></html>')
# Used by ChunkedServerHandler to emit one transfer-encoding chunk.
def sendChunkHelp(self, chunk):
  """Writes |chunk| framed as: hex size, CRLF, payload, CRLF."""
  self.wfile.write('%X\r\n' % len(chunk))
  self.wfile.write(chunk)
  self.wfile.write('\r\n')
class SyncPageHandler(BasePageHandler):
  """Handler for the main HTTP sync server.

  GET requests carry test-control operations (migrate, cred, notifications,
  induced errors); POST carries the actual sync protocol commands.
  """

  def __init__(self, request, client_address, sync_http_server):
    get_handlers = [self.ChromiumSyncTimeHandler,
                    self.ChromiumSyncMigrationOpHandler,
                    self.ChromiumSyncCredHandler,
                    self.ChromiumSyncDisableNotificationsOpHandler,
                    self.ChromiumSyncEnableNotificationsOpHandler,
                    self.ChromiumSyncSendNotificationOpHandler,
                    self.ChromiumSyncBirthdayErrorOpHandler,
                    self.ChromiumSyncTransientErrorOpHandler,
                    self.ChromiumSyncErrorOpHandler,
                    self.ChromiumSyncSyncTabsOpHandler,
                    self.ChromiumSyncCreateSyncedBookmarksOpHandler]
    post_handlers = [self.ChromiumSyncCommandHandler,
                     self.ChromiumSyncTimeHandler]
    BasePageHandler.__init__(self, request, client_address,
                             sync_http_server, [], get_handlers, [],
                             post_handlers, [])

  def _SendHtmlReply(self, http_code, raw_reply):
    """Sends a text/html reply with an explicit Content-Length.

    Shared by all the test-op handlers below, which previously repeated
    this boilerplate inline.
    """
    self.send_response(http_code)
    self.send_header('Content-Type', 'text/html')
    self.send_header('Content-Length', len(raw_reply))
    self.end_headers()
    self.wfile.write(raw_reply)

  def ChromiumSyncTimeHandler(self):
    """Handle Chromium sync .../time requests.

    The syncer sometimes checks server reachability by examining /time.
    """
    test_name = "/chromiumsync/time"
    if not self._ShouldHandleRequest(test_name):
      return False
    # Chrome hates it if we send a response before reading the request.
    if self.headers.getheader('content-length'):
      length = int(self.headers.getheader('content-length'))
      self.rfile.read(length)  # Drain and discard the body.
    self.send_response(200)
    self.send_header('Content-Type', 'text/plain')
    self.end_headers()
    self.wfile.write('0123456789')
    return True

  def ChromiumSyncCommandHandler(self):
    """Handle a chromiumsync command arriving via http.

    This covers all sync protocol commands: authentication, getupdates, and
    commit.
    """
    test_name = "/chromiumsync/command"
    if not self._ShouldHandleRequest(test_name):
      return False
    length = int(self.headers.getheader('content-length'))
    raw_request = self.rfile.read(length)
    http_response = 200
    raw_reply = None
    if not self.server.GetAuthenticated():
      http_response = 401
      challenge = 'GoogleLogin realm="http://%s", service="chromiumsync"' % (
          self.server.server_address[0])
    else:
      http_response, raw_reply = self.server.HandleCommand(
          self.path, raw_request)
    ### Now send the response to the client. ###
    self.send_response(http_response)
    if http_response == 401:
      self.send_header('www-Authenticate', challenge)
    self.end_headers()
    # Bug fix: the 401 path produced raw_reply = None, and writing None to
    # a file object raises TypeError. Only write a body when we have one.
    if raw_reply is not None:
      self.wfile.write(raw_reply)
    return True

  def ChromiumSyncMigrationOpHandler(self):
    """Trigger a migration-done error for the datatypes named in the URL."""
    test_name = "/chromiumsync/migrate"
    if not self._ShouldHandleRequest(test_name):
      return False
    http_response, raw_reply = self.server._sync_handler.HandleMigrate(
        self.path)
    self._SendHtmlReply(http_response, raw_reply)
    return True

  def ChromiumSyncCredHandler(self):
    """Set the server's authenticated flag from the 'valid' query param."""
    test_name = "/chromiumsync/cred"
    if not self._ShouldHandleRequest(test_name):
      return False
    try:
      query = urlparse.urlparse(self.path)[4]
      cred_valid = urlparse.parse_qs(query)['valid']
      self.server.SetAuthenticated(cred_valid[0] == 'True')
    except Exception:
      # Malformed/missing parameter: treat it as "not authenticated".
      # (Was a bare except:, which also swallowed SystemExit et al.)
      self.server.SetAuthenticated(False)
    raw_reply = 'Authenticated: %s ' % self.server.GetAuthenticated()
    self._SendHtmlReply(200, raw_reply)
    return True

  def ChromiumSyncDisableNotificationsOpHandler(self):
    """Turn off XMPP notifications on the embedded XMPP server."""
    test_name = "/chromiumsync/disablenotifications"
    if not self._ShouldHandleRequest(test_name):
      return False
    self.server.GetXmppServer().DisableNotifications()
    raw_reply = ('<html><title>Notifications disabled</title>'
                 '<H1>Notifications disabled</H1></html>')
    self._SendHtmlReply(200, raw_reply)
    return True

  def ChromiumSyncEnableNotificationsOpHandler(self):
    """Turn XMPP notifications back on."""
    test_name = "/chromiumsync/enablenotifications"
    if not self._ShouldHandleRequest(test_name):
      return False
    self.server.GetXmppServer().EnableNotifications()
    raw_reply = ('<html><title>Notifications enabled</title>'
                 '<H1>Notifications enabled</H1></html>')
    self._SendHtmlReply(200, raw_reply)
    return True

  def ChromiumSyncSendNotificationOpHandler(self):
    """Send an XMPP notification with optional 'channel'/'data' params."""
    test_name = "/chromiumsync/sendnotification"
    if not self._ShouldHandleRequest(test_name):
      return False
    query = urlparse.urlparse(self.path)[4]
    query_params = urlparse.parse_qs(query)
    channel = ''
    data = ''
    if 'channel' in query_params:
      channel = query_params['channel'][0]
    if 'data' in query_params:
      data = query_params['data'][0]
    self.server.GetXmppServer().SendNotification(channel, data)
    raw_reply = ('<html><title>Notification sent</title>'
                 '<H1>Notification sent with channel "%s" '
                 'and data "%s"</H1></html>'
                 % (channel, data))
    self._SendHtmlReply(200, raw_reply)
    return True

  def ChromiumSyncBirthdayErrorOpHandler(self):
    """Make the sync server start reporting a birthday error."""
    test_name = "/chromiumsync/birthdayerror"
    if not self._ShouldHandleRequest(test_name):
      return False
    result, raw_reply = self.server._sync_handler.HandleCreateBirthdayError()
    self._SendHtmlReply(result, raw_reply)
    return True

  def ChromiumSyncTransientErrorOpHandler(self):
    """Make the sync server start reporting a transient error."""
    test_name = "/chromiumsync/transienterror"
    if not self._ShouldHandleRequest(test_name):
      return False
    result, raw_reply = self.server._sync_handler.HandleSetTransientError()
    self._SendHtmlReply(result, raw_reply)
    return True

  def ChromiumSyncErrorOpHandler(self):
    """Induce the sync error described by the URL's query parameters."""
    test_name = "/chromiumsync/error"
    if not self._ShouldHandleRequest(test_name):
      return False
    result, raw_reply = self.server._sync_handler.HandleSetInducedError(
        self.path)
    self._SendHtmlReply(result, raw_reply)
    return True

  def ChromiumSyncSyncTabsOpHandler(self):
    """Enable the synced-tabs test behavior on the sync handler."""
    test_name = "/chromiumsync/synctabs"
    if not self._ShouldHandleRequest(test_name):
      return False
    result, raw_reply = self.server._sync_handler.HandleSetSyncTabs()
    self._SendHtmlReply(result, raw_reply)
    return True

  def ChromiumSyncCreateSyncedBookmarksOpHandler(self):
    """Ask the sync handler to create the synced-bookmarks folder."""
    test_name = "/chromiumsync/createsyncedbookmarks"
    if not self._ShouldHandleRequest(test_name):
      return False
    result, raw_reply = self.server._sync_handler.HandleCreateSyncedBookmarks()
    self._SendHtmlReply(result, raw_reply)
    return True
def MakeDataDir():
if options.data_dir:
if not os.path.isdir(options.data_dir):
print 'specified data dir not found: ' + options.data_dir + ' exiting...'
return None
my_data_dir = options.data_dir
else:
# Create the default path to our data dir, relative to the exe dir.
my_data_dir = os.path.dirname(sys.argv[0])
my_data_dir = os.path.join(my_data_dir, "..", "..", "..", "..",
"test", "data")
#TODO(ibrar): Must use Find* funtion defined in google\tools
#i.e my_data_dir = FindUpward(my_data_dir, "test", "data")
return my_data_dir
class OCSPHandler(BasePageHandler):
  """Serves the server's canned OCSP response for both GET and POST."""

  def __init__(self, request, client_address, socket_server):
    # Capture the response before base-class init, which dispatches the
    # request and may call OCSPResponse immediately.
    self.ocsp_response = socket_server.ocsp_response
    ocsp_handlers = [self.OCSPResponse]
    BasePageHandler.__init__(self, request, client_address, socket_server,
                             [], ocsp_handlers, [], ocsp_handlers, [])

  def OCSPResponse(self):
    body = self.ocsp_response
    self.send_response(200)
    self.send_header('Content-Type', 'application/ocsp-response')
    self.send_header('Content-Length', str(len(body)))
    self.end_headers()
    self.wfile.write(body)
class TCPEchoHandler(SocketServer.BaseRequestHandler):
  """The RequestHandler class for TCP echo server.

  It is instantiated once per connection to the server, and overrides the
  handle() method to implement communication to the client.
  """

  def handle(self):
    """Validates the "echo request" and sends back an "echo response"."""
    request_data = self.request.recv(65536).strip()
    try:
      response_data = echo_message.GetEchoResponseData(request_data)
    except ValueError:
      # Malformed request: stay silent.
      return
    if not response_data:
      return
    self.request.send(response_data)
class UDPEchoHandler(SocketServer.BaseRequestHandler):
  """The RequestHandler class for UDP echo server.

  It is instantiated once per connection to the server, and overrides the
  handle() method to implement communication to the client.
  """

  def handle(self):
    """Validates the "echo request" and sends back an "echo response"."""
    # For UDP servers self.request is a (datagram, socket) pair.
    payload, sock = self.request
    payload = payload.strip()
    try:
      response_data = echo_message.GetEchoResponseData(payload)
    except ValueError:
      # Malformed request: stay silent.
      return
    if not response_data:
      return
    sock.sendto(response_data, self.client_address)
class FileMultiplexer:
  """Tee-like wrapper that duplicates write()/flush() to two file objects."""

  def __init__(self, fd1, fd2):
    self.__fd1 = fd1
    self.__fd2 = fd2

  def __del__(self):
    # Close owned streams, but never the process-wide stdout/stderr.
    for fd in (self.__fd1, self.__fd2):
      if fd != sys.stdout and fd != sys.stderr:
        fd.close()

  def write(self, text):
    for fd in (self.__fd1, self.__fd2):
      fd.write(text)

  def flush(self):
    for fd in (self.__fd1, self.__fd2):
      fd.flush()
def main(options, args):
logfile = open('testserver.log', 'w')
sys.stderr = FileMultiplexer(sys.stderr, logfile)
if options.log_to_console:
sys.stdout = FileMultiplexer(sys.stdout, logfile)
else:
sys.stdout = logfile
port = options.port
host = options.host
server_data = {}
server_data['host'] = host
ocsp_server = None
if options.server_type == SERVER_HTTP:
if options.https:
pem_cert_and_key = None
if options.cert_and_key_file:
if not os.path.isfile(options.cert_and_key_file):
print ('specified server cert file not found: ' +
options.cert_and_key_file + ' exiting...')
return
pem_cert_and_key = file(options.cert_and_key_file, 'r').read()
else:
# generate a new certificate and run an OCSP server for it.
ocsp_server = OCSPServer((host, 0), OCSPHandler)
print ('OCSP server started on %s:%d...' %
(host, ocsp_server.server_port))
ocsp_der = None
ocsp_revoked = False
ocsp_invalid = False
if options.ocsp == 'ok':
pass
elif options.ocsp == 'revoked':
ocsp_revoked = True
elif options.ocsp == 'invalid':
ocsp_invalid = True
else:
print 'unknown OCSP status: ' + options.ocsp_status
return
(pem_cert_and_key, ocsp_der) = \
minica.GenerateCertKeyAndOCSP(
subject = "127.0.0.1",
ocsp_url = ("http://%s:%d/ocsp" %
(host, ocsp_server.server_port)),
ocsp_revoked = ocsp_revoked)
if ocsp_invalid:
ocsp_der = '3'
ocsp_server.ocsp_response = ocsp_der
for ca_cert in options.ssl_client_ca:
if not os.path.isfile(ca_cert):
print 'specified trusted client CA file not found: ' + ca_cert + \
' exiting...'
return
server = HTTPSServer((host, port), TestPageHandler, pem_cert_and_key,
options.ssl_client_auth, options.ssl_client_ca,
options.ssl_bulk_cipher, options.record_resume,
options.tls_intolerant)
print 'HTTPS server started on %s:%d...' % (host, server.server_port)
else:
server = HTTPServer((host, port), TestPageHandler)
print 'HTTP server started on %s:%d...' % (host, server.server_port)
server.data_dir = MakeDataDir()
server.file_root_url = options.file_root_url
server_data['port'] = server.server_port
server._device_management_handler = None
server.policy_keys = options.policy_keys
server.policy_user = options.policy_user
server.gdata_auth_token = options.auth_token
elif options.server_type == SERVER_SYNC:
server = SyncHTTPServer((host, port), SyncPageHandler)
print 'Sync HTTP server started on port %d...' % server.server_port
print 'Sync XMPP server started on port %d...' % server.xmpp_port
server_data['port'] = server.server_port
server_data['xmpp_port'] = server.xmpp_port
elif options.server_type == SERVER_TCP_ECHO:
# Used for generating the key (randomly) that encodes the "echo request"
# message.
random.seed()
server = TCPEchoServer((host, port), TCPEchoHandler)
print 'Echo TCP server started on port %d...' % server.server_port
server_data['port'] = server.server_port
elif options.server_type == SERVER_UDP_ECHO:
# Used for generating the key (randomly) that encodes the "echo request"
# message.
random.seed()
server = UDPEchoServer((host, port), UDPEchoHandler)
print 'Echo UDP server started on port %d...' % server.server_port
server_data['port'] = server.server_port
# means FTP Server
else:
my_data_dir = MakeDataDir()
# Instantiate a dummy authorizer for managing 'virtual' users
authorizer = pyftpdlib.ftpserver.DummyAuthorizer()
# Define a new user having full r/w permissions and a read-only
# anonymous user
authorizer.add_user('chrome', 'chrome', my_data_dir, perm='elradfmw')
authorizer.add_anonymous(my_data_dir)
# Instantiate FTP handler class
ftp_handler = pyftpdlib.ftpserver.FTPHandler
ftp_handler.authorizer = authorizer
# Define a customized banner (string returned when client connects)
ftp_handler.banner = ("pyftpdlib %s based ftpd ready." %
pyftpdlib.ftpserver.__ver__)
# Instantiate FTP server class and listen to address:port
server = pyftpdlib.ftpserver.FTPServer((host, port), ftp_handler)
server_data['port'] = server.socket.getsockname()[1]
print 'FTP server started on port %d...' % server_data['port']
# Notify the parent that we've started. (BaseServer subclasses
# bind their sockets on construction.)
if options.startup_pipe is not None:
server_data_json = json.dumps(server_data)
server_data_len = len(server_data_json)
print 'sending server_data: %s (%d bytes)' % (
server_data_json, server_data_len)
if sys.platform == 'win32':
fd = msvcrt.open_osfhandle(options.startup_pipe, 0)
else:
fd = options.startup_pipe
startup_pipe = os.fdopen(fd, "w")
# First write the data length as an unsigned 4-byte value. This
# is _not_ using network byte ordering since the other end of the
# pipe is on the same machine.
startup_pipe.write(struct.pack('=L', server_data_len))
startup_pipe.write(server_data_json)
startup_pipe.close()
if ocsp_server is not None:
ocsp_server.serve_forever_on_thread()
try:
server.serve_forever()
except KeyboardInterrupt:
print 'shutting down server'
if ocsp_server is not None:
ocsp_server.stop_serving()
server.stop = True
# Command-line entry point: declare all options and hand off to main().
if __name__ == '__main__':
  option_parser = optparse.OptionParser()
  option_parser.add_option("-f", '--ftp', action='store_const',
                           const=SERVER_FTP, default=SERVER_HTTP,
                           dest='server_type',
                           help='start up an FTP server.')
  option_parser.add_option('', '--sync', action='store_const',
                           const=SERVER_SYNC, default=SERVER_HTTP,
                           dest='server_type',
                           help='start up a sync server.')
  option_parser.add_option('', '--tcp-echo', action='store_const',
                           const=SERVER_TCP_ECHO, default=SERVER_HTTP,
                           dest='server_type',
                           help='start up a tcp echo server.')
  option_parser.add_option('', '--udp-echo', action='store_const',
                           const=SERVER_UDP_ECHO, default=SERVER_HTTP,
                           dest='server_type',
                           help='start up a udp echo server.')
  option_parser.add_option('', '--log-to-console', action='store_const',
                           const=True, default=False,
                           dest='log_to_console',
                           help='Enables or disables sys.stdout logging to '
                           'the console.')
  option_parser.add_option('', '--port', default='0', type='int',
                           help='Port used by the server. If unspecified, the '
                           'server will listen on an ephemeral port.')
  option_parser.add_option('', '--data-dir', dest='data_dir',
                           help='Directory from which to read the files.')
  option_parser.add_option('', '--https', action='store_true', dest='https',
                           help='Specify that https should be used.')
  option_parser.add_option('', '--cert-and-key-file', dest='cert_and_key_file',
                           help='specify the path to the file containing the '
                           'certificate and private key for the server in PEM '
                           'format')
  option_parser.add_option('', '--ocsp', dest='ocsp', default='ok',
                           help='The type of OCSP response generated for the '
                           'automatically generated certificate. One of '
                           '[ok,revoked,invalid]')
  option_parser.add_option('', '--tls-intolerant', dest='tls_intolerant',
                           const=True, default=False, action='store_const',
                           help='If true, TLS connections will be aborted '
                           ' in order to test SSLv3 fallback.')
  option_parser.add_option('', '--https-record-resume', dest='record_resume',
                           const=True, default=False, action='store_const',
                           help='Record resumption cache events rather than'
                           ' resuming as normal. Allows the use of the'
                           ' /ssl-session-cache request')
  option_parser.add_option('', '--ssl-client-auth', action='store_true',
                           help='Require SSL client auth on every connection.')
  option_parser.add_option('', '--ssl-client-ca', action='append', default=[],
                           help='Specify that the client certificate request '
                           'should include the CA named in the subject of '
                           'the DER-encoded certificate contained in the '
                           'specified file. This option may appear multiple '
                           'times, indicating multiple CA names should be '
                           'sent in the request.')
  # Fix: restored the missing space after 'algorithm(s)' (the concatenated
  # help read "algorithm(s)that") and dropped a stray trailing semicolon.
  option_parser.add_option('', '--ssl-bulk-cipher', action='append',
                           help='Specify the bulk encryption algorithm(s) '
                           'that will be accepted by the SSL server. Valid '
                           'values are "aes256", "aes128", "3des", "rc4". If '
                           'omitted, all algorithms will be used. This '
                           'option may appear multiple times, indicating '
                           'multiple algorithms should be enabled.')
  option_parser.add_option('', '--file-root-url', default='/files/',
                           help='Specify a root URL for files served.')
  option_parser.add_option('', '--startup-pipe', type='int',
                           dest='startup_pipe',
                           help='File handle of pipe to parent process')
  # Fix: corrected the 'multipe'/'ther server' typos and the garbled
  # 'in at each request a round-robin fashion' wording.
  option_parser.add_option('', '--policy-key', action='append',
                           dest='policy_keys',
                           help='Specify a path to a PEM-encoded private key '
                           'to use for policy signing. May be specified '
                           'multiple times in order to load multiple keys '
                           'into the server. If the server has multiple '
                           'keys, it will rotate through them at each '
                           'request in a round-robin fashion. The server '
                           'will generate a random key if none is specified '
                           'on the command line.')
  option_parser.add_option('', '--policy-user', default='user@example.com',
                           dest='policy_user',
                           help='Specify the user name the server should '
                           'report back to the client as the user owning the '
                           'token used for making the policy request.')
  option_parser.add_option('', '--host', default='127.0.0.1',
                           dest='host',
                           help='Hostname or IP upon which the server will '
                           'listen. Client connections will also only be '
                           'allowed from this address.')
  # Fix: restored the missing space after 'used' (the help read 'usedin').
  option_parser.add_option('', '--auth-token', dest='auth_token',
                           help='Specify the auth token which should be used '
                           'in the authorization header for GData.')
  options, args = option_parser.parse_args()
  sys.exit(main(options, args))
| bsd-3-clause |
S01780/python-social-auth | social/storage/sqlalchemy_orm.py | 14 | 7117 | """SQLAlchemy models for Social Auth"""
import base64
import six
import json
try:
import transaction
except ImportError:
transaction = None
from sqlalchemy import Column, Integer, String
from sqlalchemy.exc import IntegrityError
from sqlalchemy.types import PickleType, Text
from sqlalchemy.schema import UniqueConstraint
from sqlalchemy.ext.mutable import MutableDict
from social.storage.base import UserMixin, AssociationMixin, NonceMixin, \
CodeMixin, BaseStorage
class JSONType(PickleType):
    """PickleType variant that serializes via json into a Text column."""
    impl = Text

    def __init__(self, *args, **kwargs):
        # Swap the default pickle serializer for json.
        kwargs['pickler'] = json
        super(JSONType, self).__init__(*args, **kwargs)
class SQLAlchemyMixin(object):
    """Shared session/query helpers for the SQLAlchemy storage mixins."""

    @classmethod
    def _session(cls):
        # Concrete storages must supply the SQLAlchemy session.
        raise NotImplementedError('Implement in subclass')

    @classmethod
    def _query(cls):
        return cls._session().query(cls)

    @classmethod
    def _new_instance(cls, model, *args, **kwargs):
        # Build and immediately persist a new model instance.
        return cls._save_instance(model(*args, **kwargs))

    @classmethod
    def _save_instance(cls, instance):
        session = cls._session()
        session.add(instance)
        cls._flush()
        return instance

    @classmethod
    def _flush(cls):
        try:
            cls._session().flush()
        except AssertionError:
            # Zope-style transaction management (e.g. Pyramid) forbids a
            # direct flush; commit through the transaction manager instead.
            if transaction:
                with transaction.manager as manager:
                    manager.commit()
            else:
                cls._session().commit()

    def save(self):
        self._save_instance(self)
class SQLAlchemyUserMixin(SQLAlchemyMixin, UserMixin):
    """Social Auth association model"""
    __tablename__ = 'social_auth_usersocialauth'
    __table_args__ = (UniqueConstraint('provider', 'uid'),)
    id = Column(Integer, primary_key=True)
    provider = Column(String(32))
    extra_data = Column(MutableDict.as_mutable(JSONType))
    # Placeholders — presumably overridden with real columns/relationships
    # by the framework-specific storage subclasses; confirm there.
    uid = None
    user_id = None
    user = None
    @classmethod
    def changed(cls, user):
        # Persist a modified user instance.
        cls._save_instance(user)
    def set_extra_data(self, extra_data=None):
        # Only touch the database when the base implementation reports a
        # change.
        if super(SQLAlchemyUserMixin, self).set_extra_data(extra_data):
            self._save_instance(self)
    @classmethod
    def allowed_to_disconnect(cls, user, backend_name, association_id=None):
        # A user may disconnect a backend if they keep a usable password or
        # at least one other social association remains.
        if association_id is not None:
            qs = cls._query().filter(cls.id != association_id)
        else:
            qs = cls._query().filter(cls.provider != backend_name)
        qs = qs.filter(cls.user == user)
        if hasattr(user, 'has_usable_password'):  # TODO
            valid_password = user.has_usable_password()
        else:
            valid_password = True
        return valid_password or qs.count() > 0
    @classmethod
    def disconnect(cls, entry):
        # Delete the association row and flush.
        cls._session().delete(entry)
        cls._flush()
    @classmethod
    def user_query(cls):
        # Query over the configured user model, not this association table.
        return cls._session().query(cls.user_model())
    @classmethod
    def user_exists(cls, *args, **kwargs):
        """
        Return True/False if a User instance exists with the given arguments.
        Arguments are directly passed to filter() manager method.
        """
        return cls.user_query().filter_by(*args, **kwargs).count() > 0
    @classmethod
    def get_username(cls, user):
        # None if the user model has no username attribute.
        return getattr(user, 'username', None)
    @classmethod
    def create_user(cls, *args, **kwargs):
        return cls._new_instance(cls.user_model(), *args, **kwargs)
    @classmethod
    def get_user(cls, pk):
        return cls.user_query().get(pk)
    @classmethod
    def get_users_by_email(cls, email):
        return cls.user_query().filter_by(email=email)
    @classmethod
    def get_social_auth(cls, provider, uid):
        # uid is stored as a string; normalize before the lookup. Returns
        # None when no association matches.
        if not isinstance(uid, six.string_types):
            uid = str(uid)
        try:
            return cls._query().filter_by(provider=provider,
                                          uid=uid)[0]
        except IndexError:
            return None
    @classmethod
    def get_social_auth_for_user(cls, user, provider=None, id=None):
        # Query of this user's associations, optionally narrowed by
        # provider and/or association id.
        qs = cls._query().filter_by(user_id=user.id)
        if provider:
            qs = qs.filter_by(provider=provider)
        if id:
            qs = qs.filter_by(id=id)
        return qs
    @classmethod
    def create_social_auth(cls, user, uid, provider):
        if not isinstance(uid, six.string_types):
            uid = str(uid)
        return cls._new_instance(cls, user=user, uid=uid, provider=provider)
class SQLAlchemyNonceMixin(SQLAlchemyMixin, NonceMixin):
    """One-time-use OpenID nonce storage."""
    __tablename__ = 'social_auth_nonce'
    __table_args__ = (UniqueConstraint('server_url', 'timestamp', 'salt'),)
    id = Column(Integer, primary_key=True)
    server_url = Column(String(255))
    timestamp = Column(Integer)
    salt = Column(String(40))

    @classmethod
    def use(cls, server_url, timestamp, salt):
        """Return the nonce row for this triple, creating it on first use."""
        lookup = {'server_url': server_url, 'timestamp': timestamp,
                  'salt': salt}
        try:
            return cls._query().filter_by(**lookup)[0]
        except IndexError:
            return cls._new_instance(cls, **lookup)
class SQLAlchemyAssociationMixin(SQLAlchemyMixin, AssociationMixin):
    """OpenID association storage."""
    __tablename__ = 'social_auth_association'
    __table_args__ = (UniqueConstraint('server_url', 'handle'),)
    id = Column(Integer, primary_key=True)
    server_url = Column(String(255))
    handle = Column(String(255))
    secret = Column(String(255))  # base64 encoded
    issued = Column(Integer)
    lifetime = Column(Integer)
    assoc_type = Column(String(64))

    @classmethod
    def store(cls, server_url, association):
        """Create or update the stored association for server_url/handle."""
        # Don't use get_or_create because issued cannot be null
        try:
            assoc = cls._query().filter_by(server_url=server_url,
                                           handle=association.handle)[0]
        except IndexError:
            assoc = cls(server_url=server_url,
                        handle=association.handle)
        # Fix: base64.encodestring() was deprecated in Python 3.1 and removed
        # in 3.9; prefer encodebytes() when present, keeping the fallback for
        # Python 2 (this module still supports it — see the six usage).
        encode = getattr(base64, 'encodebytes', None) or base64.encodestring
        assoc.secret = encode(association.secret).decode()
        assoc.issued = association.issued
        assoc.lifetime = association.lifetime
        assoc.assoc_type = association.assoc_type
        cls._save_instance(assoc)

    @classmethod
    def get(cls, *args, **kwargs):
        """Return a query of associations matching the given filters."""
        return cls._query().filter_by(*args, **kwargs)

    @classmethod
    def remove(cls, ids_to_delete):
        """Bulk-delete associations by primary key."""
        cls._query().filter(cls.id.in_(ids_to_delete)).delete(
            synchronize_session='fetch'
        )
class SQLAlchemyCodeMixin(SQLAlchemyMixin, CodeMixin):
    """Email-validation code storage."""
    __tablename__ = 'social_auth_code'
    __table_args__ = (UniqueConstraint('code', 'email'),)
    id = Column(Integer, primary_key=True)
    email = Column(String(200))
    code = Column(String(32), index=True)

    @classmethod
    def get_code(cls, code):
        """Return the first row matching *code*, or None."""
        return cls._query().filter(cls.code == code).first()
class BaseSQLAlchemyStorage(BaseStorage):
    """Storage definition wiring the SQLAlchemy mixins together."""
    user = SQLAlchemyUserMixin
    nonce = SQLAlchemyNonceMixin
    association = SQLAlchemyAssociationMixin
    code = SQLAlchemyCodeMixin

    @classmethod
    def is_integrity_error(cls, exception):
        # Exact-class check (subclasses deliberately excluded).
        return type(exception) is IntegrityError
| bsd-3-clause |
tboyce021/home-assistant | homeassistant/components/demo/humidifier.py | 21 | 3456 | """Demo platform that offers a fake humidifier device."""
from homeassistant.components.humidifier import HumidifierEntity
from homeassistant.components.humidifier.const import (
DEVICE_CLASS_DEHUMIDIFIER,
DEVICE_CLASS_HUMIDIFIER,
SUPPORT_MODES,
)
SUPPORT_FLAGS = 0
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Demo humidifier devices."""
    demo_entities = [
        DemoHumidifier(
            name="Humidifier",
            mode=None,
            target_humidity=68,
            device_class=DEVICE_CLASS_HUMIDIFIER,
        ),
        DemoHumidifier(
            name="Dehumidifier",
            mode=None,
            target_humidity=54,
            device_class=DEVICE_CLASS_DEHUMIDIFIER,
        ),
        DemoHumidifier(
            name="Hygrostat",
            mode="home",
            available_modes=["home", "eco"],
            target_humidity=50,
        ),
    ]
    async_add_entities(demo_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Demo humidifier devices config entry.

    Delegates to async_setup_platform; the config entry contents are not
    consulted here.
    """
    await async_setup_platform(hass, {}, async_add_entities)
class DemoHumidifier(HumidifierEntity):
    """A fake (de)humidifier entity for the demo integration."""

    def __init__(
        self,
        name,
        mode,
        target_humidity,
        available_modes=None,
        is_on=True,
        device_class=None,
    ):
        """Initialize the demo entity; mode support is enabled when a mode is given."""
        self._name = name
        self._state = is_on
        self._support_flags = SUPPORT_FLAGS
        if mode is not None:
            self._support_flags |= SUPPORT_MODES
        self._target_humidity = target_humidity
        self._mode = mode
        self._available_modes = available_modes
        self._device_class = device_class

    @property
    def supported_features(self):
        """Return the bitmask of supported features."""
        return self._support_flags

    @property
    def should_poll(self):
        """Return False; this entity pushes its own state updates."""
        return False

    @property
    def name(self):
        """Return the name of the humidity device."""
        return self._name

    @property
    def target_humidity(self):
        """Return the humidity we try to reach."""
        return self._target_humidity

    @property
    def mode(self):
        """Return the currently active mode."""
        return self._mode

    @property
    def available_modes(self):
        """Return the list of selectable modes."""
        return self._available_modes

    @property
    def is_on(self):
        """Return true if the humidifier is on."""
        return self._state

    @property
    def device_class(self):
        """Return the device class of the humidifier."""
        return self._device_class

    async def async_turn_on(self, **kwargs):
        """Turn the device on."""
        self._state = True
        self.async_write_ha_state()

    async def async_turn_off(self, **kwargs):
        """Turn the device off."""
        self._state = False
        self.async_write_ha_state()

    async def async_set_humidity(self, humidity):
        """Set a new target humidity level."""
        self._target_humidity = humidity
        self.async_write_ha_state()

    async def async_set_mode(self, mode):
        """Switch to a new mode."""
        self._mode = mode
        self.async_write_ha_state()
| apache-2.0 |
supergentle/migueltutorial | flask/lib/python2.7/site-packages/flask/testsuite/basic.py | 406 | 43777 | # -*- coding: utf-8 -*-
"""
flask.testsuite.basic
~~~~~~~~~~~~~~~~~~~~~
The basic functionality.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
import uuid
import flask
import pickle
import unittest
from datetime import datetime
from threading import Thread
from flask.testsuite import FlaskTestCase, emits_module_deprecation_warning
from flask._compat import text_type
from werkzeug.exceptions import BadRequest, NotFound
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
class BasicFunctionalityTestCase(FlaskTestCase):
def test_options_work(self):
app = flask.Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
return 'Hello World'
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST'])
self.assert_equal(rv.data, b'')
def test_options_on_multiple_rules(self):
app = flask.Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
return 'Hello World'
@app.route('/', methods=['PUT'])
def index_put():
return 'Aha!'
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST', 'PUT'])
def test_options_handling_disabled(self):
app = flask.Flask(__name__)
def index():
return 'Hello World!'
index.provide_automatic_options = False
app.route('/')(index)
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(rv.status_code, 405)
app = flask.Flask(__name__)
def index2():
return 'Hello World!'
index2.provide_automatic_options = True
app.route('/', methods=['OPTIONS'])(index2)
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(sorted(rv.allow), ['OPTIONS'])
def test_request_dispatching(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.request.method
@app.route('/more', methods=['GET', 'POST'])
def more():
return flask.request.method
c = app.test_client()
self.assert_equal(c.get('/').data, b'GET')
rv = c.post('/')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS'])
rv = c.head('/')
self.assert_equal(rv.status_code, 200)
self.assert_false(rv.data) # head truncates
self.assert_equal(c.post('/more').data, b'POST')
self.assert_equal(c.get('/more').data, b'GET')
rv = c.delete('/more')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST'])
def test_url_mapping(self):
app = flask.Flask(__name__)
def index():
return flask.request.method
def more():
return flask.request.method
app.add_url_rule('/', 'index', index)
app.add_url_rule('/more', 'more', more, methods=['GET', 'POST'])
c = app.test_client()
self.assert_equal(c.get('/').data, b'GET')
rv = c.post('/')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS'])
rv = c.head('/')
self.assert_equal(rv.status_code, 200)
self.assert_false(rv.data) # head truncates
self.assert_equal(c.post('/more').data, b'POST')
self.assert_equal(c.get('/more').data, b'GET')
rv = c.delete('/more')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST'])
def test_werkzeug_routing(self):
from werkzeug.routing import Submount, Rule
app = flask.Flask(__name__)
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
def bar():
return 'bar'
def index():
return 'index'
app.view_functions['bar'] = bar
app.view_functions['index'] = index
c = app.test_client()
self.assert_equal(c.get('/foo/').data, b'index')
self.assert_equal(c.get('/foo/bar').data, b'bar')
def test_endpoint_decorator(self):
from werkzeug.routing import Submount, Rule
app = flask.Flask(__name__)
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
@app.endpoint('bar')
def bar():
return 'bar'
@app.endpoint('index')
def index():
return 'index'
c = app.test_client()
self.assert_equal(c.get('/foo/').data, b'index')
self.assert_equal(c.get('/foo/bar').data, b'bar')
    def test_session(self):
        """Round-trip a value through the cookie-based session: one request
        stores it, a later request from the same client reads it back."""
        app = flask.Flask(__name__)
        app.secret_key = 'testkey'  # sessions require a secret key to sign the cookie
        @app.route('/set', methods=['POST'])
        def set():
            flask.session['value'] = flask.request.form['value']
            return 'value set'
        @app.route('/get')
        def get():
            return flask.session['value']
        c = app.test_client()
        self.assert_equal(c.post('/set', data={'value': '42'}).data, b'value set')
        # the test client keeps cookies, so the session survives across requests
        self.assert_equal(c.get('/get').data, b'42')
def test_session_using_server_name(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com/')
self.assert_in('domain=.example.com', rv.headers['set-cookie'].lower())
self.assert_in('httponly', rv.headers['set-cookie'].lower())
def test_session_using_server_name_and_port(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com:8080'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/')
self.assert_in('domain=.example.com', rv.headers['set-cookie'].lower())
self.assert_in('httponly', rv.headers['set-cookie'].lower())
def test_session_using_server_name_port_and_path(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com:8080',
APPLICATION_ROOT='/foo'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/foo')
self.assert_in('domain=example.com', rv.headers['set-cookie'].lower())
self.assert_in('path=/foo', rv.headers['set-cookie'].lower())
self.assert_in('httponly', rv.headers['set-cookie'].lower())
def test_session_using_application_root(self):
class PrefixPathMiddleware(object):
def __init__(self, app, prefix):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
environ['SCRIPT_NAME'] = self.prefix
return self.app(environ, start_response)
app = flask.Flask(__name__)
app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, '/bar')
app.config.update(
SECRET_KEY='foo',
APPLICATION_ROOT='/bar'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/')
self.assert_in('path=/bar', rv.headers['set-cookie'].lower())
def test_session_using_session_settings(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='www.example.com:8080',
APPLICATION_ROOT='/test',
SESSION_COOKIE_DOMAIN='.example.com',
SESSION_COOKIE_HTTPONLY=False,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_PATH='/'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://www.example.com:8080/test/')
cookie = rv.headers['set-cookie'].lower()
self.assert_in('domain=.example.com', cookie)
self.assert_in('path=/', cookie)
self.assert_in('secure', cookie)
self.assert_not_in('httponly', cookie)
    def test_missing_session(self):
        """Without a secret key, reading the session is tolerated (acts like
        an empty mapping) but any mutation must raise a RuntimeError that
        explains the session is unavailable."""
        app = flask.Flask(__name__)
        def expect_exception(f, *args, **kwargs):
            # helper: assert that f(...) raises the 'session is unavailable'
            # RuntimeError rather than silently succeeding
            try:
                f(*args, **kwargs)
            except RuntimeError as e:
                self.assert_true(e.args and 'session is unavailable' in e.args[0])
            else:
                self.assert_true(False, 'expected exception')
        with app.test_request_context():
            # reads are no-ops and fall back to the default
            self.assert_true(flask.session.get('missing_key') is None)
            expect_exception(flask.session.__setitem__, 'foo', 42)
            expect_exception(flask.session.pop, 'foo')
def test_session_expiration(self):
permanent = True
app = flask.Flask(__name__)
app.secret_key = 'testkey'
@app.route('/')
def index():
flask.session['test'] = 42
flask.session.permanent = permanent
return ''
@app.route('/test')
def test():
return text_type(flask.session.permanent)
client = app.test_client()
rv = client.get('/')
self.assert_in('set-cookie', rv.headers)
match = re.search(r'\bexpires=([^;]+)(?i)', rv.headers['set-cookie'])
expires = parse_date(match.group())
expected = datetime.utcnow() + app.permanent_session_lifetime
self.assert_equal(expires.year, expected.year)
self.assert_equal(expires.month, expected.month)
self.assert_equal(expires.day, expected.day)
rv = client.get('/test')
self.assert_equal(rv.data, b'True')
permanent = False
rv = app.test_client().get('/')
self.assert_in('set-cookie', rv.headers)
match = re.search(r'\bexpires=([^;]+)', rv.headers['set-cookie'])
self.assert_true(match is None)
def test_session_stored_last(self):
app = flask.Flask(__name__)
app.secret_key = 'development-key'
app.testing = True
@app.after_request
def modify_session(response):
flask.session['foo'] = 42
return response
@app.route('/')
def dump_session_contents():
return repr(flask.session.get('foo'))
c = app.test_client()
self.assert_equal(c.get('/').data, b'None')
self.assert_equal(c.get('/').data, b'42')
def test_session_special_types(self):
app = flask.Flask(__name__)
app.secret_key = 'development-key'
app.testing = True
now = datetime.utcnow().replace(microsecond=0)
the_uuid = uuid.uuid4()
@app.after_request
def modify_session(response):
flask.session['m'] = flask.Markup('Hello!')
flask.session['u'] = the_uuid
flask.session['dt'] = now
flask.session['b'] = b'\xff'
flask.session['t'] = (1, 2, 3)
return response
@app.route('/')
def dump_session_contents():
return pickle.dumps(dict(flask.session))
c = app.test_client()
c.get('/')
rv = pickle.loads(c.get('/').data)
self.assert_equal(rv['m'], flask.Markup('Hello!'))
self.assert_equal(type(rv['m']), flask.Markup)
self.assert_equal(rv['dt'], now)
self.assert_equal(rv['u'], the_uuid)
self.assert_equal(rv['b'], b'\xff')
self.assert_equal(type(rv['b']), bytes)
self.assert_equal(rv['t'], (1, 2, 3))
    def test_flashes(self):
        """Flashing marks the session as modified and queues the messages
        for later retrieval by ``get_flashed_messages()``."""
        app = flask.Flask(__name__)
        app.secret_key = 'testkey'
        with app.test_request_context():
            self.assert_false(flask.session.modified)
            flask.flash('Zap')
            # reset the flag to prove the second flash sets it again
            flask.session.modified = False
            flask.flash('Zip')
            self.assert_true(flask.session.modified)
            self.assert_equal(list(flask.get_flashed_messages()), ['Zap', 'Zip'])
def test_extended_flashing(self):
# Be sure app.testing=True below, else tests can fail silently.
#
# Specifically, if app.testing is not set to True, the AssertionErrors
# in the view functions will cause a 500 response to the test client
# instead of propagating exceptions.
app = flask.Flask(__name__)
app.secret_key = 'testkey'
app.testing = True
@app.route('/')
def index():
flask.flash(u'Hello World')
flask.flash(u'Hello World', 'error')
flask.flash(flask.Markup(u'<em>Testing</em>'), 'warning')
return ''
@app.route('/test/')
def test():
messages = flask.get_flashed_messages()
self.assert_equal(len(messages), 3)
self.assert_equal(messages[0], u'Hello World')
self.assert_equal(messages[1], u'Hello World')
self.assert_equal(messages[2], flask.Markup(u'<em>Testing</em>'))
return ''
@app.route('/test_with_categories/')
def test_with_categories():
messages = flask.get_flashed_messages(with_categories=True)
self.assert_equal(len(messages), 3)
self.assert_equal(messages[0], ('message', u'Hello World'))
self.assert_equal(messages[1], ('error', u'Hello World'))
self.assert_equal(messages[2], ('warning', flask.Markup(u'<em>Testing</em>')))
return ''
@app.route('/test_filter/')
def test_filter():
messages = flask.get_flashed_messages(category_filter=['message'], with_categories=True)
self.assert_equal(len(messages), 1)
self.assert_equal(messages[0], ('message', u'Hello World'))
return ''
@app.route('/test_filters/')
def test_filters():
messages = flask.get_flashed_messages(category_filter=['message', 'warning'], with_categories=True)
self.assert_equal(len(messages), 2)
self.assert_equal(messages[0], ('message', u'Hello World'))
self.assert_equal(messages[1], ('warning', flask.Markup(u'<em>Testing</em>')))
return ''
@app.route('/test_filters_without_returning_categories/')
def test_filters2():
messages = flask.get_flashed_messages(category_filter=['message', 'warning'])
self.assert_equal(len(messages), 2)
self.assert_equal(messages[0], u'Hello World')
self.assert_equal(messages[1], flask.Markup(u'<em>Testing</em>'))
return ''
# Create new test client on each test to clean flashed messages.
c = app.test_client()
c.get('/')
c.get('/test/')
c = app.test_client()
c.get('/')
c.get('/test_with_categories/')
c = app.test_client()
c.get('/')
c.get('/test_filter/')
c = app.test_client()
c.get('/')
c.get('/test_filters/')
c = app.test_client()
c.get('/')
c.get('/test_filters_without_returning_categories/')
def test_request_processing(self):
app = flask.Flask(__name__)
evts = []
@app.before_request
def before_request():
evts.append('before')
@app.after_request
def after_request(response):
response.data += b'|after'
evts.append('after')
return response
@app.route('/')
def index():
self.assert_in('before', evts)
self.assert_not_in('after', evts)
return 'request'
self.assert_not_in('after', evts)
rv = app.test_client().get('/').data
self.assert_in('after', evts)
self.assert_equal(rv, b'request|after')
def test_after_request_processing(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
@flask.after_this_request
def foo(response):
response.headers['X-Foo'] = 'a header'
return response
return 'Test'
c = app.test_client()
resp = c.get('/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.headers['X-Foo'], 'a header')
def test_teardown_request_handler(self):
called = []
app = flask.Flask(__name__)
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route('/')
def root():
return "Response"
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 200)
self.assert_in(b'Response', rv.data)
self.assert_equal(len(called), 1)
def test_teardown_request_handler_debug_mode(self):
called = []
app = flask.Flask(__name__)
app.testing = True
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route('/')
def root():
return "Response"
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 200)
self.assert_in(b'Response', rv.data)
self.assert_equal(len(called), 1)
    def test_teardown_request_handler_error(self):
        """Every teardown_request handler must receive the *original*
        unhandled exception, even if a later exception temporarily replaced
        ``sys.exc_info()`` inside an earlier handler."""
        called = []
        app = flask.Flask(__name__)
        @app.teardown_request
        def teardown_request1(exc):
            self.assert_equal(type(exc), ZeroDivisionError)
            called.append(True)
            # This raises a new error and blows away sys.exc_info(), so we can
            # test that all teardown_requests get passed the same original
            # exception.
            try:
                raise TypeError()
            except:
                pass
        @app.teardown_request
        def teardown_request2(exc):
            self.assert_equal(type(exc), ZeroDivisionError)
            called.append(True)
            # This raises a new error and blows away sys.exc_info(), so we can
            # test that all teardown_requests get passed the same original
            # exception.
            try:
                raise TypeError()
            except:
                pass
        @app.route('/')
        def fails():
            1 // 0
        rv = app.test_client().get('/')
        # the ZeroDivisionError is converted into a 500 response ...
        self.assert_equal(rv.status_code, 500)
        self.assert_in(b'Internal Server Error', rv.data)
        # ... and both teardown handlers ran exactly once
        self.assert_equal(len(called), 2)
    def test_before_after_request_order(self):
        """before_request handlers run in registration order; after_request
        and teardown_request handlers run in *reverse* registration order."""
        called = []
        app = flask.Flask(__name__)
        @app.before_request
        def before1():
            called.append(1)
        @app.before_request
        def before2():
            called.append(2)
        @app.after_request
        def after1(response):
            called.append(4)  # registered first -> runs second of the pair
            return response
        @app.after_request
        def after2(response):
            called.append(3)  # registered last -> runs first of the pair
            return response
        @app.teardown_request
        def finish1(exc):
            called.append(6)  # registered first -> runs second of the pair
        @app.teardown_request
        def finish2(exc):
            called.append(5)  # registered last -> runs first of the pair
        @app.route('/')
        def index():
            return '42'
        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'42')
        self.assert_equal(called, [1, 2, 3, 4, 5, 6])
def test_error_handling(self):
app = flask.Flask(__name__)
@app.errorhandler(404)
def not_found(e):
return 'not found', 404
@app.errorhandler(500)
def internal_server_error(e):
return 'internal server error', 500
@app.route('/')
def index():
flask.abort(404)
@app.route('/error')
def error():
1 // 0
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, b'not found')
rv = c.get('/error')
self.assert_equal(rv.status_code, 500)
self.assert_equal(b'internal server error', rv.data)
def test_before_request_and_routing_errors(self):
app = flask.Flask(__name__)
@app.before_request
def attach_something():
flask.g.something = 'value'
@app.errorhandler(404)
def return_something(error):
return flask.g.something, 404
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, b'value')
def test_user_error_handling(self):
class MyException(Exception):
pass
app = flask.Flask(__name__)
@app.errorhandler(MyException)
def handle_my_exception(e):
self.assert_true(isinstance(e, MyException))
return '42'
@app.route('/')
def index():
raise MyException()
c = app.test_client()
self.assert_equal(c.get('/').data, b'42')
def test_trapping_of_bad_request_key_errors(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/fail')
def fail():
flask.request.form['missing_key']
c = app.test_client()
self.assert_equal(c.get('/fail').status_code, 400)
app.config['TRAP_BAD_REQUEST_ERRORS'] = True
c = app.test_client()
try:
c.get('/fail')
except KeyError as e:
self.assert_true(isinstance(e, BadRequest))
else:
self.fail('Expected exception')
def test_trapping_of_all_http_exceptions(self):
app = flask.Flask(__name__)
app.testing = True
app.config['TRAP_HTTP_EXCEPTIONS'] = True
@app.route('/fail')
def fail():
flask.abort(404)
c = app.test_client()
try:
c.get('/fail')
except NotFound as e:
pass
else:
self.fail('Expected exception')
def test_enctype_debug_helper(self):
from flask.debughelpers import DebugFilesKeyError
app = flask.Flask(__name__)
app.debug = True
@app.route('/fail', methods=['POST'])
def index():
return flask.request.files['foo'].filename
# with statement is important because we leave an exception on the
# stack otherwise and we want to ensure that this is not the case
# to not negatively affect other tests.
with app.test_client() as c:
try:
c.post('/fail', data={'foo': 'index.txt'})
except DebugFilesKeyError as e:
self.assert_in('no file contents were transmitted', str(e))
self.assert_in('This was submitted: "index.txt"', str(e))
else:
self.fail('Expected exception')
def test_response_creation(self):
app = flask.Flask(__name__)
@app.route('/unicode')
def from_unicode():
return u'Hällo Wörld'
@app.route('/string')
def from_string():
return u'Hällo Wörld'.encode('utf-8')
@app.route('/args')
def from_tuple():
return 'Meh', 400, {
'X-Foo': 'Testing',
'Content-Type': 'text/plain; charset=utf-8'
}
c = app.test_client()
self.assert_equal(c.get('/unicode').data, u'Hällo Wörld'.encode('utf-8'))
self.assert_equal(c.get('/string').data, u'Hällo Wörld'.encode('utf-8'))
rv = c.get('/args')
self.assert_equal(rv.data, b'Meh')
self.assert_equal(rv.headers['X-Foo'], 'Testing')
self.assert_equal(rv.status_code, 400)
self.assert_equal(rv.mimetype, 'text/plain')
def test_make_response(self):
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.make_response()
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data, b'')
self.assert_equal(rv.mimetype, 'text/html')
rv = flask.make_response('Awesome')
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data, b'Awesome')
self.assert_equal(rv.mimetype, 'text/html')
rv = flask.make_response('W00t', 404)
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, b'W00t')
self.assert_equal(rv.mimetype, 'text/html')
def test_make_response_with_response_instance(self):
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.make_response(
flask.jsonify({'msg': 'W00t'}), 400)
self.assertEqual(rv.status_code, 400)
self.assertEqual(rv.data, b'{\n "msg": "W00t"\n}')
self.assertEqual(rv.mimetype, 'application/json')
rv = flask.make_response(
flask.Response(''), 400)
self.assertEqual(rv.status_code, 400)
self.assertEqual(rv.data, b'')
self.assertEqual(rv.mimetype, 'text/html')
rv = flask.make_response(
flask.Response('', headers={'Content-Type': 'text/html'}),
400, [('X-Foo', 'bar')])
self.assertEqual(rv.status_code, 400)
self.assertEqual(rv.headers['Content-Type'], 'text/html')
self.assertEqual(rv.headers['X-Foo'], 'bar')
def test_url_generation(self):
app = flask.Flask(__name__)
@app.route('/hello/<name>', methods=['POST'])
def hello():
pass
with app.test_request_context():
self.assert_equal(flask.url_for('hello', name='test x'), '/hello/test%20x')
self.assert_equal(flask.url_for('hello', name='test x', _external=True),
'http://localhost/hello/test%20x')
    def test_build_error_handler(self):
        """``url_for()`` for an unknown endpoint raises BuildError unless a
        registered ``url_build_error_handlers`` entry resolves it."""
        app = flask.Flask(__name__)
        # Test base case, a URL which results in a BuildError.
        with app.test_request_context():
            self.assertRaises(BuildError, flask.url_for, 'spam')
        # Verify the error is re-raised if not the current exception.
        try:
            with app.test_request_context():
                flask.url_for('spam')
        except BuildError as err:
            error = err
        try:
            raise RuntimeError('Test case where BuildError is not current.')
        except RuntimeError:
            # the saved BuildError is re-raised even though a different
            # exception is currently being handled
            self.assertRaises(BuildError, app.handle_url_build_error, error, 'spam', {})
        # Test a custom handler.
        def handler(error, endpoint, values):
            # Just a test.
            return '/test_handler/'
        app.url_build_error_handlers.append(handler)
        with app.test_request_context():
            self.assert_equal(flask.url_for('spam'), '/test_handler/')
    def test_custom_converters(self):
        """A custom URL converter registered in ``url_map.converters`` is
        applied when a route declares its type in the rule."""
        from werkzeug.routing import BaseConverter
        class ListConverter(BaseConverter):
            # '<list:args>' splits the path segment on commas into a list
            def to_python(self, value):
                return value.split(',')
            def to_url(self, value):
                base_to_url = super(ListConverter, self).to_url
                return ','.join(base_to_url(x) for x in value)
        app = flask.Flask(__name__)
        app.url_map.converters['list'] = ListConverter
        @app.route('/<list:args>')
        def index(args):
            return '|'.join(args)
        c = app.test_client()
        self.assert_equal(c.get('/1,2,3').data, b'1|2|3')
def test_static_files(self):
app = flask.Flask(__name__)
app.testing = True
rv = app.test_client().get('/static/index.html')
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data.strip(), b'<h1>Hello World!</h1>')
with app.test_request_context():
self.assert_equal(flask.url_for('static', filename='index.html'),
'/static/index.html')
rv.close()
def test_none_response(self):
app = flask.Flask(__name__)
@app.route('/')
def test():
return None
try:
app.test_client().get('/')
except ValueError as e:
self.assert_equal(str(e), 'View function did not return a response')
pass
else:
self.assert_true("Expected ValueError")
def test_request_locals(self):
self.assert_equal(repr(flask.g), '<LocalProxy unbound>')
self.assertFalse(flask.g)
def test_test_app_proper_environ(self):
app = flask.Flask(__name__)
app.config.update(
SERVER_NAME='localhost.localdomain:5000'
)
@app.route('/')
def index():
return 'Foo'
@app.route('/', subdomain='foo')
def subdomain():
return 'Foo SubDomain'
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'Foo')
rv = app.test_client().get('/', 'http://localhost.localdomain:5000')
self.assert_equal(rv.data, b'Foo')
rv = app.test_client().get('/', 'https://localhost.localdomain:5000')
self.assert_equal(rv.data, b'Foo')
app.config.update(SERVER_NAME='localhost.localdomain')
rv = app.test_client().get('/', 'https://localhost.localdomain')
self.assert_equal(rv.data, b'Foo')
try:
app.config.update(SERVER_NAME='localhost.localdomain:443')
rv = app.test_client().get('/', 'https://localhost.localdomain')
# Werkzeug 0.8
self.assert_equal(rv.status_code, 404)
except ValueError as e:
# Werkzeug 0.7
self.assert_equal(str(e), "the server name provided " +
"('localhost.localdomain:443') does not match the " + \
"server name from the WSGI environment ('localhost.localdomain')")
try:
app.config.update(SERVER_NAME='localhost.localdomain')
rv = app.test_client().get('/', 'http://foo.localhost')
# Werkzeug 0.8
self.assert_equal(rv.status_code, 404)
except ValueError as e:
# Werkzeug 0.7
self.assert_equal(str(e), "the server name provided " + \
"('localhost.localdomain') does not match the " + \
"server name from the WSGI environment ('foo.localhost')")
rv = app.test_client().get('/', 'http://foo.localhost.localdomain')
self.assert_equal(rv.data, b'Foo SubDomain')
def test_exception_propagation(self):
def apprunner(configkey):
app = flask.Flask(__name__)
@app.route('/')
def index():
1 // 0
c = app.test_client()
if config_key is not None:
app.config[config_key] = True
try:
resp = c.get('/')
except Exception:
pass
else:
self.fail('expected exception')
else:
self.assert_equal(c.get('/').status_code, 500)
# we have to run this test in an isolated thread because if the
# debug flag is set to true and an exception happens the context is
# not torn down. This causes other tests that run after this fail
# when they expect no exception on the stack.
for config_key in 'TESTING', 'PROPAGATE_EXCEPTIONS', 'DEBUG', None:
t = Thread(target=apprunner, args=(config_key,))
t.start()
t.join()
def test_max_content_length(self):
app = flask.Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 64
@app.before_request
def always_first():
flask.request.form['myfile']
self.assert_true(False)
@app.route('/accept', methods=['POST'])
def accept_file():
flask.request.form['myfile']
self.assert_true(False)
@app.errorhandler(413)
def catcher(error):
return '42'
c = app.test_client()
rv = c.post('/accept', data={'myfile': 'foo' * 100})
self.assert_equal(rv.data, b'42')
def test_url_processors(self):
app = flask.Flask(__name__)
@app.url_defaults
def add_language_code(endpoint, values):
if flask.g.lang_code is not None and \
app.url_map.is_endpoint_expecting(endpoint, 'lang_code'):
values.setdefault('lang_code', flask.g.lang_code)
@app.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop('lang_code', None)
@app.route('/<lang_code>/')
def index():
return flask.url_for('about')
@app.route('/<lang_code>/about')
def about():
return flask.url_for('something_else')
@app.route('/foo')
def something_else():
return flask.url_for('about', lang_code='en')
c = app.test_client()
self.assert_equal(c.get('/de/').data, b'/de/about')
self.assert_equal(c.get('/de/about').data, b'/foo')
self.assert_equal(c.get('/foo').data, b'/en/about')
def test_inject_blueprint_url_defaults(self):
app = flask.Flask(__name__)
bp = flask.Blueprint('foo.bar.baz', __name__,
template_folder='template')
@bp.url_defaults
def bp_defaults(endpoint, values):
values['page'] = 'login'
@bp.route('/<page>')
def view(page): pass
app.register_blueprint(bp)
values = dict()
app.inject_url_defaults('foo.bar.baz.view', values)
expected = dict(page='login')
self.assert_equal(values, expected)
with app.test_request_context('/somepage'):
url = flask.url_for('foo.bar.baz.view')
expected = '/login'
self.assert_equal(url, expected)
def test_nonascii_pathinfo(self):
app = flask.Flask(__name__)
app.testing = True
@app.route(u'/киртест')
def index():
return 'Hello World!'
c = app.test_client()
rv = c.get(u'/киртест')
self.assert_equal(rv.data, b'Hello World!')
def test_debug_mode_complains_after_first_request(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/')
def index():
return 'Awesome'
self.assert_false(app.got_first_request)
self.assert_equal(app.test_client().get('/').data, b'Awesome')
try:
@app.route('/foo')
def broken():
return 'Meh'
except AssertionError as e:
self.assert_in('A setup function was called', str(e))
else:
self.fail('Expected exception')
app.debug = False
@app.route('/foo')
def working():
return 'Meh'
self.assert_equal(app.test_client().get('/foo').data, b'Meh')
self.assert_true(app.got_first_request)
def test_before_first_request_functions(self):
got = []
app = flask.Flask(__name__)
@app.before_first_request
def foo():
got.append(42)
c = app.test_client()
c.get('/')
self.assert_equal(got, [42])
c.get('/')
self.assert_equal(got, [42])
self.assert_true(app.got_first_request)
def test_routing_redirect_debugging(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/foo/', methods=['GET', 'POST'])
def foo():
return 'success'
with app.test_client() as c:
try:
c.post('/foo', data={})
except AssertionError as e:
self.assert_in('http://localhost/foo/', str(e))
self.assert_in('Make sure to directly send your POST-request '
'to this URL', str(e))
else:
self.fail('Expected exception')
rv = c.get('/foo', data={}, follow_redirects=True)
self.assert_equal(rv.data, b'success')
app.debug = False
with app.test_client() as c:
rv = c.post('/foo', data={}, follow_redirects=True)
self.assert_equal(rv.data, b'success')
def test_route_decorator_custom_endpoint(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/foo/')
def foo():
return flask.request.endpoint
@app.route('/bar/', endpoint='bar')
def for_bar():
return flask.request.endpoint
@app.route('/bar/123', endpoint='123')
def for_bar_foo():
return flask.request.endpoint
with app.test_request_context():
assert flask.url_for('foo') == '/foo/'
assert flask.url_for('bar') == '/bar/'
assert flask.url_for('123') == '/bar/123'
c = app.test_client()
self.assertEqual(c.get('/foo/').data, b'foo')
self.assertEqual(c.get('/bar/').data, b'bar')
self.assertEqual(c.get('/bar/123').data, b'123')
def test_preserve_only_once(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/fail')
def fail_func():
1 // 0
c = app.test_client()
for x in range(3):
with self.assert_raises(ZeroDivisionError):
c.get('/fail')
self.assert_true(flask._request_ctx_stack.top is not None)
self.assert_true(flask._app_ctx_stack.top is not None)
# implicit appctx disappears too
flask._request_ctx_stack.top.pop()
self.assert_true(flask._request_ctx_stack.top is None)
self.assert_true(flask._app_ctx_stack.top is None)
def test_preserve_remembers_exception(self):
app = flask.Flask(__name__)
app.debug = True
errors = []
@app.route('/fail')
def fail_func():
1 // 0
@app.route('/success')
def success_func():
return 'Okay'
@app.teardown_request
def teardown_handler(exc):
errors.append(exc)
c = app.test_client()
# After this failure we did not yet call the teardown handler
with self.assert_raises(ZeroDivisionError):
c.get('/fail')
self.assert_equal(errors, [])
# But this request triggers it, and it's an error
c.get('/success')
self.assert_equal(len(errors), 2)
self.assert_true(isinstance(errors[0], ZeroDivisionError))
# At this point another request does nothing.
c.get('/success')
self.assert_equal(len(errors), 3)
self.assert_equal(errors[1], None)
def test_get_method_on_g(self):
app = flask.Flask(__name__)
app.testing = True
with app.app_context():
self.assert_equal(flask.g.get('x'), None)
self.assert_equal(flask.g.get('x', 11), 11)
flask.g.x = 42
self.assert_equal(flask.g.get('x'), 42)
self.assert_equal(flask.g.x, 42)
def test_g_iteration_protocol(self):
app = flask.Flask(__name__)
app.testing = True
with app.app_context():
flask.g.foo = 23
flask.g.bar = 42
self.assert_equal('foo' in flask.g, True)
self.assert_equal('foos' in flask.g, False)
self.assert_equal(sorted(flask.g), ['bar', 'foo'])
class SubdomainTestCase(FlaskTestCase):
def test_basic_support(self):
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost'
@app.route('/')
def normal_index():
return 'normal index'
@app.route('/', subdomain='test')
def test_index():
return 'test index'
c = app.test_client()
rv = c.get('/', 'http://localhost/')
self.assert_equal(rv.data, b'normal index')
rv = c.get('/', 'http://test.localhost/')
self.assert_equal(rv.data, b'test index')
@emits_module_deprecation_warning
def test_module_static_path_subdomain(self):
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'example.com'
from subdomaintestmodule import mod
app.register_module(mod)
c = app.test_client()
rv = c.get('/static/hello.txt', 'http://foo.example.com/')
rv.direct_passthrough = False
self.assert_equal(rv.data.strip(), b'Hello Subdomain')
rv.close()
def test_subdomain_matching(self):
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost'
@app.route('/', subdomain='<user>')
def index(user):
return 'index for %s' % user
c = app.test_client()
rv = c.get('/', 'http://mitsuhiko.localhost/')
self.assert_equal(rv.data, b'index for mitsuhiko')
def test_subdomain_matching_with_ports(self):
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost:3000'
@app.route('/', subdomain='<user>')
def index(user):
return 'index for %s' % user
c = app.test_client()
rv = c.get('/', 'http://mitsuhiko.localhost:3000/')
self.assert_equal(rv.data, b'index for mitsuhiko')
@emits_module_deprecation_warning
def test_module_subdomain_support(self):
app = flask.Flask(__name__)
mod = flask.Module(__name__, 'test', subdomain='testing')
app.config['SERVER_NAME'] = 'localhost'
@mod.route('/test')
def test():
return 'Test'
@mod.route('/outside', subdomain='xtesting')
def bar():
return 'Outside'
app.register_module(mod)
c = app.test_client()
rv = c.get('/test', 'http://testing.localhost/')
self.assert_equal(rv.data, b'Test')
rv = c.get('/outside', 'http://xtesting.localhost/')
self.assert_equal(rv.data, b'Outside')
    def test_multi_route_rules(self):
        """Stacking two route decorators on one view makes it reachable
        under both rules, with the parameter default used for the bare
        URL."""
        app = flask.Flask(__name__)
        @app.route('/')
        @app.route('/<test>/')
        def index(test='a'):
            return test
        rv = app.test_client().open('/')
        self.assert_equal(rv.data, b'a')
        rv = app.test_client().open('/b/')
        self.assert_equal(rv.data, b'b')
def test_multi_route_class_views(self):
class View(object):
def __init__(self, app):
app.add_url_rule('/', 'index', self.index)
app.add_url_rule('/<test>/', 'index', self.index)
def index(self, test='a'):
return test
app = flask.Flask(__name__)
_ = View(app)
rv = app.test_client().open('/')
self.assert_equal(rv.data, b'a')
rv = app.test_client().open('/b/')
self.assert_equal(rv.data, b'b')
def suite():
    """Assemble this module's test suite from both test-case classes."""
    cases = (BasicFunctionalityTestCase, SubdomainTestCase)
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
| bsd-3-clause |
wezhang/vim-setup | bundle/powerline/tools/colors_find.py | 1 | 1599 | #!/usr/bin/env python
from __future__ import division, print_function
import sys
import os
from colormath.color_objects import sRGBColor, LabColor
from colormath.color_conversions import convert_color
from colormath.color_diff import delta_e_cie2000
def get_lab(name, rgb):
    """Convert *rgb* (a 6-digit hex string such as 'ff8800') to Lab space.

    Returns a ``(name, LabColor)`` pair so the colour keeps its label from
    colors.map through the conversion.
    """
    rgb = sRGBColor(int(rgb[:2], 16), int(rgb[2:4], 16), int(rgb[4:6], 16),
                    is_upscaled=True)
    lab = convert_color(rgb, LabColor)
    return name, lab
with open(os.path.join(os.path.dirname(__file__), 'colors.map'), 'r') as f:
colors = [get_lab(*line.split('\t')) for line in f]
ulab = get_lab(None, sys.argv[1])[1]
def find_color(ulab, colors):
    """Return the ``(name, LabColor)`` pair from *colors* closest to *ulab*.

    Distance is the CIE2000 delta-E between Lab colours.

    BUG FIX: the first parameter used to be named ``urgb`` and was never
    used -- the body silently read the module-level ``ulab`` instead, so
    the function ignored whatever colour it was given.  Behaviour at the
    existing (sole, positional) call site is unchanged.
    """
    # start above any reachable distance so the first candidate always wins
    cur_distance = 3 * (255 ** 2 + 1)
    cur_color = None
    for color, clab in colors:
        dist = delta_e_cie2000(ulab, clab)
        if dist < cur_distance:
            cur_distance = dist
            cur_color = (color, clab)
    return cur_color
cur_color = find_color(ulab, colors)
def lab_to_csi(lab):
    """Render *lab* as the tail of an SGR truecolor escape (';2;R;G;Bm').

    The caller prepends '\\033[38' (foreground) or '\\033[48' (background)
    to complete the escape sequence.
    """
    rgb = convert_color(lab, sRGBColor)
    colstr = ';2;' + ';'.join((str(i) for i in get_upscaled_values(rgb)))
    return colstr + 'm'
def get_upscaled_values(rgb):
    """Return the 0-255 channel values of *rgb*, clamped into range.

    Lab -> sRGB conversion can land outside the gamut, so each channel is
    clipped to [0, 255].
    """
    return [min(max(0, i), 255) for i in rgb.get_upscaled_value_tuple()]
def get_rgb(lab):
    """Convert *lab* back to a 6-digit hex RGB string without the '#'."""
    rgb = convert_color(lab, sRGBColor)
    # rebuild from clamped channels so out-of-gamut values stay in range
    rgb = sRGBColor(*get_upscaled_values(rgb), is_upscaled=True)
    return rgb.get_rgb_hex()[1:]
print(get_rgb(ulab), ':', cur_color[0], ':', get_rgb(cur_color[1]))
col_1 = lab_to_csi(ulab)
col_2 = lab_to_csi(cur_color[1])
sys.stdout.write('\033[48' + col_1 + '\033[38' + col_2 + 'abc\033[0m <-- bg:urgb, fg:crgb\n')
sys.stdout.write('\033[48' + col_2 + '\033[38' + col_1 + 'abc\033[0m <-- bg:crgb, fg:urgb\n')
| apache-2.0 |
Nick-OpusVL/odoo | openerp/addons/base/tests/test_basecase.py | 379 | 3895 | # -*- coding: utf-8 -*-
import unittest2
from openerp.tests import common
class test_single_transaction_case(common.SingleTransactionCase):
    """
    Check the whole-class transaction behavior of SingleTransactionCase.
    """
    # All tests of this class share one transaction, so records created by
    # test_00 are still visible to the later (alphabetically ordered) tests.

    def test_00(self):
        """Create a partner."""
        cr, uid = self.cr, self.uid
        self.registry('res.partner').create(cr, uid, {'name': 'test_per_class_teardown_partner'})
        ids = self.registry('res.partner').search(cr, uid, [('name', '=', 'test_per_class_teardown_partner')])
        self.assertEqual(1, len(ids), "Test partner not found.")

    def test_01(self):
        """Find the created partner."""
        # same transaction as test_00, so the record must still exist
        cr, uid = self.cr, self.uid
        ids = self.registry('res.partner').search(cr, uid, [('name', '=', 'test_per_class_teardown_partner')])
        self.assertEqual(1, len(ids), "Test partner not found.")

    def test_20a(self):
        """ Create a partner with a XML ID """
        cr, uid = self.cr, self.uid
        res_partner = self.registry('res.partner')
        ir_model_data = self.registry('ir.model.data')
        pid, _ = res_partner.name_create(cr, uid, 'Mr Blue')
        # register the external identifier base.test_partner_blue for it
        ir_model_data.create(cr, uid, {'name': 'test_partner_blue',
                                       'module': 'base',
                                       'model': 'res.partner',
                                       'res_id': pid})

    def test_20b(self):
        """ Resolve xml id with ref() and browse_ref() """
        cr, uid = self.cr, self.uid
        res_partner = self.registry('res.partner')
        xid = 'base.test_partner_blue'
        p_ref = self.ref(xid)
        self.assertTrue(p_ref, "ref() should resolve xid to database ID")
        partner = res_partner.browse(cr, uid, p_ref)
        p_browse_ref = self.browse_ref(xid)
        self.assertEqual(partner, p_browse_ref, "browse_ref() should resolve xid to browse records")
class test_transaction_case(common.TransactionCase):
    """
    Check the per-method transaction behavior of TransactionCase.

    Each test method runs in its own transaction, rolled back afterwards,
    so no method sees records created by another.
    """

    def test_00(self):
        """Create a partner."""
        cr, uid = self.cr, self.uid
        # Fresh transaction: the partner must not pre-exist here.
        ids = self.registry('res.partner').search(cr, uid, [('name', '=', 'test_per_class_teardown_partner')])
        self.assertEqual(0, len(ids), "Test partner found.")
        self.registry('res.partner').create(cr, uid, {'name': 'test_per_class_teardown_partner'})
        ids = self.registry('res.partner').search(cr, uid, [('name', '=', 'test_per_class_teardown_partner')])
        self.assertEqual(1, len(ids), "Test partner not found.")

    def test_01(self):
        """Don't find the created partner."""
        # The record created in test_00 was rolled back with its transaction.
        cr, uid = self.cr, self.uid
        ids = self.registry('res.partner').search(cr, uid, [('name', '=', 'test_per_class_teardown_partner')])
        self.assertEqual(0, len(ids), "Test partner found.")

    def test_20a(self):
        """ Create a partner with a XML ID then resolve xml id with ref() and browse_ref() """
        cr, uid = self.cr, self.uid
        res_partner = self.registry('res.partner')
        ir_model_data = self.registry('ir.model.data')
        pid, _ = res_partner.name_create(cr, uid, 'Mr Yellow')
        ir_model_data.create(cr, uid, {'name': 'test_partner_yellow',
                                       'module': 'base',
                                       'model': 'res.partner',
                                       'res_id': pid})
        xid = 'base.test_partner_yellow'
        p_ref = self.ref(xid)
        # assertEqual replaces the deprecated assertEquals alias, matching
        # the assertion style used everywhere else in this file.
        self.assertEqual(p_ref, pid, "ref() should resolve xid to database ID")
        partner = res_partner.browse(cr, uid, pid)
        p_browse_ref = self.browse_ref(xid)
        self.assertEqual(partner, p_browse_ref, "browse_ref() should resolve xid to browse records")
# Allow running this test module directly with the unittest2 CLI runner.
if __name__ == '__main__':
    unittest2.main()

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vipmunot/Data-Science-Course | ML/Python/Decision Tree/decision tree.py | 1 | 4104 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 18 10:38:03 2016
@author: Vipul Munot
"""
# Load the tennis training and test sets: comma-separated rows, last column
# is the class label.  `with` guarantees the file handles are closed (the
# original left them open via bare open() calls inside the comprehensions).
with open('tennis.txt') as _fh:
    tennis = [line.strip().split(',') for line in _fh]
with open('test-tennis.txt') as _fh:
    test_tennis = [line.strip().split(',') for line in _fh]

# Inline web-referrer demo dataset: referrer, country, read-FAQ flag, pages
# viewed, and the subscription chosen (the class label).
data=[['slashdot','USA','yes',18,'None'],
      ['google','France','yes',23,'Premium'],
      ['digg','USA','yes',24,'Basic'],
      ['kiwitobes','France','yes',23,'Basic'],
      ['google','UK','no',21,'Premium'],
      ['(direct)','New Zealand','no',12,'None'],
      ['(direct)','UK','no',21,'Basic'],
      ['google','USA','no',24,'Premium'],
      ['slashdot','France','yes',19,'None'],
      ['digg','USA','no',18,'None'],
      ['google','UK','no',18,'None'],
      ['kiwitobes','UK','no',19,'None'],
      ['digg','New Zealand','yes',12,'Basic'],
      ['slashdot','UK','no',21,'None'],
      ['google','UK','yes',18,'Basic'],
      ['kiwitobes','France','yes',19,'Basic']]
class node:
    """One decision-tree node.

    Interior nodes carry the (col, value) split criterion and two children;
    leaves carry the predicted class counts in `results` (None elsewhere).
    """
    def __init__(self, col=-1, value=None, results=None, left=None, right=None):
        self.results = results  # class-count dict for leaves, None for interior nodes
        self.col = col          # index of the column tested at this node
        self.value = value      # value the column is compared against
        self.left = left        # subtree followed when the test is True
        self.right = right      # subtree followed when the test is False
def split_data(data, column, value):
    """Partition *data* rows on `row[column]` versus *value*.

    Numeric values split on >=, everything else on equality.  Returns a
    (matching_rows, non_matching_rows) tuple of lists.
    """
    if isinstance(value, (int, float)):
        matches = lambda row: row[column] >= value
    else:
        matches = lambda row: row[column] == value
    true_rows, false_rows = [], []
    for row in data:
        (true_rows if matches(row) else false_rows).append(row)
    return (true_rows, false_rows)
# Quick sanity split of the demo data on the "read FAQ" column.
r1,r2 = split_data(data,2,'yes')
#print ("\nSubset 1: ",r1)
#print ("\nSubset 2: ",r2)
def frequency(data):
    """Count occurrences of each class label (the last column) in *data*."""
    counts = {}
    for row in data:
        label = row[-1]
        counts[label] = counts.get(label, 0) + 1
    return counts
def entropy(data):
    """Shannon entropy (in bits) of the class-label distribution of *data*.

    Returns 0.0 for an empty or single-class dataset.
    """
    import math  # hoisted: the original re-ran `import math` on every loop iteration
    result = 0.0
    freq = frequency(data)
    total = len(data)
    for count in freq.values():
        prob = float(count) / total
        result -= prob * math.log2(prob)
    return result
#print ("\nEntropy: ",entropy(data))
#print ("\nEntropy: ",entropy(r1))
def build_tree(rows):
    """Recursively build a decision tree over *rows* (last column = label).

    Greedily picks the (column, value) split with the highest information
    gain; recursion stops when no split improves on the current entropy,
    producing a leaf node holding the label counts of the remaining rows.
    """
    if len(rows)==0: return node()
    currentScore=entropy(rows)
    # Best split found so far.
    best_gain=0.0
    best_criteria=None
    best_sets=None
    column_count=len(rows[0])-1  # last column is the label, not a feature
    for col in range(0,column_count):
        # Collect the distinct values present in this column (dict as a set).
        column_values={}
        for row in rows:
            column_values[row[col]]=1
        for value in column_values.keys():
            (set1,set2)=split_data(rows,col,value)
            # Information gain = parent entropy - weighted child entropies.
            p=float(len(set1))/len(rows)
            gain=currentScore-p*entropy(set1)-(1-p)*entropy(set2)
            # Only accept splits that actually separate the rows.
            if gain>best_gain and len(set1)>0 and len(set2)>0:
                best_gain=gain
                best_criteria=(col,value)
                best_sets=(set1,set2)
    if best_gain>0:
        trueBranch=build_tree(best_sets[0])
        falseBranch=build_tree(best_sets[1])
        return node(col=best_criteria[0],value=best_criteria[1],
                    left=trueBranch,right=falseBranch)
    else:
        # No useful split remains: emit a leaf with the label counts.
        return node(results=frequency(rows))
# Build trees for the web-referrer demo data and the tennis dataset.
tree = build_tree(data)
tennis_tree = build_tree(tennis)
def print_tree(tree, indent=' '):
    """Pretty-print *tree*: leaves show their class counts, interior nodes
    show 'col:value' with indented Left/Right subtrees.

    Prints directly and returns None.  BUG FIX: the original passed the
    recursive call's None return value into print(), which emitted spurious
    "Left -> None" / "Right -> None" lines after each subtree.
    """
    if tree.results is not None:
        print(str(tree.results))
    else:
        print(str(tree.col) + ':' + str(tree.value))
        print(indent + 'Left ->', end=' ')
        print_tree(tree.left, indent + '  ')
        print(indent + 'Right ->', end=' ')
        print_tree(tree.right, indent + '  ')
# print_tree prints as a side effect and returns None; wrapping the call in
# print() previously emitted a spurious trailing "None" line after each tree.
print_tree(tree)
print ("\nTennis:\n")
print_tree(tennis_tree)
def classify(observation, tree):
    """Walk *tree* for *observation* and return the leaf's class-count dict.

    Numeric node values branch on >=, all other values branch on equality.
    """
    # Iterative descent instead of recursion; behavior is identical.
    while tree.results is None:
        val = observation[tree.col]
        if isinstance(val, (int, float)):
            tree = tree.left if val >= tree.value else tree.right
        else:
            tree = tree.left if val == tree.value else tree.right
    return tree.results
# Demo predictions: one hand-written observation against the demo tree, then
# every row of the tennis test set against the tennis tree.
# NOTE(review): "Clasificaton" is misspelled in the user-facing output strings.
print ("\nClasificaton: ",classify(['(direct)','USA','yes',5],tree))
for values in test_tennis:
    print ("\n",values,"\tClasificaton: ",classify(values,tennis_tree))
AnotherIvan/calibre | src/chardet/sjisprober.py | 190 | 3549 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from mbcharsetprober import MultiByteCharSetProber
from codingstatemachine import CodingStateMachine
from chardistribution import SJISDistributionAnalysis
from jpcntx import SJISContextAnalysis
from mbcssm import SJISSMModel
import constants, sys
from constants import eStart, eError, eItsMe
class SJISProber(MultiByteCharSetProber):
    """Probes a byte stream for the Shift-JIS Japanese encoding.

    Combines a coding state machine (byte-sequence validity) with a
    character-distribution analysis and a context analysis; confidence is
    the higher of the two analyses.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()

    def reset(self):
        # Reset the base prober state plus the SJIS-specific context analyzer.
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        return "SHIFT_JIS"

    def feed(self, aBuf):
        """Feed a chunk of bytes; returns the updated detection state."""
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == eError:
                # Illegal byte sequence for Shift-JIS: rule this charset out.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name() + ' prober hit error at byte ' + str(i) + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == eStart:
                # A complete character just finished.  When it straddles the
                # chunk boundary (i == 0) the analyzers are fed from
                # _mLastChar, which carries bytes of the previous chunk.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen :], charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen : i + 3 - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1 : i + 1], charLen)
        # Remember the last byte for cross-chunk character reassembly.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: declare success early once we are confident enough.
            if self._mContextAnalyzer.got_enough_data() and \
               (self.get_confidence() > constants.SHORTCUT_THRESHOLD):
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        # Report whichever analysis is more confident.
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| gpl-3.0 |
adrianlee/rcon-cs | rconsoft/rcon/receiver.py | 2 | 4212 | # Read LICENSE for licensing details.
"""The Rcon receiver."""
import re
import logging
from twisted.internet.protocol import DatagramProtocol
from rconsoft.dispatch.dispatcher import Signal
from rconsoft.config import config
log = logging.getLogger('general')
log_detail = logging.getLogger('detail')
#------------------------------
class EventError(Exception):
    """Raised when event registration or handling fails."""
#------------------------------
class RconReceiver(DatagramProtocol):
#==============================
def __init__(self):
self._init_signals()
self.events = {
'user_connected': {
'regex': re.compile(r'^"(?P<name>.*?)<(?P<userid>\d+)><(?P<uniqueid>.*?)><(?P<team>.*?)>" connected, address "(?P<ip>.*?):(?P<port>.*?)"')
},
'user_disconnected': {
'regex': re.compile(r'^"(?P<name>.*?)<(?P<userid>\d+)><(?P<uniqueid>.*?)><(?P<team>.*?)>" disconnected')
},
'user_validated': {
'regex': re.compile(r'^"(?P<name>.*?)<(?P<userid>\d+)><(?P<uniqueid>.*?)><(?P<team>.*?)>" STEAM USERID validated')
},
'user_entered': {
'regex': re.compile(r'^"(?P<name>.*?)<(?P<userid>\d+)><(?P<uniqueid>.*?)><(?P<team>.*?)>" entered the game')
},
'user_joined_team': {
'regex': re.compile(r'^"(?P<name>.*?)<(?P<userid>\d+)><(?P<uniqueid>.*?)><.*?>" joined team "(?P<team>.*?)"')
},
'user_say': {
'regex': re.compile(r'^"(?P<name>.*?)<(?P<userid>\d+)><(?P<uniqueid>.*?)><(?P<team>.*?)>" say(_(?P<to>.*?))? "(?P<message>.*)"( \((?P<status>.*?)\))?')
},
'user_changed_name': {
'regex': re.compile(r'^"(?P<old_name>.*?)<(?P<userid>\d+)><(?P<uniqueid>.*?)><.*?>" changed name to "(?P<name>.*?)"')
},
'user_triggered': {
'regex': re.compile(r'^"(?P<name>.*?)<(?P<userid>\d+)><(?P<uniqueid>.*?)><(?P<team>.*?)>" triggered "(?P<event>.*?)"')
},
'world_triggered': {
'regex': re.compile(r'^World triggered "(?P<event>.*?)"( \(CT "(?P<ct_score>\d+)"\) \(T "(?P<t_score>\d+)"\))?')
},
'server_say': {
'regex': re.compile(r'^Server say "(?P<message>.*?)"')
},
'server_cvar': {
'regex': re.compile(r'^Server cvar "(?P<cvar>.*?)" = "(?P<value>.*?)"')
},
'team_scored': {
'regex': re.compile(r'^Team "(?P<team>.*?)" scored "(?P<score>.*?)" with "(?P<players>.*?)" players')
},
'team_triggered': {
'regex': re.compile(r'^Team "(?P<team>.*?)" triggered "(?P<event>.*?)"( \(CT "(?P<ct_score>\d+)"\) \(T "(?P<t_score>\d+)"\))?')
},
'rcon_command': {
'regex': re.compile(r'^Rcon: \"rcon (?P<challenge>\d+) \"(?P<password>.*?)\" (?P<command>.*?)\" from \"(?P<ip>.*?):(?P<port>.*?)\"')
}
}
#==============================
def _init_signals(self):
self.data = Signal()
self.event = Signal()
self.unhandled_event = Signal()
#==============================
def add_event(self, name, regex):
if name in self.events:
raise EventError('Event already exists')
if isinstance(regex, str):
regex = re.compile(str)
self.events[name] = {
'regex': regex
}
#==============================
# Twisted event
def datagramReceived(self, data, (host, port)):
self.data.send(sender=self.__class__, data=data)
#print data
#log L date - time: response
null, null, date, null, time, response = data[4:-2].split(' ', 5)
#log.debug(response)
found_event = False
for event in self.events:
m = self.events[event]['regex'].match(response)
if m:
log_detail.debug('[%s] event [%s]: %s' % (self.__class__.__name__, event, m.groupdict()))
self.event.send(sender=self.__class__, event=event, data=m.groupdict())
found_event = True
break
# If the event wasn't found, then fire the unhandled_event in case some plugin wants to handle it.
# Though, you could just use add_event instead.
if not found_event:
self.unhandled_event.send(sender=self.__class__, data=response)
| mit |
SimonGreenhill/pyvolve | setup.py | 1 | 1912 | #!/usr/bin/env python
##############################################################################
## pyvolve: Python platform for simulating evolutionary sequences.
##
## Written by Stephanie J. Spielman (stephanie.spielman@gmail.com)
##############################################################################
'''
Setup.py script (uses setuptools) for building, testing, and installing pyvolve.
To build and install the package as root (globally), enter (from this directory!) -
sudo python setup.py build
sudo python setup.py test # OPTIONAL BUT RECOMMENDED. Please contact author with any failed tests! Note that every once in a while the tests for functions which must generate random numbers take excessively long, so just ctrl-C these and run tests again.
sudo python setup.py install
To install for a particular user (locally), enter -
python setup.py build
python setup.py test # OPTIONAL BUT RECOMMENDED. Please contact author with any failed tests! Note that every once in a while the tests for functions which must generate random numbers take excessively long, so just ctrl-C these and run tests again.
python setup.py install --user # installs pyvolve into the current user's local site-packages
'''
from setuptools import setup
# Package metadata and installation configuration (setuptools).
setup(name = 'Pyvolve',
      version = '0.6.1',
      description = 'Sequence simulation along phylogenies according to continuous-time Markov models',
      author = 'Stephanie J. Spielman',
      author_email = 'stephanie.spielman@gmail.com',
      url = 'https://github.com/sjspielman/pyvolve',
      # NOTE(review): download_url still points at the 0.5 tarball although
      # version is 0.6.1 -- confirm and update on release.
      download_url = 'https://github.com/sjspielman/pyvolve/tarball/0.5',
      platforms = 'Tested on Mac OS X.',
      # The importable package `pyvolve` lives in the repository's src/ directory.
      package_dir = {'pyvolve':'src'},
      packages = ['pyvolve', 'tests'],
      package_data = {'tests': ['freqFiles/*', 'evolFiles/*']},
      install_requires=['numpy>=1.7', 'scipy', 'Biopython'],
      test_suite = "tests"
      )
| bsd-2-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.