repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
40123148/40123148 | static/Brython3.1.1-20150328-091302/Lib/inspect.py | 637 | 78935 | """Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues(), getcallargs() - get info about function arguments
getfullargspec() - same, with support for Python-3000 features
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
signature() - get a Signature object for the callable
"""
# This module is in the public domain. No warranties.
__author__ = ('Ka-Ping Yee <ping@lfw.org>',
'Yury Selivanov <yselivanov@sprymix.com>')
import imp
import importlib.machinery
import itertools
import linecache
import os
import re
import sys
import tokenize
import types
import warnings
import functools
import builtins
from operator import attrgetter
from collections import namedtuple, OrderedDict
# Create constants for the compiler flags in Include/code.h
# We try to get them from dis to avoid duplication, but fall
# back to hardcoding so the dependency is optional
try:
    from dis import COMPILER_FLAG_NAMES as _flag_names
except ImportError:
    CO_OPTIMIZED, CO_NEWLOCALS = 0x1, 0x2
    CO_VARARGS, CO_VARKEYWORDS = 0x4, 0x8
    CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
else:
    # Mirror dis's {flag_value: name} mapping into module-level CO_* names.
    mod_dict = globals()
    for k, v in _flag_names.items():
        mod_dict["CO_" + v] = k
# See Include/object.h
TPFLAGS_IS_ABSTRACT = 1 << 20  # tp_flags bit set on classes with abstract methods
# ----------------------------------------------------------- type-checking
def ismodule(object):
    """Return true if the object is a module.

    Module objects provide these attributes:
        __cached__      pathname to byte compiled file
        __doc__         documentation string
        __file__        filename (missing for built-in modules)"""
    return isinstance(object, types.ModuleType)
def isclass(object):
    """Return true if the object is a class.

    Class objects provide these attributes:
        __doc__         documentation string
        __module__      name of module in which this class was defined"""
    return isinstance(object, type)
def ismethod(object):
    """Return true if the object is a bound instance method.

    Instance method objects provide these attributes:
        __doc__         documentation string
        __name__        name with which this method was defined
        __func__        function object containing implementation of method
        __self__        instance to which this method is bound"""
    return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
    """Return true if the object is a method descriptor.

    But not if ismethod() or isclass() or isfunction() are true.

    This is new in Python 2.2, and, for example, is true of int.__add__.
    An object passing this test has a __get__ attribute but not a __set__
    attribute, but beyond that the set of attributes varies.  __name__ is
    usually sensible, and __doc__ often is.

    Methods implemented via descriptors that also pass one of the other
    tests return false from the ismethoddescriptor() test, simply because
    the other tests promise more -- you can, e.g., count on having the
    __func__ attribute (etc) when an object passes ismethod()."""
    # Classes, bound methods and plain functions are deliberately excluded.
    if isclass(object) or ismethod(object) or isfunction(object):
        return False
    tp = type(object)
    # Non-data descriptor: has __get__ on its type but no __set__.
    return hasattr(tp, "__get__") and not hasattr(tp, "__set__")
def isdatadescriptor(object):
    """Return true if the object is a data descriptor.

    Data descriptors have both a __get__ and a __set__ attribute.  Examples
    are properties (defined in Python) and getsets and members (defined in
    C).  Typically, data descriptors will also have __name__ and __doc__
    attributes (properties, getsets, and members have both of these
    attributes), but this is not guaranteed."""
    # Classes, bound methods and plain functions are deliberately excluded.
    if isclass(object) or ismethod(object) or isfunction(object):
        return False
    tp = type(object)
    return hasattr(tp, "__set__") and hasattr(tp, "__get__")
if hasattr(types, 'MemberDescriptorType'):
    # CPython and equivalents expose the concrete descriptor type directly.
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules (and produced by __slots__)."""
        return isinstance(object, types.MemberDescriptorType)
else:
    # Other implementations lack the type, so nothing can qualify.
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules; this implementation cannot recognize them."""
        return False
if hasattr(types, 'GetSetDescriptorType'):
    # CPython and equivalents expose the concrete descriptor type directly.
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.GetSetDescriptorType)
else:
    # Other implementations lack the type, so nothing can qualify.
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules; this implementation cannot recognize them."""
        return False
def isfunction(object):
    """Return true if the object is a user-defined function.

    Function objects provide these attributes:
        __doc__          documentation string
        __name__         name with which this function was defined
        __code__         code object containing compiled function bytecode
        __defaults__     tuple of any default values for arguments
        __globals__      global namespace in which this function was defined
        __annotations__  dict of parameter annotations
        __kwdefaults__   dict of keyword only parameters with defaults"""
    return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
    """Return true if the object is a user-defined generator function.

    Generator function objects provide the same attributes as functions;
    see help(isfunction) for the listing."""
    if not (isfunction(object) or ismethod(object)):
        return False
    # CO_GENERATOR is set on code objects compiled from generator bodies.
    return bool(object.__code__.co_flags & CO_GENERATOR)
def isgenerator(object):
    """Return true if the object is a generator.

    Generator objects provide these attributes:
        __iter__    defined to support iteration over container
        close       raises a new GeneratorExit exception inside the
                    generator to terminate the iteration
        gi_code     code object
        gi_frame    frame object or possibly None once the generator has
                    been exhausted
        gi_running  set to 1 when generator is executing, 0 otherwise
        next        return the next item from the container
        send        resumes the generator and "sends" a value that becomes
                    the result of the current yield-expression
        throw       used to raise an exception inside the generator"""
    return isinstance(object, types.GeneratorType)
def istraceback(object):
    """Return true if the object is a traceback.

    Traceback objects provide these attributes:
        tb_frame    frame object at this level
        tb_lasti    index of last attempted instruction in bytecode
        tb_lineno   current line number in Python source code
        tb_next     next inner traceback object (called by this level)"""
    return isinstance(object, types.TracebackType)
def isframe(object):
    """Return true if the object is a frame object.

    Frame objects provide these attributes:
        f_back      next outer frame object (this frame's caller)
        f_builtins  built-in namespace seen by this frame
        f_code      code object being executed in this frame
        f_globals   global namespace seen by this frame
        f_lasti     index of last attempted instruction in bytecode
        f_lineno    current line number in Python source code
        f_locals    local namespace seen by this frame
        f_trace     tracing function for this frame, or None"""
    return isinstance(object, types.FrameType)
def iscode(object):
    """Return true if the object is a code object.

    Code objects provide these attributes:
        co_argcount     number of arguments (not including * or ** args)
        co_code         string of raw compiled bytecode
        co_consts       tuple of constants used in the bytecode
        co_filename     name of file in which this code object was created
        co_firstlineno  number of first line in Python source code
        co_flags        bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
        co_lnotab       encoded mapping of line numbers to bytecode indices
        co_name         name with which this code object was defined
        co_names        tuple of names of local variables
        co_nlocals      number of local variables
        co_stacksize    virtual machine stack space required
        co_varnames     tuple of names of arguments and local variables"""
    return isinstance(object, types.CodeType)
def isbuiltin(object):
    """Return true if the object is a built-in function or method.

    Built-in functions and methods provide these attributes:
        __doc__         documentation string
        __name__        original name of this function or method
        __self__        instance to which a method is bound, or None"""
    return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
    """Return true if the object is any kind of function or method."""
    return any(check(object) for check in
               (isbuiltin, isfunction, ismethod, ismethoddescriptor))
def isabstract(object):
    """Return true if the object is an abstract base class (ABC)."""
    if not isinstance(object, type):
        return False
    return bool(object.__flags__ & TPFLAGS_IS_ABSTRACT)
def getmembers(object, predicate=None):
    """Return all members of an object as (name, value) pairs sorted by name.
    Optionally, only return members that satisfy a given predicate."""
    if isclass(object):
        search_order = (object,) + getmro(object)
    else:
        search_order = ()
    members = []
    for key in dir(object):
        # Prefer the raw __dict__ entry: some descriptors raise when their
        # __get__ is invoked (see bug #1785), so getattr() is a last resort.
        for base in search_order:
            if key in base.__dict__:
                value = base.__dict__[key]
                break
        else:
            try:
                value = getattr(object, key)
            except AttributeError:
                continue
        if not predicate or predicate(value):
            members.append((key, value))
    members.sort()
    return members
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.

    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:

    0. The name (a string).
    1. The kind of attribute this is, one of these strings:
           'class method'    created via classmethod()
           'static method'   created via staticmethod()
           'property'        created via property()
           'method'          any other flavor of method
           'data'            not a method
    2. The class which defined this attribute (a class).
    3. The object as obtained directly from the defining class's
       __dict__, not via getattr.  This is especially important for
       data attributes:  C.data is just a data object, but
       C.__dict__['data'] may be a data descriptor with additional
       info, like a __doc__ string.
    """
    mro = getmro(cls)
    names = dir(cls)
    result = []
    for name in names:
        # Get the object associated with the name, and where it was defined.
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr.  Static and class methods are dramatic examples.
        # Furthermore, some objects may raise an Exception when fetched with
        # getattr(). This is the case with some descriptors (bug #1785).
        # Thus, we only use getattr() as a last resort.
        homecls = None
        for base in (cls,) + mro:
            if name in base.__dict__:
                obj = base.__dict__[name]
                homecls = base
                break
        else:
            obj = getattr(cls, name)
            # A descriptor's __objclass__, when present, names the class
            # that actually defines it.
            homecls = getattr(obj, "__objclass__", homecls)
        # Classify the object by its raw __dict__ form first.
        if isinstance(obj, staticmethod):
            kind = "static method"
        elif isinstance(obj, classmethod):
            kind = "class method"
        elif isinstance(obj, property):
            kind = "property"
        elif ismethoddescriptor(obj):
            kind = "method"
        elif isdatadescriptor(obj):
            kind = "data"
        else:
            # Fall back to the getattr view to distinguish plain methods
            # from ordinary data attributes.
            obj_via_getattr = getattr(cls, name)
            if (isfunction(obj_via_getattr) or
                ismethoddescriptor(obj_via_getattr)):
                kind = "method"
            else:
                kind = "data"
            obj = obj_via_getattr
        result.append(Attribute(name, kind, homecls, obj))
    return result
# ----------------------------------------------------------- class helpers
def getmro(cls):
    """Return tuple of base classes (including cls) in method resolution order."""
    return cls.__mro__
# -------------------------------------------------- source code extraction
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text."""
    expanded = line.expandtabs()
    return len(expanded) - len(expanded.lstrip())
def getdoc(object):
    """Get the documentation string for an object.

    All tabs are expanded to spaces.  Any indentation that can be uniformly
    removed from the second line onwards is removed."""
    doc = getattr(object, '__doc__', None)
    if not isinstance(doc, str):
        return None
    return cleandoc(doc)
def cleandoc(doc):
    """Clean up indentation from docstrings.

    Tabs are expanded, any whitespace that can be uniformly removed from
    the second line onwards is removed, and leading/trailing blank lines
    are dropped."""
    try:
        lines = doc.expandtabs().split('\n')
    except UnicodeError:
        return None
    # Smallest indentation over all non-blank lines after the first.
    margin = sys.maxsize
    for line in lines[1:]:
        stripped = len(line.lstrip())
        if stripped:
            margin = min(margin, len(line) - stripped)
    # Remove indentation.
    if lines:
        lines[0] = lines[0].lstrip()
    if margin < sys.maxsize:
        lines[1:] = [line[margin:] for line in lines[1:]]
    # Remove any trailing or leading blank lines.
    while lines and not lines[-1]:
        lines.pop()
    while lines and not lines[0]:
        lines.pop(0)
    return '\n'.join(lines)
def getfile(object):
    """Work out which source or compiled file an object was defined in."""
    if ismodule(object):
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in module'.format(object))
    if isclass(object):
        # Resolve a class through the module it was defined in.
        object = sys.modules.get(object.__module__)
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in class'.format(object))
    # Peel wrappers, in order, down to the underlying code object.
    for test, attr in ((ismethod, '__func__'), (isfunction, '__code__'),
                       (istraceback, 'tb_frame'), (isframe, 'f_code')):
        if test(object):
            object = getattr(object, attr)
    if iscode(object):
        return object.co_filename
    raise TypeError('{!r} is not a module, class, method, '
                    'function, traceback, frame, or code object'.format(object))
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')
def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file."""
    # Deprecated: relies on the old imp API; prefer getmodulename().
    warnings.warn('inspect.getmoduleinfo() is deprecated', DeprecationWarning,
                  2)
    filename = os.path.basename(path)
    suffixes = [(-len(suffix), suffix, mode, mtype)
                for suffix, mode, mtype in imp.get_suffixes()]
    suffixes.sort()  # try longest suffixes first, in case they overlap
    for neglen, suffix, mode, mtype in suffixes:
        if filename[neglen:] == suffix:
            return ModuleInfo(filename[:neglen], suffix, mode, mtype)
    # Falls through (returning None) when no registered suffix matches.
def getmodulename(path):
    """Return the module name for a given file, or None."""
    fname = os.path.basename(path)
    # Longest suffixes are tried first, in case they overlap.
    candidates = sorted((-len(suffix), suffix)
                        for suffix in importlib.machinery.all_suffixes())
    for neglen, suffix in candidates:
        if fname.endswith(suffix):
            return fname[:neglen]
    return None
def getsourcefile(object):
    """Return the filename that can be used to locate an object's source.
    Return None if no way can be identified to get the source.
    """
    filename = getfile(object)
    all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:]
    all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:]
    if any(filename.endswith(s) for s in all_bytecode_suffixes):
        # Map a bytecode path back to the corresponding .py source path.
        filename = (os.path.splitext(filename)[0] +
                    importlib.machinery.SOURCE_SUFFIXES[0])
    elif any(filename.endswith(s) for s in
                 importlib.machinery.EXTENSION_SUFFIXES):
        # C extension modules have no Python source.
        return None
    if os.path.exists(filename):
        return filename
    # only return a non-existent filename if the module has a PEP 302 loader
    if hasattr(getmodule(object, filename), '__loader__'):
        return filename
    # or it is in the linecache
    if filename in linecache.cache:
        return filename
    # Otherwise None is returned implicitly.
def getabsfile(object, _filename=None):
    """Return an absolute path to the source or compiled file for an object.

    The idea is for each object to have a unique origin, so this routine
    normalizes the result as much as possible."""
    filename = _filename if _filename is not None else (
        getsourcefile(object) or getfile(object))
    return os.path.normcase(os.path.abspath(filename))
modulesbyfile = {}      # cache: normalized file name -> module name
_filesbymodname = {}    # cache: module name -> raw __file__ last seen
def getmodule(object, _filename=None):
    """Return the module an object was defined in, or None if not found."""
    if ismodule(object):
        return object
    if hasattr(object, '__module__'):
        return sys.modules.get(object.__module__)
    # Try the filename to modulename cache
    if _filename is not None and _filename in modulesbyfile:
        return sys.modules.get(modulesbyfile[_filename])
    # Try the cache again with the absolute file name
    try:
        file = getabsfile(object, _filename)
    except TypeError:
        return None
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Update the filename to module name cache and check yet again
    # Copy sys.modules in order to cope with changes while iterating
    for modname, module in list(sys.modules.items()):
        if ismodule(module) and hasattr(module, '__file__'):
            f = module.__file__
            if f == _filesbymodname.get(modname, None):
                # Have already mapped this module, so skip it
                continue
            _filesbymodname[modname] = f
            f = getabsfile(module)
            # Always map to the name the module knows itself by
            modulesbyfile[f] = modulesbyfile[
                os.path.realpath(f)] = module.__name__
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Check the main module
    main = sys.modules['__main__']
    if not hasattr(object, '__name__'):
        return None
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # Check builtins
    builtin = sys.modules['builtins']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
    # None is returned implicitly when every lookup strategy fails.
def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved."""
    file = getfile(object)
    sourcefile = getsourcefile(object)
    # '<...>' pseudo-filenames (e.g. '<stdin>') may still be in linecache.
    if not sourcefile and file[:1] + file[-1:] != '<>':
        raise IOError('source code not available')
    file = sourcefile if sourcefile else file
    module = getmodule(object, file)
    if module:
        # Passing the module dict lets linecache consult the module loader.
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise IOError('could not get source code')
    if ismodule(object):
        return lines, 0
    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise IOError('could not find class definition')
    # Peel wrappers down to a code object.
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        lnum = object.co_firstlineno - 1
        # Scan backwards from the first line for the def, lambda or
        # decorator line that introduces the code object.
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        while lnum > 0:
            if pat.match(lines[lnum]): break
            lnum = lnum - 1
        return lines, lnum
    raise IOError('could not find code object')
def getcomments(object):
    """Get lines of comments immediately preceding an object's source code.

    Returns None when source can't be found.
    """
    try:
        lines, lnum = findsource(object)
    except (IOError, TypeError):
        return None
    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        if lines and lines[0][:2] == '#!': start = 1
        while start < len(lines) and lines[start].strip() in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(lines[end].expandtabs())
                end = end + 1
            return ''.join(comments)
    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and lines[end].lstrip()[:1] == '#' and \
            indentsize(lines[end]) == indent:
            comments = [lines[end].expandtabs().lstrip()]
            if end > 0:
                # Walk upwards collecting contiguous comment lines at the
                # same indentation.
                end = end - 1
                comment = lines[end].expandtabs().lstrip()
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = lines[end].expandtabs().lstrip()
            # Strip bare '#' lines from both ends of the collected block.
            while comments and comments[0].strip() == '#':
                comments[:1] = []
            while comments and comments[-1].strip() == '#':
                comments[-1:] = []
            return ''.join(comments)
class EndOfBlock(Exception): pass
class BlockFinder:
    """Provide a tokeneater() method to detect the end of a code block."""
    def __init__(self):
        self.indent = 0        # current INDENT/DEDENT nesting depth
        self.islambda = False  # True when the scanned block is a lambda
        self.started = False   # True once def/class/lambda has been seen
        self.passline = False  # True while skipping to the end of a line
        self.last = 1          # last source row seen at a logical NEWLINE
    def tokeneater(self, type, token, srowcol, erowcol, line):
        # Fed one token at a time (as produced by tokenize.generate_tokens);
        # raises EndOfBlock once the block's extent has been determined.
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srowcol[0]
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            #  not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock
def getblock(lines):
    """Extract the block of code at the top of the given list of lines."""
    finder = BlockFinder()
    try:
        for tok in tokenize.generate_tokens(iter(lines).__next__):
            finder.tokeneater(*tok)
    except (EndOfBlock, IndentationError):
        # EndOfBlock marks a successfully delimited block; a dangling
        # IndentationError just means the block ran to the end of input.
        pass
    return lines[:finder.last]
def getsourcelines(object):
    """Return a list of source lines and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of the lines
    corresponding to the object and the line number indicates where in the
    original source file the first line of code was found.  An IOError is
    raised if the source code cannot be retrieved."""
    lines, lnum = findsource(object)
    if ismodule(object):
        return lines, 0
    return getblock(lines[lnum:]), lnum + 1
def getsource(object):
    """Return the text of the source code for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a single string.  An
    IOError is raised if the source code cannot be retrieved."""
    return ''.join(getsourcelines(object)[0])
# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
    """Recursive helper for getclasstree(): build the nested hierarchy list."""
    classes.sort(key=attrgetter('__module__', '__name__'))
    tree = []
    for klass in classes:
        tree.append((klass, klass.__bases__))
        if klass in children:
            # Subclasses appear as a nested list right after their base.
            tree.append(walktree(children[klass], children, klass))
    return tree
def getclasstree(classes, unique=False):
    """Arrange the given list of classes into a hierarchy of nested lists.

    Where a nested list appears, it contains classes derived from the class
    whose entry immediately precedes the list.  Each entry is a 2-tuple
    containing a class and a tuple of its base classes.  If the 'unique'
    argument is true, exactly one entry appears in the returned structure
    for each class in the given list.  Otherwise, classes using multiple
    inheritance and their descendants will appear multiple times."""
    children = {}   # maps a base class to the listed classes derived from it
    roots = []
    for c in classes:
        if c.__bases__:
            for parent in c.__bases__:
                if not parent in children:
                    children[parent] = []
                if c not in children[parent]:
                    children[parent].append(c)
                # With unique=True, record the class under one base only.
                if unique and parent in classes: break
        elif c not in roots:
            roots.append(c)
    # Bases that were not in the input list become additional roots.
    for parent in children:
        if parent not in classes:
            roots.append(parent)
    return walktree(roots, children, None)
# ------------------------------------------------ argument list extraction
Arguments = namedtuple('Arguments', 'args, varargs, varkw')
def getargs(co):
    """Get information about the arguments accepted by a code object.

    Three things are returned: (args, varargs, varkw), where 'args' is the
    list of argument names (keyword-only names appended at the end), and
    'varargs' and 'varkw' are the names of the * and ** arguments or None."""
    pos_args, varargs, kwonly, varkw = _getfullargs(co)
    return Arguments(pos_args + kwonly, varargs, varkw)
def _getfullargs(co):
    """Get information about the arguments accepted by a code object.

    Four things are returned: (args, varargs, kwonlyargs, varkw), where
    'args' and 'kwonlyargs' are lists of argument names, and 'varargs'
    and 'varkw' are the names of the * and ** arguments or None.

    Raises TypeError if co is not a code object.
    """
    if not iscode(co):
        raise TypeError('{!r} is not a code object'.format(co))
    nargs = co.co_argcount
    names = co.co_varnames
    nkwargs = co.co_kwonlyargcount
    # co_varnames starts with positional args, then keyword-only args.
    args = list(names[:nargs])
    kwonlyargs = list(names[nargs:nargs+nkwargs])
    # (Removed a dead 'step = 0' local left over from older versions.)
    nargs += nkwargs
    varargs = None
    if co.co_flags & CO_VARARGS:
        # The *args name follows all positional and keyword-only names.
        varargs = co.co_varnames[nargs]
        nargs = nargs + 1
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        varkw = co.co_varnames[nargs]
    return args, varargs, kwonlyargs, varkw
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
def getargspec(func):
    """Get the names and default values of a function's arguments.

    A tuple of four things is returned: (args, varargs, varkw, defaults).
    'args' is a list of the argument names, 'varargs' and 'varkw' the names
    of the * and ** arguments or None, and 'defaults' an n-tuple of default
    values for the last n arguments.

    Use the getfullargspec() API for Python-3000 code, as annotations
    and keyword arguments are supported.  getargspec() will raise ValueError
    if the func has either annotations or keyword arguments.
    """
    spec = getfullargspec(func)
    if spec.kwonlyargs or spec.annotations:
        raise ValueError("Function has keyword-only arguments or annotations"
                         ", use getfullargspec() API which can support them")
    return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults)
FullArgSpec = namedtuple('FullArgSpec',
    'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
def getfullargspec(func):
    """Get the names and default values of a function's arguments.

    A tuple of seven things is returned:
    (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
    'args' is a list of the argument names; 'varargs' and 'varkw' the names
    of the * and ** arguments or None; 'defaults' an n-tuple of defaults for
    the last n positional arguments; 'kwonlyargs' a list of keyword-only
    argument names; 'kwonlydefaults' a dict mapping kwonly names to defaults;
    'annotations' a dict mapping argument names to annotations.
    The first four items in the tuple correspond to getargspec().
    """
    if ismethod(func):
        # Work on the underlying function of a bound method.
        func = func.__func__
    if not isfunction(func):
        raise TypeError('{!r} is not a Python function'.format(func))
    code = func.__code__
    args, varargs, kwonlyargs, varkw = _getfullargs(code)
    return FullArgSpec(args, varargs, varkw, func.__defaults__,
                       kwonlyargs, func.__kwdefaults__, func.__annotations__)
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
def getargvalues(frame):
    """Get information about arguments passed into a particular frame.

    Returns ArgInfo(args, varargs, varkw, locals): the argument names, the
    * and ** argument names (or None), and the frame's locals dictionary."""
    names, star_name, dstar_name = getargs(frame.f_code)
    return ArgInfo(names, star_name, dstar_name, frame.f_locals)
def formatannotation(annotation, base_module=None):
    """Format an annotation for display.

    Class names are shortened for builtins and for classes belonging to
    *base_module*; everything else is rendered with repr()."""
    if not isinstance(annotation, type):
        return repr(annotation)
    if annotation.__module__ in ('builtins', base_module):
        return annotation.__name__
    return annotation.__module__ + '.' + annotation.__name__
def formatannotationrelativeto(object):
    """Return a one-argument annotation formatter bound to *object*'s module."""
    base_module = getattr(object, '__module__', None)
    def _formatannotation(annotation):
        return formatannotation(annotation, base_module)
    return _formatannotation
# TODO(brython): flagged "fix me" by the Brython port -- verify formatargspec
# below against the Brython runtime.
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  kwonlyargs=(), kwonlydefaults={}, annotations={},
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  formatreturns=lambda text: ' -> ' + text,
                  formatannotation=formatannotation):
    """Format an argument spec from the values returned by getargspec
    or getfullargspec.

    The first seven arguments are (args, varargs, varkw, defaults,
    kwonlyargs, kwonlydefaults, annotations).  The other five arguments
    are the corresponding optional formatting functions that are called to
    turn names and values into strings.  The last argument is an optional
    function to format the sequence of arguments."""
    def formatargandannotation(arg):
        # Render "name" or "name: annotation".
        result = formatarg(arg)
        if arg in annotations:
            result += ': ' + formatannotation(annotations[arg])
        return result
    specs = []
    if defaults:
        # Defaults align with the trailing positional arguments.
        firstdefault = len(args) - len(defaults)
    for i, arg in enumerate(args):
        spec = formatargandannotation(arg)
        if defaults and i >= firstdefault:
            spec = spec + formatvalue(defaults[i - firstdefault])
        specs.append(spec)
    if varargs is not None:
        specs.append(formatvarargs(formatargandannotation(varargs)))
    else:
        if kwonlyargs:
            # A bare '*' separates keyword-only args when there is no *args.
            specs.append('*')
    if kwonlyargs:
        for kwonlyarg in kwonlyargs:
            spec = formatargandannotation(kwonlyarg)
            if kwonlydefaults and kwonlyarg in kwonlydefaults:
                spec += formatvalue(kwonlydefaults[kwonlyarg])
            specs.append(spec)
    if varkw is not None:
        specs.append(formatvarkw(formatargandannotation(varkw)))
    result = '(' + ', '.join(specs) + ')'
    if 'return' in annotations:
        result += formatreturns(formatannotation(annotations['return']))
    return result
# TODO(brython): formatargvalues is disabled pending Brython support; the
# original implementation is kept below, commented out, for reference.
#def formatargvalues(args, varargs, varkw, locals,
# formatarg=str,
# formatvarargs=lambda name: '*' + name,
# formatvarkw=lambda name: '**' + name,
# formatvalue=lambda value: '=' + repr(value)):
# """Format an argument spec from the 4 values returned by getargvalues.
# The first four arguments are (args, varargs, varkw, locals). The
# next four arguments are the corresponding optional formatting functions
# that are called to turn names and values into strings. The ninth
# argument is an optional function to format the sequence of arguments."""
# def convert(name, locals=locals,
# formatarg=formatarg, formatvalue=formatvalue):
# return formatarg(name) + formatvalue(locals[name])
# specs = []
# for i in range(len(args)):
# specs.append(convert(args[i]))
# if varargs:
# specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
# if varkw:
# specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
# return '(' + ', '.join(specs) + ')'
def _missing_arguments(f_name, argnames, pos, values):
names = [repr(name) for name in argnames if name not in values]
missing = len(names)
if missing == 1:
s = names[0]
elif missing == 2:
s = "{} and {}".format(*names)
else:
tail = ", {} and {}".format(names[-2:])
del names[-2:]
s = ", ".join(names) + tail
raise TypeError("%s() missing %i required %s argument%s: %s" %
(f_name, missing,
"positional" if pos else "keyword-only",
"" if missing == 1 else "s", s))
def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
atleast = len(args) - defcount
kwonly_given = len([arg for arg in kwonly if arg in values])
if varargs:
plural = atleast != 1
sig = "at least %d" % (atleast,)
elif defcount:
plural = True
sig = "from %d to %d" % (atleast, len(args))
else:
plural = len(args) != 1
sig = str(len(args))
kwonly_sig = ""
if kwonly_given:
msg = " positional argument%s (and %d keyword-only argument%s)"
kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,
"s" if kwonly_given != 1 else ""))
raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
(f_name, sig, "s" if plural else "", given, kwonly_sig,
"was" if given == 1 and not kwonly_given else "were"))
def getcallargs(func, *positional, **named):
    """Get the mapping of arguments to values.

    A dict is returned, with keys the function argument names (including the
    names of the * and ** arguments, if any), and values the respective bound
    values from 'positional' and 'named'.

    Raises TypeError for the same binding errors that actually calling the
    function would raise (missing, duplicate or unexpected arguments)."""
    spec = getfullargspec(func)
    args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec
    f_name = func.__name__
    arg2value = {}
    if ismethod(func) and func.__self__ is not None:
        # implicit 'self' (or 'cls' for classmethods) argument
        positional = (func.__self__,) + positional
    num_pos = len(positional)
    num_args = len(args)
    num_defaults = len(defaults) if defaults else 0
    # Bind as many positional arguments as there are named parameters;
    # any surplus goes into *varargs (or is an error, checked below).
    n = min(num_pos, num_args)
    for i in range(n):
        arg2value[args[i]] = positional[i]
    if varargs:
        arg2value[varargs] = tuple(positional[n:])
    possible_kwargs = set(args + kwonlyargs)
    if varkw:
        arg2value[varkw] = {}
    for kw, value in named.items():
        if kw not in possible_kwargs:
            if not varkw:
                raise TypeError("%s() got an unexpected keyword argument %r" %
                                (f_name, kw))
            arg2value[varkw][kw] = value
            continue
        if kw in arg2value:
            raise TypeError("%s() got multiple values for argument %r" %
                            (f_name, kw))
        arg2value[kw] = value
    if num_pos > num_args and not varargs:
        _too_many(f_name, args, kwonlyargs, varargs, num_defaults,
                  num_pos, arg2value)
    if num_pos < num_args:
        # Check required positionals, then fill in defaults for the rest.
        req = args[:num_args - num_defaults]
        for arg in req:
            if arg not in arg2value:
                _missing_arguments(f_name, req, True, arg2value)
        for i, arg in enumerate(args[num_args - num_defaults:]):
            if arg not in arg2value:
                arg2value[arg] = defaults[i]
    missing = 0
    for kwarg in kwonlyargs:
        if kwarg not in arg2value:
            # kwonlydefaults is None when no keyword-only parameter has a
            # default; guard before the membership test to avoid
            # "argument of type 'NoneType' is not iterable".
            if kwonlydefaults and kwarg in kwonlydefaults:
                arg2value[kwarg] = kwonlydefaults[kwarg]
            else:
                missing += 1
    if missing:
        _missing_arguments(f_name, kwonlyargs, False, arg2value)
    return arg2value
# Named tuple returned by getclosurevars(): four mappings/sets describing
# how the function body's names resolve.
ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound')
def getclosurevars(func):
    """
    Get the mapping of free variables to their current values.

    Returns a named tuple of dicts mapping the current nonlocal, global
    and builtin references as seen by the body of the function. A final
    set of unbound names that could not be resolved is also provided.
    Raises TypeError if func is not a Python function or method.
    """
    if ismethod(func):
        # Bound methods delegate to the underlying function object.
        func = func.__func__
    if not isfunction(func):
        raise TypeError("'{!r}' is not a Python function".format(func))
    code = func.__code__
    # Nonlocal references are named in co_freevars and resolved
    # by looking them up in __closure__ by positional index
    if func.__closure__ is None:
        nonlocal_vars = {}
    else:
        nonlocal_vars = {
            var : cell.cell_contents
            for var, cell in zip(code.co_freevars, func.__closure__)
       }
    # Global and builtin references are named in co_names and resolved
    # by looking them up in __globals__ or __builtins__
    global_ns = func.__globals__
    # __builtins__ may be either the builtins module or its dict,
    # depending on how the module was created; normalize to a dict.
    builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
    if ismodule(builtin_ns):
        builtin_ns = builtin_ns.__dict__
    global_vars = {}
    builtin_vars = {}
    unbound_names = set()
    for name in code.co_names:
        if name in ("None", "True", "False"):
            # Because these used to be builtins instead of keywords, they
            # may still show up as name references. We ignore them.
            continue
        try:
            global_vars[name] = global_ns[name]
        except KeyError:
            try:
                builtin_vars[name] = builtin_ns[name]
            except KeyError:
                unbound_names.add(name)
    return ClosureVars(nonlocal_vars, global_vars,
                       builtin_vars, unbound_names)
# -------------------------------------------------- stack frame extraction
# Named tuple returned by getframeinfo(): location plus source context.
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.

    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line."""
    if istraceback(frame):
        # A traceback records the line where the exception occurred;
        # unwrap it to the underlying frame.
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not isframe(frame):
        raise TypeError('{!r} is not a frame or traceback object'.format(frame))
    filename = getsourcefile(frame) or getfile(frame)
    if context > 0:
        # Center the context window on the current line (0-based start).
        start = lineno - 1 - context//2
        try:
            lines, lnum = findsource(frame)
        except IOError:
            # Source unavailable (e.g. built from a string or C code).
            lines = index = None
        else:
            # Clamp the window to the bounds of the file.
            start = max(start, 1)
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None
    return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
    """Return the current line number of *frame*.

    Frame objects compute f_lineno on demand from the code object's line
    table, so reading the attribute is always up to date."""
    current_line = frame.f_lineno
    return current_line
def getouterframes(frame, context=1):
    """Get a list of records for a frame and all higher (calling) frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    current = frame
    # Walk up the call chain via f_back until the outermost frame.
    while current is not None:
        records.append((current,) + getframeinfo(current, context))
        current = current.f_back
    return records
def getinnerframes(tb, context=1):
    """Get a list of records for a traceback's frame and all lower frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    current = tb
    # Walk down the traceback chain via tb_next until exhausted.
    while current is not None:
        records.append((current.tb_frame,) + getframeinfo(current, context))
        current = current.tb_next
    return records
def currentframe():
    """Return the frame of the caller or None if this is not possible."""
    # sys._getframe is CPython-specific; other implementations may lack it.
    if hasattr(sys, "_getframe"):
        return sys._getframe(1)
    return None
def stack(context=1):
    """Return a list of records for the stack above the caller's frame."""
    caller = sys._getframe(1)
    return getouterframes(caller, context)
def trace(context=1):
    """Return a list of records for the stack below the current exception."""
    current_tb = sys.exc_info()[2]
    return getinnerframes(current_tb, context)
# ------------------------------------------------ static version of getattr
_sentinel = object()
def _static_getmro(klass):
return type.__dict__['__mro__'].__get__(klass)
def _check_instance(obj, attr):
instance_dict = {}
try:
instance_dict = object.__getattribute__(obj, "__dict__")
except AttributeError:
pass
return dict.get(instance_dict, attr, _sentinel)
def _check_class(klass, attr):
for entry in _static_getmro(klass):
if _shadowed_dict(type(entry)) is _sentinel:
try:
return entry.__dict__[attr]
except KeyError:
pass
return _sentinel
def _is_type(obj):
try:
_static_getmro(obj)
except TypeError:
return False
return True
def _shadowed_dict(klass):
dict_attr = type.__dict__["__dict__"]
for entry in _static_getmro(klass):
try:
class_dict = dict_attr.__get__(entry)["__dict__"]
except KeyError:
pass
else:
if not (type(class_dict) is types.GetSetDescriptorType and
class_dict.__name__ == "__dict__" and
class_dict.__objclass__ is entry):
return class_dict
return _sentinel
def getattr_static(obj, attr, default=_sentinel):
    """Retrieve attributes without triggering dynamic lookup via the
    descriptor protocol, __getattr__ or __getattribute__.

    Note: this function may not be able to retrieve all attributes
    that getattr can fetch (like dynamically created attributes)
    and may find attributes that getattr can't (like descriptors
    that raise AttributeError). It can also return descriptor objects
    instead of instance members in some cases. See the
    documentation for details.
    """
    instance_result = _sentinel
    if not _is_type(obj):
        # Instance: look in its __dict__, but only if __dict__ has not
        # been shadowed on the class (a member descriptor is the normal
        # slot and is fine).
        klass = type(obj)
        dict_attr = _shadowed_dict(klass)
        if (dict_attr is _sentinel or
            type(dict_attr) is types.MemberDescriptorType):
            instance_result = _check_instance(obj, attr)
    else:
        klass = obj
    klass_result = _check_class(klass, attr)
    if instance_result is not _sentinel and klass_result is not _sentinel:
        # A data descriptor (defines both __get__ and __set__) on the
        # class takes precedence over the instance attribute.
        if (_check_class(type(klass_result), '__get__') is not _sentinel and
            _check_class(type(klass_result), '__set__') is not _sentinel):
            return klass_result
    if instance_result is not _sentinel:
        return instance_result
    if klass_result is not _sentinel:
        return klass_result
    if obj is klass:
        # for types we check the metaclass too
        for entry in _static_getmro(type(klass)):
            if _shadowed_dict(type(entry)) is _sentinel:
                try:
                    return entry.__dict__[attr]
                except KeyError:
                    pass
    if default is not _sentinel:
        return default
    raise AttributeError(attr)
# ------------------------------------------------ generator introspection
# Symbolic generator states returned by getgeneratorstate().
GEN_CREATED = 'GEN_CREATED'
GEN_RUNNING = 'GEN_RUNNING'
GEN_SUSPENDED = 'GEN_SUSPENDED'
GEN_CLOSED = 'GEN_CLOSED'
def getgeneratorstate(generator):
    """Get current state of a generator-iterator.

    Possible states are:
      GEN_CREATED: Waiting to start execution.
      GEN_RUNNING: Currently being executed by the interpreter.
      GEN_SUSPENDED: Currently suspended at a yield expression.
      GEN_CLOSED: Execution has completed.
    """
    if generator.gi_running:
        return GEN_RUNNING
    frame = generator.gi_frame
    if frame is None:
        # The frame is discarded once the generator finishes or is closed.
        return GEN_CLOSED
    # f_lasti is -1 until the first instruction has executed.
    return GEN_CREATED if frame.f_lasti == -1 else GEN_SUSPENDED
def getgeneratorlocals(generator):
    """
    Get the mapping of generator local variables to their current values.

    A dict is returned, with the keys the local variable names and values
    the bound values.  An exhausted or closed generator yields {}."""
    if not isgenerator(generator):
        raise TypeError("'{!r}' is not a Python generator".format(generator))
    frame = getattr(generator, "gi_frame", None)
    return frame.f_locals if frame is not None else {}
###############################################################################
### Function Signature Object (PEP 362)
###############################################################################
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
types.BuiltinFunctionType)
def _get_user_defined_method(cls, method_name):
try:
meth = getattr(cls, method_name)
except AttributeError:
return
else:
if not isinstance(meth, _NonUserDefinedCallables):
# Once '__signature__' will be added to 'C'-level
# callables, this check won't be necessary
return meth
def signature(obj):
    '''Get a signature object for the passed callable.

    Dispatches on the kind of callable: bound methods (skipping self/cls),
    objects with an explicit __signature__, decorated functions exposing
    __wrapped__, plain functions, functools.partial objects, classes
    (via metaclass __call__, __new__ or __init__) and callable instances.
    Raises TypeError for non-callables and ValueError for callables whose
    signature cannot be determined.'''
    if not callable(obj):
        raise TypeError('{!r} is not a callable object'.format(obj))
    if isinstance(obj, types.MethodType):
        # In this case we skip the first parameter of the underlying
        # function (usually `self` or `cls`).
        sig = signature(obj.__func__)
        return sig.replace(parameters=tuple(sig.parameters.values())[1:])
    try:
        sig = obj.__signature__
    except AttributeError:
        pass
    else:
        if sig is not None:
            return sig
    try:
        # Was this function wrapped by a decorator?
        wrapped = obj.__wrapped__
    except AttributeError:
        pass
    else:
        return signature(wrapped)
    if isinstance(obj, types.FunctionType):
        return Signature.from_function(obj)
    if isinstance(obj, functools.partial):
        # Start from the wrapped callable's signature, then remove or
        # re-default the parameters consumed by the partial object.
        sig = signature(obj.func)
        new_params = OrderedDict(sig.parameters.items())
        partial_args = obj.args or ()
        partial_keywords = obj.keywords or {}
        try:
            ba = sig.bind_partial(*partial_args, **partial_keywords)
        except TypeError as ex:
            msg = 'partial object {!r} has incorrect arguments'.format(obj)
            raise ValueError(msg) from ex
        for arg_name, arg_value in ba.arguments.items():
            param = new_params[arg_name]
            if arg_name in partial_keywords:
                # We set a new default value, because the following code
                # is correct:
                #
                #   >>> def foo(a): print(a)
                #   >>> print(partial(partial(foo, a=10), a=20)())
                #   20
                #   >>> print(partial(partial(foo, a=10), a=20)(a=30))
                #   30
                #
                # So, with 'partial' objects, passing a keyword argument is
                # like setting a new default value for the corresponding
                # parameter
                #
                # We also mark this parameter with '_partial_kwarg'
                # flag.  Later, in '_bind', the 'default' value of this
                # parameter will be added to 'kwargs', to simulate
                # the 'functools.partial' real call.
                new_params[arg_name] = param.replace(default=arg_value,
                                                     _partial_kwarg=True)
            elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
                            not param._partial_kwarg):
                # Positionally-bound parameter: it no longer appears in
                # the partial's signature at all.
                new_params.pop(arg_name)
        return sig.replace(parameters=new_params.values())
    sig = None
    if isinstance(obj, type):
        # obj is a class or a metaclass
        # First, let's see if it has an overloaded __call__ defined
        # in its metaclass
        call = _get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = signature(call)
        else:
            # Now we check if the 'obj' class has a '__new__' method
            new = _get_user_defined_method(obj, '__new__')
            if new is not None:
                sig = signature(new)
            else:
                # Finally, we should have at least __init__ implemented
                init = _get_user_defined_method(obj, '__init__')
                if init is not None:
                    sig = signature(init)
    elif not isinstance(obj, _NonUserDefinedCallables):
        # An object with __call__
        # We also check that the 'obj' is not an instance of
        # _WrapperDescriptor or _MethodWrapper to avoid
        # infinite recursion (and even potential segfault)
        call = _get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = signature(call)
    if sig is not None:
        # For classes and objects we skip the first parameter of their
        # __call__, __new__, or __init__ methods
        return sig.replace(parameters=tuple(sig.parameters.values())[1:])
    if isinstance(obj, types.BuiltinFunctionType):
        # Raise a nicer error message for builtins
        msg = 'no signature found for builtin function {!r}'.format(obj)
        raise ValueError(msg)
    raise ValueError('callable {!r} is not supported by signature'.format(obj))
class _void:
'''A private marker - used in Parameter & Signature'''
class _empty:
pass
class _ParameterKind(int):
def __new__(self, *args, name):
obj = int.__new__(self, *args)
obj._name = name
return obj
def __str__(self):
return self._name
def __repr__(self):
return '<_ParameterKind: {!r}>'.format(self._name)
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter:
    '''Represents a parameter in a function signature.

    Has the following public attributes:
    * name : str
        The name of the parameter as a string.
    * default : object
        The default value for the parameter if specified.  If the
        parameter has no default value, this attribute is set to
        `Parameter.empty`.
    * annotation
        The annotation for the parameter if specified.  If the
        parameter has no annotation, this attribute is set to
        `Parameter.empty`.
    * kind : str
        Describes how argument values are bound to the parameter.
        Possible values: `Parameter.POSITIONAL_ONLY`,
        `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
        `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.

    Parameter objects are immutable; use replace() to derive variants.
    '''
    __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')
    POSITIONAL_ONLY = _POSITIONAL_ONLY
    POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
    VAR_POSITIONAL = _VAR_POSITIONAL
    KEYWORD_ONLY = _KEYWORD_ONLY
    VAR_KEYWORD = _VAR_KEYWORD
    empty = _empty
    def __init__(self, name, kind, *, default=_empty, annotation=_empty,
                 _partial_kwarg=False):
        if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
                        _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
            raise ValueError("invalid value for 'Parameter.kind' attribute")
        self._kind = kind
        if default is not _empty:
            # *args / **kwargs parameters cannot carry defaults.
            if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
                msg = '{} parameters cannot have default values'.format(kind)
                raise ValueError(msg)
        self._default = default
        self._annotation = annotation
        if name is None:
            # Unnamed parameters are only legal for positional-only slots
            # (e.g. parameters of some builtins).
            if kind != _POSITIONAL_ONLY:
                raise ValueError("None is not a valid name for a "
                                 "non-positional-only parameter")
            self._name = name
        else:
            name = str(name)
            if kind != _POSITIONAL_ONLY and not name.isidentifier():
                msg = '{!r} is not a valid parameter name'.format(name)
                raise ValueError(msg)
            self._name = name
        # Internal flag: set by signature() for keyword arguments frozen
        # by functools.partial; consumed by Signature._bind.
        self._partial_kwarg = _partial_kwarg
    @property
    def name(self):
        return self._name
    @property
    def default(self):
        return self._default
    @property
    def annotation(self):
        return self._annotation
    @property
    def kind(self):
        return self._kind
    def replace(self, *, name=_void, kind=_void, annotation=_void,
                default=_void, _partial_kwarg=_void):
        '''Creates a customized copy of the Parameter.'''
        if name is _void:
            name = self._name
        if kind is _void:
            kind = self._kind
        if annotation is _void:
            annotation = self._annotation
        if default is _void:
            default = self._default
        if _partial_kwarg is _void:
            _partial_kwarg = self._partial_kwarg
        return type(self)(name, kind, default=default, annotation=annotation,
                          _partial_kwarg=_partial_kwarg)
    def __str__(self):
        kind = self.kind
        formatted = self._name
        if kind == _POSITIONAL_ONLY:
            # Positional-only parameters are rendered in angle brackets.
            if formatted is None:
                formatted = ''
            formatted = '<{}>'.format(formatted)
        # Add annotation and default value
        if self._annotation is not _empty:
            formatted = '{}:{}'.format(formatted,
                                       formatannotation(self._annotation))
        if self._default is not _empty:
            formatted = '{}={}'.format(formatted, repr(self._default))
        if kind == _VAR_POSITIONAL:
            formatted = '*' + formatted
        elif kind == _VAR_KEYWORD:
            formatted = '**' + formatted
        return formatted
    def __repr__(self):
        return '<{} at {:#x} {!r}>'.format(self.__class__.__name__,
                                           id(self), self.name)
    def __eq__(self, other):
        # Note: _partial_kwarg is deliberately excluded from equality.
        return (issubclass(other.__class__, Parameter) and
                self._name == other._name and
                self._kind == other._kind and
                self._default == other._default and
                self._annotation == other._annotation)
    def __ne__(self, other):
        return not self.__eq__(other)
class BoundArguments:
    '''Result of `Signature.bind` call.  Holds the mapping of arguments
    to the function's parameters.

    Has the following public attributes:
    * arguments : OrderedDict
        An ordered mutable mapping of parameters' names to arguments' values.
        Does not contain arguments' default values.
    * signature : Signature
        The Signature object that created this instance.
    * args : tuple
        Tuple of positional arguments values.
    * kwargs : dict
        Dict of keyword arguments values.
    '''
    def __init__(self, signature, arguments):
        self.arguments = arguments
        self._signature = signature
    @property
    def signature(self):
        return self._signature
    @property
    def args(self):
        # Collect values for parameters that can be passed positionally,
        # stopping at the first keyword-only/VAR_KEYWORD/partial-kwarg
        # parameter or the first unbound parameter.
        args = []
        for param_name, param in self._signature.parameters.items():
            if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                                            param._partial_kwarg):
                # Keyword arguments mapped by 'functools.partial'
                # (Parameter._partial_kwarg is True) are mapped
                # in 'BoundArguments.kwargs', along with VAR_KEYWORD &
                # KEYWORD_ONLY
                break
            try:
                arg = self.arguments[param_name]
            except KeyError:
                # We're done here. Other arguments
                # will be mapped in 'BoundArguments.kwargs'
                break
            else:
                if param.kind == _VAR_POSITIONAL:
                    # *args
                    args.extend(arg)
                else:
                    # plain argument
                    args.append(arg)
        return tuple(args)
    @property
    def kwargs(self):
        # Everything that `args` did not consume ends up here; the
        # kwargs_started flag marks the point in parameter order where
        # positional passing stops being possible.
        kwargs = {}
        kwargs_started = False
        for param_name, param in self._signature.parameters.items():
            if not kwargs_started:
                if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                                                param._partial_kwarg):
                    kwargs_started = True
                else:
                    if param_name not in self.arguments:
                        kwargs_started = True
                        continue
            if not kwargs_started:
                continue
            try:
                arg = self.arguments[param_name]
            except KeyError:
                pass
            else:
                if param.kind == _VAR_KEYWORD:
                    # **kwargs
                    kwargs.update(arg)
                else:
                    # plain keyword argument
                    kwargs[param_name] = arg
        return kwargs
    def __eq__(self, other):
        return (issubclass(other.__class__, BoundArguments) and
                self.signature == other.signature and
                self.arguments == other.arguments)
    def __ne__(self, other):
        return not self.__eq__(other)
class Signature:
    '''A Signature object represents the overall signature of a function.

    It stores a Parameter object for each parameter accepted by the
    function, as well as information specific to the function itself.

    A Signature object has the following public attributes and methods:
    * parameters : OrderedDict
        An ordered mapping of parameters' names to the corresponding
        Parameter objects (keyword-only arguments are in the same order
        as listed in `code.co_varnames`).
    * return_annotation : object
        The annotation for the return type of the function if specified.
        If the function has no annotation for its return type, this
        attribute is set to `Signature.empty`.
    * bind(*args, **kwargs) -> BoundArguments
        Creates a mapping from positional and keyword arguments to
        parameters.
    * bind_partial(*args, **kwargs) -> BoundArguments
        Creates a partial mapping from positional and keyword arguments
        to parameters (simulating 'functools.partial' behavior.)
    '''
    __slots__ = ('_return_annotation', '_parameters')
    _parameter_cls = Parameter
    _bound_arguments_cls = BoundArguments
    empty = _empty
    def __init__(self, parameters=None, *, return_annotation=_empty,
                 __validate_parameters__=True):
        '''Constructs Signature from the given list of Parameter
        objects and 'return_annotation'.  All arguments are optional.

        With __validate_parameters__ (the default) the parameters are
        checked for correct kind ordering and duplicate names; internal
        callers that build from trusted data pass False to skip this.
        '''
        if parameters is None:
            params = OrderedDict()
        else:
            if __validate_parameters__:
                params = OrderedDict()
                top_kind = _POSITIONAL_ONLY
                for idx, param in enumerate(parameters):
                    kind = param.kind
                    # Kinds are ints; they must be non-decreasing
                    # (positional-only first, **kwargs last).
                    if kind < top_kind:
                        msg = 'wrong parameter order: {} before {}'
                        msg = msg.format(top_kind, param.kind)
                        raise ValueError(msg)
                    else:
                        top_kind = kind
                    name = param.name
                    if name is None:
                        # Unnamed (positional-only) parameters get their
                        # index as a synthetic name.
                        name = str(idx)
                        param = param.replace(name=name)
                    if name in params:
                        msg = 'duplicate parameter name: {!r}'.format(name)
                        raise ValueError(msg)
                    params[name] = param
            else:
                params = OrderedDict(((param.name, param)
                                                for param in parameters))
        # Read-only view so the signature is effectively immutable.
        self._parameters = types.MappingProxyType(params)
        self._return_annotation = return_annotation
    @classmethod
    def from_function(cls, func):
        '''Constructs Signature for the given python function'''
        if not isinstance(func, types.FunctionType):
            raise TypeError('{!r} is not a Python function'.format(func))
        Parameter = cls._parameter_cls
        # Parameter information.
        func_code = func.__code__
        pos_count = func_code.co_argcount
        arg_names = func_code.co_varnames
        positional = tuple(arg_names[:pos_count])
        keyword_only_count = func_code.co_kwonlyargcount
        keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
        annotations = func.__annotations__
        defaults = func.__defaults__
        kwdefaults = func.__kwdefaults__
        if defaults:
            pos_default_count = len(defaults)
        else:
            pos_default_count = 0
        parameters = []
        # Non-keyword-only parameters w/o defaults.
        non_default_count = pos_count - pos_default_count
        for name in positional[:non_default_count]:
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD))
        # ... w/ defaults.
        for offset, name in enumerate(positional[non_default_count:]):
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD,
                                        default=defaults[offset]))
        # *args (CO_VARARGS flag)
        if func_code.co_flags & 0x04:
            name = arg_names[pos_count + keyword_only_count]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_POSITIONAL))
        # Keyword-only parameters.
        for name in keyword_only:
            default = _empty
            if kwdefaults is not None:
                default = kwdefaults.get(name, _empty)
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_KEYWORD_ONLY,
                                        default=default))
        # **kwargs (CO_VARKEYWORDS flag)
        if func_code.co_flags & 0x08:
            index = pos_count + keyword_only_count
            if func_code.co_flags & 0x04:
                index += 1
            name = arg_names[index]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_KEYWORD))
        return cls(parameters,
                   return_annotation=annotations.get('return', _empty),
                   __validate_parameters__=False)
    @property
    def parameters(self):
        return self._parameters
    @property
    def return_annotation(self):
        return self._return_annotation
    def replace(self, *, parameters=_void, return_annotation=_void):
        '''Creates a customized copy of the Signature.
        Pass 'parameters' and/or 'return_annotation' arguments
        to override them in the new copy.
        '''
        if parameters is _void:
            parameters = self.parameters.values()
        if return_annotation is _void:
            return_annotation = self._return_annotation
        return type(self)(parameters,
                          return_annotation=return_annotation)
    def __eq__(self, other):
        if (not issubclass(type(other), Signature) or
                    self.return_annotation != other.return_annotation or
                    len(self.parameters) != len(other.parameters)):
            return False
        other_positions = {param: idx
                           for idx, param in enumerate(other.parameters.keys())}
        for idx, (param_name, param) in enumerate(self.parameters.items()):
            if param.kind == _KEYWORD_ONLY:
                # Keyword-only parameters may appear in any relative order.
                try:
                    other_param = other.parameters[param_name]
                except KeyError:
                    return False
                else:
                    if param != other_param:
                        return False
            else:
                # Positional parameters must match by position as well.
                try:
                    other_idx = other_positions[param_name]
                except KeyError:
                    return False
                else:
                    if (idx != other_idx or
                                    param != other.parameters[param_name]):
                        return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
    def _bind(self, args, kwargs, *, partial=False):
        '''Private method.  Don't use directly.

        Binds `args` and `kwargs` to the signature's parameters in two
        phases: first positional arguments against positional parameters,
        then the remaining parameters against `kwargs`.  With partial=True
        missing required arguments are tolerated (bind_partial).'''
        arguments = OrderedDict()
        parameters = iter(self.parameters.values())
        parameters_ex = ()
        arg_vals = iter(args)
        if partial:
            # Support for binding arguments to 'functools.partial' objects.
            # See 'functools.partial' case in 'signature()' implementation
            # for details.
            for param_name, param in self.parameters.items():
                if (param._partial_kwarg and param_name not in kwargs):
                    # Simulating 'functools.partial' behavior
                    kwargs[param_name] = param.default
        while True:
            # Let's iterate through the positional arguments and corresponding
            # parameters
            try:
                arg_val = next(arg_vals)
            except StopIteration:
                # No more positional arguments
                try:
                    param = next(parameters)
                except StopIteration:
                    # No more parameters. That's it. Just need to check that
                    # we have no `kwargs` after this while loop
                    break
                else:
                    if param.kind == _VAR_POSITIONAL:
                        # That's OK, just empty *args. Let's start parsing
                        # kwargs
                        break
                    elif param.name in kwargs:
                        if param.kind == _POSITIONAL_ONLY:
                            msg = '{arg!r} parameter is positional only, ' \
                                  'but was passed as a keyword'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
                        parameters_ex = (param,)
                        break
                    elif (param.kind == _VAR_KEYWORD or
                                        param.default is not _empty):
                        # That's fine too - we have a default value for this
                        # parameter. So, lets start parsing `kwargs`, starting
                        # with the current parameter
                        parameters_ex = (param,)
                        break
                    else:
                        if partial:
                            parameters_ex = (param,)
                            break
                        else:
                            msg = '{arg!r} parameter lacking default value'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
            else:
                # We have a positional argument to process
                try:
                    param = next(parameters)
                except StopIteration:
                    raise TypeError('too many positional arguments') from None
                else:
                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                        # Looks like we have no parameter for this positional
                        # argument
                        raise TypeError('too many positional arguments')
                    if param.kind == _VAR_POSITIONAL:
                        # We have an '*args'-like argument, let's fill it with
                        # all positional arguments we have left and move on to
                        # the next phase
                        values = [arg_val]
                        values.extend(arg_vals)
                        arguments[param.name] = tuple(values)
                        break
                    if param.name in kwargs:
                        raise TypeError('multiple values for argument '
                                        '{arg!r}'.format(arg=param.name))
                    arguments[param.name] = arg_val
        # Now, we iterate through the remaining parameters to process
        # keyword arguments
        kwargs_param = None
        for param in itertools.chain(parameters_ex, parameters):
            if param.kind == _POSITIONAL_ONLY:
                # This should never happen in case of a properly built
                # Signature object (but let's have this check here
                # to ensure correct behaviour just in case)
                raise TypeError('{arg!r} parameter is positional only, '
                                'but was passed as a keyword'. \
                                format(arg=param.name))
            if param.kind == _VAR_KEYWORD:
                # Memorize that we have a '**kwargs'-like parameter
                kwargs_param = param
                continue
            param_name = param.name
            try:
                arg_val = kwargs.pop(param_name)
            except KeyError:
                # We have no value for this parameter.  It's fine though,
                # if it has a default value, or it is an '*args'-like
                # parameter, left alone by the processing of positional
                # arguments.
                if (not partial and param.kind != _VAR_POSITIONAL and
                                                param.default is _empty):
                    raise TypeError('{arg!r} parameter lacking default value'. \
                                    format(arg=param_name)) from None
            else:
                arguments[param_name] = arg_val
        if kwargs:
            if kwargs_param is not None:
                # Process our '**kwargs'-like parameter
                arguments[kwargs_param.name] = kwargs
            else:
                raise TypeError('too many keyword arguments')
        return self._bound_arguments_cls(self, arguments)
    def bind(__bind_self, *args, **kwargs):
        '''Get a BoundArguments object, that maps the passed `args`
        and `kwargs` to the function's signature.  Raises `TypeError`
        if the passed arguments can not be bound.
        '''
        return __bind_self._bind(args, kwargs)
    def bind_partial(__bind_self, *args, **kwargs):
        '''Get a BoundArguments object, that partially maps the
        passed `args` and `kwargs` to the function's signature.
        Raises `TypeError` if the passed arguments can not be bound.
        '''
        return __bind_self._bind(args, kwargs, partial=True)
    def __str__(self):
        result = []
        render_kw_only_separator = True
        for idx, param in enumerate(self.parameters.values()):
            formatted = str(param)
            kind = param.kind
            if kind == _VAR_POSITIONAL:
                # OK, we have an '*args'-like parameter, so we won't need
                # a '*' to separate keyword-only arguments
                render_kw_only_separator = False
            elif kind == _KEYWORD_ONLY and render_kw_only_separator:
                # We have a keyword-only parameter to render and we haven't
                # rendered an '*args'-like parameter before, so add a '*'
                # separator to the parameters list ("foo(arg1, *, arg2)" case)
                result.append('*')
                # This condition should be only triggered once, so
                # reset the flag
                render_kw_only_separator = False
            result.append(formatted)
        rendered = '({})'.format(', '.join(result))
        if self.return_annotation is not _empty:
            anno = formatannotation(self.return_annotation)
            rendered += ' -> {}'.format(anno)
        return rendered
| lgpl-3.0 |
jozz68x/PyBrowser | src/browser.py | 1 | 7502 | #!/usr/bin/python
__author__="Jose Diaz - email: jozz.18x@gmail.com"
__date__ ="$04/03/2015 05:55:19 AM$"
from homepage import Homepage
from tkinter import *
from tkinter import ttk
from PIL import Image, ImageTk
from urllib.request import urlopen # Interaccion con la web
from urllib.error import HTTPError,URLError # Excepciones
class GuiBrowser(ttk.Frame):
MENUBUTTON = dict(relief=FLAT, bd=0, width=30, height=30,
font=("Arial", 11), activebackground="#d6d9db",
cursor="hand2")
ENTRY = dict(relief=FLAT, bd=1, font=("Arial", 11), width=50,
highlightbackground="#acb1b4", highlightcolor="#549beb",
highlightthickness=1)
TEXT = dict(font=("Arial",10), cursor='arrow', state='normal',
autoseparators=5, spacing1=5, wrap=WORD)
FONT = ("Arial", 11)
    def __init__(self, master):
        """Constructor: build the browser UI inside *master*.

        Loads the image assets, initialises the Tk variables, then
        creates the navigation bar, a separator line, the browsing
        area and the details area, and lays them out vertically."""
        super().__init__(master)
        self.cargar_imagenes()
        self.variables_declarados()
        # Create all the widgets.
        self.barra_navegacion = self.crear_barra_navegacion()
        self.separador = Frame(self, bg="#8a9398", height=1)
        self.area_navegacion = self.crear_area_navegacion()
        self.area_detalles = self.crear_area_detalles()
        # Lay out the widgets.
        self.barra_navegacion.pack(side=TOP, fill=X, expand=True)
        self.separador.pack(side=TOP, fill=X)
        self.area_navegacion.pack(side=TOP, fill=BOTH, expand=True)
        self.area_detalles.pack(side=BOTTOM, fill=X, expand=True)
def cargar_imagenes(self):
    """Load every image used by this GUI and keep the PhotoImage
    references on self (Tk drops images that get garbage-collected).

    Bug fix: the original used raw Windows paths (r"images\\...") which
    fail on POSIX systems; relative forward-slash paths work on every
    platform, Windows included.
    """
    imenu_option = Image.open("images/menu_option.png")
    self.imagenMenuOptions = ImageTk.PhotoImage(imenu_option)
    ihome = Image.open("images/home.png")
    self.imagenHome = ImageTk.PhotoImage(ihome)
    iprueba = Image.open("images/icon_prueba.png")
    self.imagenPrueba = ImageTk.PhotoImage(iprueba)
def variables_declarados(self):
    """Create the Tk variables shared across the UI and seed the URL
    entry with a default page."""
    url_var = StringVar()
    url_var.set("https://www.python.org/")
    self.var_entry_search_url = url_var
def crear_barra_navegacion(self):
    """Build the navigation bar (browser header): home button, "URL"
    label, URL entry and options menu button. Returns the containing
    Frame."""
    barra_browser = Frame(self)
    # Create the child widgets.
    btn_home = Menubutton(barra_browser, image=self.imagenHome, bg=barra_browser['bg'],
                          **self.MENUBUTTON)
    lb_url = Label(barra_browser, text= 'URL: ', font=(self.FONT[0],10,"bold"),
                   bg=barra_browser['bg'])
    entry = Entry(barra_browser, textvariable=self.var_entry_search_url,
                  **self.ENTRY)
    btn_menu = Menubutton(barra_browser, image=self.imagenMenuOptions, bg=barra_browser['bg'],
                          **self.MENUBUTTON)
    # Lay the widgets out left-to-right; the entry absorbs extra width.
    btn_home.pack(side=LEFT, padx=5, pady=5)
    lb_url.pack(side=LEFT)
    entry.pack(side=LEFT, fill=X, expand=True, pady=5)
    btn_menu.pack(side=RIGHT, padx=5, pady=5)
    # Wire up the widget events.
    btn_home .bind("<Button-1>", lambda e: self.homepage())
    entry.bind("<Return>", lambda e: self.search_url())
    # Return the containing Frame.
    return barra_browser
def crear_area_navegacion(self):
    """Build the main navigation (content) area: a read-only Text widget
    plus a vertical scrollbar, returned inside one Frame."""
    contenedor = Frame(self)
    # The Text widget is kept on self so other methods can write to it.
    self.text = Text(contenedor, **self.TEXT)
    barra_scroll = ttk.Scrollbar(contenedor, command=self.text.yview)
    self.text.config(yscrollcommand=barra_scroll.set)
    # Scrollbar on the right, text filling the rest.
    barra_scroll.pack(side=RIGHT, fill=Y)
    self.text.pack(fill=BOTH, expand=True)
    # Read-only until a page load temporarily re-enables it.
    self.text.configure(state="disabled")
    return contenedor
def crear_area_detalles(self):
    """Build the details area used for response headers: a Text widget
    plus a vertical scrollbar, returned inside one Frame."""
    contenedor = Frame(self)
    # Kept on self so search_url() can write the headers into it.
    self.text_detalles = Text(contenedor, **self.TEXT)
    barra_scroll = ttk.Scrollbar(contenedor, command=self.text_detalles.yview)
    self.text_detalles.config(yscrollcommand=barra_scroll.set)
    # Scrollbar on the right, text filling the rest.
    barra_scroll.pack(side=RIGHT, fill=Y)
    self.text_detalles.pack(fill=BOTH, expand=True)
    return contenedor
def homepage(self):
    """Show the start page by embedding a Homepage widget (project-local
    class) into the main text area."""
    homepage = Homepage(self.text)
    homepage.pack(fill=BOTH, expand=True)
def search_url(self):
    """Fetch the URL typed into the entry and display the result.

    Writes the page body into the main text area, the response headers
    into the details area, and status/error text into the status bar
    via message_estado().

    Bug fixes:
    * HTTPError is a subclass of URLError, so catching URLError first
      made the HTTPError handler unreachable; the order is now
      HTTPError before URLError.
    * The error messages were built as tuples ("URL Error:", reason,
      url), which displayed as a tuple repr; they are now formatted
      strings.
    """
    self.text.configure(state="normal")
    try:
        url = self.var_entry_search_url.get()
        if url in ("", "https://"):
            # Nothing useful typed yet.
            self.message_estado("Ingrese url de una pagina web.")
        else:
            try:
                # Show the page body in the main navigation area.
                self.message_estado("Leyendo archivos...")
                self.text.delete(1.0, END)
                data = urlopen(url)
                self.text.insert(INSERT, data.read())
                # Show the final (possibly redirected) URL in the status bar.
                self.message_estado(data.geturl())
                # Show the response headers in the details area.
                self.text_detalles.configure(state="normal")
                self.text_detalles.delete(1.0, END)
                self.text_detalles.insert(INSERT, data.info())
                self.text_detalles.configure(state="disabled")
                data.close()
            except HTTPError as e:
                # Must come before URLError (HTTPError subclasses it).
                self.message_estado("HTTP Error: {} {}".format(e.code, url))
            except URLError as e:
                self.message_estado("URL Error: {} {}".format(e.reason, url))
    except ValueError:
        self.message_estado("Ingrese url valida: Error digitacion: '%s'"
                            % self.var_entry_search_url.get())
    self.text.configure(state="disabled")
def message_estado(self, text):
    """Flash *text* as a status message anchored to the bottom-left of
    the window; the message removes itself after two seconds."""
    aviso = Message(self, text=text, bg='#c6dedd', font=("Arial", 8), width=1400)
    aviso.place(in_=self, relx=0, rely=1, x=0, y=0,
                anchor="sw", bordermode="outside")
    # Self-destruct after 2000 ms.
    aviso.after(2000, aviso.destroy)
| gpl-3.0 |
daniel-kurushin/iisu | biu/khc.py | 1 | 6304 | import sys
from struct import pack, unpack
from time import sleep
class KHC(object):
    """Serial-protocol driver for the KHC chassis controller: throttle,
    reverse, brakes, wheel encoders and range finders."""

    NAME = 'KHC'

    # Command frames: b'\xae\xae' preamble, address, 0x00, opcode,
    # length byte, 0x00 terminator.
    cmd_inc_engine = b'\xae\xae\x01\x00\x01\x08\x00'     # raise throttle one step and confirm the result
    cmd_dec_engine = b'\xae\xae\x01\x00\x02\x08\x00'     # lower throttle one step and confirm the result
    cmd_stop_engine = b'\xae\xae\x01\x00\x07\x07\x00'    # full stop
    cmd_get_distances = b'\xae\xae\x01\x00\x08\x07\x00'  # read the range-finder distances
    cmd_get_encoders = b'\xae\xae\x01\x00\x09\x07\x00'   # read the wheel encoders
    cmd_reverse = b'\xae\xae\x01\x00\x0a\x08\x00'        # toggle reverse on/off
    cmd_brakes = b'\xae\xae\x01\x00\x11\x0a\x00'         # brakes; payload flag bytes:
    #                                                      | | +--- right: 0 - off, 1 - on
    #                                                      | +----- left
    #                                                      +------- front
    cmd_get_state = b'\xae\xae\x01\x00\xff\x07\x00'      # read the KHC state; reply fields:
    # currentAccelPos - throttle position
    # is_frw_brake    ff - front brake on,  00 - off
    # is_lgt_brake    ff - left brake on,   00 - off
    # is_rgt_brake    ff - right brake on,  00 - off
    # is_reverse      ff - reverse engaged, 00 - off
    # enc_sec         - encoder ticks per second
    # enc_min         - encoder ticks per minute

    currentAccelPos = 0  # host-side mirror of the throttle position
def parse_distances(self, x):
    """Decode a 7-byte range-finder frame into per-sensor distances.

    Bytes 3..6 are rear/left/front/right raw readings; each is scaled
    by 128 / 58 / 100 (same expression order as the firmware expects).
    """
    def _metres(raw):
        return int(unpack('<B', raw)[0]) * 128.0 / 58.0 / 100.0

    return {
        'ok': True,
        'rear': _metres(x[3:4]),
        'left': _metres(x[4:5]),
        'front': _metres(x[5:6]),
        'right': _metres(x[6:7]),
    }
def parse_engine(self, x):
    """Decode an engine acknowledgement frame; byte 3 holds the signed
    throttle position."""
    (pos,) = unpack('<b', x[3:4])
    return {'ok': True, 'currentAccelPos': int(pos)}
def parse_reverse(self, x):
    """Decode a reverse acknowledgement frame; byte 3 is the on/off flag."""
    (flag,) = unpack('<b', x[3:4])
    return {'ok': True, 'is_reverse': bool(flag)}
def parse_brakes(self, x):
    """Decode a brakes acknowledgement frame; bytes 3..5 are the
    front/left/right brake flags."""
    frw, lgt, rgt = (bool(unpack('<b', x[i:i + 1])[0]) for i in (3, 4, 5))
    return {
        'ok': True,
        'is_frw_brake': frw,
        'is_lgt_brake': lgt,
        'is_rgt_brake': rgt,
    }
def parse_encoders(self, x):
    """Placeholder decoder for encoder frames: the payload is ignored
    and a dummy dict is returned (real decoding not implemented yet)."""
    return {'a': 0}
def parse_state(self, x):
    """Decode a 10-byte state frame: throttle position, the four
    brake/reverse flags and the two encoder rates (bytes 3..9, all
    signed)."""
    fields = unpack('<7b', x[3:10])
    return {
        'ok': True,
        'currentAccelPos': int(fields[0]),
        'is_frw_brake': bool(fields[1]),
        'is_lgt_brake': bool(fields[2]),
        'is_rgt_brake': bool(fields[3]),
        'is_reverse': bool(fields[4]),
        'enc_sec': int(fields[5]),
        'enc_min': int(fields[6]),
    }
def inc_engine(self):
    """Send 'raise throttle one step', bump the host-side mirror and
    return the parsed engine acknowledgement."""
    cmd = self.cmd_inc_engine
    v = pack('>b', 1)  # step-size payload byte
    print('>>>', cmd, v, file = sys.stderr)
    self.port.write(cmd)
    self.port.write(v)
    ret = self.port.read(4)
    print('<<<', ret, file = sys.stderr)
    assert len(ret) == 4  # 4-byte acknowledgement expected
    self.currentAccelPos += 1
    return self.parse_engine(ret)
def dec_engine(self):
    """Send 'lower throttle one step', decrement the host-side mirror
    and return the parsed engine acknowledgement."""
    cmd = self.cmd_dec_engine
    v = pack('>b', 1)  # step-size payload byte
    print('>>>', cmd, v, file = sys.stderr)
    self.port.write(cmd)
    self.port.write(v)
    ret = self.port.read(4)
    print('<<<', ret, file = sys.stderr)
    assert len(ret) == 4  # 4-byte acknowledgement expected
    self.currentAccelPos -= 1
    return self.parse_engine(ret)
def gooo(self, req_acc_pos = 31, rgt_brk = 0, lgt_brk = 0):
    """Drive the throttle to the requested signed position.

    A negative req_acc_pos means reverse; rgt_brk/lgt_brk are passed
    through to brakes(). Returns a dict with ok (target reached),
    requiredAccelPos and the signed currentAccelPos actually read back.
    """
    backward_needed = req_acc_pos < 0
    acc_pos = abs(req_acc_pos)
    stop_needed = acc_pos == 0  # NOTE(review): computed but never used below
    self.state = self.get_state()
    self.brakes(rgt = rgt_brk, lgt = lgt_brk, frw = 0)
    # Toggle reverse only when the controller's flag disagrees with the
    # requested direction.
    if self.state['is_reverse'] != backward_needed and backward_needed:
        print(backward_needed, self.state['is_reverse'])
        self.reverse(1)
    if self.state['is_reverse'] != backward_needed and not backward_needed: self.reverse(0)
    # Re-read the state, then step the throttle one increment at a time
    # towards the (unsigned) target.
    self.state = self.get_state()
    D = int(acc_pos - self.state['currentAccelPos'])
    if D > 0: f = self.inc_engine
    else: f = self.dec_engine
    for i in range(abs(D)): f()
    _ = self.get_state()
    pos = _['currentAccelPos']
    if _['is_reverse']: pos = -1 * pos  # report reverse as a negative position
    return dict(
        ok = pos == req_acc_pos,
        requiredAccelPos = req_acc_pos,
        currentAccelPos = pos,
    )
def stop_engine(self):
    """Send the full-stop command, reset the host-side throttle mirror
    to zero and return the parsed engine acknowledgement."""
    cmd = self.cmd_stop_engine
    print('>>>', cmd, file = sys.stderr)
    self.port.write(cmd)
    ret = self.port.read(4)
    print('<<<', ret, file = sys.stderr)
    assert len(ret) == 4  # 4-byte acknowledgement expected
    self.currentAccelPos = 0
    return self.parse_engine(ret)
def reverse(self, v = 1):
    """Switch reverse on (v=1) or off (v=0) and return the parsed
    acknowledgement."""
    cmd = self.cmd_reverse
    v = pack('>b', v)  # encode the flag as one signed byte
    print('>>>', cmd, v, file = sys.stderr)
    self.port.write(cmd)
    self.port.write(v)
    ret = self.port.read(4)
    print('<<<', ret, file = sys.stderr)
    assert len(ret) == 4  # 4-byte acknowledgement expected
    return self.parse_reverse(ret)
def brakes(self, rgt = 0, lgt = 0, frw = 1):
    """Apply/release the three brakes (1 = on, 0 = off).

    Note the wire order of the payload bytes is front, right, left.
    Returns the parsed 6-byte acknowledgement.
    """
    cmd = self.cmd_brakes
    rgt = pack('>b', rgt)
    lgt = pack('>b', lgt)
    frw = pack('>b', frw)
    print('>>>', cmd, file = sys.stderr)
    self.port.write(cmd)
    self.port.write(frw)
    self.port.write(rgt)
    self.port.write(lgt)
    ret = self.port.read(6)
    print('<<<', ret, file = sys.stderr)
    assert len(ret) == 6  # 6-byte acknowledgement expected
    return self.parse_brakes(ret)
def get_encoders(self):
cmd = self.cmd_get_distances
print('>>>', cmd, file = sys.stderr)
self.port.write(cmd)
ret = self.port.read(7)
print('<<<', ret, file = sys.stderr)
assert len(ret) == 7
return self.parse_encoders(ret)
def get_state(self):
    """Query the full controller state and return the parsed 10-byte
    frame (throttle, brake/reverse flags, encoder rates)."""
    cmd = self.cmd_get_state
    print('>>>', cmd, file = sys.stderr)
    self.port.write(cmd)
    ret = self.port.read(10)
    print('<<<', ret, file = sys.stderr)
    assert len(ret) == 10  # 10-byte state frame expected
    return self.parse_state(ret)
def get_distances(self):
    """Query the range finders and return the parsed per-sensor
    distances."""
    cmd = self.cmd_get_distances
    print('>>>', cmd, file = sys.stderr)
    self.port.write(cmd)
    ret = self.port.read(7)
    print('<<<', ret, file = sys.stderr)
    assert len(ret) == 7  # 7-byte reply frame expected
    return self.parse_distances(ret)
def __init__(self, port = None):
if port != None:
self.port = port
else:
raise Exception('port is None')
self.state = self.get_state()
if __name__ == "__main__":
    # Hardware smoke test: requires the project-local biu module, which
    # supplies the serial transport.
    from biu import BIU
    khc = KHC(BIU())
    print(khc.get_distances())
    # print(khc.gooo(31))
    # sleep(6)
    # print(khc.gooo(-31))
    # sleep(6)
    print(khc.stop_engine())
| gpl-3.0 |
musically-ut/numpy | numpy/lib/tests/test_nanfunctions.py | 85 | 26868 | from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal,
assert_raises, assert_array_equal
)
# Test data shared by every nan-function test class below: a 4x6 float
# array with NaNs scattered through it.
_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],
                  [0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833],
                  [np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954],
                  [0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]])

# Rows of _ndat with nans removed (per-row reference data).
_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]),
         np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]),
         np.array([0.1042, -0.5954]),
         np.array([0.1610, 0.1859, 0.3146])]
class TestNanFunctions_MinMax(TestCase):
nanfuncs = [np.nanmin, np.nanmax]
stdfuncs = [np.min, np.max]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalars
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(np.nan)))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
def test_masked(self):
mat = np.ma.fix_invalid(_ndat)
msk = mat._mask.copy()
for f in [np.nanmin]:
res = f(mat, axis=1)
tgt = f(_ndat, axis=1)
assert_equal(res, tgt)
assert_equal(mat._mask, msk)
assert_(not np.isinf(mat).any())
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
# check that rows of nan are dealt with for subclasses (#4628)
mat[1] = np.nan
for f in self.nanfuncs:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(not np.any(np.isnan(res)))
assert_(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
and not np.isnan(res[2, 0]))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat)
assert_(np.isscalar(res))
assert_(res != np.nan)
assert_(len(w) == 0)
class TestNanFunctions_ArgminArgmax(TestCase):
    """Shared tests for np.nanargmin and np.nanargmax."""

    nanfuncs = [np.nanargmin, np.nanargmax]

    def test_mutation(self):
        # Check that passed array is not modified.
        ndat = _ndat.copy()
        for f in self.nanfuncs:
            f(ndat)
        assert_equal(ndat, _ndat)

    def test_result_values(self):
        # The returned index must point at the first non-nan extremum.
        for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]):
            for row in _ndat:
                with warnings.catch_warnings(record=True):
                    warnings.simplefilter('always')
                    ind = f(row)
                val = row[ind]
                # comparing with NaN is tricky as the result
                # is always false except for NaN != NaN
                assert_(not np.isnan(val))
                assert_(not fcmp(val, row).any())
                assert_(not np.equal(val, row[:ind]).any())

    def test_allnans(self):
        # An all-nan slice has no defined argmin/argmax.
        mat = np.array([np.nan]*9).reshape(3, 3)
        for f in self.nanfuncs:
            for axis in [None, 0, 1]:
                assert_raises(ValueError, f, mat, axis=axis)
            assert_raises(ValueError, f, np.nan)

    def test_empty(self):
        mat = np.zeros((0, 3))
        for f in self.nanfuncs:
            for axis in [0, None]:
                # Reducing over an empty axis is an error ...
                assert_raises(ValueError, f, mat, axis=axis)
            for axis in [1]:
                # ... but zero result rows are fine.
                res = f(mat, axis=axis)
                assert_equal(res, np.zeros(0))

    def test_scalar(self):
        for f in self.nanfuncs:
            assert_(f(0.) == 0.)

    def test_matrices(self):
        # Check that it works and that type and
        # shape are preserved
        mat = np.matrix(np.eye(3))
        for f in self.nanfuncs:
            res = f(mat, axis=0)
            assert_(isinstance(res, np.matrix))
            assert_(res.shape == (1, 3))
            res = f(mat, axis=1)
            assert_(isinstance(res, np.matrix))
            assert_(res.shape == (3, 1))
            res = f(mat)
            assert_(np.isscalar(res))
class TestNanFunctions_IntTypes(TestCase):
    """The nan-functions must match their plain counterparts on integer
    arrays (which cannot contain NaN) for every integer dtype.

    Bug fix: test_nanvar computed its ddof=1 target from the stale loop
    variable ``mat`` left over from the previous loop instead of
    ``self.mat`` (test_nanstd already did this correctly).
    """

    int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
                 np.uint16, np.uint32, np.uint64)

    mat = np.array([127, 39, 93, 87, 46])

    def integer_arrays(self):
        # Yield self.mat once per supported integer dtype.
        for dtype in self.int_types:
            yield self.mat.astype(dtype)

    def test_nanmin(self):
        tgt = np.min(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanmin(mat), tgt)

    def test_nanmax(self):
        tgt = np.max(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanmax(mat), tgt)

    def test_nanargmin(self):
        tgt = np.argmin(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanargmin(mat), tgt)

    def test_nanargmax(self):
        tgt = np.argmax(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanargmax(mat), tgt)

    def test_nansum(self):
        tgt = np.sum(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nansum(mat), tgt)

    def test_nanprod(self):
        tgt = np.prod(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanprod(mat), tgt)

    def test_nanmean(self):
        tgt = np.mean(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanmean(mat), tgt)

    def test_nanvar(self):
        tgt = np.var(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanvar(mat), tgt)

        # Use self.mat (not the stale loop variable) for the ddof target.
        tgt = np.var(self.mat, ddof=1)
        for mat in self.integer_arrays():
            assert_equal(np.nanvar(mat, ddof=1), tgt)

    def test_nanstd(self):
        tgt = np.std(self.mat)
        for mat in self.integer_arrays():
            assert_equal(np.nanstd(mat), tgt)

        tgt = np.std(self.mat, ddof=1)
        for mat in self.integer_arrays():
            assert_equal(np.nanstd(mat, ddof=1), tgt)
class SharedNanFunctionsTestsMixin(object):
    """Checks common to all nan-aggregates; mixed into the concrete
    TestCase classes, which supply ``nanfuncs``/``stdfuncs``."""

    def test_mutation(self):
        # Check that passed array is not modified.
        ndat = _ndat.copy()
        for f in self.nanfuncs:
            f(ndat)
        assert_equal(ndat, _ndat)

    def test_keepdims(self):
        mat = np.eye(3)
        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
            for axis in [None, 0, 1]:
                tgt = rf(mat, axis=axis, keepdims=True)
                res = nf(mat, axis=axis, keepdims=True)
                assert_(res.ndim == tgt.ndim)

    def test_out(self):
        mat = np.eye(3)
        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
            resout = np.zeros(3)
            tgt = rf(mat, axis=1)
            res = nf(mat, axis=1, out=resout)
            assert_almost_equal(res, resout)
            assert_almost_equal(res, tgt)

    def test_dtype_from_dtype(self):
        # dtype given as a np.dtype instance.
        mat = np.eye(3)
        codes = 'efdgFDG'
        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
            for c in codes:
                tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type
                res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type
                res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type
                assert_(res is tgt)

    def test_dtype_from_char(self):
        # dtype given as a one-character type code.
        mat = np.eye(3)
        codes = 'efdgFDG'
        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
            for c in codes:
                tgt = rf(mat, dtype=c, axis=1).dtype.type
                res = nf(mat, dtype=c, axis=1).dtype.type
                assert_(res is tgt)
                # scalar case
                tgt = rf(mat, dtype=c, axis=None).dtype.type
                res = nf(mat, dtype=c, axis=None).dtype.type
                assert_(res is tgt)

    def test_dtype_from_input(self):
        # Output dtype inferred from the input array's dtype.
        codes = 'efdgFDG'
        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
            for c in codes:
                mat = np.eye(3, dtype=c)
                tgt = rf(mat, axis=1).dtype.type
                res = nf(mat, axis=1).dtype.type
                assert_(res is tgt, "res %s, tgt %s" % (res, tgt))
                # scalar case
                tgt = rf(mat, axis=None).dtype.type
                res = nf(mat, axis=None).dtype.type
                assert_(res is tgt)

    def test_result_values(self):
        for nf, rf in zip(self.nanfuncs, self.stdfuncs):
            tgt = [rf(d) for d in _rdat]
            res = nf(_ndat, axis=1)
            assert_almost_equal(res, tgt)

    def test_scalar(self):
        for f in self.nanfuncs:
            assert_(f(0.) == 0.)

    def test_matrices(self):
        # Check that it works and that type and
        # shape are preserved
        mat = np.matrix(np.eye(3))
        for f in self.nanfuncs:
            res = f(mat, axis=0)
            assert_(isinstance(res, np.matrix))
            assert_(res.shape == (1, 3))
            res = f(mat, axis=1)
            assert_(isinstance(res, np.matrix))
            assert_(res.shape == (3, 1))
            res = f(mat)
            assert_(np.isscalar(res))
class TestNanFunctions_SumProd(TestCase, SharedNanFunctionsTestsMixin):
    """Tests specific to nansum/nanprod; common checks come from
    SharedNanFunctionsTestsMixin."""

    nanfuncs = [np.nansum, np.nanprod]
    stdfuncs = [np.sum, np.prod]

    def test_allnans(self):
        """All-nan input returns the identity (0) without warning."""
        # Check for FutureWarning
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            res = np.nansum([np.nan]*3, axis=None)
            assert_(res == 0, 'result is not 0')
            assert_(len(w) == 0, 'warning raised')
            # Check scalar
            res = np.nansum(np.nan)
            assert_(res == 0, 'result is not 0')
            assert_(len(w) == 0, 'warning raised')
            # Check there is no warning for not all-nan
            np.nansum([0]*3, axis=None)
            assert_(len(w) == 0, 'unwanted warning raised')

    def test_empty(self):
        """Empty reductions return the identity: 0 for sum, 1 for prod."""
        for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]):
            mat = np.zeros((0, 3))
            tgt = [tgt_value]*3
            res = f(mat, axis=0)
            assert_equal(res, tgt)
            tgt = []
            res = f(mat, axis=1)
            assert_equal(res, tgt)
            tgt = tgt_value
            res = f(mat, axis=None)
            assert_equal(res, tgt)
class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin):
    """Tests specific to nanmean/nanvar/nanstd; common checks come from
    SharedNanFunctionsTestsMixin.

    Bug fix: test_dtype_error ignored its loop variable and passed
    ``dtype=np.int`` on every iteration, so the bool and object dtypes
    were never exercised; it now passes ``dtype=dtype``.
    """

    nanfuncs = [np.nanmean, np.nanvar, np.nanstd]
    stdfuncs = [np.mean, np.var, np.std]

    def test_dtype_error(self):
        # Non-inexact dtypes are invalid for these reductions.
        for f in self.nanfuncs:
            for dtype in [np.bool_, np.int_, np.object]:
                assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)

    def test_out_dtype_error(self):
        # Likewise for non-inexact output arrays.
        for f in self.nanfuncs:
            for dtype in [np.bool_, np.int_, np.object]:
                out = np.empty(_ndat.shape[0], dtype=dtype)
                assert_raises(TypeError, f, _ndat, axis=1, out=out)

    def test_ddof(self):
        nanfuncs = [np.nanvar, np.nanstd]
        stdfuncs = [np.var, np.std]
        for nf, rf in zip(nanfuncs, stdfuncs):
            for ddof in [0, 1]:
                tgt = [rf(d, ddof=ddof) for d in _rdat]
                res = nf(_ndat, axis=1, ddof=ddof)
                assert_almost_equal(res, tgt)

    def test_ddof_too_big(self):
        # ddof >= number of non-nan elements yields nan plus a warning.
        nanfuncs = [np.nanvar, np.nanstd]
        stdfuncs = [np.var, np.std]
        dsize = [len(d) for d in _rdat]
        for nf, rf in zip(nanfuncs, stdfuncs):
            for ddof in range(5):
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    tgt = [ddof >= d for d in dsize]
                    res = nf(_ndat, axis=1, ddof=ddof)
                    assert_equal(np.isnan(res), tgt)
                    if any(tgt):
                        assert_(len(w) == 1)
                        assert_(issubclass(w[0].category, RuntimeWarning))
                    else:
                        assert_(len(w) == 0)

    def test_allnans(self):
        mat = np.array([np.nan]*9).reshape(3, 3)
        for f in self.nanfuncs:
            for axis in [None, 0, 1]:
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    assert_(np.isnan(f(mat, axis=axis)).all())
                    assert_(len(w) == 1)
                    assert_(issubclass(w[0].category, RuntimeWarning))
                    # Check scalar
                    assert_(np.isnan(f(np.nan)))
                    assert_(len(w) == 2)
                    assert_(issubclass(w[0].category, RuntimeWarning))

    def test_empty(self):
        mat = np.zeros((0, 3))
        for f in self.nanfuncs:
            for axis in [0, None]:
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    assert_(np.isnan(f(mat, axis=axis)).all())
                    assert_(len(w) == 1)
                    assert_(issubclass(w[0].category, RuntimeWarning))
            for axis in [1]:
                with warnings.catch_warnings(record=True) as w:
                    warnings.simplefilter('always')
                    assert_equal(f(mat, axis=axis), np.zeros([]))
                    assert_(len(w) == 0)
class TestNanFunctions_Median(TestCase):
    """Tests for np.nanmedian against np.median."""

    def test_mutation(self):
        # Check that passed array is not modified.
        ndat = _ndat.copy()
        np.nanmedian(ndat)
        assert_equal(ndat, _ndat)

    def test_keepdims(self):
        mat = np.eye(3)
        for axis in [None, 0, 1]:
            tgt = np.median(mat, axis=axis, out=None, overwrite_input=False)
            res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False)
            assert_(res.ndim == tgt.ndim)

        d = np.ones((3, 5, 7, 11))
        # Randomly set some elements to NaN:
        w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
        w = w.astype(np.intp)
        d[tuple(w)] = np.nan
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always', RuntimeWarning)
            # keepdims must retain one size-1 dim per reduced axis.
            res = np.nanmedian(d, axis=None, keepdims=True)
            assert_equal(res.shape, (1, 1, 1, 1))
            res = np.nanmedian(d, axis=(0, 1), keepdims=True)
            assert_equal(res.shape, (1, 1, 7, 11))
            res = np.nanmedian(d, axis=(0, 3), keepdims=True)
            assert_equal(res.shape, (1, 5, 7, 1))
            res = np.nanmedian(d, axis=(1,), keepdims=True)
            assert_equal(res.shape, (3, 1, 7, 11))
            res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True)
            assert_equal(res.shape, (1, 1, 1, 1))
            res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True)
            assert_equal(res.shape, (1, 1, 7, 1))

    def test_out(self):
        mat = np.random.rand(3, 3)
        nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
        resout = np.zeros(3)
        tgt = np.median(mat, axis=1)
        res = np.nanmedian(nan_mat, axis=1, out=resout)
        assert_almost_equal(res, resout)
        assert_almost_equal(res, tgt)
        # 0-d output:
        resout = np.zeros(())
        tgt = np.median(mat, axis=None)
        res = np.nanmedian(nan_mat, axis=None, out=resout)
        assert_almost_equal(res, resout)
        assert_almost_equal(res, tgt)
        res = np.nanmedian(nan_mat, axis=(0, 1), out=resout)
        assert_almost_equal(res, resout)
        assert_almost_equal(res, tgt)

    def test_small_large(self):
        # test the small and large code paths, current cutoff 400 elements
        for s in [5, 20, 51, 200, 1000]:
            d = np.random.randn(4, s)
            # Randomly set some elements to NaN:
            w = np.random.randint(0, d.size, size=d.size // 5)
            d.ravel()[w] = np.nan
            d[:,0] = 1.  # ensure at least one good value
            # use normal median without nans to compare
            tgt = []
            for x in d:
                nonan = np.compress(~np.isnan(x), x)
                tgt.append(np.median(nonan, overwrite_input=True))

            assert_array_equal(np.nanmedian(d, axis=-1), tgt)

    def test_result_values(self):
        tgt = [np.median(d) for d in _rdat]
        res = np.nanmedian(_ndat, axis=1)
        assert_almost_equal(res, tgt)

    def test_allnans(self):
        # All-nan slices return nan; one warning per all-nan slice.
        mat = np.array([np.nan]*9).reshape(3, 3)
        for axis in [None, 0, 1]:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
                if axis is None:
                    assert_(len(w) == 1)
                else:
                    assert_(len(w) == 3)
                assert_(issubclass(w[0].category, RuntimeWarning))
                # Check scalar
                assert_(np.isnan(np.nanmedian(np.nan)))
                if axis is None:
                    assert_(len(w) == 2)
                else:
                    assert_(len(w) == 4)
                assert_(issubclass(w[0].category, RuntimeWarning))

    def test_empty(self):
        mat = np.zeros((0, 3))
        for axis in [0, None]:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
                assert_(len(w) == 1)
                assert_(issubclass(w[0].category, RuntimeWarning))
        for axis in [1]:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                assert_equal(np.nanmedian(mat, axis=axis), np.zeros([]))
                assert_(len(w) == 0)

    def test_scalar(self):
        assert_(np.nanmedian(0.) == 0.)

    def test_extended_axis_invalid(self):
        d = np.ones((3, 5, 7, 11))
        assert_raises(IndexError, np.nanmedian, d, axis=-5)
        assert_raises(IndexError, np.nanmedian, d, axis=(0, -5))
        assert_raises(IndexError, np.nanmedian, d, axis=4)
        assert_raises(IndexError, np.nanmedian, d, axis=(0, 4))
        assert_raises(ValueError, np.nanmedian, d, axis=(1, 1))

    def test_float_special(self):
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('ignore', RuntimeWarning)
            a = np.array([[np.inf, np.nan], [np.nan, np.nan]])
            assert_equal(np.nanmedian(a, axis=0), [np.inf, np.nan])
            assert_equal(np.nanmedian(a, axis=1), [np.inf, np.nan])
            assert_equal(np.nanmedian(a), np.inf)

            # minimum fill value check
            a = np.array([[np.nan, np.nan, np.inf], [np.nan, np.nan, np.inf]])
            assert_equal(np.nanmedian(a, axis=1), np.inf)

            # no mask path
            a = np.array([[np.inf, np.inf], [np.inf, np.inf]])
            assert_equal(np.nanmedian(a, axis=1), np.inf)
class TestNanFunctions_Percentile(TestCase):
    """Tests for np.nanpercentile against np.percentile."""

    def test_mutation(self):
        # Check that passed array is not modified.
        ndat = _ndat.copy()
        np.nanpercentile(ndat, 30)
        assert_equal(ndat, _ndat)

    def test_keepdims(self):
        mat = np.eye(3)
        for axis in [None, 0, 1]:
            tgt = np.percentile(mat, 70, axis=axis, out=None,
                                overwrite_input=False)
            res = np.nanpercentile(mat, 70, axis=axis, out=None,
                                   overwrite_input=False)
            assert_(res.ndim == tgt.ndim)

        d = np.ones((3, 5, 7, 11))
        # Randomly set some elements to NaN:
        w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
        w = w.astype(np.intp)
        d[tuple(w)] = np.nan
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always', RuntimeWarning)
            # keepdims must retain one size-1 dim per reduced axis.
            res = np.nanpercentile(d, 90, axis=None, keepdims=True)
            assert_equal(res.shape, (1, 1, 1, 1))
            res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True)
            assert_equal(res.shape, (1, 1, 7, 11))
            res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True)
            assert_equal(res.shape, (1, 5, 7, 1))
            res = np.nanpercentile(d, 90, axis=(1,), keepdims=True)
            assert_equal(res.shape, (3, 1, 7, 11))
            res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True)
            assert_equal(res.shape, (1, 1, 1, 1))
            res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True)
            assert_equal(res.shape, (1, 1, 7, 1))

    def test_out(self):
        mat = np.random.rand(3, 3)
        nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
        resout = np.zeros(3)
        tgt = np.percentile(mat, 42, axis=1)
        res = np.nanpercentile(nan_mat, 42, axis=1, out=resout)
        assert_almost_equal(res, resout)
        assert_almost_equal(res, tgt)
        # 0-d output:
        resout = np.zeros(())
        tgt = np.percentile(mat, 42, axis=None)
        res = np.nanpercentile(nan_mat, 42, axis=None, out=resout)
        assert_almost_equal(res, resout)
        assert_almost_equal(res, tgt)
        res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout)
        assert_almost_equal(res, resout)
        assert_almost_equal(res, tgt)

    def test_result_values(self):
        tgt = [np.percentile(d, 28) for d in _rdat]
        res = np.nanpercentile(_ndat, 28, axis=1)
        assert_almost_equal(res, tgt)
        # Multiple percentiles at once.
        tgt = [np.percentile(d, (28, 98)) for d in _rdat]
        res = np.nanpercentile(_ndat, (28, 98), axis=1)
        assert_almost_equal(res, tgt)

    def test_allnans(self):
        # All-nan slices return nan; one warning per all-nan slice.
        mat = np.array([np.nan]*9).reshape(3, 3)
        for axis in [None, 0, 1]:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                assert_(np.isnan(np.nanpercentile(mat, 60, axis=axis)).all())
                if axis is None:
                    assert_(len(w) == 1)
                else:
                    assert_(len(w) == 3)
                assert_(issubclass(w[0].category, RuntimeWarning))
                # Check scalar
                assert_(np.isnan(np.nanpercentile(np.nan, 60)))
                if axis is None:
                    assert_(len(w) == 2)
                else:
                    assert_(len(w) == 4)
                assert_(issubclass(w[0].category, RuntimeWarning))

    def test_empty(self):
        mat = np.zeros((0, 3))
        for axis in [0, None]:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all())
                assert_(len(w) == 1)
                assert_(issubclass(w[0].category, RuntimeWarning))
        for axis in [1]:
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always')
                assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([]))
                assert_(len(w) == 0)

    def test_scalar(self):
        assert_(np.nanpercentile(0., 100) == 0.)

    def test_extended_axis_invalid(self):
        d = np.ones((3, 5, 7, 11))
        assert_raises(IndexError, np.nanpercentile, d, q=5, axis=-5)
        assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, -5))
        assert_raises(IndexError, np.nanpercentile, d, q=5, axis=4)
        assert_raises(IndexError, np.nanpercentile, d, q=5, axis=(0, 4))
        assert_raises(ValueError, np.nanpercentile, d, q=5, axis=(1, 1))
if __name__ == "__main__":
    # Allow running this test file directly.
    run_module_suite()
| bsd-3-clause |
seanli9jan/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io_test.py | 25 | 7883 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
    """Build a small aligned (features, target) pair: a 4-row DataFrame
    with columns 'a' (0..3) and 'b' (32..35), and a Series (-32..-29),
    all sharing the index 100..103."""
    idx = np.arange(100, 104)
    frame = pd.DataFrame({'a': np.arange(4), 'b': np.arange(32, 36)},
                         index=idx)
    target = pd.Series(np.arange(-32, -28), index=idx)
    return frame, target
def callInputFnOnce(self, input_fn, session):
    """Run *input_fn* once under queue runners and return the fetched
    values; the coordinator is stopped and joined before returning."""
    results = input_fn()
    coord = coordinator.Coordinator()
    threads = queue_runner_impl.start_queue_runners(session, coord=coord)
    result_values = session.run(results)
    coord.request_stop()
    coord.join(threads)
    return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.cached_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.cached_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
# Allow running this test file directly with python.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
sometallgit/AutoUploader | Python27/Lib/hashlib.py | 51 | 7842 | # $Id$
#
# Copyright (C) 2005 Gregory P. Smith (greg@krypto.org)
# Licensed to PSF under a Contributor Agreement.
#
__doc__ = """hashlib module - A common interface to many hash functions.
new(name, string='') - returns a new hash object implementing the
given hash function; initializing the hash
using the given string data.
Named constructor functions are also available, these are much faster
than using new():
md5(), sha1(), sha224(), sha256(), sha384(), and sha512()
More algorithms may be available on your platform but the above are guaranteed
to exist. See the algorithms_guaranteed and algorithms_available attributes
to find out what algorithm names can be passed to new().
NOTE: If you want the adler32 or crc32 hash functions they are available in
the zlib module.
Choose your hash function wisely. Some have known collision weaknesses.
sha384 and sha512 will be slow on 32 bit platforms.
Hash objects have these methods:
- update(arg): Update the hash object with the string arg. Repeated calls
are equivalent to a single call with the concatenation of all
the arguments.
- digest(): Return the digest of the strings passed to the update() method
so far. This may contain non-ASCII characters, including
NUL bytes.
- hexdigest(): Like digest() except the digest is returned as a string of
double length, containing only hexadecimal digits.
- copy(): Return a copy (clone) of the hash object. This can be used to
efficiently compute the digests of strings that share a common
initial substring.
For example, to obtain the digest of the string 'Nobody inspects the
spammish repetition':
>>> import hashlib
>>> m = hashlib.md5()
>>> m.update("Nobody inspects")
>>> m.update(" the spammish repetition")
>>> m.digest()
'\\xbbd\\x9c\\x83\\xdd\\x1e\\xa5\\xc9\\xd9\\xde\\xc9\\xa1\\x8d\\xf0\\xff\\xe9'
More condensed:
>>> hashlib.sha224("Nobody inspects the spammish repetition").hexdigest()
'a4337bc45a8fc544c03f52dc550cd6e1e87021bc896588bd79e901e2'
"""
# This tuple and __get_builtin_constructor() must be modified if a new
# always available algorithm is added.
__always_supported = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')

# Names guaranteed to work on every platform (builtin fallbacks exist).
algorithms_guaranteed = set(__always_supported)
# Superset of the guaranteed names; extended below with whatever the
# OpenSSL-backed _hashlib module additionally provides.
algorithms_available = set(__always_supported)
# Legacy alias retained alongside the *_guaranteed/*_available sets.
algorithms = __always_supported

__all__ = __always_supported + ('new', 'algorithms_guaranteed',
                                'algorithms_available', 'algorithms',
                                'pbkdf2_hmac')
def __get_builtin_constructor(name):
    """Return a constructor for *name* from the builtin extension modules.

    Only the exact upper/lower-case spellings are recognized.  Raises
    ValueError when the name is unknown or its extension module cannot
    be imported.
    """
    try:
        if name in ('SHA1', 'sha1'):
            import _sha
            return _sha.new
        if name in ('MD5', 'md5'):
            import _md5
            return _md5.new
        if name in ('SHA256', 'sha256', 'SHA224', 'sha224'):
            import _sha256
            return _sha256.sha224 if name.endswith('224') else _sha256.sha256
        if name in ('SHA512', 'sha512', 'SHA384', 'sha384'):
            import _sha512
            return _sha512.sha384 if name.endswith('384') else _sha512.sha512
    except ImportError:
        # No extension module for this hash: fall through and report it
        # as unsupported.
        pass

    raise ValueError('unsupported hash type ' + name)
def __get_openssl_constructor(name):
    """Return the OpenSSL-backed constructor for *name*, or the builtin one.

    Falls back to __get_builtin_constructor() when _hashlib does not expose
    the algorithm or refuses to instantiate it.
    """
    try:
        f = getattr(_hashlib, 'openssl_' + name)
        # Allow the C module to raise ValueError. The function will be
        # defined but the hash not actually available thanks to OpenSSL.
        f()
        # Use the C function directly (very fast)
        return f
    except (AttributeError, ValueError):
        return __get_builtin_constructor(name)
def __py_new(name, string=''):
    """new(name, string='') - Return a new hashing object using the named algorithm;
    optionally initialized with a string.
    """
    # Pure fallback path used when the _hashlib (OpenSSL) module is absent.
    return __get_builtin_constructor(name)(string)
def __hash_new(name, string=''):
    """new(name, string='') - Return a new hashing object using the named algorithm;
    optionally initialized with a string.
    """
    try:
        return _hashlib.new(name, string)
    except ValueError:
        # If the _hashlib module (OpenSSL) doesn't support the named
        # hash, try using our builtin implementations.
        # This allows for SHA224/256 and SHA384/512 support even though
        # the OpenSSL library prior to 0.9.8 doesn't provide them.
        return __get_builtin_constructor(name)(string)
try:
    # Prefer the OpenSSL-backed implementations when the _hashlib
    # extension module is importable.
    import _hashlib
    new = __hash_new
    __get_hash = __get_openssl_constructor
    algorithms_available = algorithms_available.union(
        _hashlib.openssl_md_meth_names)
except ImportError:
    # OpenSSL support absent: use only the builtin extension modules.
    new = __py_new
    __get_hash = __get_builtin_constructor

# Bind one module-level constructor (md5(), sha1(), ...) per algorithm.
for __func_name in __always_supported:
    # try them all, some may not work due to the OpenSSL
    # version not supporting that algorithm.
    try:
        globals()[__func_name] = __get_hash(__func_name)
    except ValueError:
        import logging
        logging.exception('code for hash %s was not found.', __func_name)
try:
    # OpenSSL's PKCS5_PBKDF2_HMAC requires OpenSSL 1.0+ with HMAC and SHA
    from _hashlib import pbkdf2_hmac
except ImportError:
    import binascii
    import struct

    # Translation tables for the HMAC opad (0x5C) / ipad (0x36) key
    # transforms (see RFC 2104).
    _trans_5C = b"".join(chr(x ^ 0x5C) for x in range(256))
    _trans_36 = b"".join(chr(x ^ 0x36) for x in range(256))

    def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None):
        """Password based key derivation function 2 (PKCS #5 v2.0)

        This Python implementations based on the hmac module about as fast
        as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords and much faster
        for long passwords.
        """
        if not isinstance(hash_name, str):
            raise TypeError(hash_name)
        if not isinstance(password, (bytes, bytearray)):
            # Python 2: coerce any buffer-supporting object to bytes.
            password = bytes(buffer(password))
        if not isinstance(salt, (bytes, bytearray)):
            salt = bytes(buffer(salt))

        # Fast inline HMAC implementation
        inner = new(hash_name)
        outer = new(hash_name)
        blocksize = getattr(inner, 'block_size', 64)
        if len(password) > blocksize:
            # Keys longer than the block size are hashed first (RFC 2104).
            password = new(hash_name, password).digest()
        password = password + b'\x00' * (blocksize - len(password))
        inner.update(password.translate(_trans_36))
        outer.update(password.translate(_trans_5C))

        def prf(msg, inner=inner, outer=outer):
            # PBKDF2_HMAC uses the password as key. We can re-use the same
            # digest objects and just update copies to skip initialization.
            icpy = inner.copy()
            ocpy = outer.copy()
            icpy.update(msg)
            ocpy.update(icpy.digest())
            return ocpy.digest()

        if iterations < 1:
            raise ValueError(iterations)
        if dklen is None:
            # Default derived-key length: the digest size of the hash.
            dklen = outer.digest_size
        if dklen < 1:
            raise ValueError(dklen)

        hex_format_string = "%%0%ix" % (new(hash_name).digest_size * 2)

        dkey = b''
        loop = 1
        while len(dkey) < dklen:
            # U1 = PRF(salt || INT(loop)); later U_i values are XOR-folded
            # into an integer accumulator via their hex representations.
            prev = prf(salt + struct.pack(b'>I', loop))
            rkey = int(binascii.hexlify(prev), 16)
            for i in xrange(iterations - 1):
                prev = prf(prev)
                rkey ^= int(binascii.hexlify(prev), 16)
            loop += 1
            dkey += binascii.unhexlify(hex_format_string % rkey)

        return dkey[:dklen]


# Cleanup locals()
del __always_supported, __func_name, __get_hash
del __py_new, __hash_new, __get_openssl_constructor
| mit |
pigmej/solar-agent | solar_agent/core.py | 2 | 7841 | # Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
import string
from contextlib import nested
from fabric import api as fabric_api
from subprocess import check_output
import shlex
from itertools import takewhile
from solar_agent.logger import logger
# XXX: not used for now vvv
# def common_path(paths, sep=os.path.sep):
# paths = [x.split(sep) for x in paths]
# dirs = zip(*(p for p in paths))
# return [x[0] for x in takewhile(lambda x: all(n == x[0] for n in x[1:]), dirs)]
# class SolarAgentContext(object):
# def __init__(self):
# self._dirs = {}
# self._files = {}
# def file(self, path):
# try:
# return self._files[path]
# except KeyError:
# if self.is_safe_file(path):
# cls = SolarAgentSafeFile
# else:
# cls = SolarAgentFile
# self._files[path] = f = cls(self, path)
# return f
# def dir(self, path):
# try:
# return self._dirs[path]
# except KeyError:
# self._dirs[path] = solar_agent_dir = SolarAgentDir(self, path)
# return solar_agent_dir
# def is_safe_file(self, path):
# dirname = os.path.dirname(path)
# common = SolarAgentContext.common_path(dirname, self._dirs.keys())
# if common not in ((), ('/', )):
# return False
# return True
# def is_safe_dir(self, path):
# common = SolarAgentContext.common_path(path, self._dirs.keys())
# if common not in ((), ('/', )):
# return False
# return True
# @staticmethod
# def common_path(path, paths, sep=os.path.sep):
# all_paths = paths + [path]
# paths = [x.split(sep) for x in all_paths]
# dirs = zip(*(p for p in all_paths))
# return tuple(x[0] for x in takewhile(lambda x: all(n == x[0] for n in x[1:]), dirs))
# class SolarAgentSafeFile(object):
# def __init__(self, context, target):
# self._f = None
# self._rnd = 'solar' + ''.join((random.choice(string.ascii_lowercase) for _ in xrange(6)))
# self._path = target
# self._safe_path = self._path + '_' + self._rnd
# def open(self):
# self._f = open(self._safe_path, 'wb')
# def write(self, data):
# return self._f.write(data)
# def close(self):
# self._f.close()
# def finish(self):
# self.close()
# os.rename(self._safe_path, self._path)
# class SolarAgentFile(object):
# def __init__(self, context, target):
# self._f = None
# self._path = target
# def open(self):
# self._f = open(self._path, 'wb')
# def write(self, data):
# self._f.write(data)
# def close(self):
# self._f.close()
# def finish(self):
# self.close()
# class SolarAgentSafeDir(object):
# def __init__(self, context, target):
# self._rnd = 'solar' + ''.join((random.choice(string.ascii_lowercase) for _ in xrange(6)))
# self._path = target
# self._safe_path = self._path + '_' + self._rnd
# def start(self):
# os.makedirs(self._safe_path)
# def finish(self):
# os.rename(self._safe_path, self._path)
# class SolarAgentDir(object):
# def __init__(self, context, target):
# self._path = target
# def start(self):
# os.makedirs(self._path)
# def finish(self):
# pass
# XXX: not used for now ^^^
class SolarAgentContext(object):
    """Per-transfer registry of SolarAgentFile wrappers, one per target path."""

    def __init__(self):
        # Maps target path -> SolarAgentFile, so repeated operations on the
        # same path reuse a single staged-file wrapper.
        self.files = {}

    def file(self, path):
        """Return the SolarAgentFile for *path*, creating it on first use."""
        if path not in self.files:
            self.files[path] = SolarAgentFile(self, path)
        return self.files[path]
class SolarAgentFile(object):
    """Write-to-temp-then-rename file wrapper.

    Data is written to a randomly suffixed sibling path and only moved over
    the real target in finish(), so readers never observe a partial file.
    """

    def __init__(self, context, target):
        self.ctx = context
        # Random suffix keeps the in-progress file from clobbering the target.
        self._rnd = 'solar' + ''.join((random.choice(string.ascii_lowercase) for _ in xrange(6)))
        self._path = target
        self._f = None
        self._safe_path = self._path + '_' + self._rnd

    def open(self):
        """Create parent directories (if needed) and open the temp file."""
        dirname = os.path.dirname(self._safe_path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        # Idempotent: a second open() call reuses the already-open handle.
        if self._f is None:
            self._f = open(self._safe_path, 'wb')

    def write(self, data):
        # NOTE(review): assumes open() was called first; otherwise self._f
        # is None and this raises AttributeError.
        self._f.write(data)

    def finish(self):
        """Close the temp file and move it over the real target path."""
        self._f.close()
        self._f = None
        os.rename(self._safe_path, self._path)
class SolarAgentIface(object):
    """Static entry points exposed to the solar agent transport layer."""

    @staticmethod
    def run(solar_agent_context, cmd, **kwargs):
        """Run *cmd* locally via fabric and return a dict of result fields.

        Recognized kwargs: 'cwd' (working directory) and 'env' (extra shell
        environment variables).  Failures are reported in the result dict
        rather than raised (fabric warn_only mode).
        """
        # return check_output(shlex.split(cmd))
        executor = fabric_api.local
        # if kwargs.get('use_sudo', False):
        #     cmd = 'sudo ' + cmd
        managers = []
        cwd = kwargs.get('cwd')
        if cwd:
            managers.append(fabric_api.cd(kwargs['cwd']))
        env = kwargs.get('env')
        if env:
            managers.append(fabric_api.shell_env(**kwargs['env']))
        # we just warn, don't exit on solar_agent
        # correct data is returned
        managers.append(fabric_api.warn_only())
        with nested(*managers):
            out = executor(cmd, capture=True)
        # Flatten the fabric result object into a plain serializable dict.
        result = {}
        for name in ('failed', 'return_code', 'stdout', 'stderr',
                     'succeeded', 'command', 'real_command'):
            result[name] = getattr(out, name)
        return result

    @staticmethod
    def copy_file(solar_agent_context, stream_reader, path, size=None):
        """Stream a single file's content from *stream_reader* into *path*."""
        f = SolarAgentIface.file_start(solar_agent_context, path)
        rdr = stream_reader(size)
        for data in rdr:
            f.write(data)
        SolarAgentIface.file_end(solar_agent_context, path)
        return True

    @staticmethod
    def copy_files(solar_agent_context, stream_reader, paths, total_size):
        """Stream several files; *paths* is a sequence of (target, size) pairs."""
        # total_size not used for now
        for _to, _size in paths:
            logger.debug("Starting %s size=%d", _to, _size)
            f = SolarAgentIface.file_start(solar_agent_context, _to)
            # Zero-size entries still create/finalize an (empty) file.
            if _size > 0:
                rdr = stream_reader(_size)
                for data in rdr:
                    f.write(data)
            SolarAgentIface.file_end(solar_agent_context, _to)
            logger.debug("Done %s size=%d", _to, _size)
        return True

    # # TODO: not used YET fully
    # @staticmethod
    # def dir_start(solar_agent_context, path):
    #     solar_agent_dir = solar_agent_context.dir(path)
    #     solar_agent_dir.start()
    #     return solar_agent_dir

    # @staticmethod
    # def dir_finish(solar_agent_context, path):
    #     solar_agent_dir = solar_agent_context.dir(path)
    #     solar_agent_dir.finish()
    #     return True

    @staticmethod
    def file_start(solar_agent_context, path):
        """Open (or re-open) the staged file for *path* and return it."""
        solar_agent_file = solar_agent_context.file(path)
        solar_agent_file.open()
        return solar_agent_file

    @staticmethod
    def file_put_data(solar_agent_context, path, data):
        """Append *data* to the staged file for *path*."""
        solar_agent_file = solar_agent_context.file(path)
        return solar_agent_file.write(data)

    @staticmethod
    def file_end(solar_agent_context, path):
        """Finalize the staged file for *path* (close + rename into place)."""
        solar_agent_file = solar_agent_context.file(path)
        solar_agent_file.finish()
        return True
| apache-2.0 |
markgw/jazzparser | src/jazzparser/utils/latex.py | 1 | 1979 | """Latex output utility functions to help with producing valid Latex files.
Utility functions for handling processing and output of Latex.
"""
"""
============================== License ========================================
Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding
This file is part of The Jazz Parser.
The Jazz Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The Jazz Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>.
============================ End license ======================================
"""
__author__ = "Mark Granroth-Wilding <mark.granroth-wilding@ed.ac.uk>"
def filter_latex(text):
    """
    Applies necessary filters to Latex text before outputting. Mainly
    involves escaping strings.
    """
    # Backslash-escape the Latex special characters handled here, in the
    # same order as before (order is safe: no replacement introduces a
    # character escaped by a later pass).
    for special in ("#", "%", "_"):
        text = text.replace(special, "\\" + special)
    return text
def start_document(title=None, author=None, packages=(), options=(), toc=False):
    """
    Return the Latex preamble and document opening, up to and including
    \\maketitle (and the optional table of contents).

    @param title: optional document title (rendered via \\title).
    @param author: optional author; an empty \\author{} is emitted when absent.
    @param packages: iterable of package names to \\usepackage.
    @param options: iterable of extra documentclass options; a4paper is
        always appended.
    @param toc: when True, emit a \\tableofcontents.
    """
    # Tuples as defaults avoid the shared-mutable-default pitfall; list()
    # keeps backward compatibility with callers that pass lists.
    output = ""
    output += "\\documentclass[%s]{article}\n" % ",".join(list(options) + ['a4paper'])
    for package in packages:
        output += "\\usepackage{%s}\n" % package
    output += "\\begin{document}\n"
    if title is not None:
        output += "\\title{%s}\n" % title
    if author is not None:
        output += "\\author{%s}\n" % author
    else:
        output += "\\author{}\n"
    output += "\\maketitle\n"
    if toc:
        output += "\\tableofcontents\n"
    return output
| gpl-3.0 |
ezequielpereira/Time-Line | timelinelib/wxgui/components/errorpanel.py | 2 | 2093 | # Copyright (C) 2009, 2010, 2011, 2012, 2013, 2014, 2015 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import wx
from timelinelib.utils import ex_msg
from timelinelib.wxgui.components.hyperlinkbutton import HyperlinkButton
class ErrorPanel(wx.Panel):
    """Panel shown in place of a timeline view when loading failed.

    Displays the error message plus a hyperlink to the "contact" help page.
    """

    def __init__(self, parent, main_frame):
        wx.Panel.__init__(self, parent)
        # Kept so the contact link can reach the application's help browser.
        self.main_frame = main_frame
        self._create_gui()

    def populate(self, error):
        """Show the human-readable message of *error* in the panel."""
        self.txt_error.SetLabel(ex_msg(error))

    def _create_gui(self):
        # Vertically stacked, centered layout: error text, spacer,
        # help caption, contact hyperlink.
        vsizer = wx.BoxSizer(wx.VERTICAL)
        # Error text
        self.txt_error = wx.StaticText(self, label="")
        vsizer.Add(self.txt_error, flag=wx.ALIGN_CENTER_HORIZONTAL)
        # Spacer
        vsizer.AddSpacer(20)
        # Help text
        txt_help = wx.StaticText(self, label=_("Relevant help topics:"))
        vsizer.Add(txt_help, flag=wx.ALIGN_CENTER_HORIZONTAL)
        # Button
        btn_contact = HyperlinkButton(self, _("Contact"))
        self.Bind(wx.EVT_HYPERLINK, self._btn_contact_on_click, btn_contact)
        vsizer.Add(btn_contact, flag=wx.ALIGN_CENTER_HORIZONTAL)
        # Sizer
        hsizer = wx.BoxSizer(wx.HORIZONTAL)
        hsizer.Add(vsizer, flag=wx.ALIGN_CENTER_HORIZONTAL | wx.ALIGN_CENTER_VERTICAL, proportion=1)
        self.SetSizer(hsizer)

    def _btn_contact_on_click(self, e):
        # Open the "contact" page in the application's help browser.
        self.main_frame.help_browser.show_page("contact")

    def activated(self):
        # Hook called when this panel becomes the active view; nothing to do.
        pass
| gpl-3.0 |
noroutine/ansible | lib/ansible/modules/network/aos/aos_logical_device_map.py | 15 | 8922 | #!/usr/bin/python
#
# (c) 2017 Apstra Inc, <community@apstra.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_logical_device_map
author: Damien Garros (@dgarros)
version_added: "2.3"
short_description: Manage AOS Logical Device Map
description:
    - Apstra AOS Logical Device Map module lets you manage your Logical Device Map easily. You can
      create and delete Logical Device Map by Name, ID or by using a JSON File. This module
      is idempotent and supports the I(check) mode. It's using the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
name:
description:
- Name of the Logical Device Map to manage.
Only one of I(name), I(id) or I(content) can be set.
id:
description:
- AOS Id of the Logical Device Map to manage (can't be used to create a new Logical Device Map),
Only one of I(name), I(id) or I(content) can be set.
content:
description:
- Datastructure of the Logical Device Map to manage. The data can be in YAML / JSON or
directly a variable. It's the same datastructure that is returned
on success in I(value). Only one of I(name), I(id) or I(content) can be set.
state:
description:
- Indicate what is the expected state of the Logical Device Map (present or not).
default: present
choices: ['present', 'absent']
'''
EXAMPLES = '''
- name: "Create an Logical Device Map with one subnet"
aos_logical_device_map:
session: "{{ aos_session }}"
name: "my-logical-device-map"
state: present
- name: "Create an Logical Device Map with multiple subnets"
aos_logical_device_map:
session: "{{ aos_session }}"
name: "my-other-logical-device-map"
state: present
- name: "Check if an Logical Device Map exist with same subnets by ID"
aos_logical_device_map:
session: "{{ aos_session }}"
name: "45ab26fc-c2ed-4307-b330-0870488fa13e"
state: present
- name: "Delete an Logical Device Map by name"
aos_logical_device_map:
session: "{{ aos_session }}"
name: "my-logical-device-map"
state: absent
- name: "Delete an Logical Device Map by id"
aos_logical_device_map:
session: "{{ aos_session }}"
id: "45ab26fc-c2ed-4307-b330-0870488fa13e"
state: absent
# Save an Logical Device Map to a file
- name: "Access Logical Device Map 1/3"
aos_logical_device_map:
session: "{{ aos_session }}"
name: "my-logical-device-map"
state: present
register: logical_device_map
- name: "Save Logical Device Map into a file in JSON 2/3"
copy:
content: "{{ logical_device_map.value | to_nice_json }}"
dest: logical_device_map_saved.json
- name: "Save Logical Device Map into a file in YAML 3/3"
copy:
content: "{{ logical_device_map.value | to_nice_yaml }}"
dest: logical_device_map_saved.yaml
- name: "Load Logical Device Map from a JSON file"
aos_logical_device_map:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/logical_device_map_saved.json') }}"
state: present
- name: "Load Logical Device Map from a YAML file"
aos_logical_device_map:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/logical_device_map_saved.yaml') }}"
state: present
'''
# Ansible's documentation tooling (ansible-doc) looks for a module-level
# variable named RETURN; the previous name RETURNS was silently ignored.
RETURN = '''
name:
  description: Name of the Logical Device Map
  returned: always
  type: str
  sample: Server-IpAddrs

id:
  description: AOS unique ID assigned to the Logical Device Map
  returned: always
  type: str
  sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06

value:
  description: Value of the object as returned by the AOS Server
  returned: always
  type: dict
  sample: {'...'}
'''
import json
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aos.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict
#########################################################
# State Processing
#########################################################
def logical_device_map_absent(module, aos, my_log_dev_map):
    """Ensure the given Logical Device Map is absent.

    Exits with changed=False when the item does not exist; otherwise deletes
    it (unless running in check mode) and exits with changed=True.
    """
    # If the object does not exist, there is nothing to delete.
    if my_log_dev_map.exists is False:
        module.exit_json(changed=False, name=module.params['name'], id='', value={})

    # If not in check mode, delete Logical Device Map
    if not module.check_mode:
        try:
            # Need to wait for 1sec before a delete to workaround a current
            # limitation in AOS
            time.sleep(1)
            my_log_dev_map.delete()
        except Exception as exc:
            # Catch Exception (not a bare except, which would also swallow
            # SystemExit/KeyboardInterrupt) and surface the cause.
            module.fail_json(msg="An error occurred, while trying to delete "
                                 "the Logical Device Map: %s" % exc)

    module.exit_json(changed=True,
                     name=my_log_dev_map.name,
                     id=my_log_dev_map.id,
                     value={})
def logical_device_map_present(module, aos, my_log_dev_map):
    """Ensure the given Logical Device Map is present.

    Creates the item from 'content' when provided; fails when the item does
    not exist and no content was supplied to create it from.
    """
    margs = module.params

    # if content is defined, create object from Content
    if margs['content'] is not None:
        if 'display_name' in module.params['content'].keys():
            do_load_resource(module, aos.LogicalDeviceMaps, module.params['content']['display_name'])
        else:
            module.fail_json(msg="Unable to find display_name in 'content', Mandatory")

    # If the item doesn't exist and no content was provided, we cannot
    # create it. (The previous check `'content' not in margs.keys()` never
    # fired: argument_spec declares 'content', so the key always exists
    # with value None — the value must be tested instead.)
    if my_log_dev_map.exists is False and margs['content'] is None:
        module.fail_json(msg="'Content' is mandatory for module that don't exist currently")

    module.exit_json(changed=False,
                     name=my_log_dev_map.name,
                     id=my_log_dev_map.id,
                     value=my_log_dev_map.value)
#########################################################
# Main Function
#########################################################
def logical_device_map(module):
    """Resolve the target item from the module arguments and apply 'state'."""
    margs = module.params

    try:
        aos = get_aos_session(module, margs['session'])
    except Exception as exc:
        # Narrow catch: a bare "except:" would also swallow SystemExit and
        # KeyboardInterrupt; include the cause in the failure message.
        module.fail_json(msg="Unable to login to the AOS server: %s" % exc)

    # Determine the lookup key: display_name from content, or name, or id.
    item_name = False
    item_id = False

    if margs['content'] is not None:
        content = content_to_dict(module, margs['content'])
        if 'display_name' in content.keys():
            item_name = content['display_name']
        else:
            module.fail_json(msg="Unable to extract 'display_name' from 'content'")
    elif margs['name'] is not None:
        item_name = margs['name']
    elif margs['id'] is not None:
        item_id = margs['id']

    # ----------------------------------------------------
    # Find Object if available based on ID or Name
    # ----------------------------------------------------
    try:
        my_log_dev_map = find_collection_item(aos.LogicalDeviceMaps,
                                              item_name=item_name,
                                              item_id=item_id)
    except Exception as exc:
        module.fail_json(msg="Unable to find the Logical Device Map based on "
                             "name or ID, something went wrong: %s" % exc)

    # ----------------------------------------------------
    # Proceed based on State value
    # ----------------------------------------------------
    if margs['state'] == 'absent':
        logical_device_map_absent(module, aos, my_log_dev_map)
    elif margs['state'] == 'present':
        logical_device_map_present(module, aos, my_log_dev_map)
def main():
    """Module entry point: declare the argument spec and dispatch on state."""
    module = AnsibleModule(
        argument_spec=dict(
            session=dict(required=True, type="dict"),
            name=dict(required=False),
            id=dict(required=False),
            content=dict(required=False, type="json"),
            state=dict(required=False,
                       choices=['present', 'absent'],
                       default="present")
        ),
        # Exactly one of name / id / content identifies the target item.
        mutually_exclusive=[('name', 'id', 'content')],
        required_one_of=[('name', 'id', 'content')],
        supports_check_mode=True
    )

    # Check if aos-pyez is present and match the minimum version
    check_aos_version(module, '0.6.0')

    logical_device_map(module)


if __name__ == "__main__":
    main()
| gpl-3.0 |
ttm/aa01 | aa.py | 2 | 1628 | #! /usr/bin/env python
#-*- coding: utf8 -*-
# use com:
# sudo cp aa.py /usr/local/bin/aa
# aí na linha de comando:
# $aa fazendo x pq y
# Configuration:
# Nick reported to the aa server with every shout.
NICK = "anonymous"

import sys, string, urllib2

if len(sys.argv) == 1:
    print("usage: aa this is a aa shout, for registering ongoing work")
else:
    # Join all command line arguments into one shout message.
    shout = string.join(sys.argv[1:], " ")
    mirrors = "http://aaserver.herokuapp.com/", "http://aa.daniloshiga.com/"
    aastring = "shout?nick=%s&shout=%s" % (NICK, urllib2.quote(shout))
    n_mirrors = len(mirrors)
    # Try each mirror in order until one accepts the shout.
    for i in range(n_mirrors):
        url = mirrors[i] + aastring
        req = urllib2.Request(url)
        try:
            r = urllib2.urlopen(req)
        # BUG FIX: HTTPError is a subclass of URLError, so it must be
        # caught first — in the old order this branch was unreachable.
        except urllib2.HTTPError as e:
            print("Warning: aa shout could not log at " + url + " (HTTPError)")
            print("Server returned error: ")
            # BUG FIX: HTTPError.code is an attribute, not a method;
            # e.code() raised TypeError instead of printing the status.
            print(e.code)
            print(e.read())
        except urllib2.URLError as e:
            print("Warning: aa shout could not log at " + url + " (URLError)")
            print("Cannot reach this server, reason: ")
            print(e.reason)
        else:
            print("shout logged")
            break
        # Debug
        # r.read()
        # if r.find("Shout succeeded") || r.find("Hello World!")
        #     print("shout logged")
        # else
        #     print("ERROR: aa shout could not log at " + url)
        #     print("Server request " + url + " returned:")
        #     print r.read()
        #     break
        if (i + 1 < n_mirrors):
            print("trying another mirror")
| artistic-2.0 |
waythe/closure-low-poly-background | closure-library/closure/bin/build/depstree.py | 455 | 6375 | # Copyright 2009 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent a full Closure Library dependency tree.
Offers a queryable tree of dependencies of a given set of sources. The tree
will also do logical validation to prevent duplicate provides and circular
dependencies.
"""
__author__ = 'nnaze@google.com (Nathan Naze)'
class DepsTree(object):
  """Represents the set of dependencies between source files."""

  def __init__(self, sources):
    """Initializes the tree with a set of sources.

    Args:
      sources: A set of JavaScript sources.

    Raises:
      MultipleProvideError: A namespace is provided by multiple sources.
      NamespaceNotFoundError: A namespace is required but never provided.
    """
    self._sources = sources
    self._provides_map = {}

    # Map every provided namespace to its source, rejecting duplicates.
    for src in sources:
      for namespace in src.provides:
        if namespace in self._provides_map:
          raise MultipleProvideError(
              namespace, [self._provides_map[namespace], src])
        self._provides_map[namespace] = src

    # Validate up front that each required namespace has a provider.
    for src in sources:
      for namespace in src.requires:
        if namespace not in self._provides_map:
          raise NamespaceNotFoundError(namespace, src)

  def GetDependencies(self, required_namespaces):
    """Get source dependencies, in order, for the given namespaces.

    Args:
      required_namespaces: A string (for one) or list (for one or more) of
        namespaces.

    Returns:
      A list of source objects that provide those namespaces and all
      requirements, in dependency order.

    Raises:
      NamespaceNotFoundError: A namespace is requested but doesn't exist.
      CircularDependencyError: A cycle is detected in the dependency tree.
    """
    if isinstance(required_namespaces, str):
      required_namespaces = [required_namespaces]

    ordered_sources = []
    for namespace in required_namespaces:
      resolved = DepsTree._ResolveDependencies(
          namespace, [], self._provides_map, [])
      for src in resolved:
        # Preserve first-seen order while de-duplicating across namespaces.
        if src not in ordered_sources:
          ordered_sources.append(src)
    return ordered_sources

  @staticmethod
  def _ResolveDependencies(required_namespace, deps_list, provides_map,
                           traversal_path):
    """Resolve dependencies for Closure source files.

    Recursively fills deps_list with the sources needed by
    required_namespace, each source appearing only after everything it
    requires.

    Args:
      required_namespace: String of required namespace.
      deps_list: List of sources in dependency order; appended to in place.
      provides_map: Map from namespace to source that provides it.
      traversal_path: Stack of namespaces from the root of the recursion to
        here; a namespace reappearing on this stack means a cycle.

    Returns:
      The given deps_list object filled with sources in dependency order.

    Raises:
      NamespaceNotFoundError: A namespace is requested but doesn't exist.
      CircularDependencyError: A cycle is detected in the dependency tree.
    """
    source = provides_map.get(required_namespace)
    if not source:
      raise NamespaceNotFoundError(required_namespace)

    if required_namespace in traversal_path:
      # Push the repeated namespace so the error message shows the cycle.
      traversal_path.append(required_namespace)
      raise CircularDependencyError(traversal_path)

    if source in deps_list:
      # Already satisfied by an earlier branch of the traversal.
      return deps_list

    traversal_path.append(required_namespace)
    for requirement in source.requires:
      # Dependencies are appended before the source that needs them.
      DepsTree._ResolveDependencies(requirement, deps_list, provides_map,
                                    traversal_path)
    deps_list.append(source)
    traversal_path.pop()
    return deps_list
class BaseDepsTreeError(Exception):
  """Base class for all DepsTree errors."""

  def __init__(self):
    super(BaseDepsTreeError, self).__init__()
class CircularDependencyError(BaseDepsTreeError):
  """Raised when a dependency cycle is encountered."""

  def __init__(self, dependency_list):
    BaseDepsTreeError.__init__(self)
    self._dependency_list = dependency_list

  def __str__(self):
    # One namespace per line; the repeated entry marks where the cycle closes.
    cycle = '\n'.join(self._dependency_list)
    return 'Encountered circular dependency:\n%s\n' % cycle
class MultipleProvideError(BaseDepsTreeError):
  """Raised when a namespace is provided more than once."""

  def __init__(self, namespace, sources):
    BaseDepsTreeError.__init__(self)
    self._namespace = namespace
    self._sources = sources

  def __str__(self):
    listing = '\n'.join(str(source) for source in self._sources)
    return ('Namespace "%s" provided more than once in sources:\n%s\n' %
            (self._namespace, listing))
class NamespaceNotFoundError(BaseDepsTreeError):
  """Raised when a namespace is requested but not provided."""

  def __init__(self, namespace, source=None):
    BaseDepsTreeError.__init__(self)
    self._namespace = namespace
    self._source = source

  def __str__(self):
    message = 'Namespace "%s" never provided.' % self._namespace
    if self._source:
      # Only mention the requiring source when one was recorded.
      message += ' Required in %s' % self._source
    return message
| apache-2.0 |
thumbimigwe/echorizr | lib/python2.7/site-packages/django/core/cache/backends/base.py | 28 | 9775 | "Base Cache class."
from __future__ import unicode_literals
import time
import warnings
from django.core.exceptions import DjangoRuntimeWarning, ImproperlyConfigured
from django.utils.module_loading import import_string
class InvalidCacheBackendError(ImproperlyConfigured):
    """The configured cache backend could not be loaded."""
class CacheKeyWarning(DjangoRuntimeWarning):
    """Warning issued for cache keys that may not be portable (see validate_key)."""
# Stub class to ensure not passing in a `timeout` argument results in
# the default timeout
DEFAULT_TIMEOUT = object()  # unique sentinel; compared against explicitly in get_backend_timeout()

# Memcached does not accept keys longer than this.
MEMCACHE_MAX_KEY_LENGTH = 250
def default_key_func(key, key_prefix, version):
    """
    Default function to generate keys.

    Constructs the key used by all other methods. By default it prepends
    the `key_prefix'. KEY_FUNCTION can be used to specify an alternate
    function with custom key making behavior.
    """
    parts = (key_prefix, version, key)
    return '%s:%s:%s' % parts
def get_key_func(key_func):
    """
    Function to decide which key function to use.

    Defaults to ``default_key_func``.
    """
    if key_func is None:
        return default_key_func
    if callable(key_func):
        # Already a callable: use it directly.
        return key_func
    # Otherwise treat it as a dotted path to a callable.
    return import_string(key_func)
class BaseCache(object):
    """Abstract base class for cache backends.

    Parses the common configuration (timeout, entry limits, key prefixing)
    from the backend params and defines the cache API. Subclasses must
    implement at least add(), get(), set(), delete() and clear(); the
    remaining methods have generic (often per-key) default implementations
    that backends may override with more efficient bulk operations.
    """
    def __init__(self, params):
        # Invalid numeric settings silently fall back to their defaults
        # instead of raising, so a malformed CACHES entry degrades gracefully.
        timeout = params.get('timeout', params.get('TIMEOUT', 300))
        if timeout is not None:
            try:
                timeout = int(timeout)
            except (ValueError, TypeError):
                timeout = 300
        self.default_timeout = timeout
        options = params.get('OPTIONS', {})
        # The lower-case top-level key takes precedence over OPTIONS.
        max_entries = params.get('max_entries', options.get('MAX_ENTRIES', 300))
        try:
            self._max_entries = int(max_entries)
        except (ValueError, TypeError):
            self._max_entries = 300
        cull_frequency = params.get('cull_frequency', options.get('CULL_FREQUENCY', 3))
        try:
            self._cull_frequency = int(cull_frequency)
        except (ValueError, TypeError):
            self._cull_frequency = 3
        self.key_prefix = params.get('KEY_PREFIX', '')
        self.version = params.get('VERSION', 1)
        self.key_func = get_key_func(params.get('KEY_FUNCTION'))
    def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
        """
        Returns the timeout value usable by this backend based upon the provided
        timeout.

        Returns None for "cache forever", otherwise an absolute expiry
        expressed as a time.time() timestamp.
        """
        if timeout == DEFAULT_TIMEOUT:
            timeout = self.default_timeout
        elif timeout == 0:
            # ticket 21147 - avoid time.time() related precision issues
            timeout = -1
        return None if timeout is None else time.time() + timeout
    def make_key(self, key, version=None):
        """Constructs the key used by all other methods. By default it
        uses the key_func to generate a key (which, by default,
        prepends the `key_prefix' and 'version'). A different key
        function can be provided at the time of cache construction;
        alternatively, you can subclass the cache backend to provide
        custom key making behavior.
        """
        if version is None:
            version = self.version
        new_key = self.key_func(key, self.key_prefix, version)
        return new_key
    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a value in the cache if the key does not already exist. If
        timeout is given, that timeout will be used for the key; otherwise
        the default cache timeout will be used.

        Returns True if the value was stored, False otherwise.
        """
        raise NotImplementedError('subclasses of BaseCache must provide an add() method')
    def get(self, key, default=None, version=None):
        """
        Fetch a given key from the cache. If the key does not exist, return
        default, which itself defaults to None.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a get() method')
    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a value in the cache. If timeout is given, that timeout will be
        used for the key; otherwise the default cache timeout will be used.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a set() method')
    def delete(self, key, version=None):
        """
        Delete a key from the cache, failing silently.
        """
        raise NotImplementedError('subclasses of BaseCache must provide a delete() method')
    def get_many(self, keys, version=None):
        """
        Fetch a bunch of keys from the cache. For certain backends (memcached,
        pgsql) this can be *much* faster when fetching multiple values.

        Returns a dict mapping each key in keys to its value. If the given
        key is missing, it will be missing from the response dict.
        """
        # Generic fallback: one get() per key; backends may override.
        d = {}
        for k in keys:
            val = self.get(k, version=version)
            if val is not None:
                d[k] = val
        return d
    def get_or_set(self, key, default=None, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Fetch a given key from the cache. If the key does not exist,
        the key is added and set to the default value. The default value can
        also be any callable. If timeout is given, that timeout will be used
        for the key; otherwise the default cache timeout will be used.

        Return the value of the key stored or retrieved.
        """
        if default is None:
            raise ValueError('You need to specify a value.')
        val = self.get(key, version=version)
        if val is None:
            if callable(default):
                default = default()
            # add() is atomic per-key on real backends, so concurrent callers
            # cannot both store a value here.
            self.add(key, default, timeout=timeout, version=version)
            # Fetch the value again to avoid a race condition if another caller
            # added a value between the first get() and the add() above.
            return self.get(key, default, version=version)
        return val
    def has_key(self, key, version=None):
        """
        Returns True if the key is in the cache and has not expired.
        """
        return self.get(key, version=version) is not None
    def incr(self, key, delta=1, version=None):
        """
        Add delta to value in the cache. If the key does not exist, raise a
        ValueError exception.
        """
        # Read-modify-write: not atomic in this generic implementation.
        value = self.get(key, version=version)
        if value is None:
            raise ValueError("Key '%s' not found" % key)
        new_value = value + delta
        self.set(key, new_value, version=version)
        return new_value
    def decr(self, key, delta=1, version=None):
        """
        Subtract delta from value in the cache. If the key does not exist, raise
        a ValueError exception.
        """
        return self.incr(key, -delta, version=version)
    def __contains__(self, key):
        """
        Returns True if the key is in the cache and has not expired.
        """
        # This is a separate method, rather than just a copy of has_key(),
        # so that it always has the same functionality as has_key(), even
        # if a subclass overrides it.
        return self.has_key(key)
    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a bunch of values in the cache at once from a dict of key/value
        pairs. For certain backends (memcached), this is much more efficient
        than calling set() multiple times.

        If timeout is given, that timeout will be used for the key; otherwise
        the default cache timeout will be used.
        """
        for key, value in data.items():
            self.set(key, value, timeout=timeout, version=version)
    def delete_many(self, keys, version=None):
        """
        Delete a bunch of values in the cache at once. For certain backends
        (memcached), this is much more efficient than calling delete() multiple
        times.
        """
        for key in keys:
            self.delete(key, version=version)
    def clear(self):
        """Remove *all* values from the cache at once."""
        raise NotImplementedError('subclasses of BaseCache must provide a clear() method')
    def validate_key(self, key):
        """
        Warn about keys that would not be portable to the memcached
        backend. This encourages (but does not force) writing backend-portable
        cache code.
        """
        if len(key) > MEMCACHE_MAX_KEY_LENGTH:
            warnings.warn('Cache key will cause errors if used with memcached: '
                    '%s (longer than %s)' % (key, MEMCACHE_MAX_KEY_LENGTH),
                    CacheKeyWarning)
        for char in key:
            # Memcached rejects control characters and spaces (< 33) and DEL.
            if ord(char) < 33 or ord(char) == 127:
                warnings.warn('Cache key contains characters that will cause '
                              'errors if used with memcached: %r' % key,
                              CacheKeyWarning)
    def incr_version(self, key, delta=1, version=None):
        """Adds delta to the cache version for the supplied key. Returns the
        new version.

        Implemented as copy-to-new-version followed by delete of the old one.
        """
        if version is None:
            version = self.version
        value = self.get(key, version=version)
        if value is None:
            raise ValueError("Key '%s' not found" % key)
        self.set(key, value, version=version + delta)
        self.delete(key, version=version)
        return version + delta
    def decr_version(self, key, delta=1, version=None):
        """Subtracts delta from the cache version for the supplied key. Returns
        the new version.
        """
        return self.incr_version(key, -delta, version)
    def close(self, **kwargs):
        """Close the cache connection"""
        pass
| mit |
1245816264/fresco | run_comparison.py | 40 | 9509 | #!/usr/bin/env python
# This file provided by Facebook is for non-commercial testing and evaluation
# purposes only. Facebook reserves all rights not expressly granted.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# FACEBOOK BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
This script runs a comparative test with the sample app.
It builds and runs the sample app, switching from one library to the next,
taking measurements as it goes.
To select a subset of the libraries, use the -s option with a
space-separated list.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import glob
import os
import re
import tempfile
from collections import namedtuple
from subprocess import check_call, PIPE, Popen
""" List of tested libraries """
TESTS = (
'fresco',
'fresco-okhttp',
'glide',
'picasso',
'uil',
'volley',
'drawee-volley'
)
TEST_SOURCES = (
'network',
'local'
)
ABIS = (
'arm64-v8a',
'armeabi',
'armeabi-v7a',
'x86',
'x86_64'
)
""" Appends test class name to method name """
TEST_PATTERN = 'test{}{}'
""" Named tuple containing relevant numbers reported by a test """
Stats = namedtuple('Stats', [
'success_wait_times',
'failure_wait_times',
'cancellation_wait_times',
'java_heap_sizes',
'native_heap_sizes',
'skipped_frames'])
def parse_args():
    """Build the command-line parser and return the parsed options."""
    parser = argparse.ArgumentParser(
        description='Runs comparison test and processes results')
    # Subsets of scenarios/sources are optional; the target ABI is not.
    parser.add_argument('-s', '--scenarios', nargs='+', choices=TESTS)
    parser.add_argument('-d', '--sources', nargs='+', choices=TEST_SOURCES)
    parser.add_argument('-c', '--cpu', required=True, choices=ABIS)
    return parser.parse_args()
def start_subprocess(command, **kwargs):
    """Split *command* on whitespace and launch it without waiting.

    Returns the Popen handle; extra keyword args go straight to Popen.
    """
    argv = command.split()
    return Popen(argv, **kwargs)
def run_command(command):
    """Run *command* to completion with its output discarded.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    with open('/dev/null', 'w') as devnull:
        argv = command.split()
        check_call(argv, stdout=devnull, stderr=devnull)
def gradle(*tasks):
    """Run the given gradle tasks through the wrapper script (no-op if none)."""
    if not tasks:
        return
    run_command('./gradlew {}'.format(" ".join(tasks)))
def adb(command):
    """Run an adb subcommand; *command* carries all arguments as one string."""
    run_command('adb {}'.format(command))
def install_apks(abi):
    """Build the comparison app for *abi* and (re)install app + test apks."""
    print("Installing comparison app...")
    gradle(':samples:comparison:assembleDebug',
           ':samples:comparison:assembleDebugAndroidTest')
    # Remove any stale installs before pushing the fresh builds.
    adb('uninstall com.facebook.samples.comparison')
    adb('uninstall com.facebook.samples.comparison.test')
    app_install = ('install -r samples/comparison/build/outputs/apk/comparison-'
                   '{}-debug.apk'.format(abi))
    adb(app_install)
    adb('install -r samples/comparison/build/outputs/apk/'
        'comparison-debug-androidTest-unaligned.apk')
class ComparisonTest:
    """Runs one instrumentation test case and captures its logcat output."""
    def __init__(
            self,
            method_name,
            class_name='com.facebook.samples.comparison.test.ScrollTest',
            test_package='com.facebook.samples.comparison.test',
            test_runner='android.test.InstrumentationTestRunner'):
        self.method_name = method_name
        self.class_name = class_name
        self.test_package = test_package
        self.test_runner = test_runner
    def __call__(self):
        """Executes the test case; captured log lines end up in self.logcat."""
        # Clear the device log so we only capture this run.
        adb('logcat -c')
        with tempfile.TemporaryFile() as capture:
            log_reader = start_subprocess(
                'adb logcat',
                stdout=capture)
            instrument_cmd = 'shell am instrument -w -e class {}#{} {}/{}'.format(
                self.class_name,
                self.method_name,
                self.test_package,
                self.test_runner)
            adb(instrument_cmd)
            # The instrumentation finished; stop tailing and rewind the capture.
            log_reader.terminate()
            log_reader.wait()
            capture.seek(0)
            self.logcat = capture.readlines()
def get_float_from_logs(regex, logs):
    """Return group 1 of every line of *logs* matching *regex*, as floats."""
    matcher = re.compile(regex)
    values = []
    for line in logs:
        found = matcher.search(line)
        if found:
            values.append(float(found.group(1)))
    return values
def get_int_from_logs(regex, logs):
    """Return group 1 of every line of *logs* matching *regex*, as ints."""
    matcher = re.compile(regex)
    hits = (matcher.search(line) for line in logs)
    return [int(hit.group(1)) for hit in hits if hit]
def get_stats(logs):
    """Extract the measurements reported in captured logcat lines.

    Args:
        logs: list of logcat lines (iterated several times, so it must be a
            sequence, not a one-shot iterator).

    Returns:
        A Stats namedtuple with wait times (ms), heap sizes (MB) and
        skipped-frame counts parsed out of the logs.
    """
    def extract(regex, cast):
        # Collect group 1 of every matching line, converted with `cast`.
        pattern = re.compile(regex)
        return [cast(m.group(1)) for m in map(pattern.search, logs) if m]

    return Stats(
        extract(r']: loaded after (\d+) ms', int),
        extract(r']: failed after (\d+) ms', int),
        extract(r']: cancelled after (\d+) ms', int),
        # `\.` (previously an unescaped `.`) so only a real decimal
        # point separates the two digit groups.
        extract(r'\s+(\d+\.\d+) MB Java', float),
        extract(r'\s+(\d+\.\d+) MB native', float),
        extract(r'Skipped (\d+) frames! The application may be', int))
def print_stats(stats):
    """Print a human-readable summary of a Stats tuple.

    Handles the degenerate case where the run produced no parseable log
    lines (empty Stats fields) instead of crashing on division by zero or
    max() of an empty sequence.
    """
    successes = len(stats.success_wait_times)
    cancellations = len(stats.cancellation_wait_times)
    failures = len(stats.failure_wait_times)
    total_count = successes + cancellations + failures
    total_wait_time = (
        sum(stats.success_wait_times) +
        sum(stats.cancellation_wait_times) +
        sum(stats.failure_wait_times))
    # Guard: no requests recorded -> report an average of 0 rather than crash.
    avg_wait_time = float(total_wait_time) / total_count if total_count else 0.0
    # max() raises ValueError on an empty sequence; report 0 in that case.
    max_java_heap = max(stats.java_heap_sizes) if stats.java_heap_sizes else 0.0
    max_native_heap = max(stats.native_heap_sizes) if stats.native_heap_sizes else 0.0
    total_skipped_frames = sum(stats.skipped_frames)
    print("Average wait time = {0:.1f}".format(avg_wait_time))
    print("Successful requests = {}".format(successes))
    print("Failures = {}".format(failures))
    print("Cancellations = {}".format(cancellations))
    print("Max java heap = {0:.1f}".format(max_java_heap))
    print("Max native heap = {0:.1f}".format(max_native_heap))
    print("Total skipped frames = {}".format(total_skipped_frames))
def get_test_name(option_name, source_name):
    """Map a scenario/source pair to its instrumentation test method name."""
    # 'fresco-okhttp' + 'network' -> 'testFrescoOkhttpNetwork'
    scenario_part = ''.join(word.capitalize() for word in option_name.split('-'))
    return TEST_PATTERN.format(scenario_part, source_name.capitalize())
def valid_scenario(scenario_name, source_name):
    """Volley-based loaders cannot load local images; filter those pairs out."""
    if source_name != 'local':
        return True
    return scenario_name != 'volley' and scenario_name != 'drawee-volley'
def list_producers():
    """Return the class names of all *Producer.java files in the imagepipeline."""
    here = os.path.dirname(os.path.abspath(__file__))
    pattern = '%s/imagepipeline/src/main/java/com/facebook/imagepipeline/producers/*Producer.java' % here
    names = []
    for path in glob.glob(pattern):
        # '/a/b/FooProducer.java' -> 'FooProducer'
        names.append(path.split('.')[0].split('/')[-1])
    return names
def print_fresco_perf_line(margin, name, times):
    """Print one right-aligned row of request count and average time.

    Silently prints nothing when *times* is empty.
    """
    count = len(times)
    if count == 0:
        return
    average = float(sum(times)) / count
    print("%s: %d requests, avg %d" % (name.rjust(margin), count, average))
def print_fresco_perf(logs):
    """Summarize Fresco's per-producer request timing from logcat lines."""
    producers = list_producers()
    margin = max(len(name) for name in producers)
    totals = get_int_from_logs(
        ".*RequestLoggingListener.*onRequestSuccess.*elapsedTime:\s(\d+).*", logs)
    print_fresco_perf_line(margin, 'Total', totals)
    for producer in producers:
        queue_times = get_int_from_logs(
            ".*onProducerFinishWithSuccess.*producer:\s%s.*queueTime=(\d+).*" % producer,
            logs)
        print_fresco_perf_line(margin, '%s queue' % producer, queue_times)
        run_times = get_int_from_logs(
            ".*onProducerFinishWithSuccess.*producer:\s%s.*elapsedTime:\s(\d+).*" % producer,
            logs)
        print_fresco_perf_line(margin, producer, run_times)
def main():
    """Run every selected scenario/source pair and report its stats."""
    args = parse_args()
    # Absent command-line subsets mean "run everything".
    scenarios = args.scenarios if args.scenarios else TESTS
    sources = args.sources if args.sources else TEST_SOURCES
    install_apks(args.cpu)
    for scenario_name in scenarios:
        for source_name in sources:
            if not valid_scenario(scenario_name, source_name):
                continue
            print()
            print('Testing {} {}'.format(scenario_name, source_name))
            print(get_test_name(scenario_name, source_name))
            test = ComparisonTest(get_test_name(scenario_name, source_name))
            test()
            print_stats(get_stats(test.logcat))
            # Fresco scenarios additionally log per-producer timings.
            if scenario_name[:6] == 'fresco':
                print()
                print_fresco_perf(test.logcat)


if __name__ == "__main__":
    main()
| bsd-3-clause |
naousse/odoo | addons/account_analytic_plans/report/crossovered_analytic.py | 321 | 8149 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class crossovered_analytic(report_sxw.rml_parse):
    # RML report parser comparing analytic accounts against one reference
    # analytic account over a date range and journal selection.
    def __init__(self, cr, uid, name, context):
        super(crossovered_analytic, self).__init__(cr, uid, name, context = context)
        self.localcontext.update( {
            'time': time,
            'lines': self._lines,
            'ref_lines': self._ref_lines,
            'find_children': self.find_children,
        })
        # Total amount of the reference account; _lines() divides by this
        # to compute each account's percentage share.
        self.base_amount = 0.00
    def find_children(self, ref_ids):
        # Returns ref_ids[0] followed by the ids of its direct children and,
        # recursively, all of their descendants (via set_account).
        if not ref_ids: return []
        to_return_ids = []
        final_list = []
        parent_list = []
        set_list = []
        analytic_obj = self.pool.get('account.analytic.account')
        for id in ref_ids:
            # to avoid duplicate entries
            if id not in to_return_ids:
                to_return_ids.append(analytic_obj.search(self.cr,self.uid,[('parent_id','child_of',[id])]))
        # NOTE(review): only to_return_ids[0] (the first search result) is
        # browsed below, so ids after ref_ids[0] appear to be ignored — confirm.
        data_accnt = analytic_obj.browse(self.cr,self.uid,to_return_ids[0])
        for data in data_accnt:
            if data.parent_id and data.parent_id.id == ref_ids[0]:
                parent_list.append(data.id)
        final_list.append(ref_ids[0])
        set_list = self.set_account(parent_list)
        final_list.extend(set_list)
        return final_list #to_return_ids[0]
    def set_account(self, cats):
        # Recursively flattens the given account ids plus all child ids.
        lst = []
        category = self.pool.get('account.analytic.account').read(self.cr, self.uid, cats)
        for cat in category:
            lst.append(cat['id'])
            if cat['child_ids']:
                lst.extend(self.set_account(cat['child_ids']))
        return lst
    def _ref_lines(self, form):
        # Builds the single header row for the reference account: its name,
        # code and total qty/amount over the selected period and journals.
        # Side effects: fills self.dict_acc_ref, self.final_list and
        # self.base_amount for the subsequent _lines() call.
        result = []
        res = {}
        acc_pool = self.pool.get('account.analytic.account')
        line_pool = self.pool.get('account.analytic.line')
        self.dict_acc_ref = {}
        if form['journal_ids']:
            journal = " in (" + ','.join(map(lambda x: str(x), form['journal_ids'])) + ")"
        else:
            journal = 'is not null'
        # NOTE(review): dates/ids are concatenated into the SQL string rather
        # than passed as query parameters; values come from the report wizard.
        query_general = "SELECT id FROM account_analytic_line WHERE (journal_id " + journal +") AND date>='"+ str(form['date1']) +"'"" AND date<='" + str(form['date2']) + "'"
        self.cr.execute(query_general)
        l_ids = self.cr.fetchall()
        line_ids = [x[0] for x in l_ids]
        obj_line = line_pool.browse(self.cr,self.uid,line_ids)
        #this structure will be usefull for easily knowing the account_analytic_line that are related to the reference account. At this purpose, we save the move_id of analytic lines.
        self.dict_acc_ref[form['ref']] = []
        children_list = acc_pool.search(self.cr, self.uid, [('parent_id', 'child_of', [form['ref']])])
        for obj in obj_line:
            if obj.account_id.id in children_list:
                if obj.move_id and obj.move_id.id not in self.dict_acc_ref[form['ref']]:
                    self.dict_acc_ref[form['ref']].append(obj.move_id.id)
        res['ref_name'] = acc_pool.name_get(self.cr, self.uid, [form['ref']])[0][1]
        res['ref_code'] = acc_pool.browse(self.cr, self.uid, form['ref']).code
        self.final_list = children_list
        selected_ids = line_pool.search(self.cr, self.uid, [('account_id', 'in' ,self.final_list)])
        res['ref_qty'] = 0.0
        res['ref_amt'] = 0.0
        self.base_amount = 0.0
        if selected_ids:
            query = "SELECT SUM(aal.amount) AS amt, SUM(aal.unit_amount) AS qty FROM account_analytic_line AS aal, account_analytic_account AS aaa \
                    WHERE aal.account_id = aaa.id AND aal.id IN ("+','.join(map(str,selected_ids))+") AND (aal.journal_id " + journal +") AND aal.date>='"+ str(form['date1']) +"'"" AND aal.date<='" + str(form['date2']) + "'"
            self.cr.execute(query)
            info=self.cr.dictfetchall()
            res['ref_qty'] = info[0]['qty']
            res['ref_amt'] = info[0]['amt']
            self.base_amount = info[0]['amt']
        result.append(res)
        return result
    def _lines(self, form, ids=None):
        # One row per analytic account under each requested id, restricted to
        # analytic lines whose move also touched the reference account
        # (via self.dict_acc_ref built in _ref_lines).
        if ids is None:
            ids = {}
        if not ids:
            ids = self.ids
        if form['journal_ids']:
            journal=" in (" + ','.join(map(lambda x: str(x), form['journal_ids'])) + ")"
        else:
            journal= 'is not null'
        acc_pool = self.pool.get('account.analytic.account')
        line_pool = self.pool.get('account.analytic.line')
        acc_id = []
        final = []
        self.list_ids = []
        self.final_list = self.find_children(ids)
        for acc_id in self.final_list:
            selected_ids = line_pool.search(self.cr, self.uid, [('account_id','=',acc_id), ('move_id', 'in', self.dict_acc_ref[form['ref']])])
            if selected_ids:
                query="SELECT aaa.code AS code, SUM(aal.amount) AS amt, SUM(aal.unit_amount) AS qty, aaa.name AS acc_name, aal.account_id AS id FROM account_analytic_line AS aal, account_analytic_account AS aaa \
                    WHERE aal.account_id=aaa.id AND aal.id IN ("+','.join(map(str,selected_ids))+") AND (aal.journal_id " + journal +") AND aal.date>='"+ str(form['date1']) +"'"" AND aal.date<='" + str(form['date2']) + "'"" GROUP BY aal.account_id,aaa.name,aaa.code ORDER BY aal.account_id"
                self.cr.execute(query)
                res = self.cr.dictfetchall()
                if res:
                    for element in res:
                        # Percentage of the reference account's total amount.
                        if self.base_amount <> 0.00:
                            element['perc'] = (element['amt'] / self.base_amount) * 100.00
                        else:
                            element['perc'] = 0.00
                else:
                    # No aggregated rows: emit a zero row unless empty lines
                    # are suppressed by the wizard option.
                    result = {}
                    res = []
                    result['id'] = acc_id
                    data_account = acc_pool.browse(self.cr, self.uid, acc_id)
                    result['acc_name'] = data_account.name
                    result['code'] = data_account.code
                    result['amt'] = result['qty'] = result['perc'] = 0.00
                    if not form['empty_line']:
                        res.append(result)
            else:
                # No matching analytic lines at all: same zero-row handling.
                result = {}
                res = []
                result['id'] = acc_id
                data_account = acc_pool.browse(self.cr, self.uid, acc_id)
                result['acc_name'] = data_account.name
                result['code'] = data_account.code
                result['amt'] = result['qty'] = result['perc'] = 0.00
                if not form['empty_line']:
                    res.append(result)
            for item in res:
                obj_acc = acc_pool.name_get(self.cr,self.uid,[item['id']])
                item['acc_name'] = obj_acc[0][1]
                final.append(item)
        return final
class report_crossoveredanalyticplans(osv.AbstractModel):
    # Report registration: binds the template name to the legacy
    # rml_parse-based parser defined above.
    _name = 'report.account_analytic_plans.report_crossoveredanalyticplans'
    _inherit = 'report.abstract_report'
    _template = 'account_analytic_plans.report_crossoveredanalyticplans'
    _wrapped_report_class = crossovered_analytic
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
brandond/ansible | lib/ansible/modules/network/netvisor/pn_vrouter_interface_ip.py | 9 | 7215 | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_vrouter_interface_ip
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.8"
short_description: CLI command to add/remove vrouter-interface-ip
description:
- This module can be used to add an IP address on interface from a vRouter
or remove an IP address on interface from a vRouter.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: false
type: str
state:
description:
- State the action to perform. Use C(present) to addvrouter-interface-ip
and C(absent) to remove vrouter-interface-ip.
required: true
type: str
choices: ['present', 'absent']
pn_bd:
description:
- interface Bridge Domain.
required: false
type: str
pn_netmask:
description:
- netmask.
required: false
type: str
pn_vnet:
description:
- interface VLAN VNET.
required: false
type: str
pn_ip:
description:
- IP address.
required: false
type: str
pn_nic:
description:
- virtual NIC assigned to interface.
required: false
type: str
pn_vrouter_name:
description:
- name of service config.
required: false
type: str
"""
EXAMPLES = """
- name: Add vrouter interface to nic
pn_vrouter_interface_ip:
state: "present"
pn_cliswitch: "sw01"
pn_vrouter_name: "foo-vrouter"
pn_ip: "2620:0:1651:1::30"
pn_netmask: "127"
pn_nic: "eth0.4092"
- name: Remove vrouter interface to nic
pn_vrouter_interface_ip:
state: "absent"
pn_cliswitch: "sw01"
pn_vrouter_name: "foo-vrouter"
pn_ip: "2620:0:1651:1::30"
pn_nic: "eth0.4092"
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the vrouter-interface-ip command.
returned: always
type: list
stderr:
description: set of error responses from the vrouter-interface-ip command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
def check_cli(module, cli):
"""
This method checks if vRouter exists on the target node.
This method also checks for idempotency using the vrouter-interface-show
command.
If the given vRouter exists, return VROUTER_EXISTS as True else False.
If an interface with the given ip exists on the given vRouter,
return INTERFACE_EXISTS as True else False. This is required for
vrouter-interface-add.
If nic_str exists on the given vRouter, return NIC_EXISTS as True else
False. This is required for vrouter-interface-remove.
:param module: The Ansible module to fetch input parameters
:param cli: The CLI string
:return Booleans: VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS
"""
vrouter_name = module.params['pn_vrouter_name']
interface_ip = module.params['pn_ip']
nic_str = module.params['pn_nic']
# Check for vRouter
check_vrouter = cli + ' vrouter-show format name no-show-headers '
out = module.run_command(check_vrouter, use_unsafe_shell=True)[1]
out = out.split()
VROUTER_EXISTS = True if vrouter_name in out else False
if interface_ip:
# Check for interface and VRRP and fetch nic for VRRP
show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name
show += 'ip2 %s format ip2,nic no-show-headers' % interface_ip
out = module.run_command(show, use_unsafe_shell=True)[1]
if out and interface_ip in out.split(' ')[1]:
INTERFACE_EXISTS = True
else:
INTERFACE_EXISTS = False
if nic_str:
# Check for nic
show = cli + ' vrouter-interface-show vrouter-name %s ' % vrouter_name
show += ' format nic no-show-headers'
out = module.run_command(show, use_unsafe_shell=True)[1]
NIC_EXISTS = True if nic_str in out else False
return VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS
def main():
    """Module entry point: parse arguments, validate state, run the CLI."""
    # Map the Ansible 'state' value onto the corresponding nvOS CLI command.
    state_map = dict(
        present='vrouter-interface-ip-add',
        absent='vrouter-interface-ip-remove'
    )

    module = AnsibleModule(
        argument_spec=dict(
            pn_cliswitch=dict(required=False, type='str'),
            state=dict(required=True, type='str',
                       choices=state_map.keys()),
            pn_bd=dict(required=False, type='str'),
            pn_netmask=dict(required=False, type='str'),
            pn_vnet=dict(required=False, type='str'),
            pn_ip=dict(required=False, type='str'),
            pn_nic=dict(required=False, type='str'),
            pn_vrouter_name=dict(required=False, type='str'),
        ),
        # Parameters that become mandatory depending on the requested state.
        required_if=(
            ["state", "present", ["pn_vrouter_name", "pn_nic", "pn_ip", "pn_netmask"]],
            ["state", "absent", ["pn_vrouter_name", "pn_nic", "pn_ip"]]
        ),
    )

    # Accessing the arguments
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    bd = module.params['pn_bd']
    netmask = module.params['pn_netmask']
    vnet = module.params['pn_vnet']
    ip = module.params['pn_ip']
    nic = module.params['pn_nic']
    vrouter_name = module.params['pn_vrouter_name']

    command = state_map[state]

    # Building the CLI command string
    cli = pn_cli(module, cliswitch)

    VROUTER_EXISTS, INTERFACE_EXISTS, NIC_EXISTS = check_cli(module, cli)

    # Both add and remove require the vRouter and the nic to exist.
    if VROUTER_EXISTS is False:
        module.fail_json(
            failed=True,
            msg='vRouter %s does not exist' % vrouter_name
        )
    if NIC_EXISTS is False:
        module.fail_json(
            failed=True,
            msg='vRouter with nic %s does not exist' % nic
        )

    cli += ' %s vrouter-name %s ' % (command, vrouter_name)

    if command == 'vrouter-interface-ip-add':
        # Idempotency: skip when the interface ip is already configured.
        if INTERFACE_EXISTS is True:
            module.exit_json(
                skipped=True,
                msg='vRouter with interface %s exist' % ip
            )
        cli += ' nic %s ip %s ' % (nic, ip)
        if bd:
            cli += ' bd ' + bd
        if netmask:
            cli += ' netmask ' + netmask
        if vnet:
            cli += ' vnet ' + vnet

    if command == 'vrouter-interface-ip-remove':
        # Idempotency: skip when the interface ip is already gone.
        if INTERFACE_EXISTS is False:
            module.exit_json(
                skipped=True,
                msg='vRouter with interface %s does not exist' % ip
            )
        if nic:
            cli += ' nic %s ' % nic
        if ip:
            # The remove command takes the bare address, without the prefix.
            cli += ' ip %s ' % ip.split('/')[0]

    run_cli(module, cli, state_map)
# Invoke the module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
landier/imdb-crawler | crawler/libs/sqlalchemy/orm/deprecated_interfaces.py | 1 | 21785 | # orm/deprecated_interfaces.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy import event, util
from .interfaces import EXT_CONTINUE
class MapperExtension(object):
    """Base implementation for :class:`.Mapper` event hooks.

    .. note::

       :class:`.MapperExtension` is deprecated.   Please
       refer to :func:`.event.listen` as well as
       :class:`.MapperEvents`.

    New extension classes subclass :class:`.MapperExtension` and are specified
    using the ``extension`` mapper() argument, which is a single
    :class:`.MapperExtension` or a list of such::

        from sqlalchemy.orm.interfaces import MapperExtension

        class MyExtension(MapperExtension):
            def before_insert(self, mapper, connection, instance):
                print "instance %s before insert !" % instance

        m = mapper(User, users_table, extension=MyExtension())

    A single mapper can maintain a chain of ``MapperExtension``
    objects.  When a particular mapping event occurs, the
    corresponding method on each ``MapperExtension`` is invoked
    serially, and each method has the ability to halt the chain
    from proceeding further::

        m = mapper(User, users_table, extension=[ext1, ext2, ext3])

    Each ``MapperExtension`` method returns the symbol
    EXT_CONTINUE by default.   This symbol generally means "move
    to the next ``MapperExtension`` for processing".  For methods
    that return objects like translated rows or new object
    instances, EXT_CONTINUE means the result of the method
    should be ignored.   In some cases it's required for a
    default mapper activity to be performed, such as adding a
    new instance to a result list.

    The symbol EXT_STOP has significance within a chain
    of ``MapperExtension`` objects that the chain will be stopped
    when this symbol is returned.  Like EXT_CONTINUE, it also
    has additional significance in some cases that a default
    mapper activity will not be performed.

    """

    @classmethod
    def _adapt_instrument_class(cls, self, listener):
        # Bridge only the class-instrumentation hook onto the event system.
        cls._adapt_listener_methods(self, listener, ('instrument_class',))

    @classmethod
    def _adapt_listener(cls, self, listener):
        # Bridge every legacy hook method of *listener* onto the modern
        # event system of mapper *self*.
        cls._adapt_listener_methods(
            self, listener,
            (
                'init_instance',
                'init_failed',
                'translate_row',
                'create_instance',
                'append_result',
                'populate_instance',
                'reconstruct_instance',
                'before_insert',
                'after_insert',
                'before_update',
                'after_update',
                'before_delete',
                'after_delete'
            ))

    @classmethod
    def _adapt_listener_methods(cls, self, listener, methods):
        for meth in methods:
            me_meth = getattr(MapperExtension, meth)
            ls_meth = getattr(listener, meth)
            # Only adapt methods the listener actually overrides; the
            # base-class no-op stubs would add useless event overhead.
            if not util.methods_equivalent(me_meth, ls_meth):
                # The 'go' wrappers below create a fresh scope so each
                # adapted event handler binds its own ls_meth (avoids the
                # late-binding-closure pitfall inside this loop).
                if meth == 'reconstruct_instance':
                    def go(ls_meth):
                        def reconstruct(instance, ctx):
                            ls_meth(self, instance)
                        return reconstruct
                    event.listen(self.class_manager, 'load',
                                 go(ls_meth), raw=False, propagate=True)
                elif meth == 'init_instance':
                    def go(ls_meth):
                        def init_instance(instance, args, kwargs):
                            ls_meth(self, self.class_,
                                    self.class_manager.original_init,
                                    instance, args, kwargs)
                        return init_instance
                    event.listen(self.class_manager, 'init',
                                 go(ls_meth), raw=False, propagate=True)
                elif meth == 'init_failed':
                    def go(ls_meth):
                        def init_failed(instance, args, kwargs):
                            # warn rather than propagate: the original
                            # constructor already raised at this point.
                            util.warn_exception(ls_meth, self, self.class_,
                                                self.class_manager.original_init,
                                                instance, args, kwargs)
                        return init_failed
                    event.listen(self.class_manager, 'init_failure',
                                 go(ls_meth), raw=False, propagate=True)
                else:
                    event.listen(self, "%s" % meth, ls_meth,
                                 raw=False, retval=True, propagate=True)

    def instrument_class(self, mapper, class_):
        """Receive a class when the mapper is first constructed, and has
        applied instrumentation to the mapped class.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
        """Receive an instance when it's constructor is called.

        This method is only called during a userland construction of
        an object.  It is not called when an object is loaded from the
        database.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def init_failed(self, mapper, class_, oldinit, instance, args, kwargs):
        """Receive an instance when it's constructor has been called,
        and raised an exception.

        This method is only called during a userland construction of
        an object.  It is not called when an object is loaded from the
        database.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def translate_row(self, mapper, context, row):
        """Perform pre-processing on the given result row and return a
        new row instance.

        This is called when the mapper first receives a row, before
        the object identity or the instance itself has been derived
        from that row.   The given row may or may not be a
        ``RowProxy`` object - it will always be a dictionary-like
        object which contains mapped columns as keys.  The
        returned object should also be a dictionary-like object
        which recognizes mapped columns as keys.

        If the ultimate return value is EXT_CONTINUE, the row
        is not translated.

        """
        return EXT_CONTINUE

    def create_instance(self, mapper, selectcontext, row, class_):
        """Receive a row when a new object instance is about to be
        created from that row.

        The method can choose to create the instance itself, or it can return
        EXT_CONTINUE to indicate normal object creation should take place.

        mapper
          The mapper doing the operation

        selectcontext
          The QueryContext generated from the Query.

        row
          The result row from the database

        class\_
          The class we are mapping.

        return value
          A new object instance, or EXT_CONTINUE

        """
        return EXT_CONTINUE

    def append_result(self, mapper, selectcontext, row, instance,
                      result, **flags):
        """Receive an object instance before that instance is appended
        to a result list.

        If this method returns EXT_CONTINUE, result appending will proceed
        normally.  if this method returns any other value or None,
        result appending will not proceed for this instance, giving
        this extension an opportunity to do the appending itself, if
        desired.

        mapper
          The mapper doing the operation.

        selectcontext
          The QueryContext generated from the Query.

        row
          The result row from the database.

        instance
          The object instance to be appended to the result.

        result
          List to which results are being appended.

        \**flags
          extra information about the row, same as criterion in
          ``create_row_processor()`` method of
          :class:`~sqlalchemy.orm.interfaces.MapperProperty`

        """
        return EXT_CONTINUE

    def populate_instance(self, mapper, selectcontext, row,
                          instance, **flags):
        """Receive an instance before that instance has
        its attributes populated.

        This usually corresponds to a newly loaded instance but may
        also correspond to an already-loaded instance which has
        unloaded attributes to be populated.  The method may be called
        many times for a single instance, as multiple result rows are
        used to populate eagerly loaded collections.

        If this method returns EXT_CONTINUE, instance population will
        proceed normally.  If any other value or None is returned,
        instance population will not proceed, giving this extension an
        opportunity to populate the instance itself, if desired.

        .. deprecated:: 0.5
            Most usages of this hook are obsolete.  For a
            generic "object has been newly created from a row" hook, use
            ``reconstruct_instance()``, or the ``@orm.reconstructor``
            decorator.

        """
        return EXT_CONTINUE

    def reconstruct_instance(self, mapper, instance):
        """Receive an object instance after it has been created via
        ``__new__``, and after initial attribute population has
        occurred.

        This typically occurs when the instance is created based on
        incoming result rows, and is only called once for that
        instance's lifetime.

        Note that during a result-row load, this method is called upon
        the first row received for this instance.  Note that some
        attributes and collections may or may not be loaded or even
        initialized, depending on what's present in the result rows.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def before_insert(self, mapper, connection, instance):
        """Receive an object instance before that instance is inserted
        into its table.

        This is a good place to set up primary key values and such
        that aren't handled otherwise.

        Column-based attributes can be modified within this method
        which will result in the new value being inserted.  However
        *no* changes to the overall flush plan can be made, and
        manipulation of the ``Session`` will not have the desired effect.
        To manipulate the ``Session`` within an extension, use
        ``SessionExtension``.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def after_insert(self, mapper, connection, instance):
        """Receive an object instance after that instance is inserted.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def before_update(self, mapper, connection, instance):
        """Receive an object instance before that instance is updated.

        Note that this method is called for all instances that are marked as
        "dirty", even those which have no net changes to their column-based
        attributes.  An object is marked as dirty when any of its column-based
        attributes have a "set attribute" operation called or when any of its
        collections are modified.  If, at update time, no column-based
        attributes have any net changes, no UPDATE statement will be issued.
        This means that an instance being sent to before_update is *not* a
        guarantee that an UPDATE statement will be issued (although you can
        affect the outcome here).

        To detect if the column-based attributes on the object have net
        changes, and will therefore generate an UPDATE statement, use
        ``object_session(instance).is_modified(instance,
        include_collections=False)``.

        Column-based attributes can be modified within this method
        which will result in the new value being updated.  However
        *no* changes to the overall flush plan can be made, and
        manipulation of the ``Session`` will not have the desired effect.
        To manipulate the ``Session`` within an extension, use
        ``SessionExtension``.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def after_update(self, mapper, connection, instance):
        """Receive an object instance after that instance is updated.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def before_delete(self, mapper, connection, instance):
        """Receive an object instance before that instance is deleted.

        Note that *no* changes to the overall flush plan can be made
        here; and manipulation of the ``Session`` will not have the
        desired effect.  To manipulate the ``Session`` within an
        extension, use ``SessionExtension``.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE

    def after_delete(self, mapper, connection, instance):
        """Receive an object instance after that instance is deleted.

        The return value is only significant within the ``MapperExtension``
        chain; the parent mapper's behavior isn't modified by this method.

        """
        return EXT_CONTINUE
class SessionExtension(object):
    """Base implementation for :class:`.Session` event hooks.

    .. note::

       :class:`.SessionExtension` is deprecated.   Please
       refer to :func:`.event.listen` as well as
       :class:`.SessionEvents`.

    Subclasses may be installed into a :class:`.Session` (or
    :func:`.sessionmaker`) using the ``extension`` keyword
    argument::

        from sqlalchemy.orm.interfaces import SessionExtension

        class MySessionExtension(SessionExtension):
            def before_commit(self, session):
                print "before commit!"

        Session = sessionmaker(extension=MySessionExtension())

    The same :class:`.SessionExtension` instance can be used
    with any number of sessions.

    """

    @classmethod
    def _adapt_listener(cls, self, listener):
        # Register each legacy hook that the listener actually overrides
        # as a modern session event; untouched base-class stubs are skipped.
        for meth in [
            'before_commit',
            'after_commit',
            'after_rollback',
            'before_flush',
            'after_flush',
            'after_flush_postexec',
            'after_begin',
            'after_attach',
            'after_bulk_update',
            'after_bulk_delete',
        ]:
            me_meth = getattr(SessionExtension, meth)
            ls_meth = getattr(listener, meth)
            if not util.methods_equivalent(me_meth, ls_meth):
                event.listen(self, meth, getattr(listener, meth))

    def before_commit(self, session):
        """Execute right before commit is called.

        Note that this may not be per-flush if a longer running
        transaction is ongoing."""

    def after_commit(self, session):
        """Execute after a commit has occurred.

        Note that this may not be per-flush if a longer running
        transaction is ongoing."""

    def after_rollback(self, session):
        """Execute after a rollback has occurred.

        Note that this may not be per-flush if a longer running
        transaction is ongoing."""

    def before_flush(self, session, flush_context, instances):
        """Execute before flush process has started.

        `instances` is an optional list of objects which were passed to
        the ``flush()`` method. """

    def after_flush(self, session, flush_context):
        """Execute after flush has completed, but before commit has been
        called.

        Note that the session's state is still in pre-flush, i.e. 'new',
        'dirty', and 'deleted' lists still show pre-flush state as well
        as the history settings on instance attributes."""

    def after_flush_postexec(self, session, flush_context):
        """Execute after flush has completed, and after the post-exec
        state occurs.

        This will be when the 'new', 'dirty', and 'deleted' lists are in
        their final state.  An actual commit() may or may not have
        occurred, depending on whether or not the flush started its own
        transaction or participated in a larger transaction. """

    def after_begin(self, session, transaction, connection):
        """Execute after a transaction is begun on a connection

        `transaction` is the SessionTransaction. This method is called
        after an engine level transaction is begun on a connection. """

    def after_attach(self, session, instance):
        """Execute after an instance is attached to a session.

        This is called after an add, delete or merge. """

    def after_bulk_update(self, session, query, query_context, result):
        """Execute after a bulk update operation to the session.

        This is called after a session.query(...).update()

        `query` is the query object that this update operation was
        called on. `query_context` was the query context object.
        `result` is the result object returned from the bulk operation.
        """

    def after_bulk_delete(self, session, query, query_context, result):
        """Execute after a bulk delete operation to the session.

        This is called after a session.query(...).delete()

        `query` is the query object that this delete operation was
        called on. `query_context` was the query context object.
        `result` is the result object returned from the bulk operation.
        """
class AttributeExtension(object):
    """Base implementation for :class:`.AttributeImpl` event hooks, events
    that fire upon attribute mutations in user code.

    .. note::

       :class:`.AttributeExtension` is deprecated.   Please
       refer to :func:`.event.listen` as well as
       :class:`.AttributeEvents`.

    :class:`.AttributeExtension` is used to listen for set,
    remove, and append events on individual mapped attributes.
    It is established on an individual mapped attribute using
    the `extension` argument, available on
    :func:`.column_property`, :func:`.relationship`, and
    others::

        from sqlalchemy.orm.interfaces import AttributeExtension
        from sqlalchemy.orm import mapper, relationship, column_property

        class MyAttrExt(AttributeExtension):
            def append(self, state, value, initiator):
                print "append event !"
                return value

            def set(self, state, value, oldvalue, initiator):
                print "set event !"
                return value

        mapper(SomeClass, sometable, properties={
            'foo':column_property(sometable.c.foo, extension=MyAttrExt()),
            'bar':relationship(Bar, extension=MyAttrExt())
        })

    Note that the :meth:`~.AttributeExtension.append` and
    :meth:`~.AttributeExtension.set` hooks need to return the
    ``value`` parameter.  The returned value is used as the
    effective value, which allows the extension to change what is
    ultimately persisted.

    AttributeExtension is assembled within the descriptors associated
    with a mapped class.

    """

    active_history = True
    """indicates that the set() method would like to receive the 'old' value,
    even if it means firing lazy callables.

    Note that ``active_history`` can also be set directly via
    :func:`.column_property` and :func:`.relationship`.

    """

    @classmethod
    def _adapt_listener(cls, self, listener):
        # All three legacy hooks are registered with identical options,
        # so a single loop covers them (in the same order as before).
        for hook in ('append', 'remove', 'set'):
            event.listen(self, hook, getattr(listener, hook),
                         active_history=listener.active_history,
                         raw=True, retval=True)

    def append(self, state, value, initiator):
        """Receive a collection append event.

        The returned value will be used as the actual value to be
        appended.

        """
        return value

    def remove(self, state, value, initiator):
        """Receive a remove event.

        No return value is defined.

        """
        pass

    def set(self, state, value, oldvalue, initiator):
        """Receive a set event.

        The returned value will be used as the actual value to be
        set.

        """
        return value
| gpl-3.0 |
bgris/ODL_bgris | lib/python3.5/site-packages/networkx/algorithms/connectivity/tests/test_kcomponents.py | 50 | 8395 | # Test for Moody and White k-components algorithm
from nose.tools import assert_equal, assert_true, raises
import networkx as nx
from networkx.algorithms.connectivity.kcomponents import (
build_k_number_dict,
_consolidate,
)
##
## A nice synthetic graph
##
def torrents_and_ferraro_graph():
    """Build the synthetic example graph with known k-component structure.

    Graph from http://arxiv.org/pdf/1503.04476v1 p.26: a 5x5 grid with
    Petersen graphs and K5s attached at the corners.
    """
    G = nx.convert_node_labels_to_integers(
        nx.grid_graph([5, 5]),
        label_attribute='labels',
    )
    rlabels = nx.get_node_attributes(G, 'labels')
    # Invert the mapping: grid coordinate -> integer node label.
    labels = {v: k for k, v in rlabels.items()}

    for nodes in [(labels[(0,4)], labels[(1,4)]),
                  (labels[(3,4)], labels[(4,4)])]:
        # new_node is the label of the first node of the next union;
        # the +k offsets below index into the freshly attached subgraphs.
        new_node = G.order() + 1
        # Petersen graph is triconnected
        P = nx.petersen_graph()
        G = nx.disjoint_union(G, P)
        # Add two edges between the grid and P
        G.add_edge(new_node+1, nodes[0])
        G.add_edge(new_node, nodes[1])
        # K5 is 4-connected
        K = nx.complete_graph(5)
        G = nx.disjoint_union(G, K)
        # Add three edges between P and K5
        G.add_edge(new_node+2, new_node+11)
        G.add_edge(new_node+3, new_node+12)
        G.add_edge(new_node+4, new_node+13)
        # Add another K5 sharing a node
        G = nx.disjoint_union(G, K)
        nbrs = G[new_node+10]
        G.remove_node(new_node+10)
        for nbr in nbrs:
            G.add_edge(new_node+17, nbr)
        # This edge makes the graph biconnected; it's
        # needed because K5s share only one node.
        G.add_edge(new_node+16, new_node+8)

    for nodes in [(labels[(0, 0)], labels[(1, 0)]),
                  (labels[(3, 0)], labels[(4, 0)])]:
        new_node = G.order() + 1
        # Petersen graph is triconnected
        P = nx.petersen_graph()
        G = nx.disjoint_union(G, P)
        # Add two edges between the grid and P
        G.add_edge(new_node+1, nodes[0])
        G.add_edge(new_node, nodes[1])
        # K5 is 4-connected
        K = nx.complete_graph(5)
        G = nx.disjoint_union(G, K)
        # Add three edges between P and K5
        G.add_edge(new_node+2, new_node+11)
        G.add_edge(new_node+3, new_node+12)
        G.add_edge(new_node+4, new_node+13)
        # Add another K5 sharing two nodes
        G = nx.disjoint_union(G,K)
        nbrs = G[new_node+10]
        G.remove_node(new_node+10)
        for nbr in nbrs:
            G.add_edge(new_node+17, nbr)
        nbrs2 = G[new_node+9]
        G.remove_node(new_node+9)
        for nbr in nbrs2:
            G.add_edge(new_node+18, nbr)

    G.name = 'Example graph for connectivity'
    return G
@raises(nx.NetworkXNotImplemented)
def test_directed():
    """k_components must reject directed graphs."""
    G = nx.gnp_random_graph(10, 0.2, directed=True)
    nx.k_components(G)
# Helper function
def _check_connectivity(G):
    """Assert every reported k-component with k >= 3 is at least k-connected."""
    result = nx.k_components(G)
    for k, components in result.items():
        # Only levels k >= 3 are verified here.
        if k < 3:
            continue
        for component in components:
            C = G.subgraph(component)
            assert_true(nx.node_connectivity(C) >= k)
def test_torrents_and_ferraro_graph():
    """Check k-components on the synthetic Torrents & Ferraro graph."""
    G = torrents_and_ferraro_graph()
    _check_connectivity(G)
def test_random_gnp():
    """Check k-components on a random G(n, p) graph."""
    G = nx.gnp_random_graph(50, 0.2)
    _check_connectivity(G)
def test_shell():
    """Check k-components on a random shell graph."""
    constructor=[(20, 80, 0.8), (80, 180, 0.6)]
    G = nx.random_shell_graph(constructor)
    _check_connectivity(G)
def test_configuration():
    """Check k-components on a powerlaw configuration-model graph."""
    deg_seq = nx.utils.create_degree_sequence(100, nx.utils.powerlaw_sequence)
    G = nx.Graph(nx.configuration_model(deg_seq))
    # The configuration model may create self-loops; drop them first.
    G.remove_edges_from(G.selfloop_edges())
    _check_connectivity(G)
def test_karate():
    """Check k-components on Zachary's karate club graph."""
    G = nx.karate_club_graph()
    _check_connectivity(G)
def test_karate_component_number():
    # Expected k-number per node of the karate club graph, as produced by
    # build_k_number_dict from the k_components result.
    karate_k_num = {
        0: 4, 1: 4, 2: 4, 3: 4, 4: 3, 5: 3, 6: 3, 7: 4, 8: 4, 9: 2,
        10: 3, 11: 1, 12: 2, 13: 4, 14: 2, 15: 2, 16: 2, 17: 2,
        18: 2, 19: 3, 20: 2, 21: 2, 22: 2, 23: 3, 24: 3, 25: 3,
        26: 2, 27: 3, 28: 3, 29: 3, 30: 4, 31: 3, 32: 4, 33: 4
    }
    G = nx.karate_club_graph()
    k_components = nx.k_components(G)
    k_num = build_k_number_dict(k_components)
    assert_equal(karate_k_num, k_num)
def test_torrents_and_ferraro_detail_3_and_4():
    # Known k-components (k = 3 and 4) of the synthetic graph built by
    # torrents_and_ferraro_graph().
    solution = {
        3: [{25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 42},
            {44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 61},
            {63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 79, 80},
            {81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 93, 94, 95, 99, 100},
            {39, 40, 41, 42, 43},
            {58, 59, 60, 61, 62},
            {76, 77, 78, 79, 80},
            {96, 97, 98, 99, 100},
        ],
        4: [{35, 36, 37, 38, 42},
            {39, 40, 41, 42, 43},
            {54, 55, 56, 57, 61},
            {58, 59, 60, 61, 62},
            {73, 74, 75, 79, 80},
            {76, 77, 78, 79, 80},
            {93, 94, 95, 99, 100},
            {96, 97, 98, 99, 100},
        ],
    }
    G = torrents_and_ferraro_graph()
    result = nx.k_components(G)
    for k, components in result.items():
        if k < 3:
            continue
        assert_true(len(components) == len(solution[k]))
        for component in components:
            assert_true(component in solution[k])
def test_davis_southern_women():
    """Check k-components on the Davis Southern Women bipartite graph."""
    G = nx.davis_southern_women_graph()
    _check_connectivity(G)
def test_davis_southern_women_detail_3_and_4():
    # Known k-components (k = 3 and 4) of the Davis Southern Women graph.
    solution = {
        3: [{
            'Nora Fayette',
            'E10',
            'Myra Liddel',
            'E12',
            'E14',
            'Frances Anderson',
            'Evelyn Jefferson',
            'Ruth DeSand',
            'Helen Lloyd',
            'Eleanor Nye',
            'E9',
            'E8',
            'E5',
            'E4',
            'E7',
            'E6',
            'E1',
            'Verne Sanderson',
            'E3',
            'E2',
            'Theresa Anderson',
            'Pearl Oglethorpe',
            'Katherina Rogers',
            'Brenda Rogers',
            'E13',
            'Charlotte McDowd',
            'Sylvia Avondale',
            'Laura Mandeville',
            },
        ],
        4: [{
            'Nora Fayette',
            'E10',
            'Verne Sanderson',
            'E12',
            'Frances Anderson',
            'Evelyn Jefferson',
            'Ruth DeSand',
            'Helen Lloyd',
            'Eleanor Nye',
            'E9',
            'E8',
            'E5',
            'E4',
            'E7',
            'E6',
            'Myra Liddel',
            'E3',
            'Theresa Anderson',
            'Katherina Rogers',
            'Brenda Rogers',
            'Charlotte McDowd',
            'Sylvia Avondale',
            'Laura Mandeville',
            },
        ],
    }
    G = nx.davis_southern_women_graph()
    result = nx.k_components(G)
    for k, components in result.items():
        if k < 3:
            continue
        assert_true(len(components) == len(solution[k]))
        for component in components:
            assert_true(component in solution[k])
def test_set_consolidation_rosettacode():
    # Tests from http://rosettacode.org/wiki/Set_consolidation
    def list_of_sets_equal(result, solution):
        # Compare as sets of frozensets so element/list ordering is irrelevant.
        assert_equal(
            {frozenset(s) for s in result},
            {frozenset(s) for s in solution}
        )
    question = [{'A', 'B'}, {'C', 'D'}]
    solution = [{'A', 'B'}, {'C', 'D'}]
    list_of_sets_equal(_consolidate(question, 1), solution)
    question = [{'A', 'B'}, {'B', 'C'}]
    solution = [{'A', 'B', 'C'}]
    list_of_sets_equal(_consolidate(question, 1), solution)
    question = [{'A', 'B'}, {'C', 'D'}, {'D', 'B'}]
    solution = [{'A', 'C', 'B', 'D'}]
    list_of_sets_equal(_consolidate(question, 1), solution)
    question = [{'H', 'I', 'K'}, {'A', 'B'}, {'C', 'D'}, {'D', 'B'}, {'F', 'G', 'H'}]
    solution = [{'A', 'C', 'B', 'D'}, {'G', 'F', 'I', 'H', 'K'}]
    list_of_sets_equal(_consolidate(question, 1), solution)
    question = [{'A','H'}, {'H','I','K'}, {'A','B'}, {'C','D'}, {'D','B'}, {'F','G','H'}]
    solution = [{'A', 'C', 'B', 'D', 'G', 'F', 'I', 'H', 'K'}]
    list_of_sets_equal(_consolidate(question, 1), solution)
    question = [{'H','I','K'}, {'A','B'}, {'C','D'}, {'D','B'}, {'F','G','H'}, {'A','H'}]
    solution = [{'A', 'C', 'B', 'D', 'G', 'F', 'I', 'H', 'K'}]
    list_of_sets_equal(_consolidate(question, 1), solution)
| gpl-3.0 |
Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/netdisco/discoverables/__init__.py | 1 | 5004 | """Provides helpful stuff for discoverables."""
# pylint: disable=abstract-method
import ipaddress
from urllib.parse import urlparse
from ..const import (
ATTR_NAME, ATTR_MODEL_NAME, ATTR_HOST, ATTR_PORT, ATTR_SSDP_DESCRIPTION,
ATTR_SERIAL, ATTR_MODEL_NUMBER, ATTR_HOSTNAME, ATTR_MAC_ADDRESS,
ATTR_PROPERTIES)
class BaseDiscoverable(object):
    """Base class for discoverable services or device types."""

    def is_discovered(self):
        """Return True when at least one entry has been discovered."""
        entries = self.get_entries()
        return len(entries) > 0

    def get_info(self):
        """Return a list with the important info for each item.

        Each entry is passed through :meth:`info_from_entry`.
        """
        infos = []
        for entry in self.get_entries():
            infos.append(self.info_from_entry(entry))
        return infos

    # pylint: disable=no-self-use
    def info_from_entry(self, entry):
        """Return an object with important info from the entry.

        The default implementation returns the entry unchanged.
        """
        return entry

    # pylint: disable=no-self-use
    def get_entries(self):
        """Return all the discovered entries; subclasses must override."""
        raise NotImplementedError()
class SSDPDiscoverable(BaseDiscoverable):
    """uPnP discoverable base class."""

    def __init__(self, netdis):
        """Initialize SSDPDiscoverable."""
        self.netdis = netdis

    def info_from_entry(self, entry):
        """Get most important info, by default the description location."""
        parsed = urlparse(entry.location)
        info = {
            ATTR_HOST: parsed.hostname,
            ATTR_PORT: parsed.port,
            ATTR_SSDP_DESCRIPTION: entry.location,
        }
        device = entry.description.get('device')
        if device:
            # Copy the interesting device-description fields (None when absent).
            for attr, field in ((ATTR_NAME, 'friendlyName'),
                                (ATTR_MODEL_NAME, 'modelName'),
                                (ATTR_MODEL_NUMBER, 'modelNumber'),
                                (ATTR_SERIAL, 'serialNumber')):
                info[attr] = device.get(field)
        return info

    # Helper functions
    # pylint: disable=invalid-name
    def find_by_st(self, st):
        """Find entries by ST (the device identifier)."""
        return self.netdis.ssdp.find_by_st(st)

    def find_by_device_description(self, values):
        """Find entries based on values from their description."""
        return self.netdis.ssdp.find_by_device_description(values)
class MDNSDiscoverable(BaseDiscoverable):
    """mDNS Discoverable base class."""

    def __init__(self, netdis, typ):
        """Initialize MDNSDiscoverable."""
        self.netdis = netdis
        self.typ = typ
        # Map of service name -> service info object for found services.
        self.services = {}
        netdis.mdns.register_service(self)

    def reset(self):
        """Reset found services."""
        self.services.clear()

    def is_discovered(self):
        """Return True if any device has been discovered."""
        return len(self.get_entries()) > 0

    # pylint: disable=unused-argument
    def remove_service(self, zconf, typ, name):
        """Callback when a service is removed."""
        self.services.pop(name, None)

    def add_service(self, zconf, typ, name):
        """Callback when a service is found."""
        service = None
        tries = 0
        # The service info may not be available immediately; retry up to 3x.
        while service is None and tries < 3:
            service = zconf.get_service_info(typ, name)
            tries += 1
        if service is not None:
            self.services[name] = service

    def get_entries(self):
        """Return all found services."""
        return self.services.values()

    def info_from_entry(self, entry):
        """Return most important info from mDNS entries."""
        properties = {}
        # Keys arrive as bytes; values may be bytes or already-decoded.
        for key, value in entry.properties.items():
            if isinstance(value, bytes):
                value = value.decode('utf-8')
            properties[key.decode('utf-8')] = value
        info = {
            ATTR_HOST: str(ipaddress.ip_address(entry.address)),
            ATTR_PORT: entry.port,
            ATTR_HOSTNAME: entry.server,
            ATTR_PROPERTIES: properties,
        }
        # Surface the MAC address at the top level when advertised.
        if "mac" in properties:
            info[ATTR_MAC_ADDRESS] = properties["mac"]
        return info

    def find_by_device_name(self, name):
        """Find entries based on the beginning of their entry names."""
        return [entry for entry in self.services.values()
                if entry.name.startswith(name)]
class GDMDiscoverable(BaseDiscoverable):
    """GDM discoverable base class."""

    def __init__(self, netdis):
        """Initialize GDMDiscoverable."""
        self.netdis = netdis

    def info_from_entry(self, entry):
        """Get most important info, by default the description location."""
        values = entry.values
        return {
            ATTR_HOST: values['location'],
            ATTR_PORT: values['port'],
        }

    def find_by_content_type(self, value):
        """Find entries based on values from their content_type."""
        return self.netdis.gdm.find_by_content_type(value)

    def find_by_data(self, values):
        """Find entries based on values from any returned field."""
        return self.netdis.gdm.find_by_data(values)
| gpl-2.0 |
nyterage/Galaxy_Tab_3_217s | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that autovivifies: missing keys yield nested autodicts."""
    return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    """Register the delimiter used when joining flag names for a field."""
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    """Map the bit *value* of a flag field to its display name *field_str*."""
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    """Declare a symbolic field; registration happens via its values."""
    # nothing to do, really
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    """Map the symbolic *value* of a field to its display name *field_str*."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Translate a bit-flag field *value* into a delimiter-joined string.

    Uses the names and delimiter registered for this event/field via
    define_flag_field()/define_flag_value().  Unknown bits are ignored.
    """
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        # sorted() works on both Python 2 and 3; the previous
        # `keys = d.keys(); keys.sort()` failed on Python 3, where
        # dict_keys views have no .sort() method.
        for idx in sorted(flag_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                # A registered name for value 0 (e.g. "NONE").
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                # Clear the bit so a later zero test can succeed.
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
# Trace-flag bit values and their display names (0x00 stands for "no flags").
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}

def trace_flag_str(value):
    """Render the trace-flags bitmask as a ' | '-joined list of names."""
    names = []
    remaining = value
    for bit, name in trace_flags.items():
        if not remaining and not bit:
            # a zero mask renders as the dedicated "NONE" entry
            names.append("NONE")
            break
        if bit and (remaining & bit) == bit:
            names.append(name)
            remaining &= ~bit
    return " | ".join(names)
def taskState(state):
    """Map a scheduler task-state code to its short display name."""
    # dict.get() collapses the membership check and lookup into one step.
    return {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }.get(state, "Unknown")
class EventHeaders:
    """Fields common to every perf trace event (cpu, time, pid, comm)."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        # Store under the short attribute names used throughout the scripts.
        self.cpu, self.secs, self.nsecs = common_cpu, common_secs, common_nsecs
        self.pid, self.comm = common_pid, common_comm

    def ts(self):
        """Full event timestamp in nanoseconds."""
        nanos_per_sec = 10 ** 9
        return (self.secs * nanos_per_sec) + self.nsecs

    def ts_format(self):
        """Timestamp rendered as '<seconds>.<microseconds>'."""
        micros = int(self.nsecs / 1000)
        return "%d.%d" % (self.secs, micros)
| gpl-2.0 |
gokulnatha/GT-I9500 | tools/perf/scripts/python/net_dropmonitor.py | 4235 | 1554 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
	"""Load /proc/kallsyms into the global 'kallsyms' symbol table."""
	global kallsyms
	# First pass only counts lines so progress can be reported, then rewind.
	# NOTE(review): the bare except silently gives up on *any* failure
	# (missing file, permissions, ...), leaving the table empty.
	try:
		f = open("/proc/kallsyms", "r")
		linecount = 0
		for line in f:
			linecount = linecount+1
		f.seek(0)
	except:
		return
	j = 0
	# Second pass: parse "<hex address> <type> <name>" per line.
	for line in f:
		loc = int(line.split()[0], 16)
		name = line.split()[2]
		j = j +1
		if ((j % 100) == 0):
			print "\r" + str(j) + "/" + str(linecount),
		kallsyms.append({ 'loc': loc, 'name' : name})
	print "\r" + str(j) + "/" + str(linecount)
	# NOTE(review): sorting a list of dicts relies on Python 2 dict
	# comparison; it appears to order by 'loc' here -- confirm.
	kallsyms.sort()
	return
def get_sym(sloc):
	"""Map a drop location to (symbol name, offset) via the kallsyms table."""
	loc = int(sloc)
	# Linear scan of the sorted table; returns the first symbol whose start
	# address is >= loc.  NOTE(review): that is the symbol at-or-*above* the
	# address, so the offset is relative to the next symbol's start -- confirm
	# this is intended (a <= match would name the containing symbol).
	for i in kallsyms:
		if (i['loc'] >= loc):
			return (i['name'], i['loc']-loc)
	return (None, 0)
def print_drop_table():
	"""Print the accumulated drop counts with resolved symbol names."""
	print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
	for i in drop_log.keys():
		(sym, off) = get_sym(i)
		# fall back to the raw address string when kallsyms has no match
		if sym == None:
			sym = i
		print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
	# perf calls this once before event processing starts
	print "Starting trace (Ctrl-C to dump results)"
def trace_end():
	# perf calls this once at shutdown: resolve symbols, then report.
	print "Gathering kallsyms data"
	get_kallsyms_table()
	print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
		   skbaddr, protocol, location):
	"""Per-event hook: count one dropped skb against its kernel location."""
	slocation = str(location)
	# dict.get() replaces the original try/bare-except increment: the bare
	# except silently swallowed *any* error, not just the missing-key case.
	drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
stoqs/stoqs | stoqs/loaders/CANON/dorado_loadsep2013.py | 3 | 2698 | #!/usr/bin/env python
__author__ = 'Mike McCann,Duane Edgington,Reiko Michisaki'
__copyright__ = '2013'
__license__ = 'GPL v3'
__contact__ = 'duane at mbari.org'
__doc__ = '''
Dorado loader for all CANON activities in September 2013
Mike McCann; Modified by Duane Edgington and Reiko Michisaki
MBARI 02 September 2013
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
import datetime # needed for glider data
import time # for startdate, enddate args
# Django settings must be configured before CANONLoader (imported below)
# touches any models.
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
    os.environ['DJANGO_SETTINGS_MODULE']='config.settings.local'
project_dir = os.path.dirname(__file__)
# the next line makes it possible to find CANON
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../")) # this makes it possible to find CANON, one directory up
from CANON import CANONLoader
# building input data sources object
from socket import gethostname
# Pick the target database by host: the ODSS test host loads a 2011 test
# campaign; every other host loads the September 2013 CANON campaign.
hostname=gethostname()
print(hostname)
if hostname=='odss-test.shore.mbari.org':
    cl = CANONLoader('stoqs_september2011', 'CANON - September 2011')
else:
    cl = CANONLoader('stoqs_september2013', 'CANON - September 2013')
# default location of thredds and dods data:
cl.tdsBase = 'http://odss.mbari.org/thredds/'
cl.dodsBase = cl.tdsBase + 'dodsC/'
#####################################################################
# DORADO
#####################################################################
# special location for dorado data
cl.dorado_base = 'http://dods.mbari.org/opendap/data/auvctd/surveys/2013/netcdf/'
# Survey files to load; the commented-out entries appear to be surveys that
# were already loaded in earlier runs -- uncomment to reload them.
cl.dorado_files = [# 'Dorado389_2013_259_00_259_00_decim.nc', #Sep 16 Loaded
                   # 'Dorado389_2013_261_01_261_01_decim.nc',
                   # 'Dorado389_2013_262_00_262_00_decim.nc', #Sep 19 Dorado389_2013_262_00_262_00
                   # 'Dorado389_2013_262_01_262_01_decim.nc',
                   # 'Dorado389_2013_268_00_268_00_decim.nc',
                   # 'Dorado389_2013_273_00_273_00_decim.nc', #Sep 30
                   # 'Dorado389_2013_274_00_274_00_decim.nc',
                   # 'Dorado389_2013_274_01_274_01_decim.nc',
                   # 'Dorado389_2013_275_00_275_00_decim.nc',
                   # 'Dorado389_2013_275_01_275_01_decim.nc',
                   'Dorado389_2013_276_00_276_00_decim.nc',
                  ]
###################################################################################################################
# Execute the load
cl.process_command_line()
# Stride thins the data (every Nth point): --test loads at stride 1,
# --optimal_stride at 2, otherwise the user-supplied --stride value is used.
if cl.args.test:
    cl.loadDorado(stride=1)
elif cl.args.optimal_stride:
    cl.loadDorado(stride=2)
else:
    cl.loadDorado(stride=cl.args.stride)
| gpl-3.0 |
dakcarto/QGIS | python/plugins/processing/algs/lidar/lastools/shp2las.py | 12 | 3019 | # -*- coding: utf-8 -*-
"""
***************************************************************************
shp2las.py
---------------------
Date : September 2013
Copyright : (C) 2013 by Martin Isenburg
Email : martin near rapidlasso point com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Martin Isenburg'
__date__ = 'September 2013'
__copyright__ = '(C) 2013, Martin Isenburg'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from LAStoolsUtils import LAStoolsUtils
from LAStoolsAlgorithm import LAStoolsAlgorithm
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterFile
class shp2las(LAStoolsAlgorithm):
    """Processing wrapper around the LAStools 'shp2las' command-line tool."""

    INPUT = "INPUT"
    SCALE_FACTOR_XY = "SCALE_FACTOR_XY"
    SCALE_FACTOR_Z = "SCALE_FACTOR_Z"

    def defineCharacteristics(self):
        """Declare the algorithm's name, group, and GUI parameters."""
        self.name, self.i18n_name = self.trAlgorithm('shp2las')
        self.group, self.i18n_group = self.trAlgorithm('LAStools')
        self.addParametersVerboseGUI()
        self.addParameter(ParameterFile(
            shp2las.INPUT, self.tr("Input SHP file")))
        self.addParameter(ParameterNumber(
            shp2las.SCALE_FACTOR_XY,
            self.tr("resolution of x and y coordinate"), 0, None, 0.01))
        self.addParameter(ParameterNumber(
            shp2las.SCALE_FACTOR_Z,
            self.tr("resolution of z coordinate"), 0, None, 0.01))
        self.addParametersPointOutputGUI()
        self.addParametersAdditionalGUI()

    def processAlgorithm(self, progress):
        """Assemble the shp2las command line and run it."""
        tool_path = os.path.join(LAStoolsUtils.LAStoolsPath(), "bin", "shp2las")
        commands = [tool_path]
        self.addParametersVerboseCommands(commands)
        commands.append("-i")
        commands.append(self.getParameterValue(shp2las.INPUT))
        xy_scale = self.getParameterValue(shp2las.SCALE_FACTOR_XY)
        z_scale = self.getParameterValue(shp2las.SCALE_FACTOR_Z)
        # only pass -set_scale_factor when either value differs from default
        if xy_scale != 0.01 or z_scale != 0.01:
            commands.append("-set_scale_factor")
            commands.append(unicode(xy_scale) + " " + unicode(xy_scale) + " " + unicode(z_scale))
        self.addParametersPointOutputCommands(commands)
        self.addParametersAdditionalCommands(commands)
        LAStoolsUtils.runLAStools(commands, progress)
| gpl-2.0 |
krux/adspygoogle | examples/adspygoogle/dfp/v201208/get_licas_by_statement.py | 4 | 2264 | #!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all line item creative associations (LICA) for a given
line item id. The statement retrieves up to the maximum page size limit of 500.
To create LICAs, run create_licas.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
lica_service = client.GetService(
    'LineItemCreativeAssociationService', version='v201208')
# Set the id of the line item to get LICAs by.
line_item_id = 'INSERT_LINE_ITEM_ID_HERE'
# Create statement object to only select LICAs for the given line item id.
# The bound value is passed separately from the query text (PQL-style).
values = [{
    'key': 'lineItemId',
    'value': {
        'xsi_type': 'NumberValue',
        'value': line_item_id
    }
}]
filter_statement = {'query': 'WHERE lineItemId = :lineItemId LIMIT 500',
                    'values': values}
# Get LICAs by statement.
# [0] takes the first (and only) page object from the API response.
response = lica_service.GetLineItemCreativeAssociationsByStatement(
    filter_statement)[0]
licas = []
# 'results' is absent when the query matched nothing.
if 'results' in response:
  licas = response['results']
# Display results.
for lica in licas:
  print ('LICA with line item id \'%s\', creative id \'%s\', and status '
         '\'%s\' was found.' % (lica['lineItemId'], lica['creativeId'],
                                lica['status']))
print
print 'Number of results found: %s' % len(licas)
| apache-2.0 |
jsidabras/GA-PMR | HFSS-loadbest.py | 1 | 1771 | # ----------------------------------------------
# Script Written by Jason W. Sidabras (jason.sidabras@cec.mpg.de)
# requires jsidabras/hycohanz as of 20-04-2017
# Loads a file with a list of 1s and 0s and implements it to HFSS as Silv/Vac
# used to load the best results per generation or final
# ----------------------------------------------
from random import *
import argparse
import hycohanz as hfss
# Attach to a running HFSS/Desktop instance and select the GA project,
# design, editor, and fields reporter used by the rest of the script.
[oAnsoftApp, oDesktop] = hfss.setup_interface()
oProject = oDesktop.SetActiveProject("GA_PlanarResonator")
oDesign = hfss.set_active_design(oProject, 'HFSSDesign1')
oEditor = hfss.set_active_editor(oDesign)
oFieldsReporter = hfss.get_module(oDesign, 'FieldsReporter')
# Parse the command line: a single positional argument naming the GA result file.
parser = argparse.ArgumentParser(description='Load GA best file and run solution in HFSS.')
parser.add_argument('file', type=str, help='the filename to load')
args = parser.parse_args()
# Read only the first line (the stored individual).  A context manager
# guarantees the handle is closed even if the read raises; the original
# open()/close() pair leaked the handle on error.
with open(args.file, 'r') as f:
    loadthing = f.readline()
# Strip the saved list repr ("['1, 0, ...']" style) down to the bare
# comma-separated genome and split it into individual "0"/"1" tokens.
dump = loadthing.strip("[")
dump = dump.rstrip()
dump = dump.strip(r"']").split(", ")
# Comprehension instead of an append loop; behavior unchanged.
thing = [int(token) for token in dump]
print(len(dump))
# Partition element names by gene: 1 -> conductor ("Silv"), 0 -> vacuum.
# enumerate() replaces the original hand-maintained 'index' counter
# (which was otherwise unused after the loop).
Silv = ["Elm_" + str(index) for index, gene in enumerate(thing) if gene == 1]
Vac = ["Elm_" + str(index) for index, gene in enumerate(thing) if gene != 1]
oDesktop.ClearMessages("", "", 3)
# Check if list is empty
# Recolor each group, then assign its material: vacuum solves inside,
# PEC (perfect electric conductor) does not.
if Vac:
    hfss.assign_White(oEditor, Vac)
    hfss.assign_material(oEditor, Vac, MaterialName="vacuum", SolveInside=True)
if Silv:
    hfss.assign_Orange(oEditor, Silv)
    hfss.assign_material(oEditor, Silv, MaterialName="pec", SolveInside=False)
oDesktop.ClearMessages("", "", 3)
# try:
    #oDesign.Analyze("Setup1")
# except:
    # print("Simulation Error")
#oProject.Save()
| mit |
axitkhurana/stem | stem/response/events.py | 3 | 31598 | # Copyright 2012-2013, Damian Johnson
# Copyright 2012, Sean Robinson
# See LICENSE for licensing information
import datetime
import io
import re
import time
import stem
import stem.control
import stem.descriptor.router_status_entry
import stem.response
import stem.version
from stem.util import connection, log, str_tools, tor_tools
# Matches keyword=value arguments. This can't be a simple "(.*)=(.*)" pattern
# because some positional arguments, like circuit paths, can have an equal
# sign.
# Raw strings so regex escapes like \S aren't treated as (invalid) string
# escapes -- avoids a DeprecationWarning on modern Pythons; the compiled
# patterns are byte-for-byte identical to the originals.
KW_ARG = re.compile(r"^(.*) ([A-Za-z0-9_]+)=(\S*)$")
QUOTED_KW_ARG = re.compile(r'^(.*) ([A-Za-z0-9_]+)="(.*)"$')
class Event(stem.response.ControlMessage):
  """
  Base for events we receive asynchronously, as described in section 4.1 of the
  `control-spec
  <https://gitweb.torproject.org/torspec.git/blob/HEAD:/control-spec.txt>`_.
  :var str type: event type
  :var int arrived_at: unix timestamp for when the message arrived
  :var list positional_args: positional arguments of the event
  :var dict keyword_args: key/value arguments of the event
  """
  _POSITIONAL_ARGS = () # attribute names for recognized positional arguments
  _KEYWORD_ARGS = {} # map of 'keyword => attribute' for recognized attributes
  _QUOTED = () # positional arguments that are quoted
  _OPTIONALLY_QUOTED = () # positional arguments that may or may not be quoted
  _SKIP_PARSING = False # skip parsing contents into our positional_args and keyword_args
  _VERSION_ADDED = stem.version.Version('0.1.1.1-alpha') # minimum version with control-spec V1 event support
  def _parse_message(self, arrived_at = None):
    # arrival time defaults to receipt time when the caller doesn't supply one
    if arrived_at is None:
      arrived_at = int(time.time())
    if not str(self).strip():
      raise stem.ProtocolError("Received a blank tor event. Events must at the very least have a type.")
    self.type = str(self).split().pop(0)
    self.arrived_at = arrived_at
    # if we're a recognized event type then translate ourselves into that subclass
    if self.type in EVENT_TYPE_TO_CLASS:
      self.__class__ = EVENT_TYPE_TO_CLASS[self.type]
    self.positional_args = []
    self.keyword_args = {}
    if not self._SKIP_PARSING:
      self._parse_standard_attr()
    self._parse()
  def _parse_standard_attr(self):
    """
    Most events are of the form...
    650 *( positional_args ) *( key "=" value )
    This parses this standard format, populating our **positional_args** and
    **keyword_args** attributes and creating attributes if it's in our event's
    **_POSITIONAL_ARGS** and **_KEYWORD_ARGS**.
    """
    # Tor events contain some number of positional arguments followed by
    # key/value mappings. Parsing keyword arguments from the end until we hit
    # something that isn't a key/value mapping. The rest are positional.
    content = str(self)
    while True:
      match = QUOTED_KW_ARG.match(content)
      if not match:
        match = KW_ARG.match(content)
      if match:
        content, keyword, value = match.groups()
        self.keyword_args[keyword] = value
      else:
        break
    # Setting attributes for the fields that we recognize.
    # split()[1:] drops the leading event-type token.
    self.positional_args = content.split()[1:]
    positional = list(self.positional_args)
    for attr_name in self._POSITIONAL_ARGS:
      attr_value = None
      if positional:
        if attr_name in self._QUOTED or (attr_name in self._OPTIONALLY_QUOTED and positional[0].startswith('"')):
          # quoted values may contain spaces, so they span several
          # whitespace-split tokens; rejoin until the closing quote
          attr_values = [positional.pop(0)]
          if not attr_values[0].startswith('"'):
            raise stem.ProtocolError("The %s value should be quoted, but didn't have a starting quote: %s" % (attr_name, self))
          while True:
            if not positional:
              raise stem.ProtocolError("The %s value should be quoted, but didn't have an ending quote: %s" % (attr_name, self))
            attr_values.append(positional.pop(0))
            if attr_values[-1].endswith('"'):
              break
          # strip the surrounding quotes from the rejoined value
          attr_value = " ".join(attr_values)[1:-1]
        else:
          attr_value = positional.pop(0)
      setattr(self, attr_name, attr_value)
    for controller_attr_name, attr_name in self._KEYWORD_ARGS.items():
      setattr(self, attr_name, self.keyword_args.get(controller_attr_name))
  # method overwritten by our subclasses for special handling that they do
  def _parse(self):
    pass
  def _log_if_unrecognized(self, attr, attr_enum):
    """
    Checks if an attribute exists in a given enumeration, logging a message if
    it isn't. Attributes can either be for a string or collection of strings
    :param str attr: name of the attribute to check
    :param stem.util.enum.Enum enum: enumeration to check against
    """
    attr_values = getattr(self, attr)
    if attr_values:
      # NOTE: 'unicode' ties this check to Python 2; Python 3 would use 'str'
      if isinstance(attr_values, (bytes, unicode)):
        attr_values = [attr_values]
      for value in attr_values:
        if not value in attr_enum:
          log_id = "event.%s.unknown_%s.%s" % (self.type.lower(), attr, value)
          unrecognized_msg = "%s event had an unrecognized %s (%s). Maybe a new addition to the control protocol? Full Event: '%s'" % (self.type, attr, value, self)
          log.log_once(log_id, log.INFO, unrecognized_msg)
class AddrMapEvent(Event):
  """
  Event that indicates a new address mapping.
  The ADDRMAP event was one of the first Control Protocol V1 events and was
  introduced in tor version 0.1.1.1-alpha.
  :var str hostname: address being resolved
  :var str destination: destination of the resolution, this is usually an ip,
    but could be a hostname if TrackHostExits is enabled or **NONE** if the
    resolution failed
  :var datetime expiry: expiration time of the resolution in local time
  :var str error: error code if the resolution failed
  :var datetime utc_expiry: expiration time of the resolution in UTC
  :var bool cached: **True** if the resolution will be kept until it expires,
    **False** otherwise or **None** if undefined
  """
  _POSITIONAL_ARGS = ("hostname", "destination", "expiry")
  _KEYWORD_ARGS = {
    "error": "error",
    "EXPIRES": "utc_expiry",
    "CACHED": "cached",
  }
  # Bug fix: must be a tuple. The original '("expiry")' was just a
  # parenthesized string, so the 'in' checks in Event._parse_standard_attr
  # were substring tests that only worked by coincidence.
  _OPTIONALLY_QUOTED = ("expiry",)
  def _parse(self):
    # tor reports failed resolutions with a literal '<error>' destination
    if self.destination == "<error>":
      self.destination = None
    if self.expiry is not None:
      if self.expiry == "NEVER":
        self.expiry = None
      else:
        try:
          self.expiry = datetime.datetime.strptime(self.expiry, "%Y-%m-%d %H:%M:%S")
        except ValueError:
          raise stem.ProtocolError("Unable to parse date in ADDRMAP event: %s" % self)
    if self.utc_expiry is not None:
      self.utc_expiry = datetime.datetime.strptime(self.utc_expiry, "%Y-%m-%d %H:%M:%S")
    if self.cached is not None:
      if self.cached == "YES":
        self.cached = True
      elif self.cached == "NO":
        self.cached = False
      else:
        raise stem.ProtocolError("An ADDRMAP event's CACHED mapping can only be 'YES' or 'NO': %s" % self)
class AuthDirNewDescEvent(Event):
  """
  Event specific to directory authorities, indicating that we just received new
  descriptors. The descriptor type contained within this event is unspecified
  so the descriptor contents are left unparsed.
  The AUTHDIR_NEWDESCS event was introduced in tor version 0.1.1.10-alpha.
  :var stem.AuthDescriptorAction action: what is being done with the descriptor
  :var str message: explanation of why we chose this action
  :var str descriptor: content of the descriptor
  """
  _SKIP_PARSING = True
  _VERSION_ADDED = stem.version.Requirement.EVENT_AUTHDIR_NEWDESCS
  def _parse(self):
    # line 0 is the event type, 1 the action, 2 the message, then the
    # descriptor body, and a terminating 'OK' line
    lines = str(self).split('\n')
    if len(lines) < 5:
      raise stem.ProtocolError("AUTHDIR_NEWDESCS events must contain lines for at least the type, action, message, descriptor, and terminating 'OK'")
    elif not lines[-1] == "OK":
      raise stem.ProtocolError("AUTHDIR_NEWDESCS doesn't end with an 'OK'")
    self.action = lines[1]
    self.message = lines[2]
    self.descriptor = '\n'.join(lines[3:-1])
class BandwidthEvent(Event):
  """
  Event emitted every second with the bytes sent and received by tor.
  The BW event was one of the first Control Protocol V1 events and was
  introduced in tor version 0.1.1.1-alpha.
  :var long read: bytes received by tor that second
  :var long written: bytes sent by tor that second
  """
  _POSITIONAL_ARGS = ("read", "written")
  def _parse(self):
    if not self.read:
      raise stem.ProtocolError("BW event is missing its read value")
    elif not self.written:
      raise stem.ProtocolError("BW event is missing its written value")
    elif not self.read.isdigit() or not self.written.isdigit():
      raise stem.ProtocolError("A BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
    # NOTE: long() is the Python 2 arbitrary-precision integer constructor;
    # under Python 3 this would be int()
    self.read = long(self.read)
    self.written = long(self.written)
class BuildTimeoutSetEvent(Event):
  """
  Event indicating that the timeout value for a circuit has changed. This was
  first added in tor version 0.2.2.7.
  The BUILDTIMEOUT_SET event was introduced in tor version 0.2.2.7-alpha.
  :var stem.TimeoutSetType set_type: way in which the timeout is changing
  :var int total_times: circuit build times tor used to determine the timeout
  :var int timeout: circuit timeout value in milliseconds
  :var int xm: Pareto parameter Xm in milliseconds
  :var float alpha: Pareto parameter alpha
  :var float quantile: CDF quantile cutoff point
  :var float timeout_rate: ratio of circuits that have time out
  :var int close_timeout: duration to keep measurement circuits in milliseconds
  :var float close_rate: ratio of measurement circuits that are closed
  """
  _POSITIONAL_ARGS = ("set_type",)
  _KEYWORD_ARGS = {
    "TOTAL_TIMES": "total_times",
    "TIMEOUT_MS": "timeout",
    "XM": "xm",
    "ALPHA": "alpha",
    "CUTOFF_QUANTILE": "quantile",
    "TIMEOUT_RATE": "timeout_rate",
    "CLOSE_MS": "close_timeout",
    "CLOSE_RATE": "close_rate",
  }
  _VERSION_ADDED = stem.version.Requirement.EVENT_BUILDTIMEOUT_SET
  def _parse(self):
    # convert our integer and float parameters
    # (attributes are parsed as strings by the base class; absent keyword
    # arguments are left as None)
    for param in ('total_times', 'timeout', 'xm', 'close_timeout'):
      param_value = getattr(self, param)
      if param_value is not None:
        try:
          setattr(self, param, int(param_value))
        except ValueError:
          raise stem.ProtocolError("The %s of a BUILDTIMEOUT_SET should be an integer: %s" % (param, self))
    for param in ('alpha', 'quantile', 'timeout_rate', 'close_rate'):
      param_value = getattr(self, param)
      if param_value is not None:
        try:
          setattr(self, param, float(param_value))
        except ValueError:
          raise stem.ProtocolError("The %s of a BUILDTIMEOUT_SET should be a float: %s" % (param, self))
    self._log_if_unrecognized('set_type', stem.TimeoutSetType)
class CircuitEvent(Event):
  """
  Event that indicates that a circuit has changed.
  The fingerprint or nickname values in our 'path' may be **None** if the
  VERBOSE_NAMES feature isn't enabled. The option was first introduced in tor
  version 0.1.2.2, and on by default after 0.2.2.1.
  The CIRC event was one of the first Control Protocol V1 events and was
  introduced in tor version 0.1.1.1-alpha.
  :var str id: circuit identifier
  :var stem.CircStatus status: reported status for the circuit
  :var tuple path: relays involved in the circuit, these are
    **(fingerprint, nickname)** tuples
  :var tuple build_flags: :data:`~stem.CircBuildFlag` attributes
    governing how the circuit is built
  :var stem.CircPurpose purpose: purpose that the circuit is intended for
  :var stem.HiddenServiceState hs_state: status if this is a hidden service circuit
  :var str rend_query: circuit's rendezvous-point if this is hidden service related
  :var datetime created: time when the circuit was created or cannibalized
  :var stem.CircClosureReason reason: reason for the circuit to be closed
  :var stem.CircClosureReason remote_reason: remote side's reason for the circuit to be closed
  """
  _POSITIONAL_ARGS = ("id", "status", "path")
  _KEYWORD_ARGS = {
    "BUILD_FLAGS": "build_flags",
    "PURPOSE": "purpose",
    "HS_STATE": "hs_state",
    "REND_QUERY": "rend_query",
    "TIME_CREATED": "created",
    "REASON": "reason",
    "REMOTE_REASON": "remote_reason",
  }
  def _parse(self):
    # convert the raw strings the base class parsed into richer types
    self.path = tuple(stem.control._parse_circ_path(self.path))
    if self.build_flags is not None:
      self.build_flags = tuple(self.build_flags.split(','))
    if self.created is not None:
      try:
        self.created = str_tools._parse_iso_timestamp(self.created)
      except ValueError as exc:
        raise stem.ProtocolError("Unable to parse create date (%s): %s" % (exc, self))
    if not tor_tools.is_valid_circuit_id(self.id):
      raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
    # unrecognized enum values are only logged, not fatal
    self._log_if_unrecognized('status', stem.CircStatus)
    self._log_if_unrecognized('build_flags', stem.CircBuildFlag)
    self._log_if_unrecognized('purpose', stem.CircPurpose)
    self._log_if_unrecognized('hs_state', stem.HiddenServiceState)
    self._log_if_unrecognized('reason', stem.CircClosureReason)
    self._log_if_unrecognized('remote_reason', stem.CircClosureReason)
class CircMinorEvent(Event):
  """
  Event providing information about minor changes in our circuits. This was
  first added in tor version 0.2.3.11.
  The CIRC_MINOR event was introduced in tor version 0.2.3.11-alpha.
  :var str id: circuit identifier
  :var stem.CircEvent event: type of change in the circuit
  :var tuple path: relays involved in the circuit, these are
    **(fingerprint, nickname)** tuples
  :var tuple build_flags: :data:`~stem.CircBuildFlag` attributes
    governing how the circuit is built
  :var stem.CircPurpose purpose: purpose that the circuit is intended for
  :var stem.HiddenServiceState hs_state: status if this is a hidden service circuit
  :var str rend_query: circuit's rendezvous-point if this is hidden service related
  :var datetime created: time when the circuit was created or cannibalized
  :var stem.CircPurpose old_purpose: prior purpose for the circuit
  :var stem.HiddenServiceState old_hs_state: prior status as a hidden service circuit
  """
  _POSITIONAL_ARGS = ("id", "event", "path")
  _KEYWORD_ARGS = {
    "BUILD_FLAGS": "build_flags",
    "PURPOSE": "purpose",
    "HS_STATE": "hs_state",
    "REND_QUERY": "rend_query",
    "TIME_CREATED": "created",
    "OLD_PURPOSE": "old_purpose",
    "OLD_HS_STATE": "old_hs_state",
  }
  _VERSION_ADDED = stem.version.Requirement.EVENT_CIRC_MINOR
  def _parse(self):
    # mirrors CircuitEvent._parse: convert raw strings into richer types
    self.path = tuple(stem.control._parse_circ_path(self.path))
    if self.build_flags is not None:
      self.build_flags = tuple(self.build_flags.split(','))
    if self.created is not None:
      try:
        self.created = str_tools._parse_iso_timestamp(self.created)
      except ValueError as exc:
        raise stem.ProtocolError("Unable to parse create date (%s): %s" % (exc, self))
    if not tor_tools.is_valid_circuit_id(self.id):
      raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
    self._log_if_unrecognized('event', stem.CircEvent)
    self._log_if_unrecognized('build_flags', stem.CircBuildFlag)
    self._log_if_unrecognized('purpose', stem.CircPurpose)
    self._log_if_unrecognized('hs_state', stem.HiddenServiceState)
    self._log_if_unrecognized('old_purpose', stem.CircPurpose)
    self._log_if_unrecognized('old_hs_state', stem.HiddenServiceState)
class ClientsSeenEvent(Event):
  """
  Periodic event on bridge relays that provides a summary of our users.
  The CLIENTS_SEEN event was introduced in tor version 0.2.1.10-alpha.
  :var datetime start_time: time in UTC that we started collecting these stats
  :var dict locales: mapping of country codes to a rounded count for the number of users
  :var dict ip_versions: mapping of ip protocols to a rounded count for the number of users
  """
  _KEYWORD_ARGS = {
    "TimeStarted": "start_time",
    "CountrySummary": "locales",
    "IPVersions": "ip_versions",
  }
  _VERSION_ADDED = stem.version.Requirement.EVENT_CLIENTS_SEEN
  def _parse(self):
    if self.start_time is not None:
      self.start_time = datetime.datetime.strptime(self.start_time, "%Y-%m-%d %H:%M:%S")
    if self.locales is not None:
      locale_to_count = {}
      # CountrySummary is of the form 'us=16,de=8,...'
      for entry in self.locales.split(','):
        # idiom fix (PEP 8 E713): "'=' not in" instead of "not '=' in"
        if '=' not in entry:
          raise stem.ProtocolError("The CLIENTS_SEEN's CountrySummary should be a comma separated listing of '<locale>=<count>' mappings: %s" % self)
        locale, count = entry.split('=', 1)
        if len(locale) != 2:
          raise stem.ProtocolError("Locales should be a two character code, got '%s': %s" % (locale, self))
        elif not count.isdigit():
          raise stem.ProtocolError("Locale count was non-numeric (%s): %s" % (count, self))
        elif locale in locale_to_count:
          raise stem.ProtocolError("CountrySummary had multiple mappings for '%s': %s" % (locale, self))
        locale_to_count[locale] = int(count)
      self.locales = locale_to_count
    if self.ip_versions is not None:
      protocol_to_count = {}
      # IPVersions is of the form 'v4=16,v6=4'
      for entry in self.ip_versions.split(','):
        if '=' not in entry:
          raise stem.ProtocolError("The CLIENTS_SEEN's IPVersions should be a comma separated listing of '<protocol>=<count>' mappings: %s" % self)
        protocol, count = entry.split('=', 1)
        if not count.isdigit():
          raise stem.ProtocolError("IP protocol count was non-numeric (%s): %s" % (count, self))
        protocol_to_count[protocol] = int(count)
      self.ip_versions = protocol_to_count
class ConfChangedEvent(Event):
  """
  Event that indicates that our configuration changed, either in response to a
  SETCONF or RELOAD signal.
  The CONF_CHANGED event was introduced in tor version 0.2.3.3-alpha.
  :var dict config: mapping of configuration options to their new values
    (**None** if the option is being unset)
  """
  _SKIP_PARSING = True
  _VERSION_ADDED = stem.version.Requirement.EVENT_CONF_CHANGED
  def _parse(self):
    self.config = {}
    # Skip first and last line since they're the header and footer. For
    # instance...
    #
    # 650-CONF_CHANGED
    # 650-ExitNodes=caerSidi
    # 650-ExitPolicy
    # 650-MaxCircuitDirtiness=20
    # 650 OK
    for line in str(self).splitlines()[1:-1]:
      # a line without '=' means the option was reset to its default
      if '=' in line:
        key, value = line.split('=', 1)
      else:
        key, value = line, None
      self.config[key] = value
class DescChangedEvent(Event):
  """
  Event that indicates that our descriptor has changed.
  The DESCCHANGED event was introduced in tor version 0.1.2.2-alpha.
  """
  # carries no payload beyond the base Event attributes
  _VERSION_ADDED = stem.version.Requirement.EVENT_DESCCHANGED
class GuardEvent(Event):
  """
  Event that indicates that our guard relays have changed. The 'endpoint' could
  be either a...
  * fingerprint
  * 'fingerprint=nickname' pair
  The derived 'endpoint_*' attributes are generally more useful.
  The GUARD event was introduced in tor version 0.1.2.5-alpha.
  :var stem.GuardType guard_type: purpose the guard relay is for
  :var str endpoint: relay that the event concerns
  :var str endpoint_fingerprint: endpoint's fingerprint
  :var str endpoint_nickname: endpoint's nickname if it was provided
  :var stem.GuardStatus status: status of the guard relay
  """
  _VERSION_ADDED = stem.version.Requirement.EVENT_GUARD
  _POSITIONAL_ARGS = ("guard_type", "endpoint", "status")
  def _parse(self):
    self.endpoint_fingerprint = None
    self.endpoint_nickname = None
    try:
      self.endpoint_fingerprint, self.endpoint_nickname = \
        stem.control._parse_circ_entry(self.endpoint)
    except stem.ProtocolError:
      # bug fix: the error text previously blamed 'ORCONN' -- a copy-paste
      # from ORConnEvent -- even though this is the GUARD event
      raise stem.ProtocolError("GUARD's endpoint doesn't match a ServerSpec: %s" % self)
    self._log_if_unrecognized('guard_type', stem.GuardType)
    self._log_if_unrecognized('status', stem.GuardStatus)
class LogEvent(Event):
  """
  Tor logging event. These are the most visible kind of event since, by
  default, tor logs at the NOTICE :data:`~stem.Runlevel` to stdout.
  The logging events were some of the first Control Protocol V1 events
  and were introduced in tor version 0.1.1.1-alpha.
  :var stem.Runlevel runlevel: runlevel of the logged message
  :var str message: logged message
  """
  _SKIP_PARSING = True
  def _parse(self):
    self.runlevel = self.type
    self._log_if_unrecognized('runlevel', stem.Runlevel)
    # Our message is the content after the runlevel. Multi-line events carry a
    # terminating "OK" line which we remove. Bug fix: the original used
    # rstrip("\nOK"), but rstrip()'s argument is a character *set*, so it also
    # stripped legitimate trailing 'O'/'K'/newline characters of the message
    # itself. Remove only the actual "\nOK" suffix.
    message = str(self)[len(self.runlevel) + 1:]
    if message.endswith("\nOK"):
      message = message[:-3]
    self.message = message
class NetworkStatusEvent(Event):
  """
  Event for when our copy of the consensus has changed. This was introduced in
  tor version 0.1.2.3.
  The NS event was introduced in tor version 0.1.2.3-alpha.
  :var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors
  """
  _SKIP_PARSING = True
  _VERSION_ADDED = stem.version.Requirement.EVENT_NS
  def _parse(self):
    # NOTE(review): lstrip/rstrip take a character *set*, not a prefix/suffix.
    # This works because router status entries start with 'r ' and don't end
    # with '\n', 'O', or 'K', but an exact prefix/suffix removal would be
    # safer -- confirm before relying on the content's shape.
    content = str(self).lstrip("NS\n").rstrip("\nOK")
    self.desc = list(stem.descriptor.router_status_entry._parse_file(
      io.BytesIO(str_tools._to_bytes(content)),
      True,
      entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3,
    ))
class NewConsensusEvent(Event):
  """
  Event for when we have a new consensus. This is similar to
  :class:`~stem.response.events.NetworkStatusEvent`, except that it contains
  the whole consensus so anything not listed is implicitly no longer
  recommended.
  The NEWCONSENSUS event was introduced in tor version 0.2.1.13-alpha.
  :var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors
  """
  _SKIP_PARSING = True
  _VERSION_ADDED = stem.version.Requirement.EVENT_NEWCONSENSUS
  def _parse(self):
    # NOTE(review): same char-set lstrip/rstrip caveat as NetworkStatusEvent;
    # works because entries start with 'r ' -- confirm before changing.
    content = str(self).lstrip("NEWCONSENSUS\n").rstrip("\nOK")
    self.desc = list(stem.descriptor.router_status_entry._parse_file(
      io.BytesIO(str_tools._to_bytes(content)),
      True,
      entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3,
    ))
class NewDescEvent(Event):
  """
  Event that indicates that a new descriptor is available.

  The fingerprint or nickname values in our 'relays' may be **None** if the
  VERBOSE_NAMES feature isn't enabled. The option was first introduced in tor
  version 0.1.2.2, and on by default after 0.2.2.1.

  The NEWDESC event was one of the first Control Protocol V1 events and was
  introduced in tor version 0.1.1.1-alpha.

  :var tuple relays: **(fingerprint, nickname)** tuples for the relays with
    new descriptors
  """

  def _parse(self):
    # the first token is the event name ("NEWDESC"); every following token is
    # a relay entry to be split into a (fingerprint, nickname) tuple
    self.relays = tuple([stem.control._parse_circ_entry(entry) for entry in str(self).split()[1:]])
class ORConnEvent(Event):
  """
  Event that indicates a change in a relay connection. The 'endpoint' could be
  any of several things including a...

  * fingerprint
  * nickname
  * 'fingerprint=nickname' pair
  * address:port

  The derived 'endpoint_*' attributes are generally more useful.

  The ORCONN event was one of the first Control Protocol V1 events and was
  introduced in tor version 0.1.1.1-alpha.

  :var str endpoint: relay that the event concerns
  :var str endpoint_fingerprint: endpoint's fingerprint if it was provided
  :var str endpoint_nickname: endpoint's nickname if it was provided
  :var str endpoint_address: endpoint's address if it was provided
  :var int endpoint_port: endpoint's port if it was provided
  :var stem.ORStatus status: state of the connection
  :var stem.ORClosureReason reason: reason for the connection to be closed
  :var int circ_count: number of established and pending circuits
  """

  _POSITIONAL_ARGS = ("endpoint", "status")
  _KEYWORD_ARGS = {
    "REASON": "reason",
    "NCIRCS": "circ_count",
  }

  def _parse(self):
    # only the subset of endpoint_* attributes matching the endpoint's actual
    # form gets filled in; the rest stay None
    self.endpoint_fingerprint = None
    self.endpoint_nickname = None
    self.endpoint_address = None
    self.endpoint_port = None

    # first try to interpret the endpoint as a relay (fingerprint, nickname,
    # or 'fingerprint=nickname'), falling back to an 'address:port' pair
    try:
      self.endpoint_fingerprint, self.endpoint_nickname = \
        stem.control._parse_circ_entry(self.endpoint)
    except stem.ProtocolError:
      if not ':' in self.endpoint:
        raise stem.ProtocolError("ORCONN endpoint is neither a relay nor 'address:port': %s" % self)

      address, port = self.endpoint.split(':', 1)

      if not connection.is_valid_port(port):
        raise stem.ProtocolError("ORCONN's endpoint location's port is invalid: %s" % self)

      self.endpoint_address = address
      self.endpoint_port = int(port)

    # NCIRCS is optional; when present it arrives as a numeric string
    if self.circ_count is not None:
      if not self.circ_count.isdigit():
        raise stem.ProtocolError("ORCONN event got a non-numeric circuit count (%s): %s" % (self.circ_count, self))

      self.circ_count = int(self.circ_count)

    self._log_if_unrecognized('status', stem.ORStatus)
    self._log_if_unrecognized('reason', stem.ORClosureReason)
class SignalEvent(Event):
  """
  Event that indicates that tor has received and acted upon a signal being sent
  to the process. As of tor version 0.2.4.6 the only signals conveyed by this
  event are...

  * RELOAD
  * DUMP
  * DEBUG
  * NEWNYM
  * CLEARDNSCACHE

  The SIGNAL event was introduced in tor version 0.2.3.1-alpha.

  :var stem.Signal signal: signal that tor received
  """

  _POSITIONAL_ARGS = ("signal",)
  _VERSION_ADDED = stem.version.Requirement.EVENT_SIGNAL

  def _parse(self):
    # log if we received a signal outside the set this event should convey
    recognized_signals = (
      stem.Signal.RELOAD,
      stem.Signal.DUMP,
      stem.Signal.DEBUG,
      stem.Signal.NEWNYM,
      stem.Signal.CLEARDNSCACHE,
    )

    self._log_if_unrecognized('signal', recognized_signals)
class StatusEvent(Event):
  """
  Notification of a change in tor's state. These are generally triggered for
  the same sort of things as log messages of the NOTICE level or higher.
  However, unlike :class:`~stem.response.events.LogEvent` these contain well
  formed data.

  The STATUS_GENERAL, STATUS_CLIENT, STATUS_SERVER events were introduced
  in tor version 0.1.2.3-alpha.

  :var stem.StatusType status_type: category of the status event
  :var stem.Runlevel runlevel: runlevel of the logged message
  :var str message: logged message
  """

  _POSITIONAL_ARGS = ("runlevel", "action")
  _VERSION_ADDED = stem.version.Requirement.EVENT_STATUS

  def _parse(self):
    # the event type determines which status category we belong to
    type_to_status = {
      'STATUS_GENERAL': stem.StatusType.GENERAL,
      'STATUS_CLIENT': stem.StatusType.CLIENT,
      'STATUS_SERVER': stem.StatusType.SERVER,
    }

    try:
      self.status_type = type_to_status[self.type]
    except KeyError:
      raise ValueError("BUG: Unrecognized status type (%s), likely an EVENT_TYPE_TO_CLASS addition without revising how 'status_type' is assigned." % self.type)

    self._log_if_unrecognized('runlevel', stem.Runlevel)
class StreamEvent(Event):
  """
  Event that indicates that a stream has changed.

  The STREAM event was one of the first Control Protocol V1 events and was
  introduced in tor version 0.1.1.1-alpha.

  :var str id: stream identifier
  :var stem.StreamStatus status: reported status for the stream
  :var str circ_id: circuit that the stream is attached to
  :var str target: destination of the stream
  :var str target_address: destination address (ip, hostname, or '(Tor_internal)')
  :var int target_port: destination port
  :var stem.StreamClosureReason reason: reason for the stream to be closed
  :var stem.StreamClosureReason remote_reason: remote side's reason for the stream to be closed
  :var stem.StreamSource source: origin of the REMAP request
  :var str source_addr: requester of the connection
  :var str source_address: requester address (ip or hostname)
  :var int source_port: requester port
  :var stem.StreamPurpose purpose: purpose for the stream
  """

  _POSITIONAL_ARGS = ("id", "status", "circ_id", "target")
  _KEYWORD_ARGS = {
    "REASON": "reason",
    "REMOTE_REASON": "remote_reason",
    "SOURCE": "source",
    "SOURCE_ADDR": "source_addr",
    "PURPOSE": "purpose",
  }

  def _parse(self):
    # the target is mandatory and must be an 'address:port' pair; split it
    # into the derived target_address/target_port attributes
    if self.target is None:
      raise stem.ProtocolError("STREAM event didn't have a target: %s" % self)
    else:
      if not ':' in self.target:
        raise stem.ProtocolError("Target location must be of the form 'address:port': %s" % self)

      address, port = self.target.split(':', 1)

      # port zero is permitted for targets
      if not connection.is_valid_port(port, allow_zero = True):
        raise stem.ProtocolError("Target location's port is invalid: %s" % self)

      self.target_address = address
      self.target_port = int(port)

    # SOURCE_ADDR is optional; when present it is also an 'address:port' pair
    if self.source_addr is None:
      self.source_address = None
      self.source_port = None
    else:
      if not ':' in self.source_addr:
        raise stem.ProtocolError("Source location must be of the form 'address:port': %s" % self)

      address, port = self.source_addr.split(':', 1)

      if not connection.is_valid_port(port, allow_zero = True):
        raise stem.ProtocolError("Source location's port is invalid: %s" % self)

      self.source_address = address
      self.source_port = int(port)

    # spec specifies a circ_id of zero if the stream is unattached
    if self.circ_id == "0":
      self.circ_id = None

    self._log_if_unrecognized('reason', stem.StreamClosureReason)
    self._log_if_unrecognized('remote_reason', stem.StreamClosureReason)
    self._log_if_unrecognized('purpose', stem.StreamPurpose)
class StreamBwEvent(Event):
  """
  Event (emitted approximately every second) with the bytes sent and received
  by the application since the last such event on this stream.

  The STREAM_BW event was introduced in tor version 0.1.2.8-beta.

  :var str id: stream identifier
  :var long written: bytes sent by the application
  :var long read: bytes received by the application
  """

  _POSITIONAL_ARGS = ("id", "written", "read")
  _VERSION_ADDED = stem.version.Requirement.EVENT_STREAM_BW

  def _parse(self):
    # validate the stream id and both byte counts before converting them
    if not tor_tools.is_valid_stream_id(self.id):
      raise stem.ProtocolError("Stream IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
    elif not self.written:
      raise stem.ProtocolError("STREAM_BW event is missing its written value")
    elif not self.read:
      raise stem.ProtocolError("STREAM_BW event is missing its read value")
    elif not self.read.isdigit() or not self.written.isdigit():
      raise stem.ProtocolError("A STREAM_BW event's bytes sent and received should be a positive numeric value, received: %s" % self)

    # long() is a Python 2 builtin - this module targets Python 2
    self.read = long(self.read)
    self.written = long(self.written)
# Maps the event name given by the control protocol to the class that parses
# it. Note that the log runlevels (DEBUG, INFO, NOTICE, WARN, ERR) all map to
# LogEvent, and the three STATUS_* categories all map to StatusEvent.
EVENT_TYPE_TO_CLASS = {
  "ADDRMAP": AddrMapEvent,
  "AUTHDIR_NEWDESCS": AuthDirNewDescEvent,
  "BUILDTIMEOUT_SET": BuildTimeoutSetEvent,
  "BW": BandwidthEvent,
  "CIRC": CircuitEvent,
  "CIRC_MINOR": CircMinorEvent,
  "CLIENTS_SEEN": ClientsSeenEvent,
  "CONF_CHANGED": ConfChangedEvent,
  "DEBUG": LogEvent,
  "DESCCHANGED": DescChangedEvent,
  "ERR": LogEvent,
  "GUARD": GuardEvent,
  "INFO": LogEvent,
  "NEWCONSENSUS": NewConsensusEvent,
  "NEWDESC": NewDescEvent,
  "NOTICE": LogEvent,
  "NS": NetworkStatusEvent,
  "ORCONN": ORConnEvent,
  "SIGNAL": SignalEvent,
  "STATUS_CLIENT": StatusEvent,
  "STATUS_GENERAL": StatusEvent,
  "STATUS_SERVER": StatusEvent,
  "STREAM": StreamEvent,
  "STREAM_BW": StreamBwEvent,
  "WARN": LogEvent,

  # accounting for a bug in tor 0.2.0.22 (misspelled STATUS_SERVER event name)
  "STATUS_SEVER": StatusEvent,
}
| lgpl-3.0 |
guoshimin/kubernetes | cluster/saltbase/salt/_states/container_bridge.py | 32 | 5829 | #!/usr/bin/env python
# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import salt.exceptions
import salt.utils.ipaddr as ipaddr
def ensure(name, cidr, mtu=1460):
    '''
    Ensure that a bridge (named <name>) is configured for containers.

    Under the covers we will make sure that
      - The bridge exists
      - The MTU is set
      - The correct network is added to the bridge
      - iptables is set up for MASQUERADE for egress

    name:
        The name of the bridge interface to create/configure
    cidr:
        The cidr range in the form of 10.244.x.0/24
    mtu:
        The MTU to set on the interface

    Returns a standard Salt state dict with 'name', 'changes', 'result', and
    'comment' keys ('result' is None when running with ``test=true``).
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}

    # This is a little hacky. I should probably import a real library for this
    # but this'll work for now.
    try:
        cidr_network = ipaddr.IPNetwork(cidr, strict=True)
    except Exception:
        raise salt.exceptions.SaltInvocationError(
            'Invalid CIDR \'{0}\''.format(cidr))

    # Only masquerade IPv4 egress; traffic destined for 10.0.0.0/8 (the
    # cluster space) is excluded from NAT. No rule is managed for IPv6.
    if cidr_network.version == 4:
        iptables_rule = {
            'table': 'nat',
            'chain': 'POSTROUTING',
            'rule': '-o eth0 -j MASQUERADE \! -d 10.0.0.0/8'
        }
    else:
        iptables_rule = None

    def bridge_exists(name):
        'Determine if a bridge exists already.'
        out = __salt__['cmd.run_stdout']('brctl show {0}'.format(name))
        for line in out.splitlines():
            # get rid of first line
            if line.startswith('bridge name'):
                continue
            # get rid of ^\n's
            vals = line.split()
            if not vals:
                continue
            if len(vals) > 1:
                return True
        return False

    def get_ip_addr_details(name):
        'For the given interface, get address details.'
        out = __salt__['cmd.run']('ip addr show dev {0}'.format(name))
        ret = { 'networks': [] }
        for line in out.splitlines():
            # interface header lines look like:
            #   "2: cbr0: <BROADCAST,UP,...> mtu 1460 ..."
            match = re.match(
                r'^\d*:\s+([\w.\-]+)(?:@)?([\w.\-]+)?:\s+<(.+)>.*mtu (\d+)',
                line)
            if match:
                iface, parent, attrs, mtu = match.groups()
                if 'UP' in attrs.split(','):
                    ret['up'] = True
                else:
                    ret['up'] = False
                if parent:
                    ret['parent'] = parent
                ret['mtu'] = int(mtu)
                continue
            # address lines look like: "inet 10.244.1.1/24 ..."
            cols = line.split()
            if len(cols) > 2 and cols[0] == 'inet':
                ret['networks'].append(cols[1])
        return ret

    def get_current_state():
        'Helper that returns a dict of current bridge state.'
        ret = {}
        ret['name'] = name
        ret['exists'] = bridge_exists(name)
        if ret['exists']:
            ret['details'] = get_ip_addr_details(name)
        else:
            ret['details'] = {}
        # This module function is strange and returns True if the rule exists.
        # If not, it returns a string with the error from the call to iptables.
        if iptables_rule:
            ret['iptables_rule_exists'] = \
                __salt__['iptables.check'](**iptables_rule) == True
        else:
            ret['iptables_rule_exists'] = True
        return ret

    # The bridge gets the first usable address of the network
    # (e.g. 10.244.x.1/24 for a 10.244.x.0/24 cidr).
    desired_network = '{0}/{1}'.format(
        str(ipaddr.IPAddress(cidr_network._ip + 1)),
        str(cidr_network.prefixlen))

    current_state = get_current_state()

    # Nothing to do if the bridge exists, is up, and has the right MTU,
    # address, and iptables rule.
    if (current_state['exists']
            and current_state['details']['mtu'] == mtu
            and desired_network in current_state['details']['networks']
            and current_state['details']['up']
            and current_state['iptables_rule_exists']):
        ret['result'] = True
        ret['comment'] = 'System already in the correct state'
        return ret

    # The state of the system does need to be changed. Check if we're running
    # in ``test=true`` mode.
    if __opts__['test'] == True:
        ret['comment'] = 'The state of "{0}" will be changed.'.format(name)
        ret['changes'] = {
            'old': current_state,
            'new': 'Create and configure bridge'
        }

        # Return ``None`` when running with ``test=true``.
        ret['result'] = None

        return ret

    # Finally, make the actual change and return the result. State is
    # re-queried after each step so later checks see the updated interface.
    if not current_state['exists']:
        __salt__['cmd.run']('brctl addbr {0}'.format(name))
    new_state = get_current_state()

    if new_state['details']['mtu'] != mtu:
        __salt__['cmd.run'](
            'ip link set dev {0} mtu {1}'.format(name, str(mtu)))
        new_state = get_current_state()

    if desired_network not in new_state['details']['networks']:
        __salt__['cmd.run'](
            'ip addr add {0} dev {1}'.format(desired_network, name))
        new_state = get_current_state()

    if not new_state['details']['up']:
        __salt__['cmd.run'](
            'ip link set dev {0} up'.format(name))
        new_state = get_current_state()

    if iptables_rule and not new_state['iptables_rule_exists']:
        __salt__['iptables.append'](**iptables_rule)
        new_state = get_current_state()

    ret['comment'] = 'The state of "{0}" was changed!'.format(name)
    ret['changes'] = {
        'old': current_state,
        'new': new_state,
    }
    ret['result'] = True

    return ret
| apache-2.0 |
ajgallegog/gem5_arm | configs/topologies/Cluster.py | 41 | 4886 | # Copyright (c) 2012 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Jason Power
from BaseTopology import BaseTopology
class Cluster(BaseTopology):
    """ A cluster is a group of nodes which are all one hop from each other.
    Clusters can also contain other clusters.

    When creating this kind of topology, return a single cluster (usually
    the root cluster) from create_system in configs/ruby/<protocol>.py
    """

    # Class-wide counters used to hand out unique ids for links and routers
    # across every cluster in the topology.
    _num_int_links = 0
    _num_ext_links = 0
    _num_routers = 0

    @classmethod
    def num_int_links(cls):
        """Return the next unique internal link id."""
        next_id = cls._num_int_links
        cls._num_int_links = next_id + 1
        return next_id

    @classmethod
    def num_ext_links(cls):
        """Return the next unique external link id."""
        next_id = cls._num_ext_links
        cls._num_ext_links = next_id + 1
        return next_id

    @classmethod
    def num_routers(cls):
        """Return the next unique router id."""
        next_id = cls._num_routers
        cls._num_routers = next_id + 1
        return next_id

    def __init__(self, intBW=0, extBW=0, intLatency=0, extLatency=0):
        """ intBW is the bandwidth of all links within the cluster,
        extBW the bandwidth from this cluster to any cluster connecting
        to it. intLatency/extLatency are the analogous latencies.

        **** When creating a cluster with sub-clusters, the sub-cluster's
        external bandwidth overrides the internal bandwidth of the
        super cluster.
        """
        self.nodes = []
        self.router = None  # created in makeTopology
        self.intBW = intBW
        self.extBW = extBW
        self.intLatency = intLatency
        self.extLatency = extLatency

    def add(self, node):
        """Add a controller or sub-cluster to this cluster."""
        self.nodes.append(node)

    def makeTopology(self, options, network, IntLink, ExtLink, Router):
        """ Recursively make all of the links and routers. """
        # a single router connects everything directly inside this cluster
        self.router = Router(router_id=self.num_routers())
        network.routers.append(self.router)

        for node in self.nodes:
            if type(node) == Cluster:
                # build the sub-cluster first, then wire its router to ours
                # with an internal link
                node.makeTopology(options, network, IntLink, ExtLink, Router)

                link = IntLink(link_id=self.num_int_links(),
                               node_a=self.router, node_b=node.router)

                # the sub-cluster's external settings take precedence over
                # our internal settings when present
                if node.extBW:
                    link.bandwidth_factor = node.extBW
                elif self.intBW:
                    link.bandwidth_factor = self.intBW

                if node.extLatency:
                    link.latency = node.extLatency
                elif self.intLatency:
                    link.latency = self.intLatency

                network.int_links.append(link)
            else:
                # node is just a controller; connect it to the router via an
                # external link
                link = ExtLink(link_id=self.num_ext_links(), ext_node=node,
                               int_node=self.router)

                if self.intBW:
                    link.bandwidth_factor = self.intBW
                if self.intLatency:
                    link.latency = self.intLatency

                network.ext_links.append(link)

    def __len__(self):
        """Number of controllers, counting through nested sub-clusters."""
        total = 0
        for node in self.nodes:
            total += len(node) if type(node) == Cluster else 1
        return total
jaromil/faircoin2 | qa/rpc-tests/mempool_spendcoinbase.py | 14 | 2705 | #!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test spending coinbase transactions.
# The coinbase transaction in block N can appear in block
# N+100... so is valid in the mempool when the best block
# height is N+99.
# This test makes sure coinbase spends that will be mature
# in the next block are accepted into the memory pool,
# but less mature coinbase spends are NOT.
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolSpendCoinbaseTest(BitcoinTestFramework):
    """
    Test spending coinbase transactions.

    A coinbase created in block N can be spent in block N+100, so it is valid
    in the mempool when the best block height is N+99. Spends of a coinbase
    that matures in the *next* block must be accepted into the mempool, while
    less mature coinbase spends must be rejected.
    """

    def setup_network(self):
        # Just need one node for this test
        extra_args = ["-checkmempool", "-debug=mempool"]
        self.nodes = [start_node(0, self.options.tmpdir, extra_args)]
        self.is_network_split = False

    def create_tx(self, from_txid, to_address, amount):
        # Build, sign, and return a one-input, one-output, no-fee transaction.
        node = self.nodes[0]
        raw_tx = node.createrawtransaction(
            [{"txid": from_txid, "vout": 0}],
            {to_address: amount})
        sign_result = node.signrawtransaction(raw_tx)
        assert_equal(sign_result["complete"], True)
        return sign_result["hex"]

    def run_test(self):
        node = self.nodes[0]
        assert_equal(node.getblockcount(), 200)
        node0_address = node.getnewaddress()

        # Coinbase at height chain_height-100+1 is ok in the mempool and
        # should get mined. Coinbase at height chain_height-100+2 is too
        # immature to spend.
        block_hashes = [node.getblockhash(height) for height in range(101, 103)]
        coinbase_txids = [node.getblock(h)['tx'][0] for h in block_hashes]
        spends_raw = [self.create_tx(txid, node0_address, 50) for txid in coinbase_txids]

        spend_101_id = node.sendrawtransaction(spends_raw[0])

        # coinbase at height 102 should be too immature to spend
        assert_raises(JSONRPCException, node.sendrawtransaction, spends_raw[1])

        # mempool should have just spend_101:
        assert_equal(node.getrawmempool(), [spend_101_id])

        # mine a block, spend_101 should get confirmed
        node.generate(1)
        assert_equal(set(node.getrawmempool()), set())

        # ... and now height 102 can be spent:
        spend_102_id = node.sendrawtransaction(spends_raw[1])
        assert_equal(node.getrawmempool(), [spend_102_id])
# Run the test directly when executed as a script.
if __name__ == '__main__':
    MempoolSpendCoinbaseTest().main()
| mit |
bmazin/SDR | DataReadout/ReadoutControls/lib/LabJackPython-8-26-2011/build/lib/u12.py | 2 | 114714 | """
Name: u12.py
Desc: Defines the U12 class, which makes working with a U12 much easier. The
functions of the U12 class are divided into two categories: UW and
low-level.
Most of the UW functions are exposed as functions of the U12 class. With
the exception of the "e" functions, UW functions are Windows only. The "e"
functions will work with both the UW and the Exodriver. Therefore, people
wishing to write cross-platform code should restrict themselves to using
only the "e" functions. The UW functions are described in Section 4 of the
U12 User's Guide:
http://labjack.com/support/u12/users-guide/4
All low-level functions of the U12 class begin with the word
raw. For example, the low-level function Counter can be called with
U12.rawCounter(). Currently, low-level functions are limited to the
Exodriver (Linux and Mac OS X). You can find descriptions of the low-level
functions in Section 5 of the U12 User's Guide:
http://labjack.com/support/u12/users-guide/5
"""
import platform
import ctypes
import os, atexit
import math
from time import time
import struct
WINDOWS = "Windows"
ON_WINDOWS = (os.name == 'nt')
class U12Exception(Exception):
    """Custom exception meant for dealing specifically with U12 errors.

    Error codes are either a LabJackUD error code or a -1. The -1 implies a
    Python-wrapper-specific error.
    """
    pass
class BitField(object):
    """
    A byte whose individual bits can be read and written by label.

    Bits are exposed three ways:

      * attribute access by label (e.g. ``bf.bit4``), returning 1 or 0
      * list-style indexing and slicing, where index 0 is the leftmost
        (most significant) labeled bit
      * iteration over the raw 1/0 values

    Labels are built from ``labelPrefix`` plus each entry of ``labelList``,
    and bits display as ``oneLabel``/``zeroLabel`` in str()/repr(). The value
    converts to and from an integer with fromByte()/asByte(), and the object
    also supports int() and (Python 2) hex() casts plus right-addition.
    """

    def __init__(self, rawByte = None, labelPrefix = "bit", labelList = None, zeroLabel = "0", oneLabel = "1"):
        """
        Name: BitField.__init__(rawByte = None, labelPrefix = "bit",
                                labelList = None, zeroLabel = "0",
                                oneLabel = "1")
        Args: rawByte, initial value for the bit field (None means all zeros)
              labelPrefix, text prepended to every entry of labelList
              labelList, labels for the bits, most significant first; defaults
                  to range(8) which yields "bit7".."bit0" with the default
                  prefix. With fewer than 8 entries only that many bits are
                  tracked.
              zeroLabel, display value used for bits that are 0
              oneLabel, display value used for bits that are 1
        Desc: Creates a new bit field and builds its labels.
        """
        # Assign 'labels' through __dict__ because our custom __setattr__
        # consults self.labels, which doesn't exist yet.
        self.__dict__['labels'] = []
        self.labelPrefix = labelPrefix

        # A caller-supplied list is stored reversed so indexing below walks it
        # from most significant to least significant bit.
        self.labelList = range(8) if labelList is None else list(reversed(labelList))

        self.zeroLabel = zeroLabel
        self.oneLabel = oneLabel
        self.rawValue = 0
        self.rawBits = [0] * 8
        self.data = [self.zeroLabel] * 8

        usable = min(8, len(self.labelList))
        for index in reversed(range(usable)):
            self.labels.append("%s%s" % (self.labelPrefix, self.labelList[index]))

        if rawByte is not None:
            self.fromByte(rawByte)

    def fromByte(self, raw):
        """
        Name: BitField.fromByte(raw)
        Args: raw, the byte whose bits should be loaded
        Desc: Overwrites this bit field, in place, with the bits of 'raw'.
        """
        self.rawValue = raw
        self.rawBits = []
        self.data = []

        usable = min(8, len(self.labelList))
        for shift in reversed(range(usable)):
            bit = (raw >> shift) & 1
            self.rawBits.append(bit)
            self.data.append(self.oneLabel if bit else self.zeroLabel)

    def asByte(self):
        """
        Name: BitField.asByte()
        Args: None
        Desc: Returns the current value of the bit field as an integer.
        """
        value = 0
        for position, bit in enumerate(reversed(self.rawBits)):
            value += bit << position
        return value

    def asBin(self):
        """Returns a '0b'-prefixed binary string of the raw bits."""
        return "0b" + "".join("%s" % bit for bit in self.rawBits)

    def __len__(self):
        return len(self.data)

    def __repr__(self):
        body = "".join(" %s = %s (%s)," % triple
                       for triple in zip(self.labels, self.data, self.rawBits))
        return "<BitField object: %s >" % ("[" + body.rstrip(',') + " ]")

    def __str__(self):
        body = "".join(" %s = %s," % pair
                       for pair in zip(self.labels, self.data))
        return "[" + body.rstrip(',') + " ]"

    def __getattr__(self, label):
        # Only reached for names not found the normal way, i.e. bit labels.
        try:
            position = self.labels.index(label)
        except ValueError:
            raise AttributeError(label)
        return self.rawBits[position]

    def __setattr__(self, label, value):
        try:
            position = self.labels.index(label)
        except ValueError:
            # not a bit label; store it as an ordinary attribute
            self.__dict__[label] = value
        else:
            self._set_bit(position, value)

    def _set_bit(self, position, value):
        'Record a 1 or 0 at the given position, updating the display value.'
        bit = int(bool(value))
        self.rawBits[position] = bit
        self.data[position] = self.oneLabel if bit else self.zeroLabel

    def __getitem__(self, key):
        return self.rawBits[key]

    def __setitem__(self, key, value):
        self._set_bit(key, value)

    def __iter__(self):
        return iter(self.rawBits)

    def items(self):
        """
        Name: BitField.items()
        Args: None
        Desc: Returns (label, display value) pairs, most significant bit
              first, e.g. ("bit7", "0").
        """
        return zip(self.labels, self.data)

    def __int__(self):
        return self.asByte()

    def __hex__(self):
        # Python 2 only; Python 3 does not call __hex__.
        return hex(self.asByte())

    def __add__(self, other):
        """
        A helper to prevent having to test if a variable is a bitfield or int.
        """
        return other + self.asByte()
def errcheck(ret, func, args):
    """ctypes errcheck hook: raise U12Exception on a -1 return value,
    otherwise pass the return value through unchanged."""
    if ret != -1:
        return ret

    try:
        ec = ctypes.get_errno()
        raise U12Exception("Exodriver returned error number %s" % ec)
    except AttributeError:
        # ctypes.get_errno is only available on Python 2.6+
        raise U12Exception("Exodriver returned an error, but LabJackPython is unable to read the error code. Upgrade to Python 2.6 for this functionality.")
def _loadLinuxSo():
    """Load the Exodriver shared object on Linux and attach error checking
    to the calls whose -1 return indicates failure."""
    try:
        lib = ctypes.CDLL("liblabjackusb.so", use_errno=True)
    except TypeError:
        # use_errno requires Python 2.6+; fall back for older interpreters
        lib = ctypes.CDLL("liblabjackusb.so")

    lib.LJUSB_Stream.errcheck = errcheck
    lib.LJUSB_Read.errcheck = errcheck
    return lib
def _loadMacDylib():
    """Load the Exodriver dylib on Mac OS X and attach error checking
    to the calls whose -1 return indicates failure."""
    try:
        lib = ctypes.CDLL("liblabjackusb.dylib", use_errno=True)
    except TypeError:
        # use_errno requires Python 2.6+; fall back for older interpreters
        lib = ctypes.CDLL("liblabjackusb.dylib")

    lib.LJUSB_Stream.errcheck = errcheck
    lib.LJUSB_Read.errcheck = errcheck
    return lib
# Load the USB driver at import time: the Exodriver (liblabjackusb) on POSIX
# platforms, or the LabJack UW driver (ljackuw) on Windows. Note the
# Python 2-only "except X, e" syntax - this module targets Python 2.
staticLib = None

if os.name == 'posix':
    try:
        staticLib = _loadLinuxSo()
    except OSError, e:
        pass # We may be on Mac.
    except Exception, e:
        raise U12Exception("Could not load the Linux SO for some reason other than it not being installed. Ethernet connectivity only.\n\n The error was: %s" % e)

    try:
        if staticLib is None:
            staticLib = _loadMacDylib()
    except OSError, e:
        raise U12Exception("Could not load the Exodriver driver. Ethernet connectivity only.\n\nCheck that the Exodriver is installed, and the permissions are set correctly.\nThe error message was: %s" % e)
    except Exception, e:
        raise U12Exception("Could not load the Mac Dylib for some reason other than it not being installed. Ethernet connectivity only.\n\n The error was: %s" % e)
else:
    try:
        staticLib = ctypes.windll.LoadLibrary("ljackuw")
    except:
        raise Exception, "Could not load LabJack UW driver."
class U12(object):
"""
U12 Class for all U12 specific commands.
u12 = U12()
"""
def __init__(self, id = -1, serialNumber = None, debug = False):
self.id = id
self.serialNumber = serialNumber
self.deviceName = "U12"
self.streaming = False
self.handle = None
self.debug = debug
self._autoCloseSetup = False
if not ON_WINDOWS:
# Save some variables to save state.
self.pwmAVoltage = 0
self.pwmBVoltage = 0
self.open(id, serialNumber)
def open(self, id = -1, serialNumber = None):
"""
Opens the U12.
The Windows UW driver opens the device every time a function is called.
The Exodriver, however, works like the UD family of devices and returns
a handle. On Windows, this method does nothing. On Mac OS X and Linux,
this method acquires a device handle and saves it to the U12 object.
"""
if ON_WINDOWS:
pass
else:
if self.debug: print "open called"
devType = ctypes.c_ulong(1)
openDev = staticLib.LJUSB_OpenDevice
openDev.restype = ctypes.c_void_p
if serialNumber is not None:
numDevices = staticLib.LJUSB_GetDevCount(devType)
for i in range(numDevices):
handle = openDev(i+1, 0, devType)
if handle != 0 and handle is not None:
self.handle = ctypes.c_void_p(handle)
try:
serial = self.rawReadSerial()
except Exception:
serial = self.rawReadSerial()
if serial == int(serialNumber):
break
else:
self.close()
if self.handle is None:
raise U12Exception("Couldn't find a U12 with a serial number matching %s" % serialNumber)
elif id != -1:
numDevices = staticLib.LJUSB_GetDevCount(devType)
for i in range(numDevices):
handle = openDev(i+1, 0, devType)
if handle != 0 and handle is not None:
self.handle = ctypes.c_void_p(handle)
try:
unitId = self.rawReadLocalId()
except Exception:
unitId = self.rawReadLocalId()
if unitId == int(id):
break
else:
self.close()
if self.handle is None:
raise U12Exception("Couldn't find a U12 with a local ID matching %s" % id)
elif id == -1:
handle = openDev(1, 0, devType)
if handle == 0 or handle is None:
raise Exception("Couldn't open a U12. Check that one is connected and try again.")
else:
self.handle = ctypes.c_void_p(handle)
# U12 ignores first command, so let's write a command.
command = [ 0 ] * 8
command[5] = 0x57 # 0b01010111
try:
self.write(command)
self.read()
except:
pass
self.id = self.rawReadLocalId()
else:
raise Exception("Invalid combination of parameters.")
if not self._autoCloseSetup:
# Only need to register auto-close once per device.
atexit.register(self.close)
self._autoCloseSetup = True
def close(self):
if ON_WINDOWS:
pass
else:
staticLib.LJUSB_CloseDevice(self.handle)
self.handle = None
    def write(self, writeBuffer):
        """Write a raw command buffer to the U12.

        Args: writeBuffer, a list of bytes to send to the device.
        Returns: writeBuffer (unchanged).
        Raises: U12Exception if the device is not open or if fewer bytes
                than requested were written.
        On Windows this is a no-op (the UW driver does I/O per call).
        """
        if ON_WINDOWS:
            pass
        else:
            if self.handle is None:
                raise U12Exception("The U12's handle is None. Please open a U12 with open()")
            if self.debug: print "Writing:", hexWithoutQuotes(writeBuffer)
            # Marshal the Python list into a ctypes byte array for the driver.
            newA = (ctypes.c_byte*len(writeBuffer))(0)
            for i in range(len(writeBuffer)):
                newA[i] = ctypes.c_byte(writeBuffer[i])
            writeBytes = staticLib.LJUSB_Write(self.handle, ctypes.byref(newA), len(writeBuffer))
            if(writeBytes != len(writeBuffer)):
                raise U12Exception( "Could only write %s of %s bytes." % (writeBytes, len(writeBuffer) ) )
            return writeBuffer
    def read(self, numBytes = 8):
        """Read up to numBytes bytes from the U12.

        Args: numBytes, number of bytes to request (default 8, one packet).
        Returns: a list of ints (0-255), one per byte actually read.
        Raises: U12Exception if the device is not open.
        On Windows this is a no-op (the UW driver does I/O per call).
        """
        if ON_WINDOWS:
            pass
        else:
            if self.handle is None:
                raise U12Exception("The U12's handle is None. Please open a U12 with open()")
            newA = (ctypes.c_byte*numBytes)()
            readBytes = staticLib.LJUSB_Read(self.handle, ctypes.byref(newA), numBytes)
            # return a list of integers in command/response mode
            # (mask with 0xff because c_byte is signed).
            result = [(newA[i] & 0xff) for i in range(readBytes)]
            if self.debug: print "Received:", hexWithoutQuotes(result)
            return result
# Low-level helpers
def rawReadSerial(self):
"""
Name: U12.rawReadSerial()
Args: None
Desc: Reads the serial number from internal memory.
Returns: The U12's serial number as an integer.
Example:
>>> import u12
>>> d = u12.U12()
>>> print d.rawReadSerial()
10004XXXX
"""
results = self.rawReadRAM()
return struct.unpack(">I", struct.pack("BBBB", results['DataByte3'], results['DataByte2'], results['DataByte1'], results['DataByte0']))[0]
def rawReadLocalId(self):
"""
Name: U12.rawReadLocalId()
Args: None
Desc: Reads the Local ID from internal memory.
Returns: The U12's Local ID as an integer.
Example:
>>> import u12
>>> d = u12.U12()
>>> print d.rawReadLocalId()
0
"""
results = self.rawReadRAM(0x08)
return results['DataByte0']
# Begin Section 5 Functions
def rawAISample(self, channel0PGAMUX = 8, channel1PGAMUX = 9, channel2PGAMUX = 10, channel3PGAMUX = 11, UpdateIO = False, LEDState = True, IO3toIO0States = 0, EchoValue = 0):
"""
Name: U12.rawAISample(channel0PGAMUX = 8, channel1PGAMUX = 9,
channel2PGAMUX = 10, channel3PGAMUX = 11,
UpdateIO = False, LEDState = True,
IO3toIO0States = 0, EchoValue = 0)
Args: channel0PGAMUX, A byte that contains channel0 information
channel1PGAMUX, A byte that contains channel1 information
channel2PGAMUX, A byte that contains channel2 information
channel3PGAMUX, A byte that contains channel3 information
IO3toIO0States, A byte that represents the states of IO0 to IO3
UpdateIO, If true, set IO0 to IO 3 to match IO3toIO0States
LEDState, Turns the status LED on or off.
EchoValue, Sometimes, you want what you put in.
Desc: Collects readings from 4 analog inputs. It can also toggle the
status LED and update the state of the IOs. See Section 5.1 of
the User's Guide.
By default it will read AI0-3 (single-ended).
Returns: A dictionary with the following keys:
PGAOvervoltage, A bool representing if the U12 detected overvoltage
IO3toIO0States, a BitField representing the state of IO0 to IO3
Channel0-3, the analog voltage for the channel
EchoValue, a repeat of the value passed in.
Example:
>>> import u12
>>> d = u12.U12()
>>> d.rawAISample()
{
'IO3toIO0States':
<BitField object: [ IO3 = Low (0), IO2 = Low (0),
IO1 = Low (0), IO0 = Low (0) ] >,
'Channel0': 1.46484375,
'Channel1': 1.4501953125,
'Channel2': 1.4599609375,
'Channel3': 1.4306640625,
'PGAOvervoltage': False,
'EchoValue': 0
}
"""
command = [ 0 ] * 8
# Bits 6-4: PGA for 1st Channel
# Bits 3-0: MUX command for 1st Channel
command[0] = int(channel0PGAMUX)
tempNum = command[0] & 7 # 7 = 0b111
channel0Number = tempNum if (command[0] & 0xf) > 7 else tempNum+8
channel0Gain = (command[0] >> 4) & 7 # 7 = 0b111
command[1] = int(channel1PGAMUX)
tempNum = command[1] & 7 # 7 = 0b111
channel1Number = tempNum if (command[1] & 0xf) > 7 else tempNum+8
channel1Gain = (command[1] >> 4) & 7 # 7 = 0b111
command[2] = int(channel2PGAMUX)
tempNum = command[2] & 7 # 7 = 0b111
channel2Number = tempNum if (command[2] & 0xf) > 7 else tempNum+8
channel2Gain = (command[2] >> 4) & 7 # 7 = 0b111
command[3] = int(channel3PGAMUX)
tempNum = command[3] & 7 # 7 = 0b111
channel3Number = tempNum if (command[3] & 0xf) > 7 else tempNum+8
channel3Gain = (command[3] >> 4) & 7 # 7 = 0b111
# Bit 1: Update IO
# Bit 0: LED State
bf = BitField()
bf.bit1 = int(UpdateIO)
bf.bit0 = int(LEDState)
command[4] = int(bf)
# Bit 7-4: 1100 (Command/Response)
# Bit 3-0: Bits for IO3 through IO0 States
bf.fromByte(0)
bf.bit7 = 1
bf.bit6 = 1
bf.fromByte( int(bf) | int(IO3toIO0States) )
command[5] = int(bf)
command[7] = EchoValue
self.write(command)
results = self.read()
bf = BitField()
bf.fromByte(results[0])
if bf.bit7 != 1 or bf.bit6 != 0:
raise U12Exception("Expected a AIStream response, got %s instead." % results[0])
returnDict = {}
returnDict['EchoValue'] = results[1]
returnDict['PGAOvervoltage'] = bool(bf.bit4)
returnDict['IO3toIO0States'] = BitField(results[0], "IO", range(3, -1, -1), "Low", "High")
channel0 = (results[2] >> 4) & 0xf
channel1 = (results[2] & 0xf)
channel2 = (results[5] >> 4) & 0xf
channel3 = (results[5] & 0xf)
channel0 = (channel0 << 8) + results[3]
returnDict['Channel0'] = self.bitsToVolts(channel0Number, channel0Gain, channel0)
channel1 = (channel1 << 8) + results[4]
returnDict['Channel1'] = self.bitsToVolts(channel1Number, channel1Gain, channel1)
channel2 = (channel2 << 8) + results[6]
returnDict['Channel2'] = self.bitsToVolts(channel2Number, channel2Gain, channel2)
channel3 = (channel3 << 8) + results[7]
returnDict['Channel3'] = self.bitsToVolts(channel3Number, channel3Gain, channel3)
return returnDict
def rawDIO(self, D15toD8Directions = 0, D7toD0Directions = 0, D15toD8States = 0, D7toD0States = 0, IO3toIO0DirectionsAndStates = 0, UpdateDigital = False):
"""
Name: U12.rawDIO(D15toD8Directions = 0, D7toD0Directions = 0,
D15toD8States = 0, D7toD0States = 0,
IO3toIO0DirectionsAndStates = 0, UpdateDigital = 1)
Args: D15toD8Directions, A byte where 0 = Output, 1 = Input for D15-8
D7toD0Directions, A byte where 0 = Output, 1 = Input for D7-0
D15toD8States, A byte where 0 = Low, 1 = High for D15-8
D7toD0States, A byte where 0 = Low, 1 = High for D7-0
IO3toIO0DirectionsAndStates, Bits 7-4: Direction, 3-0: State
UpdateDigital, True if you want to update the IO/D line. False to
False to just read their values.
Desc: This commands reads the direction and state of all the digital
I/O. See Section 5.2 of the U12 User's Guide.
By default, it just reads the directions and states.
Returns: A dictionary with the following keys:
D15toD8Directions, a BitField representing the directions of D15-D8
D7toD0Directions, a BitField representing the directions of D7-D0.
D15toD8States, a BitField representing the states of D15-D8.
D7toD0States, a BitField representing the states of D7-D0.
IO3toIO0States, a BitField representing the states of IO3-IO0.
D15toD8OutputLatchStates, BitField of output latch states for D15-8
D7toD0OutputLatchStates, BitField of output latch states for D7-0
Example:
>>> import u12
>>> d = u12.U12()
>>> d.rawDIO()
{
'D15toD8Directions':
<BitField object: [ D15 = Input (1), D14 = Input (1),
D13 = Input (1), D12 = Input (1),
D11 = Input (1), D10 = Input (1),
D9 = Input (1), D8 = Input (1) ] >,
'D7toD0Directions':
<BitField object: [ D7 = Input (1), D6 = Input (1), D5 = Input (1),
D4 = Input (1), D3 = Input (1), D2 = Input (1),
D1 = Input (1), D0 = Input (1) ] >,
'D15toD8States':
<BitField object: [ D15 = Low (0), D14 = Low (0), D13 = Low (0),
D12 = Low (0), D11 = Low (0), D10 = Low (0),
D9 = Low (0), D8 = Low (0) ] >,
'D7toD0States':
<BitField object: [ D7 = Low (0), D6 = Low (0), D5 = Low (0),
D4 = Low (0), D3 = Low (0), D2 = Low (0),
D1 = Low (0), D0 = Low (0) ] >,
'IO3toIO0States':
<BitField object: [ IO3 = Low (0), IO2 = Low (0), IO1 = Low (0),
IO0 = Low (0) ] >,
'D15toD8OutputLatchStates':
<BitField object: [ D15 = 0 (0), D14 = 0 (0), D13 = 0 (0),
D12 = 0 (0), D11 = 0 (0), D10 = 0 (0),
D9 = 0 (0), D8 = 0 (0) ] >,
'D7toD0OutputLatchStates':
<BitField object: [ D7 = 0 (0), D6 = 0 (0), D5 = 0 (0), D4 = 0 (0),
D3 = 0 (0), D2 = 0 (0), D1 = 0 (0),
D0 = 0 (0) ] >
}
"""
command = [ 0 ] * 8
# Bits for D15 through D8 Direction
command[0] = int(D15toD8Directions)
# Bits for D7 through D0 Direction ( 0 = Output, 1 = Input)
command[1] = int(D7toD0Directions)
# Bits for D15 through D8 State ( 0 = Low, 1 = High)
command[2] = int(D15toD8States)
# Bits for D7 through D0 State ( 0 = Low, 1 = High)
command[3] = int(D7toD0States)
# Bits 7-4: Bits for IO3 through IO0 Direction
# Bits 3-0: Bits for IO3 through IO0 State
command[4] = int(IO3toIO0DirectionsAndStates)
# 01X10111 (DIO)
command[5] = 0x57 # 0b01010111
# Bit 0: Update Digital
command[6] = int(bool(UpdateDigital))
#XXXXXXXX
# command[7] = XXXXXXXX
self.write(command)
results = self.read()
returnDict = {}
if results[0] != 87:
raise U12Exception("Expected a DIO response, got %s instead." % results[0])
returnDict['D15toD8States'] = BitField(results[1], "D", range(15, 7, -1), "Low", "High")
returnDict['D7toD0States'] = BitField(results[2], "D", range(7, -1, -1), "Low", "High")
returnDict['D15toD8Directions'] = BitField(results[4], "D", range(15, 7, -1), "Output", "Input")
returnDict['D7toD0Directions'] = BitField(results[5], "D", range(7, -1, -1), "Output", "Input")
returnDict['D15toD8OutputLatchStates'] = BitField(results[6], "D", range(15, 7, -1))
returnDict['D7toD0OutputLatchStates'] = BitField(results[7], "D", range(7, -1, -1))
returnDict['IO3toIO0States'] = BitField((results[3] >> 4), "IO", range(3, -1, -1), "Low", "High")
return returnDict
def rawCounter(self, StrobeEnabled = False, ResetCounter = False):
"""
Name: U12.rawCounter(StrobeEnabled = False, ResetCounter = False)
Args: StrobeEnable, set to True to enable strobe.
ResetCounter, set to True to reset the counter AFTER reading.
Desc: This command controls and reads the 32-bit counter. See
Section 5.3 of the User's Guide.
Returns: A dictionary with the following keys:
D15toD8States, a BitField representing the states of D15-D8.
D7toD0States, a BitField representing the states of D7-D0.
IO3toIO0States, a BitField representing the states of IO3-IO0.
Counter, the value of the counter
Example:
>>> import u12
>>> d = u12.U12()
>>> d.rawCounter()
{
'D15toD8States':
<BitField object: [ D15 = Low (0), D14 = Low (0), D13 = Low (0),
D12 = Low (0), D11 = Low (0), D10 = Low (0),
D9 = Low (0), D8 = Low (0) ] >,
'D7toD0States':
<BitField object: [ D7 = Low (0), D6 = Low (0), D5 = Low (0),
D4 = Low (0), D3 = Low (0), D2 = Low (0),
D1 = Low (0), D0 = Low (0) ] >,
'IO3toIO0States':
<BitField object: [ IO3 = Low (0), IO2 = Low (0), IO1 = Low (0),
IO0 = Low (0) ] >,
'Counter': 0
}
"""
command = [ 0 ] * 8
bf = BitField()
bf.bit1 = int(StrobeEnabled)
bf.bit0 = int(ResetCounter)
command[0] = int(bf)
bf.fromByte(0)
bf.bit6 = 1
bf.bit4 = 1
bf.bit1 = 1
command[5] = int(bf)
self.write(command)
results = self.read()
returnDict = {}
if results[0] != command[5]:
raise U12Exception("Expected a Counter response, got %s instead." % results[0])
returnDict['D15toD8States'] = BitField(results[1], "D", range(15, 7, -1), "Low", "High")
returnDict['D7toD0States'] = BitField(results[2], "D", range(7, -1, -1), "Low", "High")
returnDict['IO3toIO0States'] = BitField((results[3] >> 4), "IO", range(3, -1, -1), "Low", "High")
counter = results[7]
counter += results[6] << 8
counter += results[5] << 16
counter += results[4] << 24
returnDict['Counter'] = counter
return returnDict
def rawCounterPWMDIO(self, D15toD8Directions = 0, D7toD0Directions = 0, D15toD8States = 0, D7toD0States = 0, IO3toIO0DirectionsAndStates = 0, ResetCounter = False, UpdateDigital = 0, PWMA = 0, PWMB = 0):
"""
Name: U12.rawCounterPWMDIO( D15toD8Directions = 0, D7toD0Directions = 0,
D15toD8States = 0, D7toD0States = 0,
IO3toIO0DirectionsAndStates = 0,
ResetCounter = False, UpdateDigital = 0,
PWMA = 0, PWMB = 0)
Args: D15toD8Directions, A byte where 0 = Output, 1 = Input for D15-8
D7toD0Directions, A byte where 0 = Output, 1 = Input for D7-0
D15toD8States, A byte where 0 = Low, 1 = High for D15-8
D7toD0States, A byte where 0 = Low, 1 = High for D7-0
IO3toIO0DirectionsAndStates, Bits 7-4: Direction, 3-0: State
ResetCounter, If True, reset the counter after reading.
UpdateDigital, True if you want to update the IO/D line. False to
False to just read their values.
PWMA, Voltage to set AO0 to output.
PWMB, Voltage to set AO1 to output.
Desc: This command controls all 20 digital I/O, and the 2 PWM outputs.
The response provides the state of all I/O and the current count.
See Section 5.4 of the User's Guide.
By default, sets the AOs to 0 and reads the states and counters.
Returns: A dictionary with the following keys:
D15toD8States, a BitField representing the states of D15-D8.
D7toD0States, a BitField representing the states of D7-D0.
IO3toIO0States, a BitField representing the states of IO3-IO0.
Counter, the value of the counter
Example:
>>> import u12
>>> d = u12.U12()
>>> d.rawCounterPWMDIO()
{
'D15toD8States':
<BitField object: [ D15 = Low (0), D14 = Low (0), D13 = Low (0),
D12 = Low (0), D11 = Low (0), D10 = Low (0),
D9 = Low (0), D8 = Low (0) ] >,
'D7toD0States':
<BitField object: [ D7 = Low (0), D6 = Low (0), D5 = Low (0),
D4 = Low (0), D3 = Low (0), D2 = Low (0),
D1 = Low (0), D0 = Low (0) ] >,
'IO3toIO0States':
<BitField object: [ IO3 = Low (0), IO2 = Low (0),
IO1 = Low (0), IO0 = Low (0) ] >,
'Counter': 0
}
"""
command = [ 0 ] * 8
# Bits for D15 through D8 Direction
command[0] = int(D15toD8Directions)
# Bits for D7 through D0 Direction ( 0 = Output, 1 = Input)
command[1] = int(D7toD0Directions)
# Bits for D15 through D8 State ( 0 = Low, 1 = High)
command[2] = int(D15toD8States)
# Bits for D7 through D0 State ( 0 = Low, 1 = High)
command[3] = int(D7toD0States)
# Bits 7-4: Bits for IO3 through IO0 Direction
# Bits 3-0: Bits for IO3 through IO0 State
command[4] = int(IO3toIO0DirectionsAndStates)
bf = BitField()
bf.bit5 = int(ResetCounter)
bf.bit4 = int(UpdateDigital)
binPWMA = int((1023 * (float(PWMA)/5.0)))
binPWMB = int((1023 * (float(PWMB)/5.0)))
bf2 = BitField()
bf2.fromByte( binPWMA & 3 ) # 3 = 0b11
bf.bit3 = bf2.bit1
bf.bit2 = bf2.bit0
bf2.fromByte( binPWMB & 3 ) # 3 = 0b11
bf.bit1 = bf2.bit1
bf.bit0 = bf2.bit0
command[5] = int(bf)
command[6] = (binPWMA >> 2) & 0xff
command[7] = (binPWMB >> 2) & 0xff
self.write(command)
results = self.read()
returnDict = {}
returnDict['D15toD8States'] = BitField(results[1], "D", range(15, 7, -1), "Low", "High")
returnDict['D7toD0States'] = BitField(results[2], "D", range(7, -1, -1), "Low", "High")
returnDict['IO3toIO0States'] = BitField((results[3] >> 4), "IO", range(3, -1, -1), "Low", "High")
counter = results[7]
counter += results[6] << 8
counter += results[5] << 16
counter += results[4] << 24
returnDict['Counter'] = counter
return returnDict
def rawAIBurst(self, channel0PGAMUX = 8, channel1PGAMUX = 9, channel2PGAMUX = 10, channel3PGAMUX = 11, NumberOfScans = 8, TriggerIONum = 0, TriggerState = 0, UpdateIO = False, LEDState = True, IO3ToIO0States = 0, FeatureReports = False, TriggerOn = False, SampleInterval = 15000):
"""
Name: U12.rawAIBurst( channel0PGAMUX = 8, channel1PGAMUX = 9,
channel2PGAMUX = 10, channel3PGAMUX = 11,
NumberOfScans = 8, TriggerIONum = 0,
TriggerState = 0, UpdateIO = False,
LEDState = True, IO3ToIO0States = 0,
FeatureReports = False, TriggerOn = False,
SampleInterval = 15000 )
Args: channel0PGAMUX, A byte that contains channel0 information
channel1PGAMUX, A byte that contains channel1 information
channel2PGAMUX, A byte that contains channel2 information
channel3PGAMUX, A byte that contains channel3 information
NumberOfScans, The number of scans you wish to take. Rounded up
to a power of 2.
TriggerIONum, IO to trigger burst on.
TriggerState, State to trigger on.
UpdateIO, True if you want to update the IO/D line. False to
False to just read their values.
LEDState, Turns the status LED on or off.
IO3ToIO0States, 4 bits for IO3-0 states
FeatureReports, Use feature reports, or not.
TriggerOn, Use trigger to start acquisition.
SampleInterval, = int(6000000.0/(ScanRate * NumberOfChannels))
must be greater than (or equal to) 733.
Desc: After receiving a AIBurst command, the LabJack collects 4
channels at the specified data rate, and puts data in the buffer.
This continues until the buffer is full, at which time the
LabJack starts sending the data to the host. Data is sent to the
host 1 scan at a time while checking for a command from the host.
If a command is received the burst operation is canceled and the
command is executed normally. If the LED is enabled, it blinks at
4 Hz while waiting for a trigger, is off during acquisition,
blinks at about 8 Hz during data delivery, and is set on when
done or stopped. See Section 5.5 of the User's Guide.
This function sends the AIBurst command, then reads all the
responses. Separating the write and read is not currently
supported (like in the UW driver).
By default, it does single-ended readings on AI0-4 at 100Hz for 8
scans.
Returns: A dictionary with the following keys:
Channel0-3, A list of the readings on the channels
PGAOvervoltages, A list of the over-voltage flags
IO3toIO0State, A list of the IO states
IterationCounters, A list of the values of the iteration counter
Backlogs, value*256 = number of packets in the backlog.
BufferOverflowOrChecksumErrors, If True and Backlog = 31,
then a buffer overflow occurred. If
True and Backlog = 0, then Checksum
error occurred.
Example:
>>> import u12
>>> d = u12.U12()
>>> d.rawAIBurst()
{
'Channel0': [1.484375, 1.513671875, ... , 1.46484375],
'Channel1': [1.455078125, 1.455078125, ... , 1.455078125],
'Channel2': [1.46484375, 1.474609375, ... , 1.46484375],
'Channel3': [1.435546875, 1.42578125, ... , 1.435546875],
'PGAOvervoltages': [False, False, ..., False],
'IO3toIO0States':
[<BitField object: [ IO3 = Low (0), IO2 = Low (0), IO1 = Low (0),
IO0 = Low (0) ] >, ... ],
'IterationCounters': [0, 1, 2, 3, 4, 5, 6, 0],
'Backlogs': [0, 0, 0, 0, 0, 0, 0, 0],
'BufferOverflowOrChecksumErrors': [False, False, ... , False]
}
"""
command = [ 0 ] * 8
# Bits 6-4: PGA for 1st Channel
# Bits 3-0: MUX command for 1st Channel
command[0] = int(channel0PGAMUX)
tempNum = command[0] & 7 # 7 = 0b111
channel0Number = tempNum if (command[0] & 0xf) > 7 else tempNum+8
channel0Gain = (command[0] >> 4) & 7 # 7 = 0b111
command[1] = int(channel1PGAMUX)
tempNum = command[1] & 7 # 7 = 0b111
channel1Number = tempNum if (command[1] & 0xf) > 7 else tempNum+8
channel1Gain = (command[1] >> 4) & 7 # 7 = 0b111
command[2] = int(channel2PGAMUX)
tempNum = command[2] & 7 # 7 = 0b111
channel2Number = tempNum if (command[2] & 0xf) > 7 else tempNum+8
channel2Gain = (command[2] >> 4) & 7 # 7 = 0b111
command[3] = int(channel3PGAMUX)
tempNum = command[3] & 7 # 7 = 0b111
channel3Number = tempNum if (command[3] & 0xf) > 7 else tempNum+8
channel3Gain = (command[3] >> 4) & 7 # 7 = 0b111
if NumberOfScans > 1024 or NumberOfScans < 8:
raise U12Exception("The number of scans must be between 1024 and 8 (inclusive)")
NumScansExponentMod = 10 - int(math.ceil(math.log(NumberOfScans, 2)))
NumScans = 2 ** (10 - NumScansExponentMod)
bf = BitField( rawByte = (NumScansExponentMod << 5) )
# bits 4-3: IO to Trigger on
bf.bit2 = 0
bf.bit1 = int(bool(UpdateIO))
bf.bit0 = int(bool(LEDState))
command[4] = int(bf)
bf2 = BitField(rawByte = int(IO3ToIO0States))
#Bits 7-4: 1010 (Start Burst)
bf2.bit7 = 1
bf2.bit5 = 1
command[5] = int(bf2)
if SampleInterval < 733:
raise U12Exception("SampleInterval must be greater than 733.")
bf3 = BitField( rawByte = ((SampleInterval >> 8) & 0xf) )
bf3.bit7 = int(bool(FeatureReports))
bf3.bit6 = int(bool(TriggerOn))
command[6] = int(bf3)
command[7] = SampleInterval & 0xff
self.write(command)
resultsList = []
for i in range(NumScans):
resultsList.append(self.read())
returnDict = {}
returnDict['BufferOverflowOrChecksumErrors'] = list()
returnDict['PGAOvervoltages'] = list()
returnDict['IO3toIO0States'] = list()
returnDict['IterationCounters'] = list()
returnDict['Backlogs'] = list()
returnDict['Channel0'] = list()
returnDict['Channel1'] = list()
returnDict['Channel2'] = list()
returnDict['Channel3'] = list()
for results in resultsList:
bf = BitField(rawByte = results[0])
if bf.bit7 != 1 or bf.bit6 != 0:
raise U12Exception("Expected a AIBurst response, got %s instead." % results[0])
returnDict['BufferOverflowOrChecksumErrors'].append(bool(bf.bit5))
returnDict['PGAOvervoltages'].append(bool(bf.bit4))
returnDict['IO3toIO0States'].append(BitField(results[0], "IO", range(3, -1, -1), "Low", "High"))
returnDict['IterationCounters'].append((results[1] >> 5))
returnDict['Backlogs'].append(results[1] & 0xf)
channel0 = (results[2] >> 4) & 0xf
channel1 = (results[2] & 0xf)
channel2 = (results[5] >> 4) & 0xf
channel3 = (results[5] & 0xf)
channel0 = (channel0 << 8) + results[3]
returnDict['Channel0'].append(self.bitsToVolts(channel0Number, channel0Gain, channel0))
channel1 = (channel1 << 8) + results[4]
returnDict['Channel1'].append(self.bitsToVolts(channel1Number, channel1Gain, channel1))
channel2 = (channel2 << 8) + results[6]
returnDict['Channel2'].append(self.bitsToVolts(channel2Number, channel2Gain, channel2))
channel3 = (channel3 << 8) + results[7]
returnDict['Channel3'].append(self.bitsToVolts(channel3Number, channel3Gain, channel3))
return returnDict
    def rawAIContinuous(self, channel0PGAMUX = 8, channel1PGAMUX = 9, channel2PGAMUX = 10, channel3PGAMUX = 11, FeatureReports = False, CounterRead = False, UpdateIO = False, LEDState = True, IO3ToIO0States = 0, SampleInterval = 15000):
        """
        Currently in development.

        The function is mostly implemented, but is currently too slow to be
        useful.

        Generator: sends the Start Continuous command, then yields a dict per
        response packet with keys 'Byte0' (BitField), 'IterationCounter' and
        'Backlog'. The loop never terminates on its own; the caller must stop
        iterating. Note that the channel number/gain values are decoded from
        the PGAMUX bytes below but the channel readings are not yet parsed
        out of the response packets.
        """
        command = [ 0 ] * 8
        # Bits 6-4: PGA for 1st Channel
        # Bits 3-0: MUX command for 1st Channel
        command[0] = int(channel0PGAMUX)
        tempNum = command[0] & 7 # 7 = 0b111
        channel0Number = tempNum if (command[0] & 0xf) > 7 else tempNum+8
        channel0Gain = (command[0] >> 4) & 7 # 7 = 0b111
        command[1] = int(channel1PGAMUX)
        tempNum = command[1] & 7 # 7 = 0b111
        channel1Number = tempNum if (command[1] & 0xf) > 7 else tempNum+8
        channel1Gain = (command[1] >> 4) & 7 # 7 = 0b111
        command[2] = int(channel2PGAMUX)
        tempNum = command[2] & 7 # 7 = 0b111
        channel2Number = tempNum if (command[2] & 0xf) > 7 else tempNum+8
        channel2Gain = (command[2] >> 4) & 7 # 7 = 0b111
        command[3] = int(channel3PGAMUX)
        tempNum = command[3] & 7 # 7 = 0b111
        channel3Number = tempNum if (command[3] & 0xf) > 7 else tempNum+8
        channel3Gain = (command[3] >> 4) & 7 # 7 = 0b111
        # Byte 4: option flags.
        bf = BitField()
        bf.bit7 = int(bool(FeatureReports))
        bf.bit6 = int(bool(CounterRead))
        bf.bit1 = int(bool(UpdateIO))
        bf.bit0 = int(bool(LEDState))
        command[4] = int(bf)
        # Bits 7-4: 1001 (Start Continuous)
        bf2 = BitField( rawByte = int(IO3ToIO0States) )
        bf2.bit7 = 1
        bf2.bit4 = 1
        command[5] = int(bf2)
        # Bytes 6-7: 16-bit sample interval, big-endian.
        command[6] = ( SampleInterval >> 8)
        command[7] = SampleInterval & 0xff
        byte0bf = BitField()
        returnDict = dict()
        self.write(command)
        while True:
            results = self.read()
            byte0bf.fromByte(results[0])
            returnDict['Byte0'] = byte0bf
            returnDict['IterationCounter'] = (results[1] >> 5)
            returnDict['Backlog'] = results[1] & 0xf
            # NOTE: the same dict object is yielded each iteration; callers
            # should copy it if they keep references across iterations.
            yield returnDict
def rawPulseout(self, B1 = 10, C1 = 2, B2 = 10, C2 = 2, D7ToD0PulseSelection = 1, ClearFirst = False, NumberOfPulses = 5):
"""
Name: U12.rawPulseout( B1 = 10, C1 = 2, B2 = 10, C2 = 2,
D7ToD0PulseSelection = 1, ClearFirst = False,
NumberOfPulses = 5)
Args: B1, the B component of the first half cycle
C1, the C component of the first half cycle
B2, the B component of the second half cycle
C2, the C component of the second half cycle
D7ToD0PulseSelection, which D lines to pulse.
ClearFirst, True = Start Low.
NumberOfPulses, the number of pulses
Desc: This command creates pulses on any, or all, of D0-D7. The desired
D lines must be set to output with some other function. See
Section 5.7 of the User's Guide.
By default, pulses D0 5 times at 400us high, then 400 us low.
Returns: None
Example:
Have a jumper wire connected from D0 to CNT.
>>> import u12
>>> d = u12.U12()
>>> d.rawDIO(D7toD0Directions = 0, UpdateDigital = True)
>>> d.rawCounter(ResetCounter = True)
>>> d.rawPulseout(ClearFirst = True)
>>> print d.rawCounter()
{ 'IO3toIO0States': ... ,
'Counter': 5,
'D7toD0States': ... ,
'D15toD8States': ...
}
"""
command = [ 0 ] * 8
command[0] = B1
command[1] = C1
command[2] = B2
command[3] = C2
command[4] = int(D7ToD0PulseSelection)
# 01100100 (Pulseout)
bf = BitField()
bf.bit6 = 1
bf.bit5 = 1
bf.bit2 = 1
command[5] = int(bf)
bf2 = BitField( rawByte = ( NumberOfPulses >> 8 ) )
bf2.bit7 = int(bool(ClearFirst))
command[6] = int(bf2)
command[7] = NumberOfPulses & 0xff
self.write(command)
results = self.read()
if command[5] != results[5]:
raise U12Exception("Expected Pulseout response, got %s instead." % results[5])
if results[4] != 0:
errors = BitField(rawByte = command[4], labelPrefix = "D", zeroLabel = "Ok", oneLabel = "Error")
raise U12Exception("D7-D0 Direction error detected: %s" % errors)
return None
def rawReset(self):
"""
Name: U12.rawReset()
Desc: Sits in an infinite loop until micro watchdog timeout after about
2 seconds. See Section 5.8 of the User's Guide.
Note: The function will close the device after it has written the
command.
Returns: None
Example:
>>> import u12
>>> d = u12.U12()
>>> d.rawReset()
"""
command = [ 0 ] * 8
# 0b01011111 ( Reset )
bf = BitField()
bf.bit6 = 1
bf.bit4 = 1
bf.bit3 = 1
bf.bit2 = 1
bf.bit1 = 1
bf.bit0 = 1
command[5] = int(bf)
self.write(command)
self.close()
def rawReenumerate(self):
"""
Name: U12.rawReenumerate()
Desc: Detaches from the USB, reloads config parameters, and then
reattaches so the device can be re-enumerated. See Section 5.9 of
the User's Guide.
Note: The function will close the device after it has written the
command.
Returns: None
Example:
>>> import u12
>>> d = u12.U12()
>>> d.rawReenumerate()
"""
command = [ 0 ] * 8
# 0b01000000 (Re-Enumerate)
bf = BitField()
bf.bit6 = 1
command[5] = int(bf)
self.write(command)
self.close()
def rawWatchdog(self, IgnoreCommands = False, D0Active = False, D0State = False, D1Active = False, D1State = False, D8Active = False, D8State = False, ResetOnTimeout = False, WatchdogActive = False, Timeout = 60):
"""
Name: U12.rawWatchdog( IgnoreCommands = False, D0Active = False,
D0State = False, D1Active = False,
D1State = False, D8Active = False,
D8State = False, ResetOnTimeout = False,
WatchdogActive = False, Timeout = 60)
Desc: Sets the settings for the watchdog, or just reads the firmware
version of the U12. See section 5.10 of the User's Guide.
By defaults, just reads the firmware version.
Returns: A dictionary with the following keys:
FirmwareVersion, the firmware version of the U12.
Example:
>>> import u12
>>> d = u12.U12()
>>> print d.rawWatchdog()
{'FirmwareVersion': '1.10'}
"""
command = [ 0 ] * 8
command[0] = int(bool(IgnoreCommands))
bf = BitField()
bf.bit7 = int(D0Active)
bf.bit6 = int(D0State)
bf.bit5 = int(D1Active)
bf.bit4 = int(D1State)
bf.bit3 = int(D8Active)
bf.bit2 = int(D8State)
bf.bit1 = int(ResetOnTimeout)
bf.bit0 = int(WatchdogActive)
command[4] = int(bf)
# 01X1X011 (Watchdog)
bf2 = BitField()
bf2.bit6 = 1
bf2.bit4 = 1
bf2.bit1 = 1
bf2.bit0 = 1
command[5] = int(bf2)
# Timeout is increments of 2^16 cycles.
# 2^16 cycles is about 0.01 seconds.
binTimeout = int((float(Timeout) / 0.01))
command[6] = ( binTimeout >> 8 ) & 0xff
command[7] = binTimeout & 0xff
self.write(command)
results = self.read()
returnDict = dict()
returnDict['FirmwareVersion'] = "%s.%.2d" % (results[0], results[1])
return returnDict
def rawReadRAM(self, Address = 0):
"""
Name: U12.rawReadRAM(Address = 0)
Args: Address, the starting address to read from
Desc: Reads 4 bytes out of the U12's internal memory. See section 5.11
of the User's Guide.
By default, reads the bytes that make up the serial number.
Returns: A dictionary with the following keys:
DataByte0, the data byte at Address - 0
DataByte1, the data byte at Address - 1
DataByte2, the data byte at Address - 2
DataByte3, the data byte at Address - 3
Example:
>>> import u12, struct
>>> d = u12.U12()
>>> r = d.rawReadRAM()
>>> print r
{'DataByte3': 5, 'DataByte2': 246, 'DataByte1': 139, 'DataByte0': 170}
>>> bytes = [ r['DataByte3'], r['DataByte2'], r['DataByte1'], r['DataByte0'] ]
>>> print struct.unpack(">I", struct.pack("BBBB", *bytes))[0]
100043690
"""
command = [ 0 ] * 8
# 01010000 (Read RAM)
bf = BitField()
bf.bit6 = 1
bf.bit4 = 1
command[5] = int(bf)
command[6] = (Address >> 8) & 0xff
command[7] = Address & 0xff
self.write(command)
results = self.read()
if results[0] != int(bf):
raise U12Exception("Expected ReadRAM response, got %s" % results[0])
if (results[6] != command[6]) or (results[7] != command[7]):
receivedAddress = (results[6] << 8) + results[7]
raise U12Exception("Wanted address %s got address %s" % (Address, receivedAddress))
returnDict = dict()
returnDict['DataByte3'] = results[1]
returnDict['DataByte2'] = results[2]
returnDict['DataByte1'] = results[3]
returnDict['DataByte0'] = results[4]
return returnDict
def rawWriteRAM(self, Data, Address):
"""
Name: U12.rawWriteRAM(Data, Address)
Args: Data, a list of 4 bytes to write to memory.
Address, the starting address to write to.
Desc: Writes 4 bytes to the U12's internal memory. See section 5.13 of
the User's Guide.
No default behavior, you must pass Data and Address.
Returns: A dictionary with the following keys:
DataByte0, the data byte at Address - 0
DataByte1, the data byte at Address - 1
DataByte2, the data byte at Address - 2
DataByte3, the data byte at Address - 3
Example:
>>> import u12
>>> d = u12.U12()
>>> print d.rawWriteRAM([1, 2, 3, 4], 0x200)
{'DataByte3': 4, 'DataByte2': 3, 'DataByte1': 2, 'DataByte0': 1}
"""
command = [ 0 ] * 8
if not isinstance(Data, list) or len(Data) > 4:
raise U12Exception("Data wasn't a list, or was too long.")
Data.reverse()
command[:len(Data)] = Data
# 01010001 (Write RAM)
bf = BitField()
bf.bit6 = 1
bf.bit4 = 1
bf.bit0 = 1
command[5] = int(bf)
command[6] = (Address >> 8) & 0xff
command[7] = Address & 0xff
self.write(command)
results = self.read()
if results[0] != int(bf):
raise U12Exception("Expected ReadRAM response, got %s" % results[0])
if (results[6] != command[6]) or (results[7] != command[7]):
receivedAddress = (results[6] << 8) + results[7]
raise U12Exception("Wanted address %s got address %s" % (Address, receivedAddress))
returnDict = dict()
returnDict['DataByte3'] = results[1]
returnDict['DataByte2'] = results[2]
returnDict['DataByte1'] = results[3]
returnDict['DataByte0'] = results[4]
return returnDict
def rawAsynch(self, Data, AddDelay = False, TimeoutActive = False, SetTransmitEnable = False, PortB = False, NumberOfBytesToWrite = 0, NumberOfBytesToRead = 0):
    """
    Name: U12.rawAsynch(Data, AddDelay = False, TimeoutActive = False,
                        SetTransmitEnable = False, PortB = False,
                        NumberOfBytesToWrite = 0, NumberOfBytesToRead = 0)
    Args: Data, A list of bytes to write.
          AddDelay, True to add a 1 bit delay between each transmit byte.
          TimeoutActive, True to enable timeout for the receive phase.
          SetTransmitEnable, True to set Transmit Enable to high during
                             transmit and low during receive.
          PortB, True to use PortB instead of PortA.
          NumberOfBytesToWrite, Number of bytes to write.
          NumberOfBytesToRead, Number of bytes to read.
    Desc: Requires firmware V1.1 or higher. This function writes and then
          reads half-duplex asynchronous data on 1 of two pairs of D lines.
          See section 5.13 of the User's Guide.
    Returns: A dictionary with the following keys,
             DataByte0-3, the first four data bytes read over the RX line
             ErrorFlags, a BitField representing the error flags.
    Example:
    >>> import u12
    >>> d = u12.U12()
    >>> # Set the full and half A,B,C to 9600
    >>> d.rawWriteRAM([0, 1, 1, 200], 0x073)
    >>> d.rawWriteRAM([5, 1, 2, 48], 0x076)
    >>> print d.rawAsynch([1, 2, 3, 4], NumberOfBytesToWrite = 4, NumberOfBytesToRead = 4)
    {
      'DataByte3': 4,
      'DataByte2': 3,
      'DataByte1': 2,
      'DataByte0': 1,
      'ErrorFlags': <BitField object: [ Timeout Error Flag = 0 (0), ... ] >
    }
    """
    # 8-byte command buffer sent to the device.
    command = [ 0 ] * 8

    if not isinstance(Data, list) or len(Data) > 4:
        raise U12Exception("Data wasn't a list, or was too long.")

    # Bug fix: the write count was previously masked from
    # NumberOfBytesToRead, silently discarding the caller's value.
    NumberOfBytesToWrite = NumberOfBytesToWrite & 0xff
    NumberOfBytesToRead = NumberOfBytesToRead & 0xff

    if NumberOfBytesToWrite > 18:
        raise U12Exception("Can only write 18 or fewer bytes at a time.")

    if NumberOfBytesToRead > 18:
        raise U12Exception("Can only read 18 or fewer bytes at a time.")

    # Device expects the data bytes in reverse order at the front of the
    # command.  NOTE: this mutates the caller's list in place.
    Data.reverse()
    command[:len(Data)] = Data

    # Byte 4: option flags.
    bf = BitField()
    bf.bit3 = int(bool(AddDelay))
    bf.bit2 = int(bool(TimeoutActive))
    bf.bit1 = int(bool(SetTransmitEnable))
    bf.bit0 = int(bool(PortB))
    command[4] = int(bf)

    # Byte 5: command code 01100001 (Asynch).
    bf2 = BitField()
    bf2.bit6 = 1
    bf2.bit5 = 1
    bf2.bit0 = 1
    command[5] = int(bf2)

    command[6] = NumberOfBytesToWrite
    command[7] = NumberOfBytesToRead

    self.write(command)
    results = self.read()

    # The echoed command byte identifies the response type.
    if command[5] != results[5]:
        raise U12Exception("Expected Asynch response, got %s instead." % results[5])

    returnDict = dict()
    returnDict['DataByte3'] = results[0]
    returnDict['DataByte2'] = results[1]
    returnDict['DataByte1'] = results[2]
    returnDict['DataByte0'] = results[3]

    bfLabels = ["Timeout Error Flag", "STRT Error Flag", "FRM Error Flag", "RXTris Error Flag", "TETris Error Flag", "TXTris Error Flag"]
    bf = BitField( rawByte = results[4], labelPrefix = "", labelList = bfLabels )

    returnDict["ErrorFlags"] = bf

    return returnDict
SPIModes = ['A', 'B', 'C', 'D']
def rawSPI(self, Data, AddMsDelay = False, AddHundredUsDelay = False, SPIMode = 'A', NumberOfBytesToWriteRead = 0, ControlCS = False, StateOfActiveCS = False, CSLineNumber = 0):
    """
    Name: U12.rawSPI( Data, AddMsDelay = False, AddHundredUsDelay = False,
                      SPIMode = 'A', NumberOfBytesToWriteRead = 0,
                      ControlCS = False, StateOfActiveCS = False,
                      CSLineNumber = 0)
    Args: Data, A list of four bytes to write using SPI
          AddMsDelay, If True, a 1 ms delay is added between each bit
          AddHundredUsDelay, if True, 100us delay is added
          SPIMode, 'A', 'B', 'C', or 'D'
          NumberOfBytesToWriteRead, number of bytes to write and read.
          ControlCS, D0-D7 is automatically controlled as CS. The state and
                     direction of CS is only tested if control is enabled.
          StateOfActiveCS, Active state for CS line.
          CSLineNumber, D line to use as CS if enabled (0-7).
    Desc: This function performs SPI communication. See Section 5.14 of the
          User's Guide.
    Returns: A dictionary with the following keys,
             DataByte0-3, the first four data bytes read
             ErrorFlags, a BitField representing the error flags.
    Example:
    >>> import u12
    >>> d = u12.U12()
    >>> d.rawSPI([1,2,3,4], NumberOfBytesToWriteRead = 4)
    {
      'DataByte3': 4,
      'DataByte2': 3,
      'DataByte1': 2,
      'DataByte0': 1,
      'ErrorFlags':
       <BitField object: [ CSStateTris Error Flag = 0 (0), ... ] >
    }
    """
    # 8-byte command buffer sent to the device.
    command = [ 0 ] * 8

    if not isinstance(Data, list) or len(Data) > 4:
        raise U12Exception("Data wasn't a list, or was too long.")

    NumberOfBytesToWriteRead = NumberOfBytesToWriteRead & 0xff

    # Default to transferring exactly as many bytes as were supplied.
    if NumberOfBytesToWriteRead == 0:
        NumberOfBytesToWriteRead = len(Data)

    if NumberOfBytesToWriteRead > 18 or NumberOfBytesToWriteRead < 1:
        raise U12Exception("Can only read/write 1 to 18 bytes at a time.")

    # Device expects the data bytes in reverse order at the front of the
    # command.  NOTE: this mutates the caller's list in place.
    Data.reverse()
    command[:len(Data)] = Data

    # Byte 4: delay flags plus a one-hot flag selecting the SPI mode.
    bf = BitField()
    bf.bit7 = int(bool(AddMsDelay))
    bf.bit6 = int(bool(AddHundredUsDelay))
    modeIndex = self.SPIModes.index(SPIMode)
    # NOTE(review): presumably BitField.__setitem__ indexes MSB-first, so
    # index 7-modeIndex selects bit modeIndex (modes A..D -> bits 0..3);
    # confirm against BitField's implementation.
    bf[7-modeIndex] = 1
    command[4] = int(bf)

    # Byte 5: command code 01100010 (SPI).
    bf2 = BitField()
    bf2.bit6 = 1
    bf2.bit5 = 1
    bf2.bit1 = 1
    command[5] = int(bf2)

    command[6] = NumberOfBytesToWriteRead

    # Byte 7: CS control flags in the top two bits, CS line number below.
    bf3 = BitField(rawByte = CSLineNumber)
    bf3.bit7 = int(bool(ControlCS))
    bf3.bit6 = int(bool(StateOfActiveCS))
    command[7] = int(bf3)

    self.write(command)
    results = self.read()

    # The echoed command byte identifies the response type.
    if results[5] != command[5]:
        raise U12Exception("Expected SPI response, got %s instead." % results[5])

    returnDict = dict()
    returnDict['DataByte3'] = results[0]
    returnDict['DataByte2'] = results[1]
    returnDict['DataByte1'] = results[2]
    returnDict['DataByte0'] = results[3]

    bfLabels = ["CSStateTris Error Flag", "SCKTris Error Flag", "MISOTris Error Flag", "MOSITris Error Flag"]
    bf = BitField( rawByte = results[4], labelPrefix = "", labelList = bfLabels )

    returnDict["ErrorFlags"] = bf

    return returnDict
def rawSHT1X(self, Data = [3,0,0,0], WaitForMeasurementReady = True, IssueSerialReset = False, Add1MsDelay = False, Add300UsDelay = False, IO3State = 1, IO2State = 1, IO3Direction = 1, IO2Direction = 1, NumberOfBytesToWrite = 1, NumberOfBytesToRead = 3):
    """
    Name: U12.rawSHT1X( Data = [3, 0, 0, 0],
                        WaitForMeasurementReady = True,
                        IssueSerialReset = False, Add1MsDelay = False,
                        Add300UsDelay = False, IO3State = 1, IO2State = 1,
                        IO3Direction = 1, IO2Direction = 1,
                        NumberOfBytesToWrite = 1, NumberOfBytesToRead = 3)
    Args: Data, a list of bytes to write to the SHT.
          WaitForMeasurementReady, Wait for the measurement ready signal.
          IssueSerialReset, perform a serial reset
          Add1MsDelay, adds 1ms delay
          Add300UsDelay, adds a 300us delay
          IO3State, sets the state of IO3
          IO2State, sets the state of IO2
          IO3Direction, sets the direction of IO3 ( 1 = Output )
          IO2Direction, sets the direction of IO3 ( 1 = Output )
          NumberOfBytesToWrite, how many bytes to write
          NumberOfBytesToRead, how may bytes to read back
    Desc: Sends and receives data from a SHT1X T/RH sensor from Sensirion.
          See Section 5.15 of the User's Guide.
          By default, reads the temperature from the SHT.
    Returns: A dictionary with the following keys,
             DataByte0-3, the four data bytes read
             ErrorFlags, a BitField representing the error flags.
    Example:
    Uses an EI-1050 Temp/Humidity probe wired as follows:
    Data ( Green ) -> IO0
    Clock ( White ) -> IO1
    Ground ( Black ) -> GND
    Power ( Red ) -> +5V
    Enable ( Brown ) -> IO2

    >>> import u12
    >>> d = u12.U12()
    >>> results = d.rawSHT1X()
    >>> print results
    {
      'DataByte3': 0,
      'DataByte2': 69,
      'DataByte1': 48,
      'DataByte0': 25,
      'ErrorFlags':
       <BitField object: [ Serial Reset Error Flag = 0 (0), ... ] >
    }
    >>> tempC = (results['DataByte0'] * 256 ) + results['DataByte1']
    >>> tempC = (tempC * 0.01) - 40
    >>> print tempC
    24.48
    >>> results = d.rawSHT1X(Data = [5,0,0,0])
    >>> print results
    {
      'DataByte3': 0,
      'DataByte2': 200,
      'DataByte1': 90,
      'DataByte0': 2,
      'ErrorFlags':
       <BitField object: [ Serial Reset Error Flag = 0 (0), ... ] >
    }
    >>> sorh = (results['DataByte0'] * 256 ) + results['DataByte1']
    >>> rhlinear = (-0.0000028*sorh*sorh)+(0.0405*sorh)-4.0
    >>> rh = ((tempC-25.0)*(0.01+(0.00008*sorh)))+rhlinear
    >>> print rh
    19.3360256
    """
    # 8-byte command buffer sent to the device.
    command = [ 0 ] * 8

    if NumberOfBytesToWrite != 0:
        if not isinstance(Data, list) or len(Data) > 4:
            raise U12Exception("Data wasn't a list, or was too long.")

        # Device expects the data bytes in reverse order at the front of
        # the command.  NOTE: mutates the caller's list (and, since the
        # default argument is a mutable list, repeated default calls
        # re-reverse the shared default - TODO confirm intended).
        Data.reverse()
        command[:len(Data)] = Data

    if max(NumberOfBytesToWrite, NumberOfBytesToRead) > 4:
        raise U12Exception("Can only read/write up to 4 bytes at a time.")

    # Byte 4: option flags and the IO2/IO3 state/direction bits.
    bf = BitField()
    bf.bit7 = int(bool(WaitForMeasurementReady))
    bf.bit6 = int(bool(IssueSerialReset))
    bf.bit5 = int(bool(Add1MsDelay))
    bf.bit4 = int(bool(Add300UsDelay))
    bf.bit3 = int(bool(IO3State))
    bf.bit2 = int(bool(IO2State))
    bf.bit1 = int(bool(IO3Direction))
    bf.bit0 = int(bool(IO2Direction))
    command[4] = int(bf)

    # Byte 5: command code 01101000 (SHT1X).
    bf2 = BitField()
    bf2.bit6 = 1
    bf2.bit5 = 1
    bf2.bit3 = 1
    command[5] = int(bf2)

    command[6] = NumberOfBytesToWrite
    command[7] = NumberOfBytesToRead

    self.write(command)
    results = self.read()

    # The echoed command byte identifies the response type.
    if results[5] != command[5]:
        raise U12Exception("Expected SHT1x response, got %s instead." % results[5])

    returnDict = dict()
    returnDict['DataByte3'] = results[0]
    returnDict['DataByte2'] = results[1]
    returnDict['DataByte1'] = results[2]
    returnDict['DataByte0'] = results[3]

    bfLabels = ["Serial Reset Error Flag", "Measurement Ready Error Flag", "Ack Error Flag"]
    bf = BitField( rawByte = results[4], labelPrefix = "", labelList = bfLabels )

    returnDict["ErrorFlags"] = bf

    return returnDict
def eAnalogIn(self, channel, idNum = None, demo=0, gain=0):
    """
    Name: U12.eAnalogIn(channel, idNum = None, demo=0, gain=0)
    Args: See section 4.1 of the User's Guide
    Desc: This is a simplified version of AISample. Reads the voltage from 1 analog input

    >>> import u12
    >>> d = u12.U12()
    >>> d.eAnalogIn(0)
    {'overVoltage': 0, 'idnum': 1, 'voltage': 1.435546875}
    """
    # Fall back to this device's id when none was given.
    if idNum is None:
        idNum = self.id

    if ON_WINDOWS:
        devId = ctypes.c_long(idNum)
        overVoltage = ctypes.c_long(999)
        voltage = ctypes.c_float(999)

        ecode = staticLib.EAnalogIn(ctypes.byref(devId), demo, channel, gain, ctypes.byref(overVoltage), ctypes.byref(voltage))
        if ecode != 0:
            raise U12Exception(ecode)

        return {"idnum": devId.value, "overVoltage": overVoltage.value, "voltage": voltage.value}
    else:
        # Bits 6-4: PGA for 1st Channel
        # Bits 3-0: MUX command for 1st Channel
        muxByte = (gain & 7) << 4
        if channel > 7:
            muxByte += channel - 8
        else:
            muxByte += channel + 8

        sample = self.rawAISample(channel0PGAMUX = muxByte)

        return {"idnum": self.id, "overVoltage": int(sample['PGAOvervoltage']), 'voltage': sample['Channel0']}
def eAnalogOut(self, analogOut0, analogOut1, idNum = None, demo=0):
    """
    Name: U12.eAnalogOut(analogOut0, analogOut1, idNum = None, demo=0)
    Args: See section 4.2 of the User's Guide
    Desc: This is a simplified version of AOUpdate. Sets the voltage of both analog outputs.

    >>> import u12
    >>> d = u12.U12()
    >>> d.eAnalogOut(2, 2)
    {'idnum': 1}
    """
    # Fall back to this device's id when none was given.
    if idNum is None:
        idNum = self.id

    if ON_WINDOWS:
        devId = ctypes.c_long(idNum)
        ecode = staticLib.EAnalogOut(ctypes.byref(devId), demo, ctypes.c_float(analogOut0), ctypes.c_float(analogOut1))
        if ecode != 0:
            raise U12Exception(ecode)
        return {"idnum": devId.value}
    else:
        # A negative request means "keep the last programmed voltage".
        if analogOut0 < 0:
            analogOut0 = self.pwmAVoltage
        if analogOut1 < 0:
            analogOut1 = self.pwmBVoltage

        self.rawCounterPWMDIO(PWMA = analogOut0, PWMB = analogOut1)

        # Remember what was programmed so later negative requests reuse it.
        self.pwmAVoltage = analogOut0
        self.pwmBVoltage = analogOut1

        return {"idnum": self.id}
def eCount(self, idNum = None, demo = 0, resetCounter = 0):
    """
    Name: U12.eCount(idNum = None, demo = 0, resetCounter = 0)
    Args: See section 4.3 of the User's Guide
    Desc: This is a simplified version of Counter. Reads & resets the counter (CNT).
    >>> import u12
    >>> d = u12.U12()
    >>> d.eCount()
    {'count': 1383596032.0, 'ms': 251487257.0}
    """
    # Fall back to this device's id when none was given.
    if idNum is None:
        idNum = self.id

    if not ON_WINDOWS:
        # Non-Windows path goes through the raw USB protocol; the
        # millisecond timestamp comes from the host clock.
        reading = self.rawCounter(ResetCounter = resetCounter)
        return {"idnum": self.id, "count": reading['Counter'], "ms": (time() * 1000)}

    devId = ctypes.c_long(idNum)
    count = ctypes.c_double()
    ms = ctypes.c_double()

    ecode = staticLib.ECount(ctypes.byref(devId), demo, resetCounter, ctypes.byref(count), ctypes.byref(ms))
    if ecode != 0:
        raise U12Exception(ecode)

    return {"idnum": devId.value, "count": count.value, "ms": ms.value}
def eDigitalIn(self, channel, idNum = None, demo = 0, readD=0):
    """
    Name: U12.eDigitalIn(channel, idNum = None, demo = 0, readD=0)
    Args: See section 4.4 of the User's Guide
    Desc: This is a simplified version of DigitalIO that reads the state of
          one digital input. Also configures the requested pin to input and
          leaves it that way.

    >>> import u12
    >>> d = u12.U12()
    >>> d.eDigitalIn(0)
    {'state': 0, 'idnum': 1}
    """
    # Check id num
    if idNum is None:
        idNum = self.id

    if ON_WINDOWS:
        ljid = ctypes.c_long(idNum)
        state = ctypes.c_long(999)
        ecode = staticLib.EDigitalIn(ctypes.byref(ljid), demo, channel, readD, ctypes.byref(state))
        if ecode != 0: raise U12Exception(ecode)

        return {"idnum":ljid.value, "state":state.value}
    else:
        oldstate = self.rawDIO()
        if readD:
            if channel > 7:
                # D8-D15 live in the upper direction/state byte.
                channel = channel-7
                direction = BitField(rawByte = oldstate['D15toD8Directions'])
                direction[7-channel] = 1
                results = self.rawDIO(D15toD8Directions = direction, UpdateDigital = True)
                state = results["D15toD8States"][7-channel]
            else:
                direction = BitField(rawByte = oldstate['D7toD0Directions'])
                direction[7-channel] = 1
                # Bug fix: keyword was misspelled "D7ToD0Directions" (every
                # other rawDIO call spells it "D7toD0Directions"), and the
                # state was read from the D15-D8 byte instead of D7-D0.
                results = self.rawDIO(D7toD0Directions = direction, UpdateDigital = True)
                state = results["D7toD0States"][7-channel]
        else:
            results = self.rawDIO(IO3toIO0DirectionsAndStates = 255, UpdateDigital = True)
            state = results["IO3toIO0States"][3-channel]

        return {"idnum" : self.id, "state" : state}
def eDigitalOut(self, channel, state, idNum = None, demo = 0, writeD=0):
    """
    Name: U12.eDigitalOut(channel, state, idNum = None, demo = 0, writeD=0)
    Args: See section 4.5 of the User's Guide
    Desc: This is a simplified version of DigitalIO that sets/clears the
          state of one digital output. Also configures the requested pin to
          output and leaves it that way.

    >>> import u12
    >>> d = u12.U12()
    >>> d.eDigitalOut(0, 1)
    {'idnum': 1}
    """
    # Check id num
    if idNum is None:
        idNum = self.id

    if ON_WINDOWS:
        ljid = ctypes.c_long(idNum)
        ecode = staticLib.EDigitalOut(ctypes.byref(ljid), demo, channel, writeD, state)
        if ecode != 0: raise U12Exception(ecode)

        return {"idnum":ljid.value}
    else:
        oldstate = self.rawDIO()
        if writeD:
            if channel > 7:
                # D8-D15 live in the upper direction/state byte.
                channel = channel-7
                direction = BitField(rawByte = int(oldstate['D15toD8Directions']))
                direction[7-channel] = 0
                states = BitField(rawByte = int(oldstate['D15toD8States']))
                states[7-channel] = state
                # Bug fix: the state byte passed was the scalar ``state``
                # instead of the assembled ``states`` BitField, clobbering
                # the other D15-D8 lines.
                self.rawDIO(D15toD8Directions = direction, D15toD8States = states, UpdateDigital = True)
            else:
                direction = BitField(rawByte = int(oldstate['D7toD0Directions']))
                direction[7-channel] = 0
                states = BitField(rawByte = int(oldstate['D7toD0States']))
                states[7-channel] = state
                self.rawDIO(D7toD0Directions = direction, D7toD0States = states, UpdateDigital = True)
        else:
            bf = BitField()
            bf[7-(channel+4)] = 0
            bf[7-channel] = state
            self.rawDIO(IO3toIO0DirectionsAndStates = bf, UpdateDigital = True)

        return {"idnum" : self.id}
def aiSample(self, numChannels, channels, idNum=None, demo=0, stateIOin=0, updateIO=0, ledOn=0, gains=[0, 0, 0, 0], disableCal=0):
    """
    Name: U12.aiSample(channels, idNum=None, demo=0, stateIOin=0, updateIO=0, ledOn=0, gains=[0, 0, 0, 0], disableCal=0)
    Args: See section 4.6 of the User's Guide
    Desc: Reads the voltages from 1,2, or 4 analog inputs. Also controls/reads the 4 IO ports.

    >>> dev = U12()
    >>> dev.aiSample(2, [0, 1])
    {'stateIO': [0, 0, 0, 0], 'overVoltage': 0, 'idnum': 1, 'voltages': [1.4208984375, 1.4306640625]}
    """
    # Check id num
    if idNum is None:
        idNum = self.id
    idNum = ctypes.c_long(idNum)

    # Check to make sure that everything is checked
    if not isIterable(channels): raise TypeError("channels must be iterable")
    if not isIterable(gains): raise TypeError("gains must be iterable")
    if len(channels) < numChannels: raise ValueError("channels must have atleast numChannels elements")
    if len(gains) < numChannels: raise ValueError("gains must have atleast numChannels elements")

    # Convert lists to arrays and create other ctypes
    channelsArray = listToCArray(channels, ctypes.c_long)
    gainsArray = listToCArray(gains, ctypes.c_long)
    overVoltage = ctypes.c_long(999)
    # Removed unused local ``longArrayType``; only the float array is needed.
    floatArrayType = (ctypes.c_float * 4)
    voltages = floatArrayType(0, 0, 0, 0)
    stateIOin = ctypes.c_long(stateIOin)

    ecode = staticLib.AISample(ctypes.byref(idNum), demo, ctypes.byref(stateIOin), updateIO, ledOn, numChannels, ctypes.byref(channelsArray), ctypes.byref(gainsArray), disableCal, ctypes.byref(overVoltage), ctypes.byref(voltages))
    if ecode != 0: raise U12Exception(ecode)

    return {"idnum":idNum.value, "stateIO":stateIOin.value, "overVoltage":overVoltage.value, "voltages":voltages[0:numChannels]}
def aiBurst(self, numChannels, channels, scanRate, numScans, idNum=None, demo=0, stateIOin=0, updateIO=0, ledOn=0, gains=[0, 0, 0, 0], disableCal=0, triggerIO=0, triggerState=0, timeout=1, transferMode=0):
    """
    Name: U12.aiBurst(numChannels, channels, scanRate, numScans, idNum=None, demo=0, stateIOin=[0, 0, 0, 0], updateIO=0, ledOn=0, gains=[0, 0, 0, 0], disableCal=0, triggerIO=0, triggerState=0, timeout=1, transferMode=0)
    Args: See section 4.7 of the User's Guide
    Desc: Reads a specified number of scans (up to 4096) at a specified scan rate (up to 8192 Hz) from 1,2, or 4 analog inputs

    >>> dev = U12()
    >>> dev.aiBurst(1, [0], 400, 10)
    {'overVoltage': 0, 'scanRate': 400.0, 'stateIOout': <u12.c_long_Array_4096 object at 0x00DB4BC0>, 'idnum': 1, 'voltages': <u12.c_float_Array_4096_Array_4 object at 0x00DB4B70>}
    """
    # Check id number
    if idNum is None:
        idNum = self.id
    idNum = ctypes.c_long(idNum)

    # check list sizes
    if len(channels) < numChannels: raise ValueError("channels must have atleast numChannels elements")
    if len(gains) < numChannels: raise ValueError("gains must have atleast numChannels elements")

    # Convert lists to arrays and create other ctypes
    channelsArray = listToCArray(channels, ctypes.c_long)
    gainsArray = listToCArray(gains, ctypes.c_long)
    scanRate = ctypes.c_float(scanRate)
    # Removed unused local ``pointerArray``; the DLL fills the 4x4096
    # voltage matrix and the per-scan IO-state array directly.
    arr4096_type = ctypes.c_float * 4096
    voltages_type = arr4096_type * 4
    voltages = voltages_type()

    stateIOout = (ctypes.c_long * 4096)()
    overVoltage = ctypes.c_long(999)

    ecode = staticLib.AIBurst(ctypes.byref(idNum), demo, stateIOin, updateIO, ledOn, numChannels, ctypes.byref(channelsArray), ctypes.byref(gainsArray), ctypes.byref(scanRate), disableCal, triggerIO, triggerState, numScans, timeout, ctypes.byref(voltages), ctypes.byref(stateIOout), ctypes.byref(overVoltage), transferMode)
    if ecode != 0: raise U12Exception(ecode)

    return {"idnum":idNum.value, "scanRate":scanRate.value, "voltages":voltages, "stateIOout":stateIOout, "overVoltage":overVoltage.value}
def aiStreamStart(self, numChannels, channels, scanRate, idNum=None, demo=0, stateIOin=0, updateIO=0, ledOn=0, gains=[0, 0, 0, 0], disableCal=0, readCount=0):
    """
    Name: U12.aiStreamStart(numChannels, channels, scanRate, idNum=None, demo=0, stateIOin=0, updateIO=0, ledOn=0, gains=[0, 0, 0, 0], disableCal=0, readCount=0)
    Args: See section 4.8 of the User's Guide
    Desc: Starts a hardware timed continuous acquisition

    >>> dev = U12()
    >>> dev.aiStreamStart(1, [0], 200)
    {'scanRate': 200.0, 'idnum': 1}
    """
    # Tell ctypes the DLL returns a long error code.
    staticLib.AIStreamStart.restype = ctypes.c_long

    # Validate the per-channel argument lists.
    if len(channels) < numChannels: raise ValueError("channels must have atleast numChannels elements")
    if len(gains) < numChannels: raise ValueError("gains must have atleast numChannels elements")

    #if len(stateIOin) < 4: raise ValueError("stateIOin must have atleast 4 elements")

    # Fall back to this device's id when none was given.
    if idNum is None:
        idNum = self.id
    idNum = ctypes.c_long(idNum)

    # Marshal the Python lists into C arrays for the DLL.
    chanArr = listToCArray(channels, ctypes.c_long)
    gainArr = listToCArray(gains, ctypes.c_long)
    scanRate = ctypes.c_float(scanRate)

    ecode = staticLib.AIStreamStart(ctypes.byref(idNum), demo, stateIOin, updateIO, ledOn, numChannels, ctypes.byref(chanArr), ctypes.byref(gainArr), ctypes.byref(scanRate), disableCal, 0, readCount)
    if ecode != 0:
        raise U12Exception(ecode)

    # The ID number must be saved for AIStream
    self.id = idNum.value
    self.streaming = True

    return {"idnum": idNum.value, "scanRate": scanRate.value}
def aiStreamRead(self, numScans, localID=None, timeout=1):
    """
    Name: U12.aiStreamRead(numScans, localID=None, timeout=1)
    Args: See section 4.9 of the User's Guide
    Desc: Waits for a specified number of scans to be available and reads them.

    >>> dev = U12()
    >>> dev.aiStreamStart(1, [0], 200)
    >>> dev.aiStreamRead(10)
    {'overVoltage': 0, 'ljScanBacklog': 0, 'stateIOout': <u12.c_long_Array_4096 object at 0x00DF4AD0>, 'reserved': 0, 'voltages': <u12.c_float_Array_4096_Array_4 object at 0x00DF4B20>}
    """
    # A stream must have been started with aiStreamStart first.
    if not self.streaming:
        raise U12Exception(-1, "Streaming has not started")

    # Fall back to the id recorded by aiStreamStart.
    if localID is None:
        localID = self.id

    # Output buffers: a 4 x 4096 voltage matrix plus per-scan IO states.
    voltages = ((ctypes.c_float * 4096) * 4)()
    stateIOout = (ctypes.c_long * 4096)()
    reserved = ctypes.c_long(0)
    ljScanBacklog = ctypes.c_long(99999)
    overVoltage = ctypes.c_long(999)

    ecode = staticLib.AIStreamRead(localID, numScans, timeout, ctypes.byref(voltages), ctypes.byref(stateIOout), ctypes.byref(reserved), ctypes.byref(ljScanBacklog), ctypes.byref(overVoltage))
    if ecode != 0:
        raise U12Exception(ecode)

    return {"voltages": voltages, "stateIOout": stateIOout, "reserved": reserved.value, "ljScanBacklog": ljScanBacklog.value, "overVoltage": overVoltage.value}
def aiStreamClear(self, localID=None):
    """
    Name: U12.aiClear()
    Args: See section 4.10 of the User's Guide
    Desc: This function stops the continuous acquisition. It should be called once when finished with the stream.

    >>> dev = U12()
    >>> dev.aiStreamStart(1, [0], 200)
    >>> dev.aiStreamRead(10)
    >>> dev.aiStreamClear()
    """
    # Check to make sure that we are streaming
    if not self.streaming:
        raise U12Exception(-1, "Streaming has not started")

    # Check id number
    if localID is None:
        localID = self.id

    ecode = staticLib.AIStreamClear(localID)
    if ecode != 0: raise U12Exception(ecode)

    # Bug fix: reset the streaming flag so a later aiStreamStart /
    # aiStreamRead sequence doesn't see stale state.
    self.streaming = False
def aoUpdate(self, idNum=None, demo=0, trisD=None, trisIO=None, stateD=None, stateIO=None, updateDigital=0, resetCounter=0, analogOut0=0, analogOut1=0):
    """
    Name: U12.aoUpdate()
    Args: See section 4.11 of the User's Guide
    Desc: Sets the voltages of the analog outputs. Also controls/reads all 20 digital I/O and the counter.

    >>> dev = U12()
    >>> dev.aoUpdate()
    >>> {'count': 2, 'stateIO': 3, 'idnum': 1, 'stateD': 0}
    """
    # Fall back to this device's id when none was given.
    if idNum is None:
        idNum = self.id
    idNum = ctypes.c_long(idNum)

    # When digital lines are being updated, all four masks are mandatory.
    if updateDigital > 0:
        if trisD is None: raise ValueError("keyword argument trisD must be set")
        if trisIO is None: raise ValueError("keyword argument trisIO must be set")
        if stateD is None: raise ValueError("keyword argument stateD must be set")
        if stateIO is None: raise ValueError("keyword argument stateIO must be set")

    # Wrap the in/out state bytes, defaulting unset ones to zero.
    stateD = ctypes.c_long(0 if stateD is None else stateD)
    stateIO = ctypes.c_long(0 if stateIO is None else stateIO)
    count = ctypes.c_ushort(999)

    ecode = staticLib.AOUpdate(ctypes.byref(idNum), demo, trisD, trisIO, ctypes.byref(stateD), ctypes.byref(stateIO), updateDigital, resetCounter, ctypes.byref(count), ctypes.c_float(analogOut0), ctypes.c_float(analogOut1))
    if ecode != 0:
        raise U12Exception(ecode)

    return {"idnum": idNum.value, "stateD": stateD.value, "stateIO": stateIO.value, "count": count.value}
def asynchConfig(self, fullA, fullB, fullC, halfA, halfB, halfC, idNum=None, demo=None, timeoutMult=1, configA=0, configB=0, configTE=0):
    """
    Name: U12.asynchConfig(fullA, fullB, fullC, halfA, halfB, halfC, idNum=None, demo=None, timeoutMult=1, configA=0, configB=0, configTE=0)
    Args: See section 4.12 of the User's Guide
    Desc: Requires firmware V1.1 or higher. This function writes to the asynch registers and sets the direction of the D lines (input/output) as needed.

    >>> dev = U12()
    >>> dev.asynchConfig(96,1,1,22,2,1)
    >>> {'idNum': 1}
    """
    # Fall back to this device's id when none was given.
    if idNum is None:
        idNum = self.id
    idNum = ctypes.c_long(idNum)

    ecode = staticLib.AsynchConfig(ctypes.byref(idNum), demo, timeoutMult, configA, configB, configTE, fullA, fullB, fullC, halfA, halfB, halfC)
    if ecode != 0:
        raise U12Exception(ecode)

    return {"idNum": idNum.value}
def asynch(self, baudrate, data, idNum=None, demo=0, portB=0, enableTE=0, enableTO=0, enableDel=0, numWrite=0, numRead=0):
    """
    Name: U12.asynch(baudrate, data, idNum=None, demo=0, portB=0, enableTE=0, enableTO=0, enableDel=0, numWrite=0, numRead=0)
    Args: See section 4.13 of the User's Guide
    Desc: Requires firmware V1.1 or higher. This function writes and then
          reads half-duplex asynchronous data on one of two pairs of D lines.

    >>> dev = U12()
    >>> dev.asynchConfig(96,1,1,22,2,1)
    >>> dev.asynch(19200, [0, 0])
    >>> {'data': <u12.c_long_Array_18 object at 0x00DEFB70>, 'idnum': 1}
    """
    #Check id number
    if idNum is None:
        idNum = self.id
    idNum = ctypes.c_long(idNum)

    # Check size of data
    if len(data) > 18: raise ValueError("data can not be larger than 18 elements")

    # Pad data out to the fixed 18-element buffer the DLL expects.
    # (Removed a leftover debug print of the buffer.)
    dataArray = [0] * 18
    for i in range(0, len(data)):
        dataArray[i] = data[i]
    dataArray = listToCArray(dataArray, ctypes.c_long)

    ecode = staticLib.Asynch(ctypes.byref(idNum), demo, portB, enableTE, enableTO, enableDel, baudrate, numWrite, numRead, ctypes.byref(dataArray))
    if ecode != 0: raise U12Exception(ecode)

    # Bug fix: previously returned the builtin type ``long`` instead of
    # the device id value.
    return {"idnum": idNum.value, "data": dataArray}
GainMapping = [ 1.0, 2.0, 4.0, 5.0, 8.0, 10.0, 16.0, 20.0 ]
def bitsToVolts(self, chnum, chgain, bits):
    """
    Name: U12.bitsToVolts(chnum, chgain, bits)
    Args: See section 4.14 of the User's Guide
    Desc: Converts a 12-bit (0-4095) binary value into a LabJack voltage. No hardware communication is involved.

    >>> dev = U12()
    >>> dev.bitsToVolts(0, 0, 2662)
    >>> {'volts': 2.998046875}
    """
    if ON_WINDOWS:
        volts = ctypes.c_float()
        ecode = staticLib.BitsToVolts(chnum, chgain, bits, ctypes.byref(volts))

        # Bug fix: errors were only printed and a garbage value returned;
        # raise like every other wrapper in this class.
        if ecode != 0:
            raise U12Exception(ecode)

        return volts.value
    else:
        if chnum < 8:
            # Single-ended channels 0-7: +/-10 V full scale.
            return ( float(bits) * 20.0 / 4096.0 ) - 10.0
        else:
            # Differential channels: +/-20 V full scale divided by the PGA gain.
            volts = ( float(bits) * 40.0 / 4096.0 ) - 20.0
            return volts / self.GainMapping[chgain]
def voltsToBits(self, chnum, chgain, volts):
    """
    Name: U12.voltsToBits(chnum, chgain, bits)
    Args: See section 4.15 of the User's Guide
    Desc: Converts a voltage to it's 12-bit (0-4095) binary representation. No hardware communication is involved.

    >>> dev = U12()
    >>> dev.voltsToBits(0, 0, 3)
    >>> {'bits': 2662}
    """
    if ON_WINDOWS:
        bits = ctypes.c_long(999)
        ecode = staticLib.VoltsToBits(chnum, chgain, ctypes.c_float(volts), ctypes.byref(bits))
        if ecode != 0: raise U12Exception(ecode)

        return bits.value
    else:
        # Previously unimplemented (``pass`` returned None).  Implemented
        # as the inverse of bitsToVolts, following the C formula left in
        # the original comment:
        #   *bits = RoundFL((volts+10.0F)/(20.0F/4096.0F));
        if chnum < 8:
            # Single-ended channels 0-7: +/-10 V full scale.
            bits = int(round((volts + 10.0) * 4096.0 / 20.0))
        else:
            # Differential channels: undo the PGA gain, +/-20 V full scale.
            bits = int(round((volts * self.GainMapping[chgain] + 20.0) * 4096.0 / 40.0))

        # Clamp to the valid 12-bit range.
        return max(0, min(4095, bits))
def counter(self, idNum=None, demo=0, resetCounter=0, enableSTB=1):
    """
    Name: U12.counter(idNum=None, demo=0, resetCounter=0, enableSTB=1)
    Args: See section 4.16 of the User's Guide
    Desc: Controls and reads the counter. Also reads the states of the
          D and IO lines.

    >>> dev = U12()
    >>> dev.counter(0, 0, 3)
    >>> {'idnum': 1, 'stateD': 0, 'stateIO': 0, 'count': 0}
    """
    #Check id number
    if idNum is None:
        idNum = self.id
    idNum = ctypes.c_long(idNum)

    # Create ctypes
    stateD = ctypes.c_long(999)
    stateIO = ctypes.c_long(999)
    count = ctypes.c_ulong(999)

    # (Removed a leftover debug print of idNum.)
    ecode = staticLib.Counter(ctypes.byref(idNum), demo, ctypes.byref(stateD), ctypes.byref(stateIO), resetCounter, enableSTB, ctypes.byref(count))
    if ecode != 0: raise U12Exception(ecode)

    return {"idnum":idNum.value, "stateD": stateD.value, "stateIO":stateIO.value, "count":count.value}
def digitalIO(self, idNum=None, demo=0, trisD=None, trisIO=None, stateD=None, stateIO=None, updateDigital=0):
    """
    Name: U12.digitalIO(idNum=None, demo=0, trisD=None, trisIO=None, stateD=None, stateIO=None, updateDigital=0)
    Args: See section 4.17 of the User's Guide
    Desc: Reads and writes to all 20 digital I/O.

    >>> dev = U12()
    >>> dev.digitalIO()
    >>> {'stateIO': 0, 'stateD': 0, 'idnum': 1, 'outputD': 0, 'trisD': 0}
    """
    # Fall back to this device's id when none was given.
    if idNum is None:
        idNum = self.id
    idNum = ctypes.c_long(idNum)

    # When digital lines are being updated, all four masks are mandatory.
    if updateDigital > 0:
        if trisD is None: raise ValueError("keyword argument trisD must be set")
        if trisIO is None: raise ValueError("keyword argument trisIO must be set")
        if stateD is None: raise ValueError("keyword argument stateD must be set")
        if stateIO is None: raise ValueError("keyword argument stateIO must be set")

    # Wrap the in/out bytes; 999 marks "read-only" placeholders.
    trisD = ctypes.c_long(999 if trisD is None else trisD)
    stateD = ctypes.c_long(999 if stateD is None else stateD)
    stateIO = ctypes.c_long(0 if stateIO is None else stateIO)
    outputD = ctypes.c_long(999)

    # trisIO is passed by value, so it just needs a plain default.
    if trisIO is None:
        trisIO = 0

    ecode = staticLib.DigitalIO(ctypes.byref(idNum), demo, ctypes.byref(trisD), trisIO, ctypes.byref(stateD), ctypes.byref(stateIO), updateDigital, ctypes.byref(outputD))
    if ecode != 0:
        raise U12Exception(ecode)

    return {"idnum": idNum.value, "trisD": trisD.value, "stateD": stateD.value, "stateIO": stateIO.value, "outputD": outputD.value}
def getDriverVersion(self):
    """
    Name: U12.getDriverVersion()
    Args: See section 4.18 of the User's Guide
    Desc: Returns the version number of ljackuw.dll. No hardware communication is involved.

    >>> dev = U12()
    >>> dev.getDriverVersion()
    >>> 1.21000003815
    """
    # The DLL returns a float version number, not an error code.
    staticLib.GetDriverVersion.restype = ctypes.c_float
    version = staticLib.GetDriverVersion()
    return version
def getFirmwareVersion(self, idNum=None):
    """
    Name: U12.getFirmwareVersion(idNum=None)
    Args: See section 4.20 of the User's Guide
    Desc: Retrieves the firmware version from the LabJack's processor

    >>> dev = U12()
    >>> dev.getFirmwareVersion()
    >>> Unkown error
    """
    # Check ID number
    if idNum is None: idNum = self.id
    idNum = ctypes.c_long(idNum)

    staticLib.GetFirmwareVersion.restype = ctypes.c_float
    firmware = staticLib.GetFirmwareVersion(ctypes.byref(idNum))

    # Values above 512 encode an error: the error code plus 512.
    if firmware > 512: raise U12Exception(firmware-512)

    return {"idnum" : idNum.value, "firmware" : firmware}
def getWinVersion(self):
    """
    Name: U12.getWinVersion()
    Args: See section 4.21 of the User's Guide
    Desc: Uses a Windows API function to get the OS version

    >>> dev = U12()
    >>> dev.getWinVersion()
    >>> {'majorVersion': 5L, 'minorVersion': 1L, 'platformID': 2L, 'buildNumber': 2600L, 'servicePackMajor': 2L, 'servicePackMinor': 0L}
    """
    # Create ctypes out-parameters for the six version fields.
    majorVersion = ctypes.c_ulong()
    minorVersion = ctypes.c_ulong()
    buildNumber = ctypes.c_ulong()
    platformID = ctypes.c_ulong()
    servicePackMajor = ctypes.c_ulong()
    servicePackMinor = ctypes.c_ulong()

    ecode = staticLib.GetWinVersion(ctypes.byref(majorVersion), ctypes.byref(minorVersion), ctypes.byref(buildNumber), ctypes.byref(platformID), ctypes.byref(servicePackMajor), ctypes.byref(servicePackMinor))
    if ecode != 0: raise U12Exception(ecode)

    return {"majorVersion":majorVersion.value, "minorVersion":minorVersion.value, "buildNumber":buildNumber.value, "platformID":platformID.value, "servicePackMajor":servicePackMajor.value, "servicePackMinor":servicePackMinor.value}
def listAll(self):
    """
    Name: U12.listAll()
    Args: See section 4.22 of the User's Guide
    Desc: Searches the USB for all LabJacks, and returns the serial number and local ID for each

    >>> dev = U12()
    >>> dev.listAll()
    >>> {'serialnumList': <u12.c_long_Array_127 object at 0x00E2AD50>, 'numberFound': 1, 'localIDList': <u12.c_long_Array_127 object at 0x00E2ADA0>}
    """
    # Output buffers for up to 127 attached devices.
    productIDList = listToCArray([0]*127, ctypes.c_long)
    serialnumList = listToCArray([0]*127, ctypes.c_long)
    localIDList = listToCArray([0]*127, ctypes.c_long)
    powerList = listToCArray([0]*127, ctypes.c_long)

    # 20 x 127 calibration matrix filled by the DLL.
    calMatrix = ((ctypes.c_long * 127) * 20)()

    reserved = ctypes.c_long()
    numberFound = ctypes.c_long()

    ecode = staticLib.ListAll(ctypes.byref(productIDList), ctypes.byref(serialnumList), ctypes.byref(localIDList), ctypes.byref(powerList), ctypes.byref(calMatrix), ctypes.byref(numberFound), ctypes.byref(reserved), ctypes.byref(reserved))
    if ecode != 0:
        raise U12Exception(ecode)

    return {"serialnumList": serialnumList, "localIDList": localIDList, "numberFound": numberFound.value}
def localID(self, localID, idNum=None):
    """
    Name: U12.localID(localID, idNum=None)
    Args: See section 4.23 of the User's Guide
    Desc: Changes the local ID of a specified LabJack

    >>> dev = U12()
    >>> dev.localID(1)
    >>> {'idnum':1}
    """
    # Fall back to this device's id when none was given.
    if idNum is None:
        idNum = self.id
    idNum = ctypes.c_long(idNum)

    ecode = staticLib.LocalID(ctypes.byref(idNum), localID)
    if ecode != 0:
        raise U12Exception(ecode)

    return {"idnum": idNum.value}
def noThread(self, noThread, idNum=None):
    """
    Name: U12.noThread(noThread, idNum=None)
    Args: See section 4.24 of the User's Guide
    Desc: This function is needed when interfacing TestPoint to the LabJack DLL on Windows 98/ME

    >>> dev = U12()
    >>> dev.noThread(1)
    >>> {'idnum':1}
    """
    #Check id number
    if idNum is None:
        idNum = self.id
    idNum = ctypes.c_long(idNum)

    ecode = staticLib.NoThread(ctypes.byref(idNum), noThread)
    if ecode != 0: raise U12Exception(ecode)

    return {"idnum":idNum.value}
def pulseOut(self, bitSelect, numPulses, timeB1, timeC1, timeB2, timeC2, idNum=None, demo=0, lowFirst=0):
    """
    Name: U12.pulseOut(bitSelect, numPulses, timeB1, timeC1, timeB2, timeC2, idNum=None, demo=0, lowFirst=0)
    Args: See section 4.25 of the User's Guide
    Desc: This command creates pulses on any/all of D0-D7

    >>> dev = U12()
    >>> dev.pulseOut(0, 1, 1, 1, 1, 1)
    >>> {'idnum':1}
    """
    # Fall back to this device's id when none was given.
    if idNum is None:
        idNum = self.id
    idNum = ctypes.c_long(idNum)

    ecode = staticLib.PulseOut(ctypes.byref(idNum), demo, lowFirst, bitSelect, numPulses, timeB1, timeC1, timeB2, timeC2)
    if ecode != 0:
        raise U12Exception(ecode)

    return {"idnum": idNum.value}
def pulseOutStart(self, bitSelect, numPulses, timeB1, timeC1, timeB2, timeC2, idNum=None, demo=0, lowFirst=0):
    """
    Name: U12.pulseOutStart(bitSelect, numPulses, timeB1, timeC1, timeB2, timeC2, idNum=None, demo=0, lowFirst=0)
    Args: See section 4.26 of the User's Guide
    Desc: PulseOutStart and PulseOutFinish are used as an alternative to PulseOut (See PulseOut for more information)

    >>> dev = U12()
    >>> dev.pulseOutStart(0, 1, 1, 1, 1, 1)
    >>> {'idnum':1}
    """
    # Fall back to this device's id when none was given.
    if idNum is None:
        idNum = self.id
    idNum = ctypes.c_long(idNum)

    ecode = staticLib.PulseOutStart(ctypes.byref(idNum), demo, lowFirst, bitSelect, numPulses, timeB1, timeC1, timeB2, timeC2)
    if ecode != 0:
        raise U12Exception(ecode)

    return {"idnum": idNum.value}
def pulseOutFinish(self, timeoutMS, idNum=None, demo=0):
    """
    Name: U12.pulseOutFinish(timeoutMS, idNum=None, demo=0)
    Args: See section 4.27 of the User's Guide
    Desc: See PulseOutStart for more information
    >>> dev = U12()
    >>> dev.pulseOutStart(0, 1, 1, 1, 1, 1)
    >>> dev.pulseOutFinish(100)
    >>> {'idnum':1}
    """
    # Fall back to this device's id when the caller did not supply one.
    if idNum is None:
        idNum = self.id
    deviceId = ctypes.c_long(idNum)
    errorCode = staticLib.PulseOutFinish(ctypes.byref(deviceId), demo, timeoutMS)
    if errorCode != 0:
        raise U12Exception(errorCode)
    return {"idnum": deviceId.value}
def pulseOutCalc(self, frequency):
    """
    Name: U12.pulseOutCalc(frequency)
    Args: See section 4.28 of the User's Guide
    Desc: This function can be used to calculate the cycle times for PulseOut or PulseOutStart.
    >>> dev = U12()
    >>> dev.pulseOutCalc(100)
    >>> {'frequency': 100.07672882080078, 'timeB': 247, 'timeC': 1}
    """
    # ctypes out-parameters that the DLL fills in.
    freqValue = ctypes.c_float(frequency)
    cycleTimeB = ctypes.c_long(0)
    cycleTimeC = ctypes.c_long(0)
    errorCode = staticLib.PulseOutCalc(ctypes.byref(freqValue),
                                       ctypes.byref(cycleTimeB),
                                       ctypes.byref(cycleTimeC))
    if errorCode != 0:
        raise U12Exception(errorCode)
    return {"frequency": freqValue.value, "timeB": cycleTimeB.value, "timeC": cycleTimeC.value}
def reEnum(self, idNum=None):
    """
    Name: U12.reEnum(idNum=None)
    Args: See section 4.29 of the User's Guide
    Desc: Causes the LabJack to electrically detach from and re-attach to the USB so it will re-enumerate
    >>> dev = U12()
    >>> dev.reEnum()
    >>> {'idnum': 1}
    """
    # Fall back to this device's id when the caller did not supply one.
    if idNum is None:
        idNum = self.id
    deviceId = ctypes.c_long(idNum)
    errorCode = staticLib.ReEnum(ctypes.byref(deviceId))
    if errorCode != 0:
        raise U12Exception(errorCode)
    return {"idnum": deviceId.value}
def reset(self, idNum=None):
    """
    Name: U12.reset(idNum=None)
    Args: See section 4.30 of the User's Guide
    Desc: Causes the LabJack to reset after about 2 seconds
    >>> dev = U12()
    >>> dev.reset()
    >>> {'idnum': 1}
    """
    # Fall back to this device's id when the caller did not supply one.
    if idNum is None:
        idNum = self.id
    deviceId = ctypes.c_long(idNum)
    errorCode = staticLib.Reset(ctypes.byref(deviceId))
    if errorCode != 0:
        raise U12Exception(errorCode)
    return {"idnum": deviceId.value}
def resetLJ(self, idNum=None):
    """
    Name: U12.resetLJ(idNum=None)
    Args: See section 4.30 of the User's Guide
    Desc: Causes the LabJack to reset after about 2 seconds. Alias for reset().
    >>> dev = U12()
    >>> dev.resetLJ()
    >>> {'idnum': 1}
    """
    # Bug fix: `reset` is a method of this class, not a module-level
    # function, so the original `return reset(idNum)` raised NameError.
    return self.reset(idNum)
def sht1X(self, idNum=None, demo=0, softComm=0, mode=0, statusReg=0):
    """
    Name: U12.sht1X(idNum=None, demo=0, softComm=0, mode=0, statusReg=0)
    Args: See section 4.31 of the User's Guide
    Desc: This function retrieves temperature and/or humidity readings from an SHT1X sensor.
    >>> dev = U12()
    >>> dev.sht1X()
    >>> {'tempC': 24.69999885559082, 'rh': 39.724445343017578, 'idnum': 1, 'tempF': 76.459999084472656}
    """
    # Fall back to this device's id when the caller did not supply one.
    if idNum is None:
        idNum = self.id
    deviceId = ctypes.c_long(idNum)
    # Out-parameters: temperature in C and F, and relative humidity.
    celsius = ctypes.c_float(0)
    fahrenheit = ctypes.c_float(0)
    humidity = ctypes.c_float(0)
    errorCode = staticLib.SHT1X(ctypes.byref(deviceId), demo, softComm, mode, statusReg,
                                ctypes.byref(celsius), ctypes.byref(fahrenheit),
                                ctypes.byref(humidity))
    if errorCode != 0:
        raise U12Exception(errorCode)
    return {"idnum": deviceId.value, "tempC": celsius.value, "tempF": fahrenheit.value, "rh": humidity.value}
def shtComm(self, numWrite, numRead, datatx, idNum=None, softComm=0, waitMeas=0, serialReset=0, dataRate=0):
    """
    Name: U12.shtComm(numWrite, numRead, datatx, idNum=None, softComm=0, waitMeas=0, serialReset=0, dataRate=0)
    Args: See section 4.32 of the User's Guide
    Desc: Low-level public function to send and receive up to 4 bytes to from an SHT1X sensor
    """
    # Fall back to this device's id when the caller did not supply one.
    if idNum is None:
        idNum = self.id
    deviceId = ctypes.c_long(idNum)
    # The DLL requires a fixed-size 4-byte transmit buffer.
    if len(datatx) != 4:
        raise ValueError("datatx must have exactly 4 elements")
    txBuffer = listToCArray(datatx, ctypes.c_ubyte)
    rxBuffer = (ctypes.c_ubyte * 4)()  # zero-initialized receive buffer
    errorCode = staticLib.SHTComm(ctypes.byref(deviceId), softComm, waitMeas, serialReset,
                                  dataRate, numWrite, numRead,
                                  ctypes.byref(txBuffer), ctypes.byref(rxBuffer))
    if errorCode != 0:
        raise U12Exception(errorCode)
    return {"idnum": deviceId.value, "datarx": rxBuffer}
def shtCRC(self, numWrite, numRead, datatx, datarx, statusReg=0):
    """
    Name: U12.shtCRC(numWrite, numRead, datatx, datarx, statusReg=0)
    Args: See section 4.33 of the User's Guide
    Desc: Checks the CRC on an SHT1X communication
    """
    # Convert the Python sequences into ctypes buffers for the DLL call.
    txBuffer = listToCArray(datatx, ctypes.c_ubyte)
    rxBuffer = listToCArray(datarx, ctypes.c_ubyte)
    # The DLL's return value is the CRC check result itself.
    return staticLib.SHTCRC(statusReg, numWrite, numRead,
                            ctypes.byref(txBuffer), ctypes.byref(rxBuffer))
def synch(self, mode, numWriteRead, data, idNum=None, demo=0, msDelay=0, husDelay=0, controlCS=0, csLine=None, csState=0, configD=0):
    """
    Name: U12.synch(mode, numWriteRead, data, idNum=None, demo=0, msDelay=0, husDelay=0, controlCS=0, csLine=None, csState=0, configD=0)
    Args: See section 4.35 of the User's Guide
    Desc: This function retrieves temperature and/or humidity readings from an SHT1X sensor.
    """
    # Fall back to this device's id when the caller did not supply one.
    if idNum is None:
        idNum = self.id
    deviceId = ctypes.c_long(idNum)
    if controlCS > 0 and csLine is None:
        raise ValueError("csLine must be specified")
    # The DLL expects exactly 18 elements; zero-pad the caller's data.
    padded = [0] * 18
    for pos, val in enumerate(data):
        padded[pos] = val
    dataBuffer = listToCArray(padded, ctypes.c_long)
    errorCode = staticLib.Synch(ctypes.byref(deviceId), demo, mode, msDelay, husDelay,
                                controlCS, csLine, csState, configD, numWriteRead,
                                ctypes.byref(dataBuffer))
    if errorCode != 0:
        raise U12Exception(errorCode)
    return {"idnum": deviceId.value, "data": dataBuffer}
def watchdog(self, active, timeout, activeDn, stateDn, idNum=None, demo=0, reset=0):
    """
    Name: U12.watchdog(active, timeout, activeDn, stateDn, idNum=None, demo=0, reset=0)
    Args: See section 4.35 of the User's Guide
    Desc: Controls the LabJack watchdog function.
    >>> dev = U12()
    >>> dev.watchdog(1, 1, [0, 0, 0], [0, 0, 0])
    >>> {'idnum': 1}
    """
    #Check id number
    if idNum is None:
        idNum = self.id
    idNum = ctypes.c_long(idNum)
    # Bug fixes: `len(x) is not 3` relied on CPython small-int identity
    # caching (and is a SyntaxWarning on modern Python) -- use `!=`.
    # Also `raise Value(...)` was a NameError; ValueError was intended.
    if len(activeDn) != 3:
        raise ValueError("activeDn must have 3 elements")
    if len(stateDn) != 3:
        raise ValueError("stateDn must have 3 elements")
    ecode = staticLib.Watchdog(ctypes.byref(idNum), demo, active, timeout, reset,
                               activeDn[0], activeDn[1], activeDn[2],
                               stateDn[0], stateDn[1], stateDn[2])
    if ecode != 0:
        raise U12Exception(ecode)
    return {"idnum": idNum.value}
def readMem(self, address, idnum=None):
    """
    Name: U12.readMem(address, idnum=None)
    Args: See section 4.36 of the User's Guide
    Desc: Reads 4 bytes from a specified address in the LabJack's nonvolatile memory
    >>> dev = U12()
    >>> dev.readMem(0)
    >>> [5, 246, 16, 59]
    """
    if address is None:
        # Fix: the old `raise Exception, "..."` form is a syntax error
        # under Python 3; the exception type and message are unchanged.
        raise Exception("Must give an Address.")
    if idnum is None:
        idnum = self.id
    ljid = ctypes.c_ulong(idnum)
    # Four out-parameters, one per byte, most significant (ad3) first.
    ad0 = ctypes.c_ulong()
    ad1 = ctypes.c_ulong()
    ad2 = ctypes.c_ulong()
    ad3 = ctypes.c_ulong()
    ec = staticLib.ReadMem(ctypes.byref(ljid), ctypes.c_long(address),
                           ctypes.byref(ad3), ctypes.byref(ad2),
                           ctypes.byref(ad1), ctypes.byref(ad0))
    if ec != 0:
        raise U12Exception(ec)
    # Mask each value to a single byte; order is ad3..ad0.
    return [int(ad3.value & 0xff), int(ad2.value & 0xff),
            int(ad1.value & 0xff), int(ad0.value & 0xff)]
def writeMem(self, address, data, idnum=None, unlocked=False):
    """
    Name: U12.writeMem(self, address, data, idnum=None, unlocked=False)
    Args: See section 4.37 of the User's Guide
    Desc: Writes 4 bytes to the LabJack's 8,192 byte nonvolatile memory at a specified address.
    >>> dev = U12()
    >>> dev.writeMem(0, [5, 246, 16, 59])
    >>> 1
    """
    # Fix: the old `raise Exception, "..."` form is a syntax error under
    # Python 3; exception types and messages are unchanged.
    if address is None or data is None:
        raise Exception("Must give both an Address and data.")
    # isinstance also accepts list subclasses (the old `type(data) is not
    # list` rejected them).
    if not isinstance(data, list) or len(data) != 4:
        raise Exception("Data must be a list and have a length of 4")
    if idnum is None:
        idnum = self.id
    ljid = ctypes.c_ulong(idnum)
    # Bytes are passed most significant first (data[3]..data[0]), each
    # masked to 8 bits.
    ec = staticLib.WriteMem(ctypes.byref(ljid), int(unlocked), address,
                            data[3] & 0xff, data[2] & 0xff,
                            data[1] & 0xff, data[0] & 0xff)
    if ec != 0:
        raise U12Exception(ec)
    return ljid.value
def LJHash(self, hashStr, size):
    """Run *hashStr* (of *size* bytes) through the LabJack hash routine.

    NOTE(review): loads the ljackuw DLL via ctypes.windll, so this is
    Windows-only.
    """
    digest = (ctypes.c_char * 16)()
    staticLib = ctypes.windll.LoadLibrary("ljackuw")
    ec = staticLib.LJHash(ctypes.cast(hashStr, ctypes.POINTER(ctypes.c_char)),
                          size,
                          ctypes.cast(digest, ctypes.POINTER(ctypes.c_char)),
                          0)
    if ec != 0:
        raise U12Exception(ec)
    # Concatenate the 16 output characters into a single string.
    return ''.join(digest[i] for i in range(16))
def isIterable(var):
    """Return True if *var* supports iteration, False otherwise."""
    try:
        iter(var)
    except TypeError:
        # Fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; iter() raises TypeError for
        # non-iterables, so catch only that.
        return False
    return True
def listToCArray(list, dataType):
    """Copy a Python sequence into a newly created ctypes array of *dataType*."""
    carray = (dataType * len(list))()
    for index, value in enumerate(list):
        carray[index] = value
    return carray
def cArrayToList(array):
    """Return the elements of a ctypes array (or any iterable) as a Python list."""
    # list() replaces the manual append loop and avoids shadowing the
    # builtin `list` with a local variable.
    return list(array)
def getErrorString(errorcode):
    """
    Name: U12.getErrorString(errorcode)
    Args: See section 4.19 of the User's Guide
    Desc: Converts a LabJack errorcode, returned by another function, into a string describing the error. No hardware communication is involved.
    >>> dev = U12()
    >>> dev.getErrorString(1)
    >>> Unknown error
    """
    # 50-byte scratch buffer the DLL writes the message into.
    messageBuffer = ctypes.c_char_p(" "*50)
    staticLib.GetErrorString(errorcode, messageBuffer)
    return messageBuffer.value
def hexWithoutQuotes(l):
    """ Return a string listing hex without all the single quotes.
    >>> hexWithoutQuotes(list(range(10)))
    '[0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9]'
    """
    # Build "[0x0, 0x1, ...]" directly instead of str()-ing a list of
    # strings and stripping the quotes afterwards.
    return "[" + ", ".join(hex(value) for value in l) + "]"
| gpl-2.0 |
ShieldKteam/shield_osprey | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that creates arbitrarily nested dicts on first access."""
    return defaultdict(autodict)
# Per-event lookup tables mapping event/field names to their flag and
# symbolic value definitions; populated by the define_* callbacks below.
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    """Register the delimiter used when rendering this flag field's values."""
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    """Register the printable name for one bit value of a flag field."""
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    """Register a symbolic field (no per-field state is required)."""
    # nothing to do, really
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    """Register the printable name for one value of a symbolic field."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Render a flag-field bitmask as its registered names, joined by the
    field's delimiter; bits without a registered name are ignored."""
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        # Fix: `keys = d.keys(); keys.sort()` breaks on Python 3, where
        # dict.keys() returns a view with no sort(); sorted() works on both.
        for idx in sorted(flag_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                # value 0 maps directly to the name registered for 0.
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Return the registered name for a symbolic field value, or "" when
    the value has no registered name."""
    string = ""
    if symbolic_fields[event_name][field_name]:
        # Fix: `keys = d.keys(); keys.sort()` breaks on Python 3, where
        # dict.keys() returns a view with no sort(); sorted() works on both.
        for idx in sorted(symbolic_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
# Bit values of the ftrace per-event flags word and their names.
trace_flags = { 0x00: "NONE",
                0x01: "IRQS_OFF",
                0x02: "IRQS_NOSUPPORT",
                0x04: "NEED_RESCHED",
                0x08: "HARDIRQ",
                0x10: "SOFTIRQ" }

def trace_flag_str(value):
    """Render a trace-flags bitmask as a ' | '-separated list of names."""
    names = []
    for bit in trace_flags.keys():
        if not value and not bit:
            names.append("NONE")
            break
        if bit and (value & bit) == bit:
            names.append(trace_flags[bit])
            value &= ~bit
    return " | ".join(names)
def taskState(state):
    """Map a scheduler task-state code to its one-letter mnemonic,
    or "Unknown" for unrecognized codes."""
    return {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }.get(state, "Unknown")
class EventHeaders:
    """Common per-event header fields shared by all perf trace records."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Full event timestamp in nanoseconds."""
        return self.secs * (10 ** 9) + self.nsecs

    def ts_format(self):
        """Timestamp formatted as 'seconds.microseconds'."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
hectord/lettuce | lettuce/django/management/commands/harvest.py | 3 | 9169 | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
from optparse import make_option
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.test.utils import setup_test_environment
from django.test.utils import teardown_test_environment
from lettuce import Runner
from lettuce import registry
from lettuce.core import SummaryTotalResults
from lettuce.django import harvest_lettuces, get_server
from lettuce.django.server import LettuceServerException
class Command(BaseCommand):
    """Django management command that discovers and runs lettuce features."""

    help = u'Run lettuce tests all along installed apps'
    args = '[PATH to feature file or folder]'
    requires_model_validation = False

    # Drop BaseCommand's own --verbosity option (index 0) and supply a
    # lettuce-specific replacement plus the harvest options below.
    option_list = BaseCommand.option_list[1:] + (
        make_option('-v', '--verbosity', action='store', dest='verbosity', default='4',
            type='choice', choices=map(str, range(5)),
            help='Verbosity level; 0=no output, 1=only dots, 2=only scenario names, 3=colorless output, 4=normal output (colorful)'),
        make_option('-a', '--apps', action='store', dest='apps', default='',
            help='Run ONLY the django apps that are listed here. Comma separated'),
        make_option('-A', '--avoid-apps', action='store', dest='avoid_apps', default='',
            help='AVOID running the django apps that are listed here. Comma separated'),
        make_option('-S', '--no-server', action='store_true', dest='no_server', default=False,
            help="will not run django's builtin HTTP server"),
        make_option('--nothreading', action='store_false', dest='use_threading', default=True,
            help='Tells Django to NOT use threading.'),
        make_option('-T', '--test-server', action='store_true', dest='test_database',
            default=getattr(settings, "LETTUCE_USE_TEST_DATABASE", False),
            help="will run django's builtin HTTP server using the test databases"),
        make_option('-P', '--port', type='int', dest='port',
            help="the port in which the HTTP server will run at"),
        make_option('-d', '--debug-mode', action='store_true', dest='debug', default=False,
            help="when put together with builtin HTTP server, forces django to run with settings.DEBUG=True"),
        make_option('-s', '--scenarios', action='store', dest='scenarios', default=None,
            help='Comma separated list of scenarios to run'),
        make_option("-t", "--tag",
            dest="tags",
            type="str",
            action='append',
            default=None,
            help='Tells lettuce to run the specified tags only; '
            'can be used multiple times to define more tags'
            '(prefixing tags with "-" will exclude them and '
            'prefixing with "~" will match approximate words)'),
        make_option('--with-xunit', action='store_true', dest='enable_xunit', default=False,
            help='Output JUnit XML test results to a file'),
        make_option('--smtp-queue', action='store_true', dest='smtp_queue', default=False,
            help='Use smtp for mail queue (usefull with --no-server option'),
        make_option('--xunit-file', action='store', dest='xunit_file', default=None,
            help='Write JUnit XML to this file. Defaults to lettucetests.xml'),
        make_option('--with-subunit',
            action='store_true',
            dest='enable_subunit',
            default=False,
            help='Output Subunit test results to a file'),
        make_option('--subunit-file',
            action='store',
            dest='subunit_file',
            default=None,
            help='Write Subunit to this file. Defaults to subunit.bin'),
        make_option("--failfast", dest="failfast", default=False,
            action="store_true", help='Stop running in the first failure'),
        make_option("--pdb", dest="auto_pdb", default=False,
            action="store_true", help='Launches an interactive debugger upon error'),
    )

    def stopserver(self, failed=False):
        """Exit the process; exit code 1 when any scenario failed, else 0."""
        raise SystemExit(int(failed))

    def get_paths(self, args, apps_to_run, apps_to_avoid):
        """Resolve the feature paths to harvest.

        Explicit command-line paths win; otherwise the installed apps are
        scanned (returning (path, app_module) tuples).
        """
        if args:
            # Validate every user-supplied path before running anything.
            for path, exists in zip(args, map(os.path.exists, args)):
                if not exists:
                    sys.stderr.write("You passed the path '%s', but it does not exist.\n" % path)
                    sys.exit(1)
            else:
                # for/else: reached when no path failed validation.
                paths = args
        else:
            paths = harvest_lettuces(apps_to_run, apps_to_avoid)  # list of tuples with (path, app_module)

        return paths

    def handle(self, *args, **options):
        """Run the harvested features, optionally behind a live HTTP server
        and/or against the test databases; always exits via SystemExit."""
        setup_test_environment()

        verbosity = int(options.get('verbosity', 4))
        apps_to_run = tuple(options.get('apps', '').split(","))
        apps_to_avoid = tuple(options.get('avoid_apps', '').split(","))
        run_server = not options.get('no_server', False)
        test_database = options.get('test_database', False)
        smtp_queue = options.get('smtp_queue', False)
        tags = options.get('tags', None)
        failfast = options.get('failfast', False)
        auto_pdb = options.get('auto_pdb', False)
        threading = options.get('use_threading', True)
        with_summary = options.get('summary_display', False)

        if test_database:
            # Set up an isolated test database; South migrations are applied
            # only when South is importable and patchable.
            migrate_south = getattr(settings, "SOUTH_TESTS_MIGRATE", True)
            try:
                from south.management.commands import patch_for_test_db_setup
                patch_for_test_db_setup()
            except:
                # NOTE(review): bare except deliberately treats any South
                # import/patch failure as "no South migrations".
                migrate_south = False
                pass

            from django.test.utils import get_runner
            self._testrunner = get_runner(settings)(interactive=False)
            self._testrunner.setup_test_environment()
            self._old_db_config = self._testrunner.setup_databases()

            call_command('syncdb', verbosity=0, interactive=False,)
            if migrate_south:
                call_command('migrate', verbosity=0, interactive=False,)

        settings.DEBUG = options.get('debug', False)

        paths = self.get_paths(args, apps_to_run, apps_to_avoid)
        server = get_server(port=options['port'], threading=threading)

        if run_server:
            try:
                server.start()
            except LettuceServerException, e:
                raise SystemExit(e)

        # Expose the live server address to the steps via the environment.
        os.environ['SERVER_NAME'] = str(server.address)
        os.environ['SERVER_PORT'] = str(server.port)

        failed = False

        registry.call_hook('before', 'harvest', locals())
        results = []
        try:
            for path in paths:
                app_module = None
                # harvest_lettuces() yields (path, app_module) pairs.
                if isinstance(path, tuple) and len(path) is 2:
                    path, app_module = path

                if app_module is not None:
                    registry.call_hook('before_each', 'app', app_module)

                runner = Runner(path, options.get('scenarios'), verbosity,
                                enable_xunit=options.get('enable_xunit'),
                                enable_subunit=options.get('enable_subunit'),
                                xunit_filename=options.get('xunit_file'),
                                subunit_filename=options.get('subunit_file'),
                                tags=tags, failfast=failfast, auto_pdb=auto_pdb,
                                smtp_queue=smtp_queue)

                result = runner.run()
                if app_module is not None:
                    registry.call_hook('after_each', 'app', app_module, result)

                results.append(result)
                if not result or result.steps != result.steps_passed:
                    failed = True

        except SystemExit, e:
            failed = e.code

        except Exception, e:
            failed = True
            import traceback
            traceback.print_exc(e)

        finally:
            # Always summarize, tear the environment down and exit with a
            # status reflecting success/failure.
            summary = SummaryTotalResults(results)
            summary.summarize_all()
            registry.call_hook('after', 'harvest', summary)

            if test_database:
                self._testrunner.teardown_databases(self._old_db_config)

            teardown_test_environment()
            server.stop(failed)
            raise SystemExit(int(failed))
| gpl-3.0 |
SaganBolliger/nupic | examples/tp/tp_test.py | 34 | 87763 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file performs a variety of tests on the reference temporal pooler code.
basic_test
==========
Tests creation and serialization of the TP class. Sets parameters and ensures
they are the same after a serialization and de-serialization step. Runs learning
and inference on a small number of random patterns and ensures it doesn't crash.
===============================================================================
Basic First Order Sequences
===============================================================================
These tests ensure the most basic (first order) sequence learning mechanism is
working.
Parameters: Use a "fast learning mode": turn off global decay, temporal pooling
and hilo (make minThreshold really high). initPerm should be greater than
connectedPerm and permanenceDec should be zero. With these settings sequences
should be learned in one pass:
minThreshold = newSynapseCount
globalDecay = 0
temporalPooling = False
initialPerm = 0.8
connectedPerm = 0.7
permanenceDec = 0
permanenceInc = 0.4
Other Parameters:
numCols = 100
cellsPerCol = 1
newSynapseCount=11
activationThreshold = 8
permanenceMax = 1
Note: this is not a high order sequence, so one cell per column is fine.
Input Sequence: We train with M input sequences, each consisting of N random
patterns. Each pattern consists of a random number of bits on. The number of 1's
in each pattern should be between 21 and 25 columns. The sequences are
constructed so that consecutive patterns within a sequence don't share any
columns.
Training: The TP is trained with P passes of the M sequences. There
should be a reset between sequences. The total number of iterations during
training is P*N*M.
Testing: Run inference through the same set of sequences, with a reset before
each sequence. For each sequence the system should accurately predict the
pattern at the next time step up to and including the N-1'st pattern. A perfect
prediction consists of getting every column correct in the prediction, with no
extra columns. We report the number of columns that are incorrect and report a
failure if more than 2 columns are incorrectly predicted.
We can also calculate the number of segments and synapses that should be
learned. We raise an error if too many or too few were learned.
B1) Basic sequence learner. M=1, N=100, P=1.
B2) Same as above, except P=2. Test that permanences go up and that no
additional synapses or segments are learned.
B3) N=300, M=1, P=1. (See how high we can go with M)
B4) N=100, M=3, P=1 (See how high we can go with N*M)
B5) Like B1) but only have newSynapseCount columns ON in each pattern (instead of
between 21 and 25), and set activationThreshold to newSynapseCount.
B6) Like B1 but with cellsPerCol = 4. First order sequences should still work
just fine.
B7) Like B1 but with slower learning. Set the following parameters differently:
activationThreshold = newSynapseCount
minThreshold = activationThreshold
initialPerm = 0.2
connectedPerm = 0.7
permanenceInc = 0.2
Now we train the TP with the B1 sequence 4 times (P=4). This will increment
the permanences to be above 0.8 and at that point the inference will be correct.
This test will ensure the basic match function and segment activation rules are
working correctly.
B8) Like B7 but with 4 cells per column. Should still work.
B9) Like B7 but present the sequence less than 4 times: the inference should be
incorrect.
B10) Like B2, except that cells per column = 4. Should still add zero additional
synapses.
===============================================================================
High Order Sequences
===============================================================================
These tests ensure that high order sequences can be learned in a multiple cells
per column instantiation.
Parameters: Same as Basic First Order Tests above, but with varying cells per
column.
Input Sequence: We train with M input sequences, each consisting of N random
patterns. Each pattern consists of a random number of bits on. The number of 1's
in each pattern should be between 21 and 25 columns (except for H0). The
sequences are constructed so that consecutive patterns within a sequence don't
share any columns. The sequences are constructed to contain shared subsequences,
such as:
A B C D E F G H I J
K L M D E F N O P Q
The position and length of shared subsequences are parameters in the tests.
Training: Identical to basic first order tests above.
Testing: Identical to basic first order tests above unless noted.
We can also calculate the number of segments and synapses that should be
learned. We raise an error if too many or too few were learned.
H0) Two simple high order sequences, each of length 7, with a shared
subsequence in positions 2-4. Each pattern has a consecutive set of 5 bits on.
No pattern shares any columns with the others. These sequences are easy to
visualize and is very useful for debugging.
H1) Learn two sequences with a short shared pattern. Parameters
should be the same as B1. This test will FAIL since cellsPerCol == 1. No
consecutive patterns share any column.
H2) As above but with cellsPerCol == 4. This test should PASS. No consecutive
patterns share any column.
H2a) Same as above, except P=2. Test that permanences go up and that no
additional synapses or segments are learned.
H3) Same parameters as H.2 except sequences are created such that they share a
single significant sub-sequence. Subsequences should be reasonably long and in
the middle of sequences. No consecutive patterns share any column.
H4) Like H.3, except the shared subsequence is in the beginning. (e.g.
"ABCDEF" and "ABCGHIJ". At the point where the shared subsequence ends, all
possible next patterns should be predicted. As soon as you see the first unique
pattern, the predictions should collapse to be a perfect prediction.
H5) Shared patterns. Similar to H3 except that patterns are shared between
sequences. All sequences are different shufflings of the same set of N
patterns (there is no shared subsequence). Care should be taken such that the
same three patterns never follow one another in two sequences.
H6) Combination of H5) and H3). Shared patterns in different sequences, with a
shared subsequence.
H7) Stress test: every other pattern is shared. [Unimplemented]
H8) Start predicting in the middle of a sequence. [Unimplemented]
H9) Hub capacity. How many patterns can use that hub?
[Implemented, but does not run by default.]
H10) Sensitivity to small amounts of noise during inference. [Unimplemented]
H11) Higher order patterns with alternating elements.
Create the following 4 sequences:
A B A B A C
A B A B D E
A B F G H I
A J K L M N
After training we should verify that the expected transitions are in the
model. Prediction accuracy should be perfect. In addition, during inference,
after the first element is presented, the columns should not burst any more.
Need to verify, for the first sequence, that the high order representation
when presented with the second A and B is different from the representation
in the first presentation.
===============================================================================
Temporal Pooling Tests [UNIMPLEMENTED]
===============================================================================
Parameters: Use a "fast learning mode": With these settings sequences should be
learned in one pass:
minThreshold = newSynapseCount
globalDecay = 0
initialPerm = 0.8
connectedPerm = 0.7
permanenceDec = 0
permanenceInc = 0.4
Other Parameters:
cellsPerCol = 4
newSynapseCount=11
activationThreshold = 11
permanenceMax = 1
doPooling = True
Input Sequence: We train with M input sequences, each consisting of N random
patterns. Each pattern consists of a random number of bits on. The number of 1's
in each pattern should be between 17 and 21 columns. The sequences are
constructed so that consecutive patterns within a sequence don't share any
columns.
Note: for pooling tests the density of input patterns should be pretty low
since each pooling step increases the output density. At the same time, we need
enough bits on in the input for the temporal pooler to find enough synapses. So,
for the tests, constraints should be something like:
(Input Density) * (Number of pooling steps) < 25 %.
AND
sum(Input) > newSynapseCount*1.5
Training: The TP is trained with P passes of the M sequences. There
should be a reset between sequences. The total number of iterations during
training is P*N*M.
Testing: Run inference through the same set of sequences, with a reset before
each sequence. For each sequence the system should accurately predict the
pattern at the next P time steps, up to and including the N-P'th pattern. A
perfect prediction consists of getting every column correct in the prediction,
with no extra columns. We report the number of columns that are incorrect and
report a failure if more than 2 columns are incorrectly predicted.
P1) Train the TP two times (P=2) on a single long sequence consisting of random
patterns (N=20, M=1). There should be no overlapping columns between successive
patterns. During inference, the TP should be able reliably predict the pattern
two time steps in advance. numCols should be about 350 to meet the above
constraints and also to maintain consistency with test P2.
P2) Increase TP rate to 3 time steps in advance (P=3). At each step during
inference, the TP should be able to reliably predict the pattern coming up at
t+1, t+2, and t+3..
P3) Set segUpdateValidDuration to 2 and set P=3. This should behave almost
identically to P1. It should only predict the next time step correctly and not
two time steps in advance. (Check off by one error in this logic.)
P4) As above, but with multiple sequences.
P5) Same as P3 but with shared subsequences.
Continuous mode tests
=====================
Slow changing inputs.
Orphan Decay Tests
==================
HiLo Tests
==========
A high order sequence memory like the TP can memorize very long sequences. In
many applications though you don't want to memorize. You see a long sequence of
patterns but there are actually lower order repeating sequences embedded within
it. A simplistic example is words in a sentence. You'd like the TP to learn those word sequences.
Tests should capture number of synapses learned and compare against
theoretically optimal numbers to pass/fail.
HL0a) For debugging, similar to H0. We want to learn a 3 pattern long sequence presented
with noise before and after, with no resets. Two steps of noise will be presented.
The noise will be 20 patterns, presented in random order. Every pattern has a
consecutive set of 5 bits on, so the vector will be 115 bits long. No pattern
shares any columns with the others. These sequences are easy to visualize and is
very useful for debugging.
TP parameters should be the same as B7 except that permanenceDec should be 0.05:
activationThreshold = newSynapseCount
minThreshold = activationThreshold
initialPerm = 0.2
connectedPerm = 0.7
permanenceInc = 0.2
permanenceDec = 0.05
So, this means it should learn a sequence after 4 repetitions. It will take
4 orphan decay steps to get an incorrect synapse to go away completely.
HL0b) Like HL0a, but after the 3-sequence is learned, try to learn a 4-sequence that
builds on the 3-sequence. For example, if learning A-B-C we train also on
D-A-B-C. It should learn that ABC is separate from DABC. Note: currently this
test is disabled in the code. It is a bit tricky to test this. When you present DAB,
you should predict the same columns as when you present AB (i.e. in both cases
C should be predicted). However, the representation for C in DABC should be
different than the representation for C in ABC. Furthermore, when you present
AB, the representation for C should be an OR of the representation in DABC and ABC
since you could also be starting in the middle of the DABC sequence. All this is
actually happening in the code, but verified by visual inspection only.
HL1) Noise + sequence + noise + sequence repeatedly without resets until it has
learned that sequence. Train the TP repeatedly with N random sequences that all
share a single subsequence. Each random sequence can be 10 patterns long,
sharing a subsequence that is 5 patterns long. There should be no resets
between presentations. Inference should then be on that 5 long shared subsequence.
Example (3-long shared subsequence):
A B C D E F G H I J
K L M D E F N O P Q
R S T D E F U V W X
Y Z 1 D E F 2 3 4 5
TP parameters should be the same as HL0.
HL2) Like HL1, but after A B C has learned, try to learn D A B C . It should learn
ABC is separate from DABC.
HL3) Like HL2, but test with resets.
HL4) Like HL1 but with minThreshold high. This should FAIL and learn a ton
of synapses.
HiLo but with true high order sequences embedded in noise
Present 25 sequences in random order with no resets but noise between
sequences (1-20 samples). Learn all 25 sequences. Test global decay vs non-zero
permanenceDec .
Pooling + HiLo Tests [UNIMPLEMENTED]
====================
Needs to be defined.
Global Decay Tests [UNIMPLEMENTED]
==================
Simple tests to ensure global decay is actually working.
Sequence Likelihood Tests
=========================
These tests are in the file TPLikelihood.py
Segment Learning Tests [UNIMPLEMENTED]
======================
Multi-attribute sequence tests.
SL1) Train the TP repeatedly using a single (multiple) sequence plus noise. The
sequence can be relatively short, say 20 patterns. No two consecutive patterns
in the sequence should share columns. Add random noise each time a pattern is
presented. The noise should be different for each presentation and can be equal
to the number of on bits in the pattern. After N iterations of the noisy
sequences, the TP should achieve perfect inference on the true sequence.
There should be resets between each presentation of the sequence.
Check predictions in the sequence only. And test with clean sequences.
Vary percentage of bits that are signal vs noise.
Noise can be a fixed alphabet instead of being randomly generated.
SL2) As above, but with no resets.
Shared Column Tests [UNIMPLEMENTED]
===================
Carefully test what happens when consecutive patterns in a sequence share
columns.
Sequence Noise Tests [UNIMPLEMENTED]
====================
Note: I don't think these will work with the current logic. Need to discuss
whether we want to accommodate sequence noise like this.
SN1) Learn sequence with pooling up to T timesteps. Run inference on a sequence
and occasionally drop elements of a sequence. Inference should still work.
SN2) As above, but occasionally add a random pattern into a sequence.
SN3) A combination of the above two.
Capacity Tests [UNIMPLEMENTED]
==============
These are stress tests that verify that the temporal pooler can learn a large
number of sequences and can predict a large number of possible next steps. Some
research needs to be done first to understand the capacity of the system as it
relates to the number of columns, cells per column, etc.
Token Prediction Tests: Test how many predictions of individual tokens we can
superimpose and still recover.
Online Learning Tests [UNIMPLEMENTED]
=====================
These tests will verify that the temporal pooler continues to work even if
sequence statistics (and the actual sequences) change slowly over time. The TP
should adapt to the changes and learn to recognize newer sequences (and forget
the older sequences?).
"""
import random
import numpy
from numpy import *
import sys
import pickle
import cPickle
import pprint
from nupic.research.TP import TP
from nupic.research.TP10X2 import TP10X2
from nupic.research import fdrutilities as fdrutils
#---------------------------------------------------------------------------------
# Global test configuration.
TEST_CPP_TP = 1 # temporarily disabled until it can be updated
VERBOSITY = 0 # how chatty the unit tests should be
SEED = 33 # the random seed used throughout
# TP implementation under test; declared global in the tests, so presumably
# rebound elsewhere (e.g. to TP10X2 for the C++ runs) -- confirm with caller.
TPClass = TP
# Forwarded to the TP constructor in the tests below.
checkSynapseConsistency = False
rgen = numpy.random.RandomState(SEED) # always call this rgen, NOT random
#---------------------------------------------------------------------------------
# Helper routines
#--------------------------------------------------------------------------------
def printOneTrainingVector(x):
    """Print a pattern on one line: '1' for each active bit, '.' otherwise."""
    chars = []
    for bit in x:
        chars.append('1' if bit != 0 else '.')
    print(''.join(chars))
def printAllTrainingSequences(trainingSequences, upTo = 99999):
for t in xrange(min(len(trainingSequences[0]), upTo)):
print 't=',t,
for i,trainingSequence in enumerate(trainingSequences):
print "\tseq#",i,'\t',
printOneTrainingVector(trainingSequences[i][t])
def generatePattern(numCols = 100,
                    minOnes =21,
                    maxOnes =25,
                    colSet = frozenset(),  # [] was unusable: lists have no .difference
                    prevPattern =numpy.array([])):
    """Generate a single test pattern with given parameters.

    Parameters:
    --------------------------------------------
    numCols:     Number of columns in each pattern.
    minOnes:     The minimum number of 1's in each pattern.
    maxOnes:     The maximum number of 1's in each pattern (inclusive).
    colSet:      The set of column indices available for the pattern.
    prevPattern: Pattern to avoid (null intersection).

    Returns a float32 vector of length numCols with between minOnes and
    maxOnes bits set, none of which overlap prevPattern's active bits.
    """
    assert minOnes < maxOnes
    assert maxOnes < numCols
    # numpy's randint treats the high bound as exclusive; use maxOnes + 1 so
    # that maxOnes itself can actually be drawn, as documented above.
    nOnes = rgen.randint(minOnes, maxOnes + 1)
    # Candidate columns: everything in colSet that is inactive in prevPattern.
    # If fewer than nOnes candidates remain, the pattern silently gets fewer
    # active bits (matches the original behavior).
    candidates = list(colSet.difference(set(prevPattern.nonzero()[0])))
    rgen.shuffle(candidates)
    ind = candidates[:nOnes]
    x = numpy.zeros(numCols, dtype='float32')
    x[ind] = 1
    return x
def buildTrainingSet(numSequences = 2,
                     sequenceLength = 100,
                     pctShared = 0.2,
                     seqGenMode = 'shared sequence',
                     subsequenceStartPos = 10,
                     numCols = 100,
                     minOnes=21,
                     maxOnes = 25,
                     disjointConsecutive =True):
    """Build random high order test sequences.
    Parameters:
    --------------------------------------------
    numSequences:        The number of sequences created.
    sequenceLength:      The length of each sequence.
    pctShared:           The percentage of sequenceLength that is shared across
                         every sequence. If sequenceLength is 100 and pctShared
                         is 0.2, then a subsequence consisting of 20 patterns
                         will be in every sequence. Can also be the keyword
                         'one pattern', in which case a single time step is shared.
    seqGenMode:          What kind of sequence to generate. If contains 'shared'
                         generates shared subsequence. If contains 'no shared',
                         does not generate any shared subsequence. If contains
                         'shuffle', will use common patterns shuffle among the
                         different sequences. If contains 'beginning', will
                         place shared subsequence at the beginning.
    subsequenceStartPos: The position where the shared subsequence starts
    numCols:             Number of columns in each pattern.
    minOnes:             The minimum number of 1's in each pattern.
    maxOnes:             The maximum number of 1's in each pattern.
    disjointConsecutive: Whether to generate disjoint consecutive patterns or not.

    Returns (trainingSequences, hubEnd): hubEnd is the time step just past the
    shared subsequence, or -1 when there is no shared subsequence.
    """
    # Calculate the set of column indexes once to be used in each call to generatePattern()
    colSet = set(range(numCols))
    if 'beginning' in seqGenMode:
        assert 'shared' in seqGenMode and 'no shared' not in seqGenMode
    if 'no shared' in seqGenMode or numSequences == 1:
        pctShared = 0.0
    #--------------------------------------------------------------------------------
    # Build shared subsequence
    if 'no shared' not in seqGenMode and 'one pattern' not in seqGenMode:
        sharedSequenceLength = int(pctShared*sequenceLength)
    elif 'one pattern' in seqGenMode:
        sharedSequenceLength = 1
    else:
        sharedSequenceLength = 0
    assert sharedSequenceLength + subsequenceStartPos < sequenceLength
    sharedSequence = []
    for i in xrange(sharedSequenceLength):
        # Consecutive patterns avoid each other's columns when requested.
        if disjointConsecutive and i > 0:
            x = generatePattern(numCols, minOnes, maxOnes, colSet, sharedSequence[i-1])
        else:
            x = generatePattern(numCols, minOnes, maxOnes, colSet)
        sharedSequence.append(x)
    #--------------------------------------------------------------------------------
    # Build random training set, splicing in the shared subsequence
    trainingSequences = []
    if 'beginning' not in seqGenMode:
        trailingLength = sequenceLength - sharedSequenceLength - subsequenceStartPos
    else:
        trailingLength = sequenceLength - sharedSequenceLength
    for k,s in enumerate(xrange(numSequences)):
        # TODO: implement no repetitions
        if len(trainingSequences) > 0 and 'shuffle' in seqGenMode:
            # Reuse the previous sequence's patterns, shuffling everything
            # outside the shared subsequence but keeping the hub in place.
            r = range(subsequenceStartPos) \
                + range(subsequenceStartPos + sharedSequenceLength, sequenceLength)
            rgen.shuffle(r)
            r = r[:subsequenceStartPos] \
                + range(subsequenceStartPos, subsequenceStartPos + sharedSequenceLength) \
                + r[subsequenceStartPos:]
            sequence = [trainingSequences[k-1][j] for j in r]
        else:
            sequence = []
            # Patterns before the shared subsequence (skipped in 'beginning' mode).
            if 'beginning' not in seqGenMode:
                for i in xrange(subsequenceStartPos):
                    if disjointConsecutive and i > 0:
                        x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-1])
                    else:
                        x = generatePattern(numCols, minOnes, maxOnes, colSet)
                    sequence.append(x)
            if 'shared' in seqGenMode and 'no shared' not in seqGenMode:
                sequence.extend(sharedSequence)
            # Patterns after the shared subsequence.
            for i in xrange(trailingLength):
                if disjointConsecutive and i > 0:
                    x = generatePattern(numCols, minOnes, maxOnes, colSet, sequence[i-1])
                else:
                    x = generatePattern(numCols, minOnes, maxOnes, colSet)
                sequence.append(x)
        assert len(sequence) == sequenceLength
        trainingSequences.append(sequence)
    assert len(trainingSequences) == numSequences
    if VERBOSITY >= 2:
        print "Training Sequences"
        pprint.pprint(trainingSequences)
    if sharedSequenceLength > 0:
        return (trainingSequences, subsequenceStartPos + sharedSequenceLength)
    else:
        return (trainingSequences, -1)
def getSimplePatterns(numOnes, numPatterns):
    """Return numPatterns non-overlapping patterns. Pattern i has the bits
    [i*numOnes, (i+1)*numOnes) set in a vector of numPatterns*numOnes bits."""
    totalCols = numOnes * numPatterns
    patterns = []
    for idx in range(numPatterns):
        vec = numpy.zeros(totalCols, dtype='float32')
        vec[idx * numOnes:(idx + 1) * numOnes] = 1
        patterns.append(vec)
    return patterns
def buildSimpleTrainingSet(numOnes=5):
    """Two very simple high order sequences for debugging. Each pattern in a
    sequence has a run of 1's in its own set of columns; the two sequences
    share the middle patterns p2, p3, p4."""
    p = getSimplePatterns(numOnes, 11)
    seqA = [p[i] for i in (0, 1, 2, 3, 4, 5, 6)]
    seqB = [p[i] for i in (7, 8, 2, 3, 4, 9, 10)]
    return ([seqA, seqB], 5)
def buildAlternatingTrainingSet(numOnes=5):
    """High order sequences that alternate elements. Pattern i has one's in
    i*numOnes to (i+1)*numOnes.
    The sequences are:
    A B A B A C
    A B A B D E
    A B F G H I
    A J K L M N
    """
    p = getSimplePatterns(numOnes, 14)
    indexSets = [
        (0, 1, 0, 1, 0, 2),
        (0, 1, 0, 1, 3, 4),
        (0, 1, 5, 6, 7, 8),
        (0, 9, 10, 11, 12, 13),
    ]
    trainingSequences = [[p[i] for i in idxs] for idxs in indexSets]
    return (trainingSequences, 5)
def buildHL0aTrainingSet(numOnes=5):
    """Simple sequences for HL0. Each pattern has a run of 1's in its own
    set of columns.
    There are 23 patterns, p0 to p22. The sequence to learn is p0->p1->p2,
    embedded 20 times in a long stream as: N p0 p1 p2 N, where every N is a
    noise pattern drawn uniformly from p3..p22.
    Returns ([stream], [[p0, p1, p2]]).
    """
    p = getSimplePatterns(numOnes, 23)

    def noisePattern():
        # One random distractor from p3..p22 (randint high bound exclusive).
        return p[rgen.randint(3, 23)]

    stream = [noisePattern()]
    for _ in range(20):
        stream.append(noisePattern())
        stream.extend([p[0], p[1], p[2]])
        stream.append(noisePattern())
    return ([stream], [[p[0], p[1], p[2]]])
def buildHL0bTrainingSet(numOnes=5):
    """Simple sequences for HL0b. Each pattern in the sequence has a series of 1's
    in a specific set of columns.
    There are 23 patterns, p0 to p22.
    The sequences we want to learn are p1->p2->p3 and p0->p1->p2->p4.
    We create a very long sequence consisting of these two sub-sequences
    intermixed with noise, such as:
    N N p0 p1 p2 p4 N N p1 p2 p3 N N p1 p2 p3
    N is randomly chosen from p5 to p22

    Returns ([s], answers): s is the long training stream, answers lists the
    two embedded sub-sequences. Note this prints its noise choices as it goes.
    """
    numPatterns = 23
    p = getSimplePatterns(numOnes, numPatterns)
    s = []
    # Leading noise pattern.
    s.append(p[rgen.randint(5,numPatterns)])
    for _ in xrange(50):
        # Noise pattern before each embedded sub-sequence.
        r = rgen.randint(5,numPatterns)
        print r,
        s.append(p[r])
        # Coin flip chooses which sub-sequence to embed this round.
        if rgen.binomial(1, 0.5) > 0:
            print "S1",
            s.append(p[0])
            s.append(p[1])
            s.append(p[2])
            s.append(p[4])
        else:
            print "S2",
            s.append(p[1])
            s.append(p[2])
            s.append(p[3])
        # Noise pattern after the sub-sequence.
        r = rgen.randint(5,numPatterns)
        s.append(p[r])
        print r,
    print
    return ([s], [ [p[0], p[1], p[2], p[4]], [p[1], p[2], p[3]] ])
# Basic test (creation, pickling, basic run of learning and inference)
def basicTest():
    """Smoke test: construct a TP, pickle/unpickle it and verify the saved
    parameters survive the round trip, then run a few steps of learning and
    inference on random input."""
    global TPClass, SEED, VERBOSITY, checkSynapseConsistency
    #--------------------------------------------------------------------------------
    # Create TP object
    numberOfCols =10
    cellsPerColumn =3
    initialPerm =.2
    connectedPerm =.8
    minThreshold =2
    newSynapseCount =5
    permanenceInc =.1
    permanenceDec =.05
    permanenceMax =1
    globalDecay =.05
    activationThreshold =4 # low for those basic tests on purpose
    doPooling =True
    segUpdateValidDuration =5
    seed =SEED
    verbosity =VERBOSITY
    tp = TPClass(numberOfCols, cellsPerColumn,
                 initialPerm, connectedPerm,
                 minThreshold, newSynapseCount,
                 permanenceInc, permanenceDec, permanenceMax,
                 globalDecay, activationThreshold,
                 doPooling, segUpdateValidDuration,
                 seed=seed, verbosity=verbosity,
                 pamLength = 1000,
                 checkSynapseConsistency=checkSynapseConsistency)
    print "Creation ok"
    #--------------------------------------------------------------------------------
    # Save and reload
    # NOTE(review): written with "wb" but read back in text mode; pickle
    # normally requires "rb" on the read side -- confirm on all platforms.
    pickle.dump(tp, open("test_tp.pkl", "wb"))
    tp2 = pickle.load(open("test_tp.pkl"))
    assert tp2.numberOfCols == numberOfCols
    assert tp2.cellsPerColumn == cellsPerColumn
    print tp2.initialPerm
    # Float parameters are compared as float32, matching how TP stores them.
    assert tp2.initialPerm == numpy.float32(.2)
    assert tp2.connectedPerm == numpy.float32(.8)
    assert tp2.minThreshold == minThreshold
    assert tp2.newSynapseCount == newSynapseCount
    assert tp2.permanenceInc == numpy.float32(.1)
    assert tp2.permanenceDec == numpy.float32(.05)
    assert tp2.permanenceMax == 1
    assert tp2.globalDecay == numpy.float32(.05)
    assert tp2.activationThreshold == activationThreshold
    assert tp2.doPooling == doPooling
    assert tp2.segUpdateValidDuration == segUpdateValidDuration
    assert tp2.seed == SEED
    assert tp2.verbosity == verbosity
    print "Save/load ok"
    #--------------------------------------------------------------------------------
    # Learn a few random binary vectors
    for i in xrange(5):
        xi = rgen.randint(0,2,(numberOfCols))
        x = numpy.array(xi, dtype="uint32")
        y = tp.learn(x)
    #--------------------------------------------------------------------------------
    # Infer on random vectors; just exercise checkPrediction2, the scores
    # themselves are not asserted here.
    patterns = rgen.randint(0,2,(4,numberOfCols))
    for i in xrange(10):
        xi = rgen.randint(0,2,(numberOfCols))
        x = numpy.array(xi, dtype="uint32")
        y = tp.infer(x)
        if i > 0:
            p = tp.checkPrediction2([pattern.nonzero()[0] for pattern in patterns])
    print "basicTest ok"
#---------------------------------------------------------------------------------
# Figure out acceptable patterns if none were passed to us.
def findAcceptablePatterns(tp, t, whichSequence, trainingSequences, nAcceptable = 1):
    """
    Tries to infer the set of acceptable patterns for prediction at the given
    time step and for the given sequence. Acceptable patterns are: the current one,
    plus a certain number of patterns after timeStep, in the sequence that the TP
    is currently tracking. Any other pattern is not acceptable.
    TODO:
    ====
    - Doesn't work for noise cases.
    - Might run in trouble if shared subsequence at the beginning.
    Parameters:
    ==========
    tp                the whole TP, so that we can look at its parameters
    t                 the current time step
    whichSequence     the sequence we are currently tracking
    trainingSequences all the training sequences
    nAcceptable       the number of steps forward from the current timeStep
                      we are willing to consider acceptable. In the case of
                      pooling, it is less than or equal to the min of the
                      number of training reps and the segUpdateValidDuration
                      parameter of the TP, depending on the test case.
                      The default value is 1, because by default, the pattern
                      after the current one should always be predictable.
    Return value:
    ============
    acceptablePatterns A list of acceptable patterns for prediction.
    """
    # Determine how many steps forward we want to see in the prediction
    upTo = t + 2 # always predict current and next
    # If the TP is pooling, more steps can be predicted
    if tp.doPooling:
        upTo += min(tp.segUpdateValidDuration, nAcceptable)
    assert upTo <= len(trainingSequences[whichSequence])
    acceptablePatterns = []
    # Check whether we were in a shared subsequence at the beginning.
    # If so, at the point of exiting the shared subsequence (t), we should
    # be predicting multiple patterns for 1 time step, then collapse back
    # to a single sequence.
    if len(trainingSequences) == 2 and \
       (trainingSequences[0][0] == trainingSequences[1][0]).all():
        if (trainingSequences[0][t] == trainingSequences[1][t]).all() \
            and (trainingSequences[0][t+1] != trainingSequences[1][t+1]).any():
            acceptablePatterns.append(trainingSequences[0][t+1])
            acceptablePatterns.append(trainingSequences[1][t+1])
    # Add the current pattern and the ones going forward, up to upTo.
    # BUGFIX: use a distinct loop variable; the original comprehension reused
    # (and, in Python 2, clobbered) the parameter t.
    acceptablePatterns += [trainingSequences[whichSequence][step]
                           for step in range(t, upTo)]
    return acceptablePatterns
def testSequence(trainingSequences,
                 nTrainingReps = 1,
                 numberOfCols = 40,
                 cellsPerColumn =5,
                 initialPerm =.8,
                 connectedPerm =.7,
                 minThreshold = 11,
                 newSynapseCount =5,
                 permanenceInc =.4,
                 permanenceDec =0.0,
                 permanenceMax =1,
                 globalDecay =0.0,
                 pamLength = 1000,
                 activationThreshold =5,
                 acceptablePatterns = [], # if empty, try to infer what they are
                 doPooling = False,
                 nAcceptable = -1, # if doPooling, number of acceptable steps
                 noiseModel = None,
                 noiseLevel = 0,
                 doResets = True,
                 shouldFail = False,
                 testSequences = None,
                 predJustAfterHubOnly = None,
                 compareToPy = False,
                 nMultiStepPrediction = 0,
                 highOrder = False):
    """Test a single set of sequences once and return the number of
    prediction failures, the number of errors, and the number of perfect
    predictions.

    trainingSequences is a tuple whose first element is the list of training
    sequences (as built by buildTrainingSet and friends); the other elements
    are ignored here. testSequences defaults to the training sequences.
    noiseModel, when not None, is a string of keywords; only the combination
    'xor' + 'binomial' + ('training' and/or 'inference') is acted on below.
    When compareToPy is True, a Python TP is run in lockstep with TPClass and
    their states are compared with fdrutils.tpDiff.

    Returns (numFailures, numStrictErrors, numPerfect, tp), with
    numPerfectAtHub inserted before tp when predJustAfterHubOnly is not None.
    """
    global TP, SEED, checkSynapseConsistency, VERBOSITY
    numPerfect = 0 # When every column is correct in the prediction
    numStrictErrors = 0 # When at least one column is incorrect
    numFailures = 0 # When > 2 columns are incorrect
    # NOTE(review): trainingSequences is still the tuple here, so this is the
    # number of sequences, not their length; it appears unused below -- confirm.
    sequenceLength = len(trainingSequences[0])
    segUpdateValidDuration =5
    verbosity = VERBOSITY
    # override default maxSeqLEngth value for high-order sequences
    if highOrder:
        tp = TPClass(numberOfCols, cellsPerColumn,
                     initialPerm, connectedPerm,
                     minThreshold, newSynapseCount,
                     permanenceInc, permanenceDec, permanenceMax,
                     globalDecay, activationThreshold,
                     doPooling, segUpdateValidDuration,
                     seed=SEED, verbosity=verbosity,
                     checkSynapseConsistency=checkSynapseConsistency,
                     pamLength=pamLength,
                     maxSeqLength=0
                     )
    else:
        tp = TPClass(numberOfCols, cellsPerColumn,
                     initialPerm, connectedPerm,
                     minThreshold, newSynapseCount,
                     permanenceInc, permanenceDec, permanenceMax,
                     globalDecay, activationThreshold,
                     doPooling, segUpdateValidDuration,
                     seed=SEED, verbosity=verbosity,
                     checkSynapseConsistency=checkSynapseConsistency,
                     pamLength=pamLength
                     )
    if compareToPy:
        # override default maxSeqLEngth value for high-order sequences
        if highOrder:
            py_tp = TP(numberOfCols, cellsPerColumn,
                       initialPerm, connectedPerm,
                       minThreshold, newSynapseCount,
                       permanenceInc, permanenceDec, permanenceMax,
                       globalDecay, activationThreshold,
                       doPooling, segUpdateValidDuration,
                       seed=SEED, verbosity=verbosity,
                       pamLength=pamLength,
                       maxSeqLength=0
                       )
        else:
            py_tp = TP(numberOfCols, cellsPerColumn,
                       initialPerm, connectedPerm,
                       minThreshold, newSynapseCount,
                       permanenceInc, permanenceDec, permanenceMax,
                       globalDecay, activationThreshold,
                       doPooling, segUpdateValidDuration,
                       seed=SEED, verbosity=verbosity,
                       pamLength=pamLength,
                       )
    # Unwrap the actual list of sequences from the tuple.
    trainingSequences = trainingSequences[0]
    if testSequences == None: testSequences = trainingSequences
    inferAcceptablePatterns = acceptablePatterns == []
    #--------------------------------------------------------------------------------
    # Learn
    for r in xrange(nTrainingReps):
        if VERBOSITY > 1:
            print "============= Learning round",r,"================="
        for sequenceNum, trainingSequence in enumerate(trainingSequences):
            if VERBOSITY > 1:
                print "============= New sequence ================="
            if doResets:
                tp.reset()
                if compareToPy:
                    py_tp.reset()
            for t,x in enumerate(trainingSequence):
                # Optionally corrupt the training input with binomial xor noise.
                if noiseModel is not None and \
                   'xor' in noiseModel and 'binomial' in noiseModel \
                   and 'training' in noiseModel:
                    noise_vector = rgen.binomial(len(x), noiseLevel, (len(x)))
                    x = logical_xor(x, noise_vector)
                if VERBOSITY > 2:
                    print "Time step",t, "learning round",r, "sequence number", sequenceNum
                    print "Input: ",tp.printInput(x)
                    print "NNZ:", x.nonzero()
                x = numpy.array(x).astype('float32')
                y = tp.learn(x)
                if compareToPy:
                    py_y = py_tp.learn(x)
                    if t % 25 == 0: # To track bugs, do that every iteration, but very slow
                        assert fdrutils.tpDiff(tp, py_tp, VERBOSITY) == True
                if VERBOSITY > 3:
                    tp.printStates(printPrevious = (VERBOSITY > 4))
                    print
            if VERBOSITY > 3:
                print "Sequence finished. Complete state after sequence"
                tp.printCells()
                print
    numPerfectAtHub = 0
    if compareToPy:
        print "End of training"
        assert fdrutils.tpDiff(tp, py_tp, VERBOSITY) == True
    #--------------------------------------------------------------------------------
    # Infer
    if VERBOSITY > 1: print "============= Inference ================="
    for s,testSequence in enumerate(testSequences):
        if VERBOSITY > 1: print "============= New sequence ================="
        if doResets:
            tp.reset()
            if compareToPy:
                py_tp.reset()
        slen = len(testSequence)
        for t,x in enumerate(testSequence):
            # Generate noise (optional)
            if noiseModel is not None and \
               'xor' in noiseModel and 'binomial' in noiseModel \
               and 'inference' in noiseModel:
                noise_vector = rgen.binomial(len(x), noiseLevel, (len(x)))
                x = logical_xor(x, noise_vector)
            if VERBOSITY > 2: print "Time step",t, '\nInput:', tp.printInput(x)
            x = numpy.array(x).astype('float32')
            y = tp.infer(x)
            if compareToPy:
                py_y = py_tp.infer(x)
                assert fdrutils.tpDiff(tp, py_tp, VERBOSITY) == True
            # if t == predJustAfterHubOnly:
            #   z = sum(y, axis = 1)
            #   print '\t\t',
            #   print ''.join('.' if z[i] == 0 else '1' for i in xrange(len(z)))
            if VERBOSITY > 3: tp.printStates(printPrevious = (VERBOSITY > 4),
                                             printLearnState = False); print
            # Multi-step prediction: score predictions t+1 .. t+n against the
            # actual future inputs of the test sequence.
            if nMultiStepPrediction > 0:
                y_ms = tp.predict(nSteps=nMultiStepPrediction)
                if VERBOSITY > 3:
                    print "Multi step prediction at Time step", t
                    for i in range(nMultiStepPrediction):
                        print "Prediction at t+", i+1
                        tp.printColConfidence(y_ms[i])
                # Error Checking
                for i in range(nMultiStepPrediction):
                    predictedTimeStep = t+i+1
                    if predictedTimeStep < slen:
                        input = testSequence[predictedTimeStep].nonzero()[0]
                        prediction = y_ms[i].nonzero()[0]
                        foundInInput, totalActiveInInput, \
                            missingFromInput, totalActiveInPrediction = \
                            fdrutils.checkMatch(input, prediction, sparse=True)
                        falseNegatives = totalActiveInInput - foundInInput
                        falsePositives = missingFromInput
                        if VERBOSITY > 2:
                            print "Predition from %d to %d" % (t, t+i+1)
                            print "\t\tFalse Negatives:", falseNegatives
                            print "\t\tFalse Positivies:", falsePositives
                        if falseNegatives > 0 or falsePositives > 0:
                            numStrictErrors += 1
                            if falseNegatives > 0 and VERBOSITY > 1:
                                print "Multi step prediction from t=", t, "to t=", t+i+1,\
                                      "false negative with error=",falseNegatives,
                                print "out of", totalActiveInInput,"ones"
                            if falsePositives > 0 and VERBOSITY > 1:
                                print "Multi step prediction from t=", t, "to t=", t+i+1,\
                                      "false positive with error=",falsePositives,
                                print "out of",totalActiveInInput,"ones"
                        if falsePositives > 3 or falseNegatives > 3:
                            numFailures += 1
                            # Analyze the failure if we care about it
                            if VERBOSITY > 1 and not shouldFail:
                                print 'Input at t=', t
                                print '\t\t',; printOneTrainingVector(testSequence[t])
                                print 'Prediction for t=', t+i+1
                                print '\t\t',; printOneTrainingVector(y_ms[i])
                                print 'Actual input at t=', t+i+1
                                print '\t\t',; printOneTrainingVector(testSequence[t+i+1])
            # Single-step prediction scoring (skipped on the last time step,
            # where there is no next input to check against).
            if t < slen-1:
                # If no acceptable patterns were passed to us, we need to infer them
                # for the current sequence and time step by looking at the testSequences.
                # nAcceptable is used to reduce the number of automatically determined
                # acceptable patterns.
                if inferAcceptablePatterns:
                    acceptablePatterns = findAcceptablePatterns(tp, t, s, testSequences,
                                                                nAcceptable)
                scores = tp.checkPrediction2([pattern.nonzero()[0] \
                                              for pattern in acceptablePatterns])
                falsePositives, falseNegatives = scores[0], scores[1]
                # We report an error if FN or FP is > 0.
                # We report a failure if number of FN or number of FP is > 2 for any
                # pattern. We also count the number of perfect predictions.
                if falseNegatives > 0 or falsePositives > 0:
                    numStrictErrors += 1
                    if falseNegatives > 0 and VERBOSITY > 1:
                        print "Pattern",s,"time",t,\
                              "prediction false negative with error=",falseNegatives,
                        print "out of",int(testSequence[t+1].sum()),"ones"
                    if falsePositives > 0 and VERBOSITY > 1:
                        print "Pattern",s,"time",t,\
                              "prediction false positive with error=",falsePositives,
                        print "out of",int(testSequence[t+1].sum()),"ones"
                if falseNegatives > 3 or falsePositives > 3:
                    numFailures += 1
                    # Analyze the failure if we care about it
                    if VERBOSITY > 1 and not shouldFail:
                        print 'Test sequences'
                        if len(testSequences) > 1:
                            printAllTrainingSequences(testSequences, t+1)
                        else:
                            print '\t\t',; printOneTrainingVector(testSequence[t])
                            print '\t\t',; printOneTrainingVector(testSequence[t+1])
                        print 'Acceptable'
                        for p in acceptablePatterns:
                            print '\t\t',; printOneTrainingVector(p)
                        print 'Output'
                        diagnostic = ''
                        output = sum(tp.currentOutput,axis=1)
                        print '\t\t',; printOneTrainingVector(output)
                else:
                    numPerfect += 1
                    if predJustAfterHubOnly is not None and predJustAfterHubOnly == t:
                        numPerfectAtHub += 1
    if predJustAfterHubOnly is None:
        return numFailures, numStrictErrors, numPerfect, tp
    else:
        return numFailures, numStrictErrors, numPerfect, numPerfectAtHub, tp
def TestB1(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B1"):
numCols = 100
sequenceLength = numUniquePatterns
nFailed = 0
for numSequences in [1]:
print "Test "+name+" (sequence memory - 1 repetition - 1 sequence)"
for k in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = 0.0,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 15, maxOnes = 20)
numFailures, numStrictErrors, numPerfect, tp = \
testSequence(trainingSet,
nTrainingReps = 1,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 8,
newSynapseCount = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
activationThreshold = 8,
doPooling = False)
if numFailures == 0:
print "Test "+name+" ok"
else:
print "Test "+name+" failed"
nFailed = nFailed + 1
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return nFailed
def TestB7(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B7"):
numCols = 100
sequenceLength = numUniquePatterns
nFailed = 0
for numSequences in [1]:
print "Test "+name+" (sequence memory - 4 repetition - 1 sequence - slow learning)"
for _ in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = 0.0,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 15, maxOnes = 20)
numFailures, numStrictErrors, numPerfect, tp = \
testSequence(trainingSet,
nTrainingReps = 4,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
minThreshold = 11,
newSynapseCount = 11,
activationThreshold = 11,
initialPerm = .2,
connectedPerm = .6,
permanenceInc = .2,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
doPooling = False)
if numFailures == 0:
print "Test "+name+" ok"
else:
print "Test "+name+" failed"
nFailed = nFailed + 1
print "numFailures=", numFailures,
print "numStrictErrors=", numStrictErrors,
print "numPerfect=", numPerfect
return nFailed
def TestB2(numUniquePatterns, nTests, cellsPerColumn = 1, name = "B2"):
numCols = 100
sequenceLength = numUniquePatterns
nFailed = 0
for numSequences in [1]: # TestC has multiple sequences
print "Test",name,"(sequence memory - second repetition of the same sequence" +\
" should not add synapses)"
print "Num patterns in sequence =", numUniquePatterns,
print "cellsPerColumn=",cellsPerColumn
for _ in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = 0.0,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 15, maxOnes = 20)
# Do one pass through the training set
numFailures1, numStrictErrors1, numPerfect1, tp1 = \
testSequence(trainingSet,
nTrainingReps = 1,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 8,
newSynapseCount = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
activationThreshold = 8)
# Do two passes through the training set
numFailures, numStrictErrors, numPerfect, tp2 = \
testSequence(trainingSet,
nTrainingReps = 2,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 8,
newSynapseCount = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
activationThreshold = 8)
# Check that training with a second pass did not result in more synapses
segmentInfo1 = tp1.getSegmentInfo()
segmentInfo2 = tp2.getSegmentInfo()
if (segmentInfo1[0] != segmentInfo2[0]) or \
(segmentInfo1[1] != segmentInfo2[1]) :
print "Training twice incorrectly resulted in more segments or synapses"
print "Number of segments: ", segmentInfo1[0], segmentInfo2[0]
numFailures += 1
if numFailures == 0:
print "Test",name,"ok"
else:
print "Test",name,"failed"
nFailed = nFailed + 1
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return nFailed
def TestB3(numUniquePatterns, nTests):
numCols = 100
sequenceLength = numUniquePatterns
nFailed = 0
for numSequences in [2,5]:
print "Test B3 (sequence memory - 2 repetitions -", numSequences, "sequences)"
for _ in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = 0.0,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 15, maxOnes = 20)
numFailures, numStrictErrors, numPerfect, tp = \
testSequence(trainingSet,
nTrainingReps = 2,
numberOfCols = numCols,
cellsPerColumn = 4,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 11,
activationThreshold = 8,
doPooling = False)
if numFailures == 0:
print "Test B3 ok"
else:
print "Test B3 failed"
nFailed = nFailed + 1
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return nFailed
def TestH0(numOnes = 5,nMultiStepPrediction=0):
cellsPerColumn = 4
print "Higher order test 0 with cellsPerColumn=",cellsPerColumn
trainingSet = buildSimpleTrainingSet(numOnes)
numFailures, numStrictErrors, numPerfect, tp = \
testSequence(trainingSet,
nTrainingReps = 20,
numberOfCols = trainingSet[0][0][0].size,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 6,
permanenceInc = .4,
permanenceDec = .2,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 5,
activationThreshold = 4,
doPooling = False,
nMultiStepPrediction=nMultiStepPrediction)
if numFailures == 0 and \
numStrictErrors == 0 and \
numPerfect == len(trainingSet[0])*(len(trainingSet[0][0]) - 1):
print "Test PASS"
return 0
else:
print "Test FAILED"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return 1
def TestH(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2],
pctShared = 0.1, seqGenMode = 'shared sequence', nTrainingReps = 2,
shouldFail = False, compareToPy = False, highOrder = False):
nFailed = 0
subsequenceStartPos = 10
assert subsequenceStartPos < sequenceLength
for numSequences in nSequences:
print "Higher order test with sequenceLength=",sequenceLength,
print "cellsPerColumn=",cellsPerColumn,"nTests=",nTests,
print "numSequences=",numSequences, "pctShared=", pctShared
for _ in range(nTests): # Test that configuration several times
trainingSet = buildTrainingSet(numSequences = numSequences,
sequenceLength = sequenceLength,
pctShared = pctShared, seqGenMode = seqGenMode,
subsequenceStartPos = subsequenceStartPos,
numCols = numCols,
minOnes = 21, maxOnes = 25)
numFailures, numStrictErrors, numPerfect, tp = \
testSequence(trainingSet,
nTrainingReps = nTrainingReps,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 12,
permanenceInc = .4,
permanenceDec = .1,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 11,
activationThreshold = 8,
doPooling = False,
shouldFail = shouldFail,
compareToPy = compareToPy,
highOrder = highOrder)
if numFailures == 0 and not shouldFail \
or numFailures > 0 and shouldFail:
print "Test PASS",
if shouldFail:
print '(should fail, and failed)'
else:
print
else:
print "Test FAILED"
nFailed = nFailed + 1
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return nFailed
def TestH11(numOnes = 3):
cellsPerColumn = 4
print "Higher order test 11 with cellsPerColumn=",cellsPerColumn
trainingSet = buildAlternatingTrainingSet(numOnes= 3)
numFailures, numStrictErrors, numPerfect, tp = \
testSequence(trainingSet,
nTrainingReps = 1,
numberOfCols = trainingSet[0][0][0].size,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 6,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 1,
activationThreshold = 1,
doPooling = False)
if numFailures == 0 and \
numStrictErrors == 0 and \
numPerfect == len(trainingSet[0])*(len(trainingSet[0][0]) - 1):
print "Test PASS"
return 0
else:
print "Test FAILED"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return 1
def TestH2a(sequenceLength, nTests, cellsPerColumn, numCols =100, nSequences =[2],
            pctShared = 0.02, seqGenMode = 'shared sequence',
            shouldFail = False):
    """Test H2a: a second training pass over the same data must not add
    (many) new segments or synapses.

    The same training set is run through three independent TPs trained with
    10, 2 and 1 repetitions.  The 1-rep and 2-rep TPs must end up with
    nearly identical segment/synapse counts (tolerance: 3 segments and
    3*15 synapses, 15 being newSynapseCount).  Returns the number of runs
    whose pass/fail outcome did not match ``shouldFail``.

    Still need to test:
    Two overlapping sequences. OK to get new segments but check that we can
    get correct high order prediction after multiple reps.
    """
    print "Test H2a - second repetition of the same sequence should not add synapses"
    nFailed = 0
    subsequenceStartPos = 10
    assert subsequenceStartPos < sequenceLength
    for numSequences in nSequences:
        print "Higher order test with sequenceLength=",sequenceLength,
        print "cellsPerColumn=",cellsPerColumn,"nTests=",nTests,"numCols=", numCols
        print "numSequences=",numSequences, "pctShared=", pctShared,
        print "sharing mode=", seqGenMode
        for _ in range(nTests): # Test that configuration several times
            trainingSet = buildTrainingSet(numSequences = numSequences,
                                           sequenceLength = sequenceLength,
                                           pctShared = pctShared, seqGenMode = seqGenMode,
                                           subsequenceStartPos = subsequenceStartPos,
                                           numCols = numCols,
                                           minOnes = 21, maxOnes = 25)
            # Reference run: 10 training repetitions (note the lower
            # initialPerm and nonzero permanenceDec here only).
            print "============== 10 ======================"
            numFailures3, numStrictErrors3, numPerfect3, tp3 = \
                testSequence(trainingSet,
                             nTrainingReps = 10,
                             numberOfCols = numCols,
                             cellsPerColumn = cellsPerColumn,
                             initialPerm = .4,
                             connectedPerm = .7,
                             minThreshold = 12,
                             permanenceInc = .1,
                             permanenceDec = 0.1,
                             permanenceMax = 1,
                             globalDecay = .0,
                             newSynapseCount = 15,
                             activationThreshold = 12,
                             doPooling = False,
                             shouldFail = shouldFail)
            # Run under test: 2 training repetitions.
            print "============== 2 ======================"
            numFailures, numStrictErrors, numPerfect, tp2 = \
                testSequence(trainingSet,
                             nTrainingReps = 2,
                             numberOfCols = numCols,
                             cellsPerColumn = cellsPerColumn,
                             initialPerm = .8,
                             connectedPerm = .7,
                             minThreshold = 12,
                             permanenceInc = .1,
                             permanenceDec = 0,
                             permanenceMax = 1,
                             globalDecay = .0,
                             newSynapseCount = 15,
                             activationThreshold = 12,
                             doPooling = False,
                             shouldFail = shouldFail)
            # Baseline run: a single training repetition.
            print "============== 1 ======================"
            numFailures1, numStrictErrors1, numPerfect1, tp1 = \
                testSequence(trainingSet,
                             nTrainingReps = 1,
                             numberOfCols = numCols,
                             cellsPerColumn = cellsPerColumn,
                             initialPerm = .8,
                             connectedPerm = .7,
                             minThreshold = 12,
                             permanenceInc = .1,
                             permanenceDec = 0,
                             permanenceMax = 1,
                             globalDecay = .0,
                             newSynapseCount = 15,
                             activationThreshold = 12,
                             doPooling = False,
                             shouldFail = shouldFail)
            # Check that training with a second pass did not result in more synapses
            segmentInfo1 = tp1.getSegmentInfo()
            segmentInfo2 = tp2.getSegmentInfo()
            if (abs(segmentInfo1[0] - segmentInfo2[0]) > 3) or \
               (abs(segmentInfo1[1] - segmentInfo2[1]) > 3*15) :
                print "Training twice incorrectly resulted in too many segments or synapses"
                print segmentInfo1
                print segmentInfo2
                print tp3.getSegmentInfo()
                tp3.trimSegments()
                print tp3.getSegmentInfo()
                print "Failures for 1, 2, and N reps"
                print numFailures1, numStrictErrors1, numPerfect1
                print numFailures, numStrictErrors, numPerfect
                print numFailures3, numStrictErrors3, numPerfect3
                # Force the run to be counted as a failure below.
                numFailures += 1
            if numFailures == 0 and not shouldFail \
               or numFailures > 0 and shouldFail:
                print "Test PASS",
                if shouldFail:
                    print '(should fail, and failed)'
                else:
                    print
            else:
                print "Test FAILED"
                nFailed = nFailed + 1
                print "numFailures=", numFailures
                print "numStrictErrors=", numStrictErrors
                print "numPerfect=", numPerfect
    return nFailed
def TestP(sequenceLength, nTests, cellsPerColumn, numCols =300, nSequences =[2],
pctShared = 0.1, seqGenMode = 'shared subsequence', nTrainingReps = 2):
nFailed = 0
newSynapseCount = 7
activationThreshold = newSynapseCount - 2
minOnes = 1.5 * newSynapseCount
maxOnes = .3 * numCols / nTrainingReps
for numSequences in nSequences:
print "Pooling test with sequenceLength=",sequenceLength,
print 'numCols=', numCols,
print "cellsPerColumn=",cellsPerColumn,"nTests=",nTests,
print "numSequences=",numSequences, "pctShared=", pctShared,
print "nTrainingReps=", nTrainingReps, "minOnes=", minOnes,
print "maxOnes=", maxOnes
for _ in range(nTests): # Test that configuration several times
minOnes = 1.5 * newSynapseCount
trainingSet = buildTrainingSet(numSequences =numSequences,
sequenceLength = sequenceLength,
pctShared = pctShared, seqGenMode = seqGenMode,
subsequenceStartPos = 10,
numCols = numCols,
minOnes = minOnes, maxOnes = maxOnes)
numFailures, numStrictErrors, numPerfect, tp = \
testSequence(trainingSet,
nTrainingReps = nTrainingReps,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = newSynapseCount,
activationThreshold = activationThreshold,
doPooling = True)
if numFailures == 0 and \
numStrictErrors == 0 and \
numPerfect == numSequences*(sequenceLength - 1):
print "Test PASS"
else:
print "Test FAILED"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
nFailed = nFailed + 1
return nFailed
def TestHL0a(numOnes = 5):
cellsPerColumn = 4
newSynapseCount = 5
activationThreshold = newSynapseCount
print "HiLo test 0a with cellsPerColumn=",cellsPerColumn
trainingSet, testSet = buildHL0aTrainingSet()
numCols = trainingSet[0][0].size
numFailures, numStrictErrors, numPerfect, tp = \
testSequence([trainingSet],
nTrainingReps = 1,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .2,
connectedPerm = .7,
permanenceInc = .2,
permanenceDec = 0.05,
permanenceMax = 1,
globalDecay = .0,
minThreshold = activationThreshold,
newSynapseCount = newSynapseCount,
activationThreshold = activationThreshold,
pamLength = 2,
doPooling = False,
testSequences = testSet)
tp.trimSegments()
retAfter = tp.getSegmentInfo()
print retAfter[0], retAfter[1]
if retAfter[0] > 20:
print "Too many segments"
numFailures += 1
if retAfter[1] > 100:
print "Too many synapses"
numFailures += 1
if numFailures == 0:
print "Test HL0a ok"
return 0
else:
print "Test HL0a failed"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return 1
def TestHL0b(numOnes = 5):
cellsPerColumn = 4
newSynapseCount = 5
activationThreshold = newSynapseCount
print "HiLo test 0b with cellsPerColumn=",cellsPerColumn
trainingSet, testSet = buildHL0bTrainingSet()
numCols = trainingSet[0][0].size
print "numCols=", numCols
numFailures, numStrictErrors, numPerfect, tp = \
testSequence([trainingSet],
nTrainingReps = 1,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .2,
connectedPerm = .7,
permanenceInc = .2,
permanenceDec = 0.05,
permanenceMax = 1,
globalDecay = .0,
minThreshold = activationThreshold,
newSynapseCount = newSynapseCount,
activationThreshold = activationThreshold,
doPooling = False,
testSequences = testSet)
tp.trimSegments()
retAfter = tp.getSegmentInfo()
tp.printCells()
if numFailures == 0:
print "Test HL0 ok"
return 0
else:
print "Test HL0 failed"
print "numFailures=", numFailures
print "numStrictErrors=", numStrictErrors
print "numPerfect=", numPerfect
return 1
def TestHL(sequenceLength, nTests, cellsPerColumn, numCols =200, nSequences =[2],
           pctShared = 0.1, seqGenMode = 'shared subsequence', nTrainingReps = 3,
           noiseModel = 'xor binomial in learning only', noiseLevel = 0.1,
           hiloOn = True):
    """HiLo test driver: trains on (optionally noisy) pattern sets and
    requires a fully perfect result for every run.

    Returns the number of failed runs.
    """
    nFailed = 0
    newSynapseCount = 8
    activationThreshold = newSynapseCount
    minOnes = 1.5 * newSynapseCount
    maxOnes = 0.3 * numCols / nTrainingReps
    if hiloOn == False:
        # NOTE(review): this assignment is never read -- testSequence below is
        # always called with minThreshold = activationThreshold, so the hiloOn
        # flag currently has no effect on the run.  Confirm intent before
        # changing; callers (e.g. HL6) pass hiloOn=False expecting a difference.
        minThreshold = 0.9
    for numSequences in nSequences:
        print "Hilo test with sequenceLength=", sequenceLength,
        print "cellsPerColumn=", cellsPerColumn, "nTests=", nTests,
        print "numSequences=", numSequences, "pctShared=", pctShared,
        print "nTrainingReps=", nTrainingReps, "minOnes=", minOnes,
        print "maxOnes=", maxOnes,
        print 'noiseModel=', noiseModel, 'noiseLevel=', noiseLevel
        for _ in range(nTests): # Test that configuration several times
            # Re-set each run; same value as computed above the loop.
            minOnes = 1.5 * newSynapseCount
            trainingSet = buildTrainingSet(numSequences =numSequences,
                                           sequenceLength = sequenceLength,
                                           pctShared = pctShared, seqGenMode = seqGenMode,
                                           subsequenceStartPos = 10,
                                           numCols = numCols,
                                           minOnes = minOnes, maxOnes = maxOnes)
            numFailures, numStrictErrors, numPerfect, tp = \
                testSequence(trainingSet,
                             nTrainingReps = nTrainingReps,
                             numberOfCols = numCols,
                             cellsPerColumn = cellsPerColumn,
                             initialPerm = .2,
                             connectedPerm = .7,
                             minThreshold = activationThreshold,
                             newSynapseCount = newSynapseCount,
                             activationThreshold = activationThreshold,
                             permanenceInc = .2,
                             permanenceDec = 0.05,
                             permanenceMax = 1,
                             globalDecay = .0,
                             doPooling = False,
                             noiseModel = noiseModel,
                             noiseLevel = noiseLevel)
            # Pass requires a fully clean, fully perfect run.
            if numFailures == 0 and \
               numStrictErrors == 0 and \
               numPerfect == numSequences*(sequenceLength - 1):
                print "Test PASS"
            else:
                print "Test FAILED"
                print "numFailures=", numFailures
                print "numStrictErrors=", numStrictErrors
                print "numPerfect=", numPerfect
                nFailed = nFailed + 1
    return nFailed
def worker(x):
"""Worker function to use in parallel hub capacity test below."""
cellsPerColumn, numSequences = x[0], x[1]
nTrainingReps = 1
sequenceLength = 10
numCols = 200
print 'Started', cellsPerColumn, numSequences
seqGenMode = 'shared subsequence, one pattern'
subsequenceStartPos = 5
trainingSet = buildTrainingSet(numSequences = numSequences,
sequenceLength = sequenceLength,
pctShared = .1, seqGenMode = seqGenMode,
subsequenceStartPos = subsequenceStartPos,
numCols = numCols,
minOnes = 21, maxOnes = 25)
numFailures1, numStrictErrors1, numPerfect1, atHub, tp = \
testSequence(trainingSet,
nTrainingReps = nTrainingReps,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 8,
activationThreshold = 8,
doPooling = False,
shouldFail = False,
predJustAfterHubOnly = 5)
seqGenMode = 'no shared subsequence'
trainingSet = buildTrainingSet(numSequences = numSequences,
sequenceLength = sequenceLength,
pctShared = 0, seqGenMode = seqGenMode,
subsequenceStartPos = 0,
numCols = numCols,
minOnes = 21, maxOnes = 25)
numFailures2, numStrictErrors2, numPerfect2, tp = \
testSequence(trainingSet,
nTrainingReps = nTrainingReps,
numberOfCols = numCols,
cellsPerColumn = cellsPerColumn,
initialPerm = .8,
connectedPerm = .7,
minThreshold = 11,
permanenceInc = .4,
permanenceDec = 0,
permanenceMax = 1,
globalDecay = .0,
newSynapseCount = 8,
activationThreshold = 8,
doPooling = False,
shouldFail = False)
print 'Completed',
print cellsPerColumn, numSequences, numFailures1, numStrictErrors1, numPerfect1, atHub, \
numFailures2, numStrictErrors2, numPerfect2
return cellsPerColumn, numSequences, numFailures1, numStrictErrors1, numPerfect1, atHub, \
numFailures2, numStrictErrors2, numPerfect2
def hubCapacity():
    """
    Study hub capacity. Figure out how many sequences can share a pattern
    for a given number of cells per column until the system fails.
    DON'T RUN IN BUILD SYSTEM!!! (takes too long)

    Fans the (cellsPerColumn, numSequences) grid out over a 2-process pool
    running worker() above, and writes each 9-tuple result as one
    brace-delimited row to a text file.
    """
    from multiprocessing import Pool
    import itertools
    print "Hub capacity test"
    # scalar value on predictions by looking at max perm over column
    p = Pool(2)
    # Grid: cellsPerColumn in 1..8 x numSequences in 1, 201, 401, ... 1801.
    results = p.map(worker, itertools.product([1,2,3,4,5,6,7,8], xrange(1,2000,200)))
    f = open('results-numPerfect.11.22.10.txt', 'w')
    for i,r in enumerate(results):
        # Py2 print-to-file; one {a,b,...} row per grid point.
        print >>f, '{%d,%d,%d,%d,%d,%d,%d,%d,%d},' % r
    f.close()
def runTests(testLength = "short"):
    """Run the TP test battery.

    Relies on the module-level globals numUniquePatterns and nTests being
    set by the __main__ block.  Collects each sub-test's failure count in
    ``tests`` and asserts at the end if any failed.

    NOTE(review): block nesting below was reconstructed; the if True/if
    False toggles gate which (sub)sets of tests actually run -- confirm
    against version control before re-enabling a disabled branch.
    """
    # Data structure to collect results of tests
    # TODO: put numFailures, numStrictErrors and numPerfect in here for reporting
    tests = {}
    # always run this one: if that one fails, we can't do anything
    basicTest()
    print
    #---------------------------------------------------------------------------------
    # B tests: basic sequence memory.
    if testLength == "long":
        tests['B1'] = TestB1(numUniquePatterns, nTests)
        tests['B2'] = TestB2(numUniquePatterns, nTests)
        tests['B8'] = TestB7(4, nTests, cellsPerColumn = 4, name="B8")
        tests['B10'] = TestB2(numUniquePatterns, nTests, cellsPerColumn = 4,
                              name = "B10")
    # Run these always
    tests['B3'] = TestB3(numUniquePatterns, nTests)
    tests['B6'] = TestB1(numUniquePatterns, nTests,
                         cellsPerColumn = 4, name="B6")
    tests['B7'] = TestB7(numUniquePatterns, nTests)
    print
    #---------------------------------------------------------------------------------
    # H tests: higher-order sequences.
    #print "Test H11"
    #tests['H11'] = TestH11()
    if True:
        print "Test H0"
        tests['H0'] = TestH0(numOnes = 5)
        print "Test H2"
        #tests['H2'] = TestH(numUniquePatterns, nTests, cellsPerColumn = 4,
        #                    nTrainingReps = numUniquePatterns, compareToPy = False)
        print "Test H3"
        tests['H3'] = TestH(numUniquePatterns, nTests,
                            numCols = 200,
                            cellsPerColumn = 20,
                            pctShared = 0.3, nTrainingReps=numUniquePatterns,
                            compareToPy = False,
                            highOrder = True)
        print "Test H4" # Produces 3 false positives, but otherwise fine.
        # TODO: investigate initial false positives?
        tests['H4'] = TestH(numUniquePatterns, nTests,
                            cellsPerColumn = 20,
                            pctShared = 0.1,
                            seqGenMode='shared subsequence at beginning')
    if True:
        print "Test H0 with multistep prediction"
        tests['H0_MS'] = TestH0(numOnes = 5, nMultiStepPrediction=2)
    if True:
        print "Test H1" # - Should Fail
        tests['H1'] = TestH(numUniquePatterns, nTests,
                            cellsPerColumn = 1, nTrainingReps = 1,
                            shouldFail = True)
        # Also fails in --long mode. See H2 above
        #print "Test H2a"
        #tests['H2a'] = TestH2a(numUniquePatterns,
        #                       nTests, pctShared = 0.02, numCols = 300, cellsPerColumn = 4)
    if False:
        # Disabled exploratory shuffle/hub-capacity variants.
        print "Test H5" # make sure seqs are good even with shuffling, fast learning
        tests['H5'] = TestH(numUniquePatterns, nTests,
                            cellsPerColumn = 10,
                            pctShared = 0.0,
                            seqGenMode='shuffle, no shared subsequence')
        print "Test H6" # should work
        tests['H6'] = TestH(numUniquePatterns, nTests,
                            cellsPerColumn = 10,
                            pctShared = 0.4,
                            seqGenMode='shuffle, shared subsequence')
        # Try with 2 sequences, then 3 sequences interleaved so that there is
        # always a shared pattern, but it belongs to 2 different sequences each
        # time!
        #print "Test H7"
        #tests['H7'] = TestH(numUniquePatterns, nTests,
        #                    cellsPerColumn = 10,
        #                    pctShared = 0.4,
        #                    seqGenMode='shuffle, shared subsequence')
        # tricky: if start predicting in middle of subsequence, several predictions
        # are possible
        #print "Test H8"
        #tests['H8'] = TestH(numUniquePatterns, nTests,
        #                    cellsPerColumn = 10,
        #                    pctShared = 0.4,
        #                    seqGenMode='shuffle, shared subsequence')
        print "Test H9" # plot hub capacity
        tests['H9'] = TestH(numUniquePatterns, nTests,
                            cellsPerColumn = 10,
                            pctShared = 0.4,
                            seqGenMode='shuffle, shared subsequence')
        #print "Test H10" # plot
        #tests['H10'] = TestH(numUniquePatterns, nTests,
        #                     cellsPerColumn = 10,
        #                     pctShared = 0.4,
        #                     seqGenMode='shuffle, shared subsequence')
    print
    #---------------------------------------------------------------------------------
    # P tests: pooling.  P1/P2 disabled; P3/P4 run always.
    if False:
        print "Test P1"
        tests['P1'] = TestP(numUniquePatterns, nTests,
                            cellsPerColumn = 4,
                            pctShared = 0.0,
                            seqGenMode = 'no shared subsequence',
                            nTrainingReps = 3)
    if False:
        print "Test P2"
        tests['P2'] = TestP(numUniquePatterns, nTests,
                            cellsPerColumn = 4,
                            pctShared = 0.0,
                            seqGenMode = 'no shared subsequence',
                            nTrainingReps = 5)
    print "Test P3"
    tests['P3'] = TestP(numUniquePatterns, nTests,
                        cellsPerColumn = 4,
                        pctShared = 0.0,
                        seqGenMode = 'no shared subsequence',
                        nSequences = [2] if testLength == 'short' else [2,5],
                        nTrainingReps = 5)
    print "Test P4"
    tests['P4'] = TestP(numUniquePatterns, nTests,
                        cellsPerColumn = 4,
                        pctShared = 0.0,
                        seqGenMode = 'shared subsequence',
                        nSequences = [2] if testLength == 'short' else [2,5],
                        nTrainingReps = 5)
    print
    #---------------------------------------------------------------------------------
    # HL tests: HiLo.  Only HL0a is enabled.
    if True:
        print "Test HL0a"
        tests['HL0a'] = TestHL0a(numOnes = 5)
    if False:
        # NOTE(review): the TestHL calls below pass numUniquePatterns= and
        # doResets= keywords, which do not match TestHL's signature
        # (sequenceLength, ..., hiloOn).  They would raise TypeError if this
        # branch were enabled -- fix the call sites before re-enabling.
        print "Test HL0b"
        tests['HL0b'] = TestHL0b(numOnes = 5)
        print "Test HL1"
        tests['HL1'] = TestHL(sequenceLength = 20,
                              nTests = nTests,
                              numCols = 100,
                              nSequences = [1],
                              nTrainingReps = 3,
                              cellsPerColumn = 1,
                              seqGenMode = 'no shared subsequence',
                              noiseModel = 'xor binomial in learning only',
                              noiseLevel = 0.1,
                              doResets = False)
        print "Test HL2"
        tests['HL2'] = TestHL(numUniquePatterns = 20,
                              nTests = nTests,
                              numCols = 200,
                              nSequences = [1],
                              nTrainingReps = 3,
                              cellsPerColumn = 1,
                              seqGenMode = 'no shared subsequence',
                              noiseModel = 'xor binomial in learning only',
                              noiseLevel = 0.1,
                              doResets = False)
        print "Test HL3"
        tests['HL3'] = TestHL(numUniquePatterns = 30,
                              nTests = nTests,
                              numCols = 200,
                              nSequences = [2],
                              pctShared = 0.66,
                              nTrainingReps = 3,
                              cellsPerColumn = 1,
                              seqGenMode = 'shared subsequence',
                              noiseModel = None,
                              noiseLevel = 0.0,
                              doResets = True)
        print "Test HL4"
        tests['HL4'] = TestHL(numUniquePatterns = 30,
                              nTests = nTests,
                              numCols = 200,
                              nSequences = [2],
                              pctShared = 0.66,
                              nTrainingReps = 3,
                              cellsPerColumn = 1,
                              seqGenMode = 'shared subsequence',
                              noiseModel = None,
                              noiseLevel = 0.0,
                              doResets = False)
        print "Test HL5"
        tests['HL5'] = TestHL(numUniquePatterns = 30,
                              nTests = nTests,
                              numCols = 200,
                              nSequences = [2],
                              pctShared = 0.66,
                              nTrainingReps = 3,
                              cellsPerColumn = 1,
                              seqGenMode = 'shared subsequence',
                              noiseModel = 'xor binomial in learning only',
                              noiseLevel = 0.1,
                              doResets = False)
        print "Test HL6"
        tests['HL6'] = nTests - TestHL(numUniquePatterns = 20,
                                       nTests = nTests,
                                       numCols = 200,
                                       nSequences = [1],
                                       nTrainingReps = 3,
                                       cellsPerColumn = 1,
                                       seqGenMode = 'no shared subsequence',
                                       noiseModel = 'xor binomial in learning only',
                                       noiseLevel = 0.1,
                                       doResets = True,
                                       hiloOn = False)
    print
    #---------------------------------------------------------------------------------
    # Sum failure counts and report.
    nFailures = 0
    for k,v in tests.iteritems():
        nFailures = nFailures + v
    if nFailures > 0: # 1 to account for H1
        print "There are failed tests"
        print "Test\tn failures"
        for k,v in tests.iteritems():
            print k, "\t", v
        assert 0
    else:
        print "All tests pass"
    #---------------------------------------------------------------------------------
    # Keep
    # Disabled hotshot profiling harness for TestB2; kept for manual use.
    if False:
        import hotshot
        import hotshot.stats
        prof = hotshot.Profile("profile.prof")
        prof.runcall(TestB2, numUniquePatterns=100, nTests=2)
        prof.close()
        stats = hotshot.stats.load("profile.prof")
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(50)
if __name__=="__main__":
    # Entry point: parse ad-hoc command line flags, set the module-level
    # test-severity globals, and run the battery against the Python and/or
    # C++ TP implementations.  Relies on module globals TEST_CPP_TP, TP,
    # TP10X2 and (presumably) a default SEED defined earlier in the file
    # -- TODO confirm SEED has a default, else a run without --seed fails.
    if not TEST_CPP_TP:
        print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
        print "!! WARNING: C++ TP testing is DISABLED until it can be updated."
        print "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    # Three different test lengths are passed in through the command line.
    # Developer tests use --short. Autobuild does not pass in anything.
    # Acceptance tests pass in --long. testLength reflects these possibilities
    # as "autobuild", "short", and "long"
    testLength = "autobuild"
    # Scan command line arguments to see what to do for the seed
    # TODO: make default be a random seed, once we're sure it will pass reliably!
    for i,arg in enumerate(sys.argv):
        if 'seed' in arg:
            try:
                # used specified seed
                SEED = int(sys.argv[i+1])
            except ValueError as e:
                # random seed
                SEED = numpy.random.randint(100)
        if 'verbosity' in arg:
            VERBOSITY = int(sys.argv[i+1])
        if 'help' in arg:
            print "TPTest.py --short|long --seed number|'rand' --verbosity number"
            sys.exit()
        if "short" in arg:
            testLength = "short"
        if "long" in arg:
            testLength = "long"
    rgen = numpy.random.RandomState(SEED) # always call this rgen, NOT random
    # Setup the severity and length of the tests
    if testLength == "short":
        numUniquePatterns = 50
        nTests = 1
    elif testLength == "autobuild":
        print "Running autobuild tests"
        numUniquePatterns = 50
        nTests = 1
    elif testLength == "long":
        numUniquePatterns = 100
        nTests = 3
    print "TP tests", testLength, "numUniquePatterns=", numUniquePatterns, "nTests=", nTests,
    print "seed=", SEED
    print
    # Long runs also exercise the pure-Python TP implementation.
    if testLength == "long":
        print 'Testing Python TP'
        TPClass = TP
        runTests(testLength)
    if testLength != 'long':
        checkSynapseConsistency = False
    else:
        # Setting this to True causes test to take way too long
        # Temporarily turned off so we can investigate
        checkSynapseConsistency = False
    # C++ TP run (currently gated off by TEST_CPP_TP above).
    if TEST_CPP_TP:
        print 'Testing C++ TP'
        TPClass = TP10X2
        runTests(testLength)
| agpl-3.0 |
eeriks/velo.lv | velo/payment/forms.py | 1 | 19328 | from django import forms
from django.contrib import messages
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from crispy_forms.layout import Layout, Div, HTML, Field
from crispy_forms.helper import FormHelper
from velo.payment.models import ActivePaymentChannel, Payment, DiscountCode
from velo.payment.utils import create_application_invoice, create_bank_transaction, create_team_invoice, \
approve_payment
from velo.payment.widgets import PaymentTypeWidget, DoNotRenderWidget
from velo.registration.models import Application
from velo.velo.mixins.forms import RequestKwargModelFormMixin, GetClassNameMixin
from velo.velo.utils import load_class
class ApplicationPayUpdateForm(GetClassNameMixin, RequestKwargModelFormMixin, forms.ModelForm):
    """Payment step of the application flow.

    Collects terms/insurance confirmations, an optional discount code,
    the payment channel and (for invoices) company billing details, then
    triggers payment creation in _post_clean.
    """
    # Mandatory legal confirmations.
    accept_terms = forms.BooleanField(label=_("I confirm, that: the competition organizers are not responsible for possible injuries of participants, during the competition; my health condition corresponds to the selected distance; I will definitely use a fastened helmet and will observe road traffic regulations and competition regulations; I agree with the conditions for participation in the competition, mentioned in the regulations; I am informed, that the paid participation fee will not be returned and the participant’s starting number shall not be transferred to any other person."),
                                      required=True)
    accept_inform_participants = forms.BooleanField(label=_("I will inform all registered participants about rules."),
                                                    required=True)
    # Label/required-ness are set dynamically in __init__ based on whether any
    # participant carries insurance; hidden otherwise.
    accept_insurance = forms.BooleanField(label="", required=False)
    discount_code = forms.CharField(label=_("Discount code"), required=False)
    # Choices populated in __init__ from the competition's active channels.
    payment_type = forms.ChoiceField(choices=(), label="", widget=PaymentTypeWidget)
    prepend = 'payment_'
    participants = None   # queryset injected via kwargs in __init__
    success_url = None    # set in _post_clean; consumed by the view

    class Meta:
        model = Application
        fields = ('discount_code', 'company_name', 'company_vat', 'company_regnr', 'company_address', 'company_juridical_address',
                  'invoice_show_names', 'donation')
        widgets = {
            'donation': DoNotRenderWidget, # We will add field manually
        }

    def _post_clean(self):
        """After model validation, create the payment/invoice/bank redirect
        and set self.success_url for the view to redirect to."""
        super()._post_clean()
        if not bool(self.errors):
            try:
                instance = self.instance
                instance.set_final_price() # if donation have changed, then we need to recalculate,
                # because instance is not yet saved and it means,
                # that this function on model is not yet run.
                if instance.final_price == 0:
                    # Nothing to pay: auto-approve a zero payment.
                    payment = Payment.objects.create(content_object=instance,
                                                     total=instance.final_price,
                                                     status=Payment.STATUSES.ok,
                                                     competition=instance.competition)
                    approve_payment(payment, self.request.user, self.request)
                    self.success_url = reverse('application_ok', kwargs={'slug': instance.code})
                else:
                    active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
                    if active_payment_type.payment_channel.is_bill:
                        # Invoice channel: create + email the invoice.
                        create_application_invoice(instance, active_payment_type)
                        self.success_url = reverse('application_ok', kwargs={'slug': instance.code})
                        messages.success(self.request,
                                         _('Invoice successfully created and sent to %(email)s') % {'email': instance.email})
                    else:
                        # Bank channel: redirect user to the bank's page.
                        self.success_url = create_bank_transaction(instance, active_payment_type, self.request)
            except:
                # NOTE(review): bare except hides the real error from the user
                # and from developers alike.
                # TODO We need to catch exception and log it to sentry
                self._errors['payment_type'] = self.error_class([_("Error in connection with bank. Try again later.")])

    def save(self, commit=True):
        """Persist the application, stashing the submitted form data (minus
        donation) in instance.params; the discount code is stored by value."""
        instance = super(ApplicationPayUpdateForm, self).save(commit=False)
        if self.request:
            instance.updated_by = self.request.user
        if instance.payment_status < Application.PAY_STATUS.waiting:
            instance.payment_status = Application.PAY_STATUS.waiting
        instance.params = dict(self.cleaned_data)
        instance.params.pop("donation", None)
        # clean_discount_code returns a DiscountCode object; keep only its code
        # string so params stays JSON-serializable.
        discount_code = instance.params.pop("discount_code", None)
        if discount_code:
            instance.params.update({'discount_code': discount_code.code})
        if commit:
            instance.save()
        return instance

    def clean_donation(self):
        donation = self.cleaned_data.get('donation', 0.00)
        # If person have already taken invoice, then we do not allow changing donation amount
        if self.instance.invoice:
            return float(self.instance.donation)
        else:
            return donation

    def clean_discount_code(self):
        """Resolve the entered code to a DiscountCode instance; unknown or
        empty codes silently become None (no hard validation error)."""
        code = self.cleaned_data.get('discount_code', "")
        if not code:
            return None
        else:
            if isinstance(code, DiscountCode):
                return code
            try:
                return DiscountCode.objects.get(code=code)
            except:
                return None

    def clean(self):
        """Cross-field validation: discount codes exclude invoices, and the
        invoice channel requires the company billing fields."""
        if not self.cleaned_data.get('donation', ''):
            self.cleaned_data.update({'donation': 0.00})
        super(ApplicationPayUpdateForm, self).clean()
        try:
            active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
            if self.data.get("discount_code", None) and active_payment_type.payment_channel.is_bill:
                active_payment_type = None
                self._errors.update({'payment_type': [_("Invoice is not available with discount code."), ]})
        except:
            # Missing/invalid payment_type: treated as "no channel selected".
            active_payment_type = None
        if active_payment_type and active_payment_type.payment_channel.is_bill: # Hard coded bill ids.
            if self.cleaned_data.get('company_name', '') == '':
                self._errors.update({'company_name': [_("Company Name required."), ]})
            if self.cleaned_data.get('company_regnr', '') == '':
                self._errors.update({'company_regnr': [_("Company registration number required."), ]})
            if self.cleaned_data.get('company_address', '') == '':
                self._errors.update({'company_address': [_("Company Address required."), ]})
        return self.cleaned_data

    def __init__(self, *args, **kwargs):
        # NOTE(review): assumes the 'participants' kwarg is always provided
        # (a queryset); self.participants.exclude(...) below would raise if
        # it were None -- confirm against callers.
        self.participants = kwargs.pop('participants', None)
        super(ApplicationPayUpdateForm, self).__init__(*args, **kwargs)
        insured_participants = self.participants.exclude(insurance=None)
        if insured_participants:
            # At least one participant bought insurance: make the insurer's
            # terms checkbox mandatory and show its label + terms link.
            self.fields['accept_insurance'].required = True
            insurance_company = insured_participants[0].insurance.insurance_company
            terms_doc = "<a href='%s' target='_blank'>%s</a>" % (insurance_company.terms_doc.url, _("Regulation")) if insurance_company.terms_doc else ""
            self.fields['accept_insurance'].label = mark_safe("%s %s" % (insurance_company.term, terms_doc))
        else:
            self.fields['accept_insurance'].widget = forms.HiddenInput()
        now = timezone.now()
        competition = self.instance.competition
        checkboxes = (
            'accept_terms',
            'accept_inform_participants',
            'accept_insurance',
        )
        # Competition-specific processing hook may contribute extra checkboxes.
        if competition.processing_class:
            _class = load_class(competition.processing_class)
            processing = _class(competition=competition)
            if hasattr(processing, 'payment_additional_checkboxes'):
                for key, field in processing.payment_additional_checkboxes(application=self.instance):
                    self.fields[key] = field
                    checkboxes += (key,)
        # Only channels active right now are offered.
        payments = competition.activepaymentchannel_set.filter(from_date__lte=now, till_date__gte=now).select_related(
            'payment_channel')
        # If user have already requested bill, then we are not showing possibility to request one more.
        if self.instance.invoice:
            payments = payments.filter(payment_channel__is_bill=False)
        if self.instance.final_price == 0:
            # Nothing to pay: no channel selection needed.
            self.fields['payment_type'].required = False
            self.fields['payment_type'].widget = forms.HiddenInput()
        else:
            self.fields['payment_type'].choices = [(obj.id, obj) for obj in payments]
        if self.instance.discount_code:
            self.initial['discount_code'] = self.instance.discount_code.code
        self.fields['donation'].required = False
        # Crispy-forms layout: checkboxes, discount code, channel picker and
        # the (JS-toggled) invoice company fields.
        self.helper = FormHelper()
        self.helper.form_tag = False
        self.helper.layout = Layout(
            *checkboxes,
            Div(
                Div(
                    Div(
                        Field(
                            "discount_code",
                            css_class="input-field if--50 if--dark js-placeholder-up"
                        ),
                    ),
                    css_class="input-wrap w100 bottom-margin--15 col-s-24 col-m-12 col-l-12 col-xl-12"
                ),
                css_class="input-wrap w100 bottom-margin--15",
            ),
            Div(
                Div(
                    css_class="w100 bottom-margin--30",
                ),
                Div(
                    Div(
                        HTML(_("Payment method")) if self.instance.final_price > 0 else HTML(""),
                        css_class="fs14 fw700 uppercase w100 bottom-margin--30"
                    ),
                    Div(
                        Div(
                            Field('payment_type', wrapper_class="row row--gutters-20"),
                            css_class="w100"
                        ),
                        css_class="input-wrap w100"
                    ),
                    css_class="inner no-padding--560"
                ),
                css_class="w100 border-top"
            ),
            Div(
                Div(
                    # company_name
                    Div(
                        Div(
                            Field(
                                "company_name",
                                css_class="input-field if--50 if--dark js-placeholder-up",
                            ),
                            css_class="input-wrap w100 bottom-margin--15"
                        ),
                        css_class="col-xl-8 col-m-12 col-s-24"
                    ),
                    # company_vat
                    Div(
                        Div(
                            Field(
                                "company_vat",
                                css_class="input-field if--50 if--dark js-placeholder-up"
                            ),
                            css_class="input-wrap w100 bottom-margin--15"
                        ),
                        css_class="col-xl-8 col-m-12 col-s-24"
                    ),
                    # company_regnr
                    Div(
                        Div(
                            Field(
                                "company_regnr",
                                css_class="input-field if--50 if--dark js-placeholder-up"
                            ),
                            css_class="input-wrap w100 bottom-margin--15"
                        ),
                        css_class="col-xl-8 col-m-12 col-s-24"
                    ),
                    # company_address
                    Div(
                        Div(
                            Field(
                                "company_address",
                                css_class="input-field if--50 if--dark js-placeholder-up"
                            ),
                            css_class="input-wrap w100 bottom-margin--15"
                        ),
                        css_class="col-xl-8 col-m-12 col-s-24"
                    ),
                    # company_juridical_address
                    Div(
                        Div(
                            Field(
                                "company_juridical_address",
                                css_class="input-field if--50 if--dark js-placeholder-up"
                            ),
                            css_class="input-wrap w100 bottom-margin--15"
                        ),
                        css_class="col-xl-8 col-m-12 col-s-24"
                    ),
                    'invoice_show_names',
                    css_class=""
                ),
                css_class="invoice_fields"
            )
        )
class TeamPayForm(GetClassNameMixin, RequestKwargModelFormMixin, forms.ModelForm):
    """Payment step for a team application.

    Lets the user pick an active payment channel and, when the selected
    channel is a bill/invoice channel, supply the company details required
    to issue the invoice.
    """
    payment_type = forms.ChoiceField(choices=(), label="", widget=PaymentTypeWidget)
    prepend = 'payment_'
    # Set by _post_clean() on success; the view redirects here afterwards.
    success_url = None

    class Meta:
        model = Application
        fields = ('company_name', 'company_vat', 'company_regnr', 'company_address', 'company_juridical_address',)

    def _post_clean(self):
        """After model validation, either create an invoice (bill channels)
        or start a bank transaction, setting ``success_url`` accordingly."""
        super(TeamPayForm, self)._post_clean()
        if not bool(self.errors):
            try:
                instance = self.instance
                active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
                if active_payment_type.payment_channel.is_bill:
                    create_team_invoice(instance, active_payment_type)
                    self.success_url = reverse('account:team', kwargs={'pk2': instance.id})
                    messages.info(self.request,
                                  _('Invoice successfully created and sent to %(email)s') % {'email': instance.email})
                else:
                    self.success_url = create_bank_transaction(instance, active_payment_type, self.request)
            except Exception:
                # FIX: was a bare ``except:`` which also swallows SystemExit
                # and KeyboardInterrupt.
                # TODO We need to catch exception and log it to sentry
                self._errors['payment_type'] = self.error_class([_("Error in connection with bank. Try again later.")])

    def clean(self):
        """Require the company fields when a bill (invoice) channel is chosen."""
        super(TeamPayForm, self).clean()
        try:
            active_payment_type = ActivePaymentChannel.objects.get(id=self.cleaned_data.get('payment_type'))
        except Exception:
            # FIX: was a bare ``except:``. A missing/invalid selection simply
            # means no channel; field validation below is then skipped.
            active_payment_type = None
        if active_payment_type and active_payment_type.payment_channel.is_bill:  # Hard coded bill ids.
            if self.cleaned_data.get('company_name', '') == '':
                self._errors.update({'company_name': [_("Company Name required."), ]})
            if self.cleaned_data.get('company_regnr', '') == '':
                self._errors.update({'company_regnr': [_("Company registration number required."), ]})
            if self.cleaned_data.get('company_address', '') == '':
                self._errors.update({'company_address': [_("Company Address required."), ]})
            if self.cleaned_data.get('company_juridical_address', '') == '':
                self._errors.update({'company_juridical_address': [_("Company Juridical Address required."), ]})
        return self.cleaned_data

    def __init__(self, *args, **kwargs):
        """Populate the payment channel choices (only those active right now)
        and build the crispy-forms layout."""
        super(TeamPayForm, self).__init__(*args, **kwargs)
        now = timezone.now()
        competition = self.instance.distance.competition
        payments = competition.activepaymentchannel_set.filter(from_date__lte=now, till_date__gte=now).select_related(
            'payment_channel')
        # If user have already requested bill, then we are not showing possibility to request one more.
        if self.instance.invoice:
            payments = payments.filter(payment_channel__is_bill=False)
        self.fields['payment_type'].choices = [(obj.id, obj) for obj in payments]
        self.helper = FormHelper()
        self.helper.form_tag = False
        self.helper.layout = Layout(
            Div(
                Div(
                    css_class="w100 bottom-margin--30",
                ),
                Div(
                    Div(
                        HTML(_("Payment method")),
                        css_class="fs14 fw700 uppercase w100 bottom-margin--30"
                    ),
                    Div(
                        Div(
                            Field('payment_type', wrapper_class="row row--gutters-20"),
                            css_class="w100"
                        ),
                        css_class="input-wrap w100"
                    ),
                    css_class="inner no-padding--560"
                ),
                css_class="w100 border-top"
            ),
            Div(
                Div(
                    # company_name
                    Div(
                        Div(
                            Field(
                                "company_name",
                                css_class="input-field if--50 if--dark js-placeholder-up",
                            ),
                            css_class="input-wrap w100 bottom-margin--15"
                        ),
                        css_class="col-xl-8 col-m-12 col-s-24"
                    ),
                    # company_vat
                    Div(
                        Div(
                            Field(
                                "company_vat",
                                css_class="input-field if--50 if--dark js-placeholder-up"
                            ),
                            css_class="input-wrap w100 bottom-margin--15"
                        ),
                        css_class="col-xl-8 col-m-12 col-s-24"
                    ),
                    # company_regnr
                    Div(
                        Div(
                            Field(
                                "company_regnr",
                                css_class="input-field if--50 if--dark js-placeholder-up"
                            ),
                            css_class="input-wrap w100 bottom-margin--15"
                        ),
                        css_class="col-xl-8 col-m-12 col-s-24"
                    ),
                    # company_address
                    Div(
                        Div(
                            Field(
                                "company_address",
                                css_class="input-field if--50 if--dark js-placeholder-up"
                            ),
                            css_class="input-wrap w100 bottom-margin--15"
                        ),
                        css_class="col-xl-8 col-m-12 col-s-24"
                    ),
                    # company_juridical_address
                    Div(
                        Div(
                            Field(
                                "company_juridical_address",
                                css_class="input-field if--50 if--dark js-placeholder-up"
                            ),
                            css_class="input-wrap w100 bottom-margin--15"
                        ),
                        css_class="col-xl-8 col-m-12 col-s-24"
                    ),
                    'invoice_show_names',
                    css_class=""
                ),
                css_class="invoice_fields"
            )
        )
| gpl-3.0 |
monkeypants/MAVProxy | MAVProxy/modules/mavproxy_output.py | 10 | 4229 | #!/usr/bin/env python
'''enable run-time addition and removal of UDP clients , just like --out on the cnd line'''
''' TO USE:
output add 10.11.12.13:14550
output list
output remove 3 # to remove 3rd output
'''
from pymavlink import mavutil
from MAVProxy.modules.lib import mp_module
from MAVProxy.modules.lib import mp_util
class OutputModule(mp_module.MPModule):
    """MAVProxy module that manages additional MAVLink output connections
    at run time (the equivalent of ``--out`` on the command line)."""

    def __init__(self, mpstate):
        super(OutputModule, self).__init__(mpstate, "output", "output control", public=True)
        self.add_command('output', self.cmd_output, "output control",
                         ["<list|add|remove|sysid>"])

    def cmd_output(self, args):
        '''handle output commands'''
        if len(args) < 1 or args[0] == "list":
            self.cmd_output_list()
        elif args[0] == "add":
            if len(args) != 2:
                print("Usage: output add OUTPUT")
                return
            self.cmd_output_add(args[1:])
        elif args[0] == "remove":
            if len(args) != 2:
                print("Usage: output remove OUTPUT")
                return
            self.cmd_output_remove(args[1:])
        elif args[0] == "sysid":
            if len(args) != 3:
                print("Usage: output sysid SYSID OUTPUT")
                return
            self.cmd_output_sysid(args[1:])
        else:
            print("usage: output <list|add|remove|sysid>")

    def cmd_output_list(self):
        '''list outputs'''
        print("%u outputs" % len(self.mpstate.mav_outputs))
        for i in range(len(self.mpstate.mav_outputs)):
            conn = self.mpstate.mav_outputs[i]
            print("%u: %s" % (i, conn.address))
        if len(self.mpstate.sysid_outputs) > 0:
            print("%u sysid outputs" % len(self.mpstate.sysid_outputs))
            for sysid in self.mpstate.sysid_outputs:
                conn = self.mpstate.sysid_outputs[sysid]
                print("%u: %s" % (sysid, conn.address))

    def cmd_output_add(self, args):
        '''add new output'''
        device = args[0]
        print("Adding output %s" % device)
        try:
            conn = mavutil.mavlink_connection(device, input=False, source_system=self.settings.source_system)
            conn.mav.srcComponent = self.settings.source_component
        except Exception:
            print("Failed to connect to %s" % device)
            return
        self.mpstate.mav_outputs.append(conn)
        try:
            # Register the fd so child processes keep it open.
            mp_util.child_fd_list_add(conn.port.fileno())
        except Exception:
            # Not all connection types expose a file descriptor.
            pass

    def cmd_output_sysid(self, args):
        '''add new output for a specific MAVLink sysID'''
        sysid = int(args[0])
        device = args[1]
        print("Adding output %s for sysid %u" % (device, sysid))
        try:
            conn = mavutil.mavlink_connection(device, input=False, source_system=self.settings.source_system)
            conn.mav.srcComponent = self.settings.source_component
        except Exception:
            print("Failed to connect to %s" % device)
            return
        try:
            mp_util.child_fd_list_add(conn.port.fileno())
        except Exception:
            pass
        # Replace any existing output for this sysid.
        if sysid in self.mpstate.sysid_outputs:
            self.mpstate.sysid_outputs[sysid].close()
        self.mpstate.sysid_outputs[sysid] = conn

    def cmd_output_remove(self, args):
        '''remove an output'''
        device = args[0]
        for i in range(len(self.mpstate.mav_outputs)):
            conn = self.mpstate.mav_outputs[i]
            if str(i) == device or conn.address == device:
                print("Removing output %s" % conn.address)
                try:
                    # BUG FIX: previously called child_fd_list_add() here; a
                    # removed output's fd must be taken OFF the child fd list.
                    mp_util.child_fd_list_remove(conn.port.fileno())
                except Exception:
                    pass
                conn.close()
                self.mpstate.mav_outputs.pop(i)
                return

    def idle_task(self):
        '''called on idle; keep source ids in sync with current settings'''
        for m in self.mpstate.mav_outputs:
            m.source_system = self.settings.source_system
            m.mav.srcSystem = m.source_system
            m.mav.srcComponent = self.settings.source_component
def init(mpstate):
    """Module entry point: build and return the output-control module."""
    module = OutputModule(mpstate)
    return module
| gpl-3.0 |
TeamExodus/external_chromium_org | content/test/gpu/page_sets/gpu_rasterization_tests.py | 34 | 2394 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class GpuRasterizationTestsPage(page_module.Page):
  """Pixel test page: a blue box on a black background inside a white body."""

  def __init__(self, page_set):
    super(GpuRasterizationTestsPage, self).__init__(
      url='file://../../data/gpu/pixel_background.html',
      page_set=page_set,
      name='GpuRasterization.BlueBox')

    # Build the pixel expectations programmatically.  Each region (white page
    # body, black background, blue box) is probed just inside its four
    # corners, labelled t/r/b/l: top-left, top-right, bottom-right,
    # bottom-left.  (near, far) are the low/high pixel coordinates of the
    # probe points for that region.
    self.expectations = []
    for region, color, (near, far) in (
        ('body', [255, 255, 255], (5, 215)),
        ('background', [0, 0, 0], (30, 170)),
        ('box', [0, 0, 255], (70, 140))):
      for side, location in (('t', [near, near]),
                             ('r', [far, near]),
                             ('b', [far, far]),
                             ('l', [near, far])):
        self.expectations.append({'comment': '%s-%s' % (region, side),
                                  'color': list(color),
                                  'tolerance': 0,
                                  'location': location})
    self.test_rect = [0, 0, 220, 220]

  def RunNavigateSteps(self, action_runner):
    action_runner.NavigateToPage(self)
    action_runner.WaitForJavaScriptCondition(
        'domAutomationController._finished', timeout_in_seconds=30)
class GpuRasterizationTestsPageSet(page_set_module.PageSet):

  """ Basic test cases for GPU rasterization. """

  def __init__(self):
    super(GpuRasterizationTestsPageSet, self).__init__()
    # The set contains a single page: the blue-box pixel expectations test.
    self.AddPage(GpuRasterizationTestsPage(self))
| bsd-3-clause |
chainer/chainer | chainermn/iterators/synchronized_iterator.py | 8 | 2898 | import chainer
import numpy
class _SynchronizedIterator(chainer.dataset.iterator.Iterator):
    """Wrapper iterator whose shuffle order is identical on every process.

    Attribute access is delegated to ``actual_iterator``: ``__setattr__`` is
    overridden below, so every assignment in ``__init__`` other than
    ``actual_iterator`` itself is actually stored on the wrapped iterator.
    """

    def __init__(self, actual_iterator, communicator):
        if not hasattr(actual_iterator, 'order_sampler'):
            raise ValueError('actual_iterator must have order_sampler')
        else:
            # Bypass the delegating __setattr__ for the wrapped iterator
            # reference itself.
            super(_SynchronizedIterator, self).__setattr__(
                'actual_iterator', actual_iterator)
        # Synchronize random seed.
        # Rank 0 draws the seed and broadcasts it, so every process seeds
        # its order sampler identically.
        self.communicator = communicator
        if self.communicator.rank == 0:
            seed = numpy.random.randint(0, 2 ** 32 - 1)
        else:
            seed = None
        seed = self.communicator.bcast_obj(seed, root=0)
        # Random number generator for iterator.
        rng = numpy.random.RandomState(seed)
        self.actual_iterator.order_sampler = \
            chainer.iterators.ShuffleOrderSampler(rng)
        self.actual_iterator.reset()

    def __getattr__(self, attr_name):
        # Fallback lookup: anything not found on the wrapper comes from the
        # wrapped iterator.
        return getattr(self.actual_iterator, attr_name)

    def __setattr__(self, attr_name, value):
        # All attribute writes land on the wrapped iterator.
        setattr(self.actual_iterator, attr_name, value)

    def __next__(self):
        return self.actual_iterator.__next__()

    def serialize(self, serializer):
        self.actual_iterator.serialize(serializer)
def create_synchronized_iterator(actual_iterator, communicator):
    """Create a synchronized iterator from a Chainer iterator.
    This iterator shares the same batches on multiple processes,
    using the same random number generators to maintain the order of batch
    shuffling same.
    Here is an example situation.
    When we train a sequence-to-sequence model, where the encoder and
    the decoder is located on two different processes, we want to share
    the same batches on each process, thus inputs for the encoder and
    output teacher signals for the decoder become consistent.
    In order to use the synchronized iterator, first create the iterator
    from Chainer iterator and ChainerMN communicator::
        iterator = chainermn.iterators.create_synchronized_iterator(
            chainer.iterators.SerialIterator(
                dataset, batch_size, shuffle=True),
            communicator)
    Then you can use it as the ordinary Chainer iterator::
        updater = chainer.training.StandardUpdater(iterator, optimizer)
        trainer = training.Trainer(updater)
        trainer.run()
    The resulting iterator shares the same shuffling order among processes
    in the specified communicator.
    Args:
        actual_iterator: Chainer iterator
            (e.g., ``chainer.iterators.SerialIterator``).
        communicator: ChainerMN communicator.
    Returns:
        The synchronized iterator based on ``actual_iterator``.
    """
    # Emit Chainer's experimental-API warning before wrapping.
    chainer.utils.experimental(
        'chainermn.iterators.create_synchronized_iterator')
    return _SynchronizedIterator(actual_iterator, communicator)
| mit |
extremetempz/Wingray-Kernel | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# Require exactly one command line argument: the object file to check.
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)

# The readelf binary can be overridden via the READELF environment variable.
readelf = os.getenv("READELF", "readelf")

# Matches a function header line: "<name>: [0xSTART-0xEND]".
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches an unwind region line carrying its length: "... rlen=N".
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    """Verify that the unwind region lengths of one function sum to its
    instruction slot count; report and count a mismatch.

    NOTE(review): relies on the module-level ``start``/``end`` of the
    current function for the fallback name, and increments the global
    ``num_errors`` counter.
    """
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
# Running totals over the readelf unwind dump.
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # New function header: first validate the function we just finished.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # ia64 bundles hold 3 instruction slots per 16 bytes.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Validate the last function in the file.
check_func(func, slots, rlen_sum)
if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
pshowalter/solutions-geoprocessing-toolbox | military_aspects_of_weather/scripts/MultidimensionSupplementalTools/MultidimensionSupplementalTools/Scripts/mds/netcdf/convention/coordinate.py | 2 | 7086 | # -*- coding: utf-8 -*-
import mds.ordered_set
import convention
class Coordinate(convention.Convention):
    """
    This class implements the `Coordinate Attribute Convention`_.

    .. _Coordinate Attribute Convention: http://www.unidata.ucar.edu/software/netcdf-java/reference/CoordinateAttributes.html
    """

    @staticmethod
    def conforms(
            dataset):
        # The convention is advertised through the dataset's global
        # conventions attribute(s).
        return "_Coordinates" in convention.Convention.convention_names(dataset)

    def __init__(self,
            dataset,
            filter_out_nd_coordinates):
        convention.Convention.__init__(self, dataset, filter_out_nd_coordinates)

    def coordinate_variable_names(self,
            variable_name):
        """Return the ordered set of dimension variables of *variable_name*
        that are coordinate axis variables."""
        assert variable_name in self.dataset.variables, variable_name
        result = mds.ordered_set.OrderedSet()
        variable = self.dataset.variables[variable_name]
        dimension_names = variable.dimensions
        for dimension_name in dimension_names:
            # TODO Not every dimension has a corresponding variable.
            if dimension_name in self.dataset.variables:
                if self.is_coordinate_axis_variable(dimension_name):
                    result.add(dimension_name)
        return result

    def is_coordinate_variable(self,
            variable_name):
        """A coordinate variable is 1-D and either named after its dimension
        or aliased to one via _CoordinateAliasForDimension."""
        assert variable_name in self.dataset.variables, variable_name
        variable = self.dataset.variables[variable_name]
        return len(variable.dimensions) == 1 and (variable.dimensions[0] ==
            variable_name or hasattr(variable, "_CoordinateAliasForDimension"))

    def is_x_dimension_variable(self,
            variable):
        # X axis: projection x ("GeoX") or longitude ("Lon").
        result = False
        if hasattr(variable, "_CoordinateAxisType") and \
            variable._CoordinateAxisType in ["GeoX", "Lon"]:
            result = True
        return result

    def is_y_dimension_variable(self,
            variable):
        # Y axis: projection y ("GeoY") or latitude ("Lat").
        result = False
        if hasattr(variable, "_CoordinateAxisType") and \
            variable._CoordinateAxisType in ["GeoY", "Lat"]:
            result = True
        return result

    def is_time_dimension_variable(self,
            variable):
        # Accepts either a variable name or a variable object.
        variable = self.dataset.variables[variable] if isinstance(variable,
            basestring) else variable
        return self.is_time_dimension(variable)

    def is_data_variable(self,
            variable_name):
        """A data variable is anything that is not part of the coordinate
        machinery (and, optionally, not dependent on n-D coordinates)."""
        assert variable_name in self.dataset.variables, variable_name
        return not ((
            self.filter_out_nd_coordinates and
            self.depends_on_nd_coordinate_variable(variable_name)
            ) or \
            self.is_coordinate_variable(variable_name) or \
            self.is_coordinate_transform_variable(variable_name) or \
            self.is_coordinate_axis_variable(variable_name))

    def is_time_dimension(self,
            variable):
        # Time axis: model run time ("RunTime") or observation time ("Time").
        result = False
        if hasattr(variable, "_CoordinateAxisType") and \
            variable._CoordinateAxisType in ["RunTime", "Time"]:
            result = True
        return result

    def is_listed_in_a_coordinate_axes_attribute(self,
            variable_name):
        # True when any variable names *variable_name* in its
        # space-separated _CoordinateAxes attribute.
        for variable in self.dataset.variables.itervalues():
            if hasattr(variable, "_CoordinateAxes"):
                if variable_name in variable._CoordinateAxes.split():
                    return True
        return False

    def is_coordinate_axis_variable(self,
            variable_name):
        """A coordinate axis variable carries one of the axis attributes,
        is a coordinate variable, or is referenced by a _CoordinateAxes
        attribute somewhere in the dataset."""
        assert variable_name in self.dataset.variables, variable_name
        variable = self.dataset.variables[variable_name]
        return (
            hasattr(variable, "_CoordinateAxisType") or \
            hasattr(variable, "_CoordinateAliasForDimension") or \
            hasattr(variable, "_CoordinateZisPositive")) or \
            self.is_coordinate_variable(variable_name) or \
            self.is_listed_in_a_coordinate_axes_attribute(variable_name)

    def is_coordinate_system_variable(self,
            variable_name):
        assert variable_name in self.dataset.variables, variable_name
        variable = self.dataset.variables[variable_name]
        return (hasattr(variable, "_CoordinateTransforms") or \
            hasattr(variable, "_CoordinateSystemFor")) or \
            variable_name in self.variable_attribute_values(
                "_CoordinateSystems")

    def is_coordinate_transform_variable(self,
            variable_name):
        assert variable_name in self.dataset.variables, variable_name
        variable = self.dataset.variables[variable_name]
        return (hasattr(variable, "_CoordinateTransformType") or \
            hasattr(variable, "_CoordinateAxisTypes")) or \
            variable_name in self.variable_attribute_values(
                "_CoordinateTransforms")

    def dependent_variable_names(self,
            variable_name):
        """Return variables tied to *variable_name*: its declared coordinate
        axes plus any coordinate transform variables whose axis types/names
        are a subset of this variable's axes."""
        assert variable_name in self.dataset.variables
        # TODO Hackish.
        # See if a coordinate transform variable is associated with the
        # data variable.
        variable = self.dataset.variables[variable_name]
        dimension_names = variable.dimensions
        coordinate_axis_types1 = []
        for dimension_name in dimension_names:
            if dimension_name in self.dataset.variables:
                dimension_variable = self.dataset.variables[dimension_name]
                if hasattr(dimension_variable, "_CoordinateAxisType"):
                    coordinate_axis_types1.append(
                        dimension_variable._CoordinateAxisType)
        result = mds.OrderedSet()
        coordinate_axes1 = []
        if hasattr(variable, "_CoordinateAxes"):
            coordinate_axes1 = variable._CoordinateAxes.split()
            for value in coordinate_axes1:
                result.add(value)
        # See if there is a coordinate transform variable with the same
        # coordinate axis types.
        coordinate_transform_variable_names = [name for name in
            self.dataset.variables.keys() if
                self.is_coordinate_transform_variable(name)]
        for coordinate_transform_variable_name in \
            coordinate_transform_variable_names:
            coordinate_transform_variable = self.dataset.variables[
                coordinate_transform_variable_name]
            if hasattr(coordinate_transform_variable, "_CoordinateAxisTypes"):
                coordinate_axis_types2 = \
                    coordinate_transform_variable._CoordinateAxisTypes.split()
                if all([axis_type in coordinate_axis_types1 for axis_type in
                        coordinate_axis_types2]):
                    result.add(coordinate_transform_variable_name)
            if hasattr(coordinate_transform_variable, "_CoordinateAxes"):
                coordinate_axes2 = \
                    coordinate_transform_variable._CoordinateAxes.split()
                if all([axis in coordinate_axes1 for axis in coordinate_axes2]):
                    result.add(coordinate_transform_variable_name)
        # Get rid of the dimension variables.
        return result - dimension_names
| apache-2.0 |
resmo/ansible | lib/ansible/modules/network/aci/mso_schema_template_externalepg.py | 5 | 6606 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_template_externalepg
short_description: Manage external EPGs in schema templates
description:
- Manage external EPGs in schema templates on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
externalepg:
description:
- The name of the external EPG to manage.
type: str
aliases: [ name ]
display_name:
description:
- The name as displayed on the MSO web interface.
type: str
vrf:
description:
- The VRF associated to this ANP.
type: dict
suboptions:
name:
description:
- The name of the VRF to associate with.
required: true
type: str
schema:
description:
- The schema that defines the referenced VRF.
- If this parameter is unspecified, it defaults to the current schema.
type: str
template:
description:
- The template that defines the referenced VRF.
- If this parameter is unspecified, it defaults to the current template.
type: str
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new external EPG
mso_schema_template_externalepg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
externalepg: External EPG 1
state: present
delegate_to: localhost
- name: Remove an external EPG
mso_schema_template_externalepg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
externalepg: external EPG1
state: absent
delegate_to: localhost
- name: Query a specific external EPGs
mso_schema_template_externalepg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
externalepg: external EPG1
state: query
delegate_to: localhost
register: query_result
- name: Query all external EPGs
mso_schema_template_externalepg:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, mso_reference_spec, issubset
def main():
    """Ansible entry point: manage an external EPG inside an MSO schema
    template via JSON-patch style operations against the MSO REST API."""
    argument_spec = mso_argument_spec()
    argument_spec.update(
        schema=dict(type='str', required=True),
        template=dict(type='str', required=True),
        externalepg=dict(type='str', aliases=['name']),  # This parameter is not required for querying all objects
        display_name=dict(type='str'),
        vrf=dict(type='dict', options=mso_reference_spec()),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'absent', ['externalepg']],
            ['state', 'present', ['externalepg', 'vrf']],
        ],
    )
    schema = module.params['schema']
    template = module.params['template']
    externalepg = module.params['externalepg']
    display_name = module.params['display_name']
    vrf = module.params['vrf']
    state = module.params['state']
    mso = MSOModule(module)
    # Get schema_id
    schema_obj = mso.get_obj('schemas', displayName=schema)
    if schema_obj:
        schema_id = schema_obj['id']
    else:
        mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
    schema_path = 'schemas/{id}'.format(**schema_obj)
    # Get template
    templates = [t['name'] for t in schema_obj['templates']]
    if template not in templates:
        mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates)))
    template_idx = templates.index(template)
    # Get external EPGs; remember the current object (if any) for idempotency.
    externalepgs = [e['name'] for e in schema_obj['templates'][template_idx]['externalEpgs']]
    if externalepg is not None and externalepg in externalepgs:
        externalepg_idx = externalepgs.index(externalepg)
        mso.existing = schema_obj['templates'][template_idx]['externalEpgs'][externalepg_idx]
    if state == 'query':
        # Query either one EPG or all of them; no changes are made.
        if externalepg is None:
            mso.existing = schema_obj['templates'][template_idx]['externalEpgs']
        elif not mso.existing:
            mso.fail_json(msg="External EPG '{externalepg}' not found".format(externalepg=externalepg))
        mso.exit_json()
    # JSON-patch paths for the EPG collection and the individual EPG.
    eepgs_path = '/templates/{0}/externalEpgs'.format(template)
    eepg_path = '/templates/{0}/externalEpgs/{1}'.format(template, externalepg)
    ops = []
    mso.previous = mso.existing
    if state == 'absent':
        if mso.existing:
            mso.sent = mso.existing = {}
            ops.append(dict(op='remove', path=eepg_path))
    elif state == 'present':
        vrf_ref = mso.make_reference(vrf, 'vrf', schema_id, template)
        if display_name is None and not mso.existing:
            display_name = externalepg
        payload = dict(
            name=externalepg,
            displayName=display_name,
            vrfRef=vrf_ref,
            # FIXME
            subnets=[],
            contractRelationships=[],
        )
        mso.sanitize(payload, collate=True)
        # Replace when the EPG exists, append ('/-') when it does not.
        if mso.existing:
            ops.append(dict(op='replace', path=eepg_path, value=mso.sent))
        else:
            ops.append(dict(op='add', path=eepgs_path + '/-', value=mso.sent))
        mso.existing = mso.proposed
    # Only touch the API outside check mode.
    if not module.check_mode:
        mso.request(schema_path, method='PATCH', data=ops)
    mso.exit_json()


if __name__ == "__main__":
    main()
| gpl-3.0 |
JamesClough/networkx | networkx/algorithms/bipartite/tests/test_generators.py | 7 | 7518 | #!/usr/bin/env python
from nose.tools import *
from networkx import *
from networkx.algorithms.bipartite.generators import *
"""Generators - Bipartite
----------------------
"""
class TestGeneratorsBipartite():
    """Tests for the bipartite graph generators.

    FIX: ``test_random_graph`` was defined twice; the second (directed)
    definition shadowed the first, so the undirected variant never ran.
    The directed one is now ``test_random_digraph``.  Also removed a
    leftover debug ``print`` from ``test_gnmk_random_graph``.
    """
    def test_complete_bipartite_graph(self):
        G=complete_bipartite_graph(0,0)
        assert_true(is_isomorphic( G, null_graph() ))
        for i in [1, 5]:
            G=complete_bipartite_graph(i,0)
            assert_true(is_isomorphic( G, empty_graph(i) ))
            G=complete_bipartite_graph(0,i)
            assert_true(is_isomorphic( G, empty_graph(i) ))
        G=complete_bipartite_graph(2,2)
        assert_true(is_isomorphic( G, cycle_graph(4) ))
        G=complete_bipartite_graph(1,5)
        assert_true(is_isomorphic( G, star_graph(5) ))
        G=complete_bipartite_graph(5,1)
        assert_true(is_isomorphic( G, star_graph(5) ))
        # complete_bipartite_graph(m1,m2) is a connected graph with
        # m1+m2 nodes and m1*m2 edges
        for m1, m2 in [(5, 11), (7, 3)]:
            G=complete_bipartite_graph(m1,m2)
            assert_equal(number_of_nodes(G), m1 + m2)
            assert_equal(number_of_edges(G), m1 * m2)
        assert_raises(networkx.exception.NetworkXError,
                      complete_bipartite_graph, 7, 3, create_using=DiGraph())
        mG=complete_bipartite_graph(7, 3, create_using=MultiGraph())
        assert_equal(sorted(mG.edges()), sorted(G.edges()))
        # specify nodes rather than number of nodes
        G = complete_bipartite_graph([1, 2], ['a', 'b'])
        has_edges = G.has_edge(1,'a') & G.has_edge(1,'b') &\
                G.has_edge(2,'a') & G.has_edge(2,'b')
        assert_true(has_edges)
        assert_equal(G.size(), 4)
    def test_configuration_model(self):
        aseq=[3,3,3,3]
        bseq=[2,2,2,2,2]
        assert_raises(networkx.exception.NetworkXError,
                      configuration_model, aseq, bseq)
        aseq=[3,3,3,3]
        bseq=[2,2,2,2,2,2]
        G=configuration_model(aseq,bseq)
        assert_equal(sorted(d for n,d in G.degree()),
                     [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
        aseq=[2,2,2,2,2,2]
        bseq=[3,3,3,3]
        G=configuration_model(aseq,bseq)
        assert_equal(sorted(d for n,d in G.degree()),
                     [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
        aseq=[2,2,2,1,1,1]
        bseq=[3,3,3]
        G=configuration_model(aseq,bseq)
        assert_equal(sorted(d for n,d in G.degree()),
                     [1, 1, 1, 2, 2, 2, 3, 3, 3])
        GU=project(Graph(G),range(len(aseq)))
        assert_equal(GU.number_of_nodes(), 6)
        GD=project(Graph(G),range(len(aseq),len(aseq)+len(bseq)))
        assert_equal(GD.number_of_nodes(), 3)
        assert_raises(networkx.exception.NetworkXError,
                      configuration_model, aseq, bseq,
                      create_using=DiGraph())
    def test_havel_hakimi_graph(self):
        aseq=[3,3,3,3]
        bseq=[2,2,2,2,2]
        assert_raises(networkx.exception.NetworkXError,
                      havel_hakimi_graph, aseq, bseq)
        bseq=[2,2,2,2,2,2]
        G=havel_hakimi_graph(aseq,bseq)
        assert_equal(sorted(d for n,d in G.degree()),
                     [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
        aseq=[2,2,2,2,2,2]
        bseq=[3,3,3,3]
        G=havel_hakimi_graph(aseq,bseq)
        assert_equal(sorted(d for n,d in G.degree()),
                     [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
        GU=project(Graph(G),range(len(aseq)))
        assert_equal(GU.number_of_nodes(), 6)
        GD=project(Graph(G),range(len(aseq),len(aseq)+len(bseq)))
        assert_equal(GD.number_of_nodes(), 4)
        assert_raises(networkx.exception.NetworkXError,
                      havel_hakimi_graph, aseq, bseq,
                      create_using=DiGraph())
    def test_reverse_havel_hakimi_graph(self):
        aseq=[3,3,3,3]
        bseq=[2,2,2,2,2]
        assert_raises(networkx.exception.NetworkXError,
                      reverse_havel_hakimi_graph, aseq, bseq)
        bseq=[2,2,2,2,2,2]
        G=reverse_havel_hakimi_graph(aseq,bseq)
        assert_equal(sorted(d for n,d in G.degree()),
                     [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
        aseq=[2,2,2,2,2,2]
        bseq=[3,3,3,3]
        G=reverse_havel_hakimi_graph(aseq,bseq)
        assert_equal(sorted(d for n,d in G.degree()),
                     [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
        aseq=[2,2,2,1,1,1]
        bseq=[3,3,3]
        G=reverse_havel_hakimi_graph(aseq,bseq)
        assert_equal(sorted(d for n,d in G.degree()),
                     [1, 1, 1, 2, 2, 2, 3, 3, 3])
        GU=project(Graph(G),range(len(aseq)))
        assert_equal(GU.number_of_nodes(), 6)
        GD=project(Graph(G),range(len(aseq),len(aseq)+len(bseq)))
        assert_equal(GD.number_of_nodes(), 3)
        assert_raises(networkx.exception.NetworkXError,
                      reverse_havel_hakimi_graph, aseq, bseq,
                      create_using=DiGraph())
    def test_alternating_havel_hakimi_graph(self):
        aseq=[3,3,3,3]
        bseq=[2,2,2,2,2]
        assert_raises(networkx.exception.NetworkXError,
                      alternating_havel_hakimi_graph, aseq, bseq)
        bseq=[2,2,2,2,2,2]
        G=alternating_havel_hakimi_graph(aseq,bseq)
        assert_equal(sorted(d for n,d in G.degree()),
                     [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
        aseq=[2,2,2,2,2,2]
        bseq=[3,3,3,3]
        G=alternating_havel_hakimi_graph(aseq,bseq)
        assert_equal(sorted(d for n,d in G.degree()),
                     [2, 2, 2, 2, 2, 2, 3, 3, 3, 3])
        aseq=[2,2,2,1,1,1]
        bseq=[3,3,3]
        G=alternating_havel_hakimi_graph(aseq,bseq)
        assert_equal(sorted(d for n,d in G.degree()),
                     [1, 1, 1, 2, 2, 2, 3, 3, 3])
        GU=project(Graph(G),range(len(aseq)))
        assert_equal(GU.number_of_nodes(), 6)
        GD=project(Graph(G),range(len(aseq),len(aseq)+len(bseq)))
        assert_equal(GD.number_of_nodes(), 3)
        assert_raises(networkx.exception.NetworkXError,
                      alternating_havel_hakimi_graph, aseq, bseq,
                      create_using=DiGraph())
    def test_preferential_attachment(self):
        aseq=[3,2,1,1]
        G=preferential_attachment_graph(aseq,0.5)
        assert_raises(networkx.exception.NetworkXError,
                      preferential_attachment_graph, aseq, 0.5,
                      create_using=DiGraph())
    def test_random_graph(self):
        n=10
        m=20
        G=random_graph(n,m,0.9)
        assert_equal(len(G),30)
        assert_true(is_bipartite(G))
        X,Y=nx.algorithms.bipartite.sets(G)
        assert_equal(set(range(n)),X)
        assert_equal(set(range(n,n+m)),Y)
    def test_random_digraph(self):
        # Renamed from a duplicate 'test_random_graph' definition that
        # shadowed the undirected test above.
        n=10
        m=20
        G=random_graph(n,m,0.9,directed=True)
        assert_equal(len(G),30)
        assert_true(is_bipartite(G))
        X,Y=nx.algorithms.bipartite.sets(G)
        assert_equal(set(range(n)),X)
        assert_equal(set(range(n,n+m)),Y)
    def test_gnmk_random_graph(self):
        n = 10
        m = 20
        edges = 100
        G = gnmk_random_graph(n, m, edges)
        assert_equal(len(G),30)
        assert_true(is_bipartite(G))
        X,Y=nx.algorithms.bipartite.sets(G)
        assert_equal(set(range(n)),X)
        assert_equal(set(range(n,n+m)),Y)
        assert_equal(edges, len(list(G.edges())))
| bsd-3-clause |
palaniyappanBala/robobrowser | robobrowser/helpers.py | 5 | 2534 | """
Miscellaneous helper functions
"""
import re
from bs4 import BeautifulSoup
from bs4.element import Tag
from robobrowser.compat import string_types, iteritems
def match_text(text, tag):
    """Return truthy when *tag*'s text matches *text*.

    *text* may be a plain string (substring containment) or a compiled
    regular expression (searched against the tag text).  Returns ``None``
    for any other type, as before.
    """
    if isinstance(text, string_types):
        return text in tag.text
    # FIX: ``re._pattern_type`` is private and was removed in Python 3.8;
    # duck-type compiled patterns via their ``search`` method instead.
    if hasattr(text, 'search'):
        return text.search(tag.text)
def find_all(soup, name=None, attrs=None, recursive=True, text=None,
             limit=None, **kwargs):
    """The `find` and `find_all` methods of `BeautifulSoup` don't handle the
    `text` parameter combined with other parameters. This is necessary for
    e.g. finding links containing a string or pattern. This method first
    searches by text content, and then by the standard BeautifulSoup
    arguments.
    """
    if text is None:
        # No text filter: defer entirely to BeautifulSoup.
        return soup.find_all(
            name, attrs or {}, recursive, text, limit, **kwargs
        )
    if isinstance(text, string_types):
        # Treat a plain string as a case-insensitive literal substring.
        text = re.compile(re.escape(text), re.I)
    matches = []
    for candidate in soup.find_all(name, attrs or {}, recursive, **kwargs):
        if not match_text(text, candidate):
            continue
        matches.append(candidate)
        if limit is not None and len(matches) >= limit:
            break
    return matches
def find(soup, name=None, attrs=None, recursive=True, text=None, **kwargs):
    """Return the first tag matched by :func:`find_all`, or ``None``."""
    results = find_all(
        soup, name, attrs or {}, recursive, text, 1, **kwargs
    )
    return results[0] if results else None
def ensure_soup(value, parser=None):
    """Coerce a value (or list of values) to Tag (or list of Tag).

    :param value: String, BeautifulSoup, Tag, or list of the above
    :param str parser: Parser to use; defaults to BeautifulSoup default
    :return: Tag or list of Tags
    """
    # NOTE: BeautifulSoup subclasses Tag, so the BeautifulSoup check must
    # come first to unwrap full documents into their first tag.
    if isinstance(value, BeautifulSoup):
        return value.find()
    if isinstance(value, Tag):
        return value
    if isinstance(value, list):
        return [
            ensure_soup(item, parser=parser)
            for item in value
        ]
    # Anything else is assumed to be markup text; parse and unwrap it.
    parsed = BeautifulSoup(value, features=parser)
    return parsed.find()
def lowercase_attr_names(tag):
    """Lower-case all attribute names of the provided BeautifulSoup tag.

    Note: this mutates the tag's attribute names in place and does not
    return a new tag.

    :param Tag: BeautifulSoup tag
    """
    # Built from a list of pairs rather than a dict comprehension so the
    # code also runs on Python 2.6.
    lowered = [(attr.lower(), val) for attr, val in iteritems(tag.attrs)]
    tag.attrs = dict(lowered)
| bsd-3-clause |
sss/calibre-at-bzr | src/calibre/devices/mtp/unix/driver.py | 3 | 15469 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import operator, traceback, pprint, sys, time
from threading import RLock
from collections import namedtuple
from functools import partial
from calibre import prints, as_unicode
from calibre.constants import plugins, islinux
from calibre.ptempfile import SpooledTemporaryFile
from calibre.devices.errors import OpenFailed, DeviceError, BlacklistedDevice
from calibre.devices.mtp.base import MTPDeviceBase, synchronous, debug
# Immutable identity record for a USB device, used as a hashable fingerprint.
MTPDevice = namedtuple('MTPDevice', 'busnum devnum vendor_id product_id '
        'bcd serial manufacturer product')

def fingerprint(d):
    """Return an MTPDevice tuple built from the same-named attributes of *d*."""
    return MTPDevice(*(getattr(d, field) for field in MTPDevice._fields))

# USB vendor id for Apple; such devices are skipped during MTP detection.
APPLE = 0x05ac
class MTP_DEVICE(MTPDeviceBase):
    '''Driver for MTP devices, built on the libmtp binary plugin which is
    loaded lazily in startup(). Methods decorated with @synchronous are
    presumably serialized via self.lock -- confirm in calibre.devices.mtp.base.'''
    # libusb(x) does not work on OS X. So no MTP support for OS X
    supported_platforms = ['linux']
    def __init__(self, *args, **kwargs):
        MTPDeviceBase.__init__(self, *args, **kwargs)
        self.libmtp = None  # libmtp plugin module, set in startup()
        self.known_devices = None  # frozenset of (vendor_id, product_id)
        self.detect_cache = {}  # fingerprint -> cached MTP probe result
        self.dev = None  # currently open libmtp Device, if any
        self._filesystem_cache = None
        self.lock = RLock()
        # Devices that failed to open or were blacklisted by the user
        self.blacklisted_devices = set()
        self.ejected_devices = set()
        self.currently_connected_dev = None
        # sysfs-based MTP detector, only available on linux
        self._is_device_mtp = None
        if islinux:
            from calibre.devices.mtp.unix.sysfs import MTPDetect
            self._is_device_mtp = MTPDetect()
    def is_device_mtp(self, d, debug=None):
        ''' Returns True iff the _is_device_mtp check returns True and libmtp
        is able to probe the device successfully. '''
        if self._is_device_mtp is None: return False
        return (self._is_device_mtp(d, debug=debug) and
                self.libmtp.is_mtp_device(d.busnum, d.devnum))
    def set_debug_level(self, lvl):
        '''Set the libmtp debug level (one of the LIBMTP_* constants).'''
        self.libmtp.set_debug_level(lvl)
    @synchronous
    def detect_managed_devices(self, devices_on_system, force_refresh=False):
        '''Return the connected MTP device (as a fingerprint), or None.
        Probe results are cached per fingerprint; force_refresh discards
        the cache.'''
        if self.libmtp is None: return None
        # First remove blacklisted devices.
        devs = set()
        for d in devices_on_system:
            fp = fingerprint(d)
            if fp not in self.blacklisted_devices and fp.vendor_id != APPLE:
                # Do not try to open Apple devices
                devs.add(fp)
        # Clean up ejected devices
        self.ejected_devices = devs.intersection(self.ejected_devices)
        # Check if the currently connected device is still present
        if self.currently_connected_dev is not None:
            return (self.currently_connected_dev if
                    self.currently_connected_dev in devs else None)
        # Remove ejected devices
        devs = devs - self.ejected_devices
        # Now check for MTP devices
        if force_refresh:
            self.detect_cache = {}
        cache = self.detect_cache
        for d in devs:
            ans = cache.get(d, None)
            if ans is None:
                # A device is MTP if it is in the known-device list or if
                # the (more expensive) libmtp probe succeeds.
                ans = (
                    (d.vendor_id, d.product_id) in self.known_devices or
                    self.is_device_mtp(d))
                cache[d] = ans
            if ans:
                return d
        return None
    @synchronous
    def debug_managed_device_detection(self, devices_on_system, output):
        '''Try to detect and open an MTP device, writing diagnostics to
        *output*. Returns True iff a device was opened successfully.'''
        if self.currently_connected_dev is not None:
            return True
        p = partial(prints, file=output)
        if self.libmtp is None:
            err = plugins['libmtp'][1]
            if not err:
                err = 'startup() not called on this device driver'
            p(err)
            return False
        devs = [d for d in devices_on_system if
            ( (d.vendor_id, d.product_id) in self.known_devices or
                self.is_device_mtp(d, debug=p)) and d.vendor_id != APPLE]
        if not devs:
            p('No MTP devices connected to system')
            return False
        p('MTP devices connected:')
        for d in devs: p(d)
        for d in devs:
            p('\nTrying to open:', d)
            try:
                self.open(d, 'debug')
            except BlacklistedDevice:
                p('This device has been blacklisted by the user')
                continue
            except:
                p('Opening device failed:')
                p(traceback.format_exc())
                return False
            else:
                p('Opened', self.current_friendly_name, 'successfully')
                p('Storage info:')
                p(pprint.pformat(self.dev.storage_info))
                # Close the device again; this was only a probe.
                self.post_yank_cleanup()
                return True
        return False
    @synchronous
    def create_device(self, connected_device):
        '''Construct a libmtp Device object for the given fingerprint.'''
        d = connected_device
        return self.libmtp.Device(d.busnum, d.devnum, d.vendor_id,
                d.product_id, d.manufacturer, d.product, d.serial)
    @synchronous
    def eject(self):
        '''Mark the current device as ejected and disconnect from it.'''
        if self.currently_connected_dev is None: return
        self.ejected_devices.add(self.currently_connected_dev)
        self.post_yank_cleanup()
    @synchronous
    def post_yank_cleanup(self):
        '''Reset all per-connection state after the device goes away.'''
        self.dev = self._filesystem_cache = self.current_friendly_name = None
        self.currently_connected_dev = None
        self.current_serial_num = None
    @property
    def is_mtp_device_connected(self):
        return self.currently_connected_dev is not None
    @synchronous
    def startup(self):
        '''Load the libmtp plugin and import its LIBMTP_* constants.'''
        p = plugins['libmtp']
        self.libmtp = p[0]
        if self.libmtp is None:
            print ('Failed to load libmtp, MTP device detection disabled')
            print (p[1])
        else:
            self.known_devices = frozenset(self.libmtp.known_devices())
            # Mirror the libmtp constants (debug levels, etc.) onto self
            for x in vars(self.libmtp):
                if x.startswith('LIBMTP'):
                    setattr(self, x, getattr(self.libmtp, x))
    @synchronous
    def shutdown(self):
        self.dev = self._filesystem_cache = None
    def format_errorstack(self, errs):
        '''Format a list of (code, message) errors as a printable string.'''
        return '\n'.join(['%d:%s'%(code, msg.decode('utf-8', 'replace')) for
            code, msg in errs])
    @synchronous
    def open(self, connected_device, library_uuid):
        '''Open the device, choosing the main memory and card storage ids.
        Raises OpenFailed/BlacklistedDevice on failure; failed devices are
        added to the blacklist so they are not retried.'''
        self.dev = self._filesystem_cache = None
        try:
            self.dev = self.create_device(connected_device)
        except Exception as e:
            self.blacklisted_devices.add(connected_device)
            raise OpenFailed('Failed to open %s: Error: %s'%(
                    connected_device, as_unicode(e)))
        storage = sorted(self.dev.storage_info, key=operator.itemgetter('id'))
        # Only writable storage is usable
        storage = [x for x in storage if x.get('rw', False)]
        if not storage:
            self.blacklisted_devices.add(connected_device)
            raise OpenFailed('No storage found for device %s'%(connected_device,))
        snum = self.dev.serial_number
        if snum in self.prefs.get('blacklist', []):
            self.blacklisted_devices.add(connected_device)
            self.dev = None
            raise BlacklistedDevice(
                'The %s device has been blacklisted by the user'%(connected_device,))
        # Lowest storage id is treated as main memory, the next two as cards
        self._main_id = storage[0]['id']
        self._carda_id = self._cardb_id = None
        if len(storage) > 1:
            self._carda_id = storage[1]['id']
        if len(storage) > 2:
            self._cardb_id = storage[2]['id']
        self.current_friendly_name = self.dev.friendly_name
        if not self.current_friendly_name:
            self.current_friendly_name = self.dev.model_name or _('Unknown MTP device')
        self.current_serial_num = snum
        self.currently_connected_dev = connected_device
    @synchronous
    def device_debug_info(self):
        '''Return a human readable description of the open device.'''
        ans = self.get_gui_name()
        ans += '\nSerial number: %s'%self.current_serial_num
        ans += '\nManufacturer: %s'%self.dev.manufacturer_name
        ans += '\nModel: %s'%self.dev.model_name
        ans += '\nids: %s'%(self.dev.ids,)
        ans += '\nDevice version: %s'%self.dev.device_version
        ans += '\nStorage:\n'
        storage = sorted(self.dev.storage_info, key=operator.itemgetter('id'))
        ans += pprint.pformat(storage)
        return ans
    def _filesystem_callback(self, entry, level):
        '''Progress callback for get_filesystem(). Returning False for a
        top-level (level 0) ignored folder prunes that subtree.'''
        name = entry.get('name', '')
        self.filesystem_callback(_('Found object: %s')%name)
        if (level == 0 and
                self.is_folder_ignored(self._currently_getting_sid, name)):
            return False
        return True
    @property
    def filesystem_cache(self):
        '''Lazily built FilesystemCache of all objects on all storages.'''
        if self._filesystem_cache is None:
            st = time.time()
            debug('Loading filesystem metadata...')
            from calibre.devices.mtp.filesystem_cache import FilesystemCache
            with self.lock:
                storage, all_items, all_errs = [], [], []
                for sid, capacity in zip([self._main_id, self._carda_id,
                    self._cardb_id], self.total_space()):
                    if sid is None: continue
                    name = _('Unknown')
                    for x in self.dev.storage_info:
                        if x['id'] == sid:
                            name = x['name']
                            break
                    storage.append({'id':sid, 'size':capacity,
                        'is_folder':True, 'name':name, 'can_delete':False,
                        'is_system':True})
                    self._currently_getting_sid = unicode(sid)
                    items, errs = self.dev.get_filesystem(sid,
                            self._filesystem_callback)
                    all_items.extend(items), all_errs.extend(errs)
                if not all_items and all_errs:
                    # Nothing could be read at all: treat as a hard failure
                    raise DeviceError(
                            'Failed to read filesystem from %s with errors: %s'
                            %(self.current_friendly_name,
                                self.format_errorstack(all_errs)))
                if all_errs:
                    # Partial success: log the errors and continue
                    prints('There were some errors while getting the '
                            ' filesystem from %s: %s'%(
                                self.current_friendly_name,
                                self.format_errorstack(all_errs)))
                self._filesystem_cache = FilesystemCache(storage, all_items)
            debug('Filesystem metadata loaded in %g seconds (%d objects)'%(
                time.time()-st, len(self._filesystem_cache)))
        return self._filesystem_cache
    @synchronous
    def get_basic_device_information(self):
        # NOTE(review): device_version is returned twice; the second slot
        # looks like it was meant for a software version -- confirm upstream.
        d = self.dev
        return (self.current_friendly_name, d.device_version, d.device_version, '')
    @synchronous
    def total_space(self, end_session=True):
        '''Return (main, carda, cardb) total capacities in bytes.'''
        ans = [0, 0, 0]
        for s in self.dev.storage_info:
            i = {self._main_id:0, self._carda_id:1,
                    self._cardb_id:2}.get(s['id'], None)
            if i is not None:
                ans[i] = s['capacity']
        return tuple(ans)
    @synchronous
    def free_space(self, end_session=True):
        '''Return (main, carda, cardb) free space in bytes.'''
        self.dev.update_storage_info()
        ans = [0, 0, 0]
        for s in self.dev.storage_info:
            i = {self._main_id:0, self._carda_id:1,
                    self._cardb_id:2}.get(s['id'], None)
            if i is not None:
                ans[i] = s['freespace_bytes']
        return tuple(ans)
    @synchronous
    def create_folder(self, parent, name):
        '''Create (or return an existing) folder named *name* under *parent*.'''
        if not parent.is_folder:
            raise ValueError('%s is not a folder'%(parent.full_path,))
        e = parent.folder_named(name)
        if e is not None:
            return e
        ename = name.encode('utf-8') if isinstance(name, unicode) else name
        sid, pid = parent.storage_id, parent.object_id
        if pid == sid:
            # Storage root: libmtp expects a parent id of 0
            pid = 0
        ans, errs = self.dev.create_folder(sid, pid, ename)
        if ans is None:
            raise DeviceError(
                    'Failed to create folder named %s in %s with error: %s'%
                    (name, parent.full_path, self.format_errorstack(errs)))
        return parent.add_child(ans)
    @synchronous
    def put_file(self, parent, name, stream, size, callback=None, replace=True):
        '''Upload *size* bytes from *stream* as a file under *parent*.
        An existing file of the same name is deleted first if replace=True.'''
        e = parent.folder_named(name)
        if e is not None:
            raise ValueError('Cannot upload file, %s already has a folder named: %s'%(
                parent.full_path, e.name))
        e = parent.file_named(name)
        if e is not None:
            if not replace:
                raise ValueError('Cannot upload file %s, it already exists'%(
                    e.full_path,))
            self.delete_file_or_folder(e)
        ename = name.encode('utf-8') if isinstance(name, unicode) else name
        sid, pid = parent.storage_id, parent.object_id
        if pid == sid:
            # Storage root: libmtp expects a parent id of 0
            pid = 0
        ans, errs = self.dev.put_file(sid, pid, ename, stream, size, callback)
        if ans is None:
            raise DeviceError('Failed to upload file named: %s to %s: %s'
                    %(name, parent.full_path, self.format_errorstack(errs)))
        return parent.add_child(ans)
    @synchronous
    def get_mtp_file(self, f, stream=None, callback=None):
        '''Download the file object *f*, returning a seekable stream.
        If *stream* is None a SpooledTemporaryFile is created and its name
        is set to the file name.'''
        if f.is_folder:
            # Fixed error message (was: '%s if a folder')
            raise ValueError('%s is a folder'%(f.full_path,))
        set_name = stream is None
        if stream is None:
            stream = SpooledTemporaryFile(5*1024*1024, '_wpd_receive_file.dat')
        ok, errs = self.dev.get_file(f.object_id, stream, callback)
        if not ok:
            raise DeviceError('Failed to get file: %s with errors: %s'%(
                f.full_path, self.format_errorstack(errs)))
        stream.seek(0)
        if set_name:
            stream.name = f.name
        return stream
    @synchronous
    def delete_file_or_folder(self, obj):
        '''Delete *obj* from the device. It must be deletable, not a system
        object and (for folders) empty. Returns the parent of *obj*.'''
        if obj.deleted:
            return
        if not obj.can_delete:
            raise ValueError('Cannot delete %s as deletion not allowed'%
                    (obj.full_path,))
        if obj.is_system:
            raise ValueError('Cannot delete %s as it is a system object'%
                    (obj.full_path,))
        if obj.files or obj.folders:
            raise ValueError('Cannot delete %s as it is not empty'%
                    (obj.full_path,))
        parent = obj.parent
        ok, errs = self.dev.delete_object(obj.object_id)
        if not ok:
            raise DeviceError('Failed to delete %s with error: %s'%
                (obj.full_path, self.format_errorstack(errs)))
        parent.remove_child(obj)
        return parent
def develop():
    """Manual test harness: detect, open and inspect the first MTP device."""
    from calibre.devices.scanner import DeviceScanner
    sc = DeviceScanner()
    sc.scan()
    dev = MTP_DEVICE(None)
    dev.startup()
    try:
        connected = dev.detect_managed_devices(sc.devices)
        if connected is None: raise RuntimeError('No MTP device found')
        dev.open(connected, 'develop')
        pprint.pprint(dev.dev.storage_info)
        # Attribute access triggers a full filesystem scan
        dev.filesystem_cache
    finally:
        dev.shutdown()
if __name__ == '__main__':
    # Ad-hoc debugging entry point: dump detection diagnostics to stdout.
    from calibre.devices.scanner import DeviceScanner
    sc = DeviceScanner()
    sc.scan()
    dev = MTP_DEVICE(None)
    dev.startup()
    dev.debug_managed_device_detection(sc.devices, sys.stdout)
    dev.set_debug_level(dev.LIBMTP_DEBUG_ALL)
    dev.shutdown()
| gpl-3.0 |
tdyas/pants | contrib/node/src/python/pants/contrib/node/subsystems/resolvers/node_resolver_base.py | 2 | 2786 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
import shutil
from abc import ABC, abstractmethod
from pants.base.build_environment import get_buildroot
from pants.util.dirutil import safe_mkdir
class NodeResolverBase(ABC):
    """Base class for resolvers of NodePackage targets."""

    # Matches 'file:<path>' specifiers; group 1 is the bare path.
    file_regex = re.compile("^file:(.*)$")

    @abstractmethod
    def resolve_target(
        self, node_task, target, results_dir, node_paths, resolve_locally=False, **kwargs
    ):
        """Resolve a NodePackage target."""

    @classmethod
    def prepare(cls, options, round_manager):
        """Allows a resolver to add additional product requirements to the NodeResolver task."""
        pass

    @classmethod
    def parse_file_path(cls, file_path):
        """Parse a file address path without the file specifier.

        Returns None when the path does not carry a 'file:' prefix.
        """
        match = cls.file_regex.match(file_path)
        return match.group(1) if match else None

    def _copy_sources(self, target, results_dir):
        """Copy sources from a target to a results directory.

        :param NodePackage target: A subclass of NodePackage
        :param string results_dir: The results directory
        """
        root = get_buildroot()
        base = target.address.spec_path
        for src in target.sources_relative_to_buildroot():
            destination = os.path.join(results_dir, os.path.relpath(src, base))
            safe_mkdir(os.path.dirname(destination))
            shutil.copyfile(os.path.join(root, src), destination)

    def _get_target_from_package_name(self, target, package_name, file_path):
        """Get a dependent target given the package name and relative file path.

        Only direct dependencies of *target* are searched; transitive
        dependencies resolve under the direct ones, and every direct
        dependency is symlinked to the target.

        :param NodePackage target: A subclass of NodePackage
        :param string package_name: A package.json name that is required to be the same as the target name
        :param string file_path: Relative filepath from target to the package in the format 'file:<address_path>'
        :return: The matching dependency target, or None.
        """
        address_path = self.parse_file_path(file_path)
        if not address_path:
            return None
        wanted_spec_path = os.path.normpath(
            os.path.join(target.address.spec_path, address_path)
        )
        candidates = (
            dep for dep in target.dependencies
            if dep.package_name == package_name
            and dep.address.spec_path == wanted_spec_path
        )
        return next(candidates, None)
| apache-2.0 |
liangjg/openmc | examples/lattice/nested/build_xml.py | 8 | 5536 | import openmc
###############################################################################
#                      Simulation Input File Parameters
###############################################################################
# OpenMC simulation parameters
batches = 20
inactive = 10
particles = 10000
###############################################################################
#                 Exporting to OpenMC materials.xml file
###############################################################################
# Instantiate some Materials and register the appropriate Nuclides
fuel = openmc.Material(material_id=1, name='fuel')
fuel.set_density('g/cc', 4.5)
fuel.add_nuclide('U235', 1.)
moderator = openmc.Material(material_id=2, name='moderator')
moderator.set_density('g/cc', 1.0)
moderator.add_element('H', 2.)
moderator.add_element('O', 1.)
# Thermal scattering data for hydrogen bound in water
moderator.add_s_alpha_beta('c_H_in_H2O')
# Instantiate a Materials collection and export to XML
materials_file = openmc.Materials((moderator, fuel))
materials_file.export_to_xml()
###############################################################################
#                 Exporting to OpenMC geometry.xml file
###############################################################################
# Instantiate Surfaces
left = openmc.XPlane(surface_id=1, x0=-2, name='left')
right = openmc.XPlane(surface_id=2, x0=2, name='right')
bottom = openmc.YPlane(surface_id=3, y0=-2, name='bottom')
top = openmc.YPlane(surface_id=4, y0=2, name='top')
# Three fuel pin radii, one per pin universe below
fuel1 = openmc.ZCylinder(surface_id=5, x0=0, y0=0, r=0.4)
fuel2 = openmc.ZCylinder(surface_id=6, x0=0, y0=0, r=0.3)
fuel3 = openmc.ZCylinder(surface_id=7, x0=0, y0=0, r=0.2)
# Vacuum boundaries on all four outer planes
left.boundary_type = 'vacuum'
right.boundary_type = 'vacuum'
top.boundary_type = 'vacuum'
bottom.boundary_type = 'vacuum'
# Instantiate Cells
cell1 = openmc.Cell(cell_id=1, name='Cell 1')
cell2 = openmc.Cell(cell_id=2, name='Cell 2')
cell3 = openmc.Cell(cell_id=101, name='cell 3')
cell4 = openmc.Cell(cell_id=102, name='cell 4')
cell5 = openmc.Cell(cell_id=201, name='cell 5')
cell6 = openmc.Cell(cell_id=202, name='cell 6')
cell7 = openmc.Cell(cell_id=301, name='cell 7')
cell8 = openmc.Cell(cell_id=302, name='cell 8')
# Use surface half-space to define regions
cell1.region = +left & -right & +bottom & -top
cell2.region = +left & -right & +bottom & -top
# Each pin universe: fuel inside the cylinder, moderator outside
cell3.region = -fuel1
cell4.region = +fuel1
cell5.region = -fuel2
cell6.region = +fuel2
cell7.region = -fuel3
cell8.region = +fuel3
# Register Materials with Cells
cell3.fill = fuel
cell4.fill = moderator
cell5.fill = fuel
cell6.fill = moderator
cell7.fill = fuel
cell8.fill = moderator
# Instantiate Universe
univ1 = openmc.Universe(universe_id=1)
univ2 = openmc.Universe(universe_id=2)
univ3 = openmc.Universe(universe_id=3)
univ4 = openmc.Universe(universe_id=5)
root = openmc.Universe(universe_id=0, name='root universe')
# Register Cells with Universe
univ1.add_cells([cell3, cell4])
univ2.add_cells([cell5, cell6])
univ3.add_cells([cell7, cell8])
root.add_cell(cell1)
# univ4 wraps cell2, which is filled with the inner pin lattice below
univ4.add_cell(cell2)
# Instantiate nested Lattices
lattice1 = openmc.RectLattice(lattice_id=4, name='4x4 assembly')
lattice1.lower_left = [-1., -1.]
lattice1.pitch = [1., 1.]
lattice1.universes = [[univ1, univ2],
                      [univ2, univ3]]
lattice2 = openmc.RectLattice(lattice_id=6, name='4x4 core')
lattice2.lower_left = [-2., -2.]
lattice2.pitch = [2., 2.]
lattice2.universes = [[univ4, univ4],
                      [univ4, univ4]]
# Fill Cell with the Lattice
cell1.fill = lattice2
cell2.fill = lattice1
# Instantiate a Geometry, register the root Universe, and export to XML
geometry = openmc.Geometry(root)
geometry.export_to_xml()
###############################################################################
#                   Exporting to OpenMC settings.xml file
###############################################################################
# Instantiate a Settings object, set all runtime parameters, and export to XML
settings_file = openmc.Settings()
settings_file.batches = batches
settings_file.inactive = inactive
settings_file.particles = particles
# Create an initial uniform spatial source distribution over fissionable zones
bounds = [-1, -1, -1, 1, 1, 1]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings_file.source = openmc.source.Source(space=uniform_dist)
settings_file.export_to_xml()
###############################################################################
#                   Exporting to OpenMC plots.xml file
###############################################################################
plot = openmc.Plot(plot_id=1)
plot.origin = [0, 0, 0]
plot.width = [4, 4]
plot.pixels = [400, 400]
plot.color_by = 'material'
# Instantiate a Plots object and export to XML
plot_file = openmc.Plots([plot])
plot_file.export_to_xml()
###############################################################################
#                   Exporting to OpenMC tallies.xml file
###############################################################################
# Instantiate a tally mesh
mesh = openmc.RegularMesh(mesh_id=1)
mesh.dimension = [4, 4]
mesh.lower_left = [-2, -2]
mesh.width = [1, 1]
# Instantiate tally Filter
mesh_filter = openmc.MeshFilter(mesh)
# Instantiate the Tally
tally = openmc.Tally(tally_id=1)
tally.filters = [mesh_filter]
tally.scores = ['total']
# Instantiate a Tallies collection, register Tally/RegularMesh, and export to
# XML
tallies_file = openmc.Tallies([tally])
tallies_file.export_to_xml()
| mit |
zycdragonball/tensorflow | tensorflow/python/framework/versions.py | 127 | 1607 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
# Version strings exposed by the native pywrap_tensorflow extension module.
__version__ = pywrap_tensorflow.__version__
__git_version__ = pywrap_tensorflow.__git_version__
__compiler_version__ = pywrap_tensorflow.__compiler_version__
# Non-dunder aliases of the same values.
VERSION = __version__
GIT_VERSION = __git_version__
COMPILER_VERSION = __compiler_version__
# GraphDef versioning: current version plus the minimum versions that a
# consumer/producer of GraphDefs must support.
GRAPH_DEF_VERSION = pywrap_tensorflow.GRAPH_DEF_VERSION
GRAPH_DEF_VERSION_MIN_CONSUMER = (
    pywrap_tensorflow.GRAPH_DEF_VERSION_MIN_CONSUMER)
GRAPH_DEF_VERSION_MIN_PRODUCER = (
    pywrap_tensorflow.GRAPH_DEF_VERSION_MIN_PRODUCER)
# Public API of this module.
__all__ = [
    "__version__",
    "__git_version__",
    "__compiler_version__",
    "COMPILER_VERSION",
    "GIT_VERSION",
    "GRAPH_DEF_VERSION",
    "GRAPH_DEF_VERSION_MIN_CONSUMER",
    "GRAPH_DEF_VERSION_MIN_PRODUCER",
    "VERSION",
]
| apache-2.0 |
Sorsly/subtle | google-cloud-sdk/platform/gsutil/gslib/resumable_streaming_upload.py | 36 | 8165 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper class for streaming resumable uploads."""
import collections
import os
from gslib.exception import CommandException
from gslib.util import GetJsonResumableChunkSize
class ResumableStreamingJsonUploadWrapper(object):
  """Wraps an input stream in a buffer for resumable uploads.
  This class takes a non-seekable input stream, buffers it, and exposes it
  as a stream with limited seek capabilities such that it can be used in a
  resumable JSON API upload.
  max_buffer_size bytes of buffering is supported.
  """
  def __init__(self, stream, max_buffer_size, test_small_buffer=False):
    """Initializes the wrapper.
    Args:
      stream: Input stream.
      max_buffer_size: Maximum size of internal buffer; should be >= the chunk
          size of the resumable upload API to ensure that at least one full
          chunk write can be replayed in the event of a server error.
      test_small_buffer: Skip check for buffer size vs. chunk size, for testing.
    """
    self._orig_fp = stream
    if not test_small_buffer and max_buffer_size < GetJsonResumableChunkSize():
      raise CommandException('Resumable streaming upload created with buffer '
                             'size %s, JSON resumable upload chunk size %s. '
                             'Buffer size must be >= JSON resumable upload '
                             'chunk size to ensure that uploads can be '
                             'resumed.' % (max_buffer_size,
                                           GetJsonResumableChunkSize()))
    self._max_buffer_size = max_buffer_size
    # Deque of byte chunks; together they cover stream offsets in the
    # half-open range [_buffer_start, _buffer_end).
    self._buffer = collections.deque()
    self._buffer_start = 0
    self._buffer_end = 0
    # Current logical read position in the stream.
    self._position = 0
  def read(self, size=-1):  # pylint: disable=invalid-name
    """"Reads from the wrapped stream.
    Args:
      size: The amount of bytes to read. If omitted or negative, the entire
          contents of the stream will be read and returned.
    Returns:
      Bytes from the wrapped stream.
    """
    read_all_bytes = size is None or size < 0
    if read_all_bytes:
      bytes_remaining = self._max_buffer_size
    else:
      bytes_remaining = size
    data = b''
    buffered_data = []
    if self._position < self._buffer_end:
      # There was a backwards seek, so read from the buffer first.
      # TODO: Performance test to validate if it is worth re-aligning
      # the buffers in this case.  Also, seeking through the buffer for
      # each read on a long catch-up is probably not performant, but we'd
      # need a more complex data structure than a deque to get around this.
      pos_in_buffer = self._buffer_start
      buffer_index = 0
      # First, find the start position in the buffer.
      while pos_in_buffer + len(self._buffer[buffer_index]) < self._position:
        # When this loop exits, buffer_index will refer to a buffer that
        # has at least some overlap with self._position, and
        # pos_in_buffer will be >= self._position
        pos_in_buffer += len(self._buffer[buffer_index])
        buffer_index += 1
      # Read until we've read enough or we're out of buffer.
      while pos_in_buffer < self._buffer_end and bytes_remaining > 0:
        buffer_len = len(self._buffer[buffer_index])
        # This describes how far into the current buffer self._position is.
        offset_from_position = self._position - pos_in_buffer
        bytes_available_this_buffer = buffer_len - offset_from_position
        read_size = min(bytes_available_this_buffer, bytes_remaining)
        buffered_data.append(
            self._buffer[buffer_index]
            [offset_from_position:offset_from_position + read_size])
        bytes_remaining -= read_size
        pos_in_buffer += buffer_len
        buffer_index += 1
        self._position += read_size
    # At this point we're guaranteed that if there are any bytes left to read,
    # then self._position == self._buffer_end, and we can read from the
    # wrapped stream if needed.
    if read_all_bytes:
      # TODO: The user is requesting reading until the end of an
      # arbitrary length stream, which is bad we'll need to return data
      # with no size limits; if the stream is sufficiently long, we could run
      # out of memory.  We could break this down into smaller reads and
      # buffer it as we go, but we're still left returning the data all at
      # once to the caller.  We could raise, but for now trust the caller to
      # be sane and have enough memory to hold the remaining stream contents.
      new_data = self._orig_fp.read(size)
      data_len = len(new_data)
      if not buffered_data:
        data = new_data
      else:
        buffered_data.append(new_data)
        data = b''.join(buffered_data)
      self._position += data_len
    elif bytes_remaining:
      new_data = self._orig_fp.read(bytes_remaining)
      if not buffered_data:
        data = new_data
      else:
        buffered_data.append(new_data)
        data = b''.join(buffered_data)
      data_len = len(new_data)
      if data_len:
        self._position += data_len
        self._buffer.append(new_data)
        self._buffer_end += data_len
        oldest_data = None
        # Evict whole chunks from the left until we are within the cap.
        while self._buffer_end - self._buffer_start > self._max_buffer_size:
          oldest_data = self._buffer.popleft()
          self._buffer_start += len(oldest_data)
        if oldest_data:
          # The last evicted chunk may have pushed us below the cap; put the
          # still-fitting tail of it back so the buffer stays full.
          refill_amount = self._max_buffer_size - (self._buffer_end -
                                                   self._buffer_start)
          if refill_amount:
            self._buffer.appendleft(oldest_data[-refill_amount:])
            self._buffer_start -= refill_amount
    else:
      data = b''.join(buffered_data) if buffered_data else b''
    return data
  def tell(self):  # pylint: disable=invalid-name
    """Returns the current stream position."""
    return self._position
  def seekable(self):  # pylint: disable=invalid-name
    """Returns true since limited seek support exists."""
    return True
  def seek(self, offset, whence=os.SEEK_SET):  # pylint: disable=invalid-name
    """Seeks on the buffered stream.
    Args:
      offset: The offset to seek to; must be within the buffer bounds.
      whence: Must be os.SEEK_SET.
    Raises:
      CommandException if an unsupported seek mode or position is used.
    """
    if whence == os.SEEK_SET:
      if offset < self._buffer_start or offset > self._buffer_end:
        raise CommandException('Unable to resume upload because of limited '
                               'buffering available for streaming uploads. '
                               'Offset %s was requested, but only data from '
                               '%s to %s is buffered.' %
                               (offset, self._buffer_start, self._buffer_end))
      # Move to a position within the buffer.
      self._position = offset
    elif whence == os.SEEK_END:
      if offset > self._max_buffer_size:
        raise CommandException('Invalid SEEK_END offset %s on streaming '
                               'upload. Only %s can be buffered.' %
                               (offset, self._max_buffer_size))
      # Read to the end and rely on buffering to handle the offset.
      while self.read(self._max_buffer_size):
        pass
      # Now we're at the end.
      self._position -= offset
    else:
      raise CommandException('Invalid seek mode on streaming upload. '
                             '(mode %s, offset %s)' % (whence, offset))
  def close(self):  # pylint: disable=invalid-name
    return self._orig_fp.close()
| mit |
jbedorf/tensorflow | tensorflow/python/distribute/multi_worker_util_test.py | 10 | 6775 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multi_worker_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.protobuf import cluster_pb2
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.eager import test
from tensorflow.python.training import server_lib
class NormalizeClusterSpecTest(test.TestCase):
  """Tests multi_worker_util.normalize_cluster_spec for each input type."""
  def assert_same_cluster(self, lhs, rhs):
    """Asserts two cluster definitions describe the same cluster."""
    self.assertEqual(
        server_lib.ClusterSpec(lhs).as_dict(),
        server_lib.ClusterSpec(rhs).as_dict())
  def testDictAsInput(self):
    cluster_spec = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
    }
    self.assert_same_cluster(
        cluster_spec, multi_worker_util.normalize_cluster_spec(cluster_spec))
  def testClusterDefAsInput(self):
    # Build an equivalent cluster as a ClusterDef proto.
    cluster_def = cluster_pb2.ClusterDef()
    job = cluster_def.job.add()
    job.name = "chief"
    job.tasks[0] = "127.0.0.1:1234"
    job = cluster_def.job.add()
    job.name = "worker"
    job.tasks[0] = "127.0.0.1:8964"
    job.tasks[1] = "127.0.0.1:2333"
    job = cluster_def.job.add()
    job.name = "ps"
    job.tasks[0] = "127.0.0.1:1926"
    job.tasks[1] = "127.0.0.1:3141"
    self.assert_same_cluster(
        cluster_def, multi_worker_util.normalize_cluster_spec(cluster_def))
  def testClusterSpecAsInput(self):
    cluster_spec = server_lib.ClusterSpec({
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
    })
    self.assert_same_cluster(
        cluster_spec, multi_worker_util.normalize_cluster_spec(cluster_spec))
  def testUnexpectedInput(self):
    # A bare list is not an accepted cluster description.
    cluster_spec = ["127.0.0.1:8964", "127.0.0.1:2333"]
    with self.assertRaisesRegexp(
        ValueError,
        "`cluster_spec' should be dict or a `tf.train.ClusterSpec` or a "
        "`tf.train.ClusterDef` object"):
      multi_worker_util.normalize_cluster_spec(cluster_spec)
class IsChiefTest(test.TestCase):
  """Tests multi_worker_util.is_chief with and without a chief job."""
  def testClusterWithChief(self):
    cluster_spec = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
    }
    self.assertTrue(multi_worker_util.is_chief(cluster_spec, "chief", 0))
    self.assertFalse(multi_worker_util.is_chief(cluster_spec, "worker", 0))
  def testClusterWithoutChief(self):
    # Without a chief job, worker 0 acts as chief.
    cluster_spec = {"worker": ["127.0.0.1:8964", "127.0.0.1:2333"]}
    self.assertTrue(multi_worker_util.is_chief(cluster_spec, "worker", 0))
    self.assertFalse(multi_worker_util.is_chief(cluster_spec, "worker", 1))
    with self.assertRaisesRegexp(
        ValueError, "`task_type` 'chief' not found in cluster_spec."):
      multi_worker_util.is_chief(cluster_spec, "chief", 0)
    with self.assertRaisesRegexp(
        ValueError, "The `task_id` 2 exceeds the maximum id of worker."):
      multi_worker_util.is_chief(cluster_spec, "worker", 2)
class NumWorkersTest(test.TestCase):
  """Tests for `multi_worker_util.worker_count`."""

  def testCountWorker(self):
    # "chief" and "worker" tasks are both counted as workers.
    cluster_spec = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
    }
    self.assertEqual(
        multi_worker_util.worker_count(cluster_spec, task_type="chief"), 3)
    self.assertEqual(
        multi_worker_util.worker_count(cluster_spec, task_type="worker"), 3)

  def testCountEvaluator(self):
    # An evaluator only counts itself, not chief/worker tasks.
    cluster_spec = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "evaluator": ["127.0.0.1:7566"]
    }
    self.assertEqual(
        multi_worker_util.worker_count(cluster_spec, task_type="evaluator"), 1)

  def testTaskTypeNotFound(self):
    # Asking about a task type missing from the cluster raises.
    cluster_spec = {}
    with self.assertRaisesRegexp(
        ValueError, "`task_type` 'worker' not found in cluster_spec."):
      multi_worker_util.worker_count(cluster_spec, task_type="worker")

  def testCountPs(self):
    cluster_spec = {
        "chief": ["127.0.0.1:1234"],
        "ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
    }
    # A "ps" job shouldn't call this method.
    with self.assertRaisesRegexp(ValueError, "Unexpected `task_type` 'ps'"):
      multi_worker_util.worker_count(cluster_spec, task_type="ps")
class IdInClusterTest(test.TestCase):
  """Tests for `multi_worker_util.id_in_cluster`."""

  def testChiefId(self):
    # The chief is always id 0.
    cluster_spec = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
    }
    self.assertEqual(
        multi_worker_util.id_in_cluster(cluster_spec, "chief", 0), 0)

  def testWorkerId(self):
    # With a chief present, worker ids are shifted by one.
    cluster_spec = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
    }
    self.assertEqual(
        multi_worker_util.id_in_cluster(cluster_spec, "worker", 1), 2)

    # Without a chief, worker ids map through unchanged.
    cluster_spec = {
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "ps": ["127.0.0.1:1926", "127.0.0.1:3141"]
    }
    self.assertEqual(
        multi_worker_util.id_in_cluster(cluster_spec, "worker", 1), 1)

  def testEvaluatorId(self):
    # The evaluator runs independently and always gets id 0.
    cluster_spec = {
        "chief": ["127.0.0.1:1234"],
        "worker": ["127.0.0.1:8964", "127.0.0.1:2333"],
        "evaluator": ["127.0.0.1:7566"]
    }
    self.assertEqual(
        multi_worker_util.id_in_cluster(cluster_spec, "evaluator", 0), 0)

  def testPsId(self):
    # "ps" tasks have no in-cluster id.
    cluster_spec = {"chief": ["127.0.0.1:1234"], "ps": ["127.0.0.1:7566"]}
    with self.assertRaisesRegexp(ValueError,
                                 "There is no id for task_type 'ps'"):
      multi_worker_util.id_in_cluster(cluster_spec, "ps", 0)

  def testMultipleChiefs(self):
    cluster_spec = {
        "chief": ["127.0.0.1:8258", "127.0.0.1:7566"],
    }
    with self.assertRaisesRegexp(ValueError,
                                 "There must be at most one 'chief' job."):
      multi_worker_util.id_in_cluster(cluster_spec, "chief", 0)
# Run all test cases when executed as a script.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
brianjgeiger/osf.io | osf/migrations/0077_add_noderequest_model.py | 18 | 4686 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-16 20:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import osf.models.base
import osf.utils.fields
class Migration(migrations.Migration):
    # Adds the NodeRequest / NodeRequestAction models and the
    # ``access_requests_enabled`` flag on AbstractNode.

    dependencies = [
        ('osf', '0076_action_rename'),
    ]

    operations = [
        migrations.CreateModel(
            name='NodeRequest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('_id', models.CharField(db_index=True, default=osf.models.base.generate_object_id, max_length=24, unique=True)),
                ('machine_state', models.CharField(choices=[('accepted', 'Accepted'), ('initial', 'Initial'), ('pending', 'Pending'), ('rejected', 'Rejected')], db_index=True, default='initial', max_length=15)),
                ('date_last_transitioned', models.DateTimeField(blank=True, db_index=True, null=True)),
                ('request_type', models.CharField(choices=[('access', 'Access')], max_length=31)),
                ('comment', models.TextField(blank=True, null=True)),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='submitted_requests', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='NodeRequestAction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('_id', models.CharField(db_index=True, default=osf.models.base.generate_object_id, max_length=24, unique=True)),
                ('trigger', models.CharField(choices=[('accept', 'Accept'), ('edit_comment', 'Edit_Comment'), ('reject', 'Reject'), ('submit', 'Submit')], max_length=31)),
                ('from_state', models.CharField(choices=[('accepted', 'Accepted'), ('initial', 'Initial'), ('pending', 'Pending'), ('rejected', 'Rejected')], max_length=31)),
                ('to_state', models.CharField(choices=[('accepted', 'Accepted'), ('initial', 'Initial'), ('pending', 'Pending'), ('rejected', 'Rejected')], max_length=31)),
                ('comment', models.TextField(blank=True)),
                ('is_deleted', models.BooleanField(default=False)),
                ('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
                ('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('target', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='actions', to='osf.NodeRequest')),
            ],
            options={
                'abstract': False,
            },
        ),
        # We add the access_requests_enabled field in two steps
        # 1. Add the field
        # 2. Adding a default value of True
        # This prevents an expensive table rewrite from locking the node table.
        migrations.AddField(
            model_name='abstractnode',
            name='access_requests_enabled',
            field=models.NullBooleanField(db_index=True),
        ),
        # Adding a default does not require a table rewrite
        migrations.RunSQL(
            [
                'ALTER TABLE "osf_abstractnode" ALTER COLUMN "access_requests_enabled" SET DEFAULT TRUE',
                'ALTER TABLE "osf_abstractnode" ALTER COLUMN "access_requests_enabled" DROP DEFAULT;',
            ],
            # state_operations keeps Django's in-memory model state in sync
            # with the raw SQL above without emitting more SQL itself.
            state_operations=[
                migrations.AlterField(
                    model_name='abstractnode',
                    name='access_requests_enabled',
                    field=models.NullBooleanField(default=True, db_index=True),
                )
            ],
        ),
        migrations.AddField(
            model_name='noderequest',
            name='target',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='requests', to='osf.AbstractNode'),
        ),
        migrations.AlterUniqueTogether(
            name='noderequest',
            unique_together=set([('target', 'creator')]),
        ),
    ]
| apache-2.0 |
saffsd/assignmentprint | assignmentprint.py | 1 | 15044 | """
Utility funtions and classes for preparing project marking bundles
for student assignments.
Marco Lui <saffsd@gmail.com>, November 2012
"""
import os, sys, csv, re
import tokenize, textwrap, token
import trace, threading
import imp
import contextlib
from cStringIO import StringIO
from pprint import pformat
import pep8
from collections import Sequence, Mapping, Sized
RE_FILENAME = re.compile(r'proj2-(?P<filename>\w+).py')
RE_DIRNAME = re.compile(r'proj2-(?P<dirname>\w+)')
def as_module(path, name='submitted'):
    """Exec the file at *path* as a fresh module, capturing its stdout.

    Returns (module, captured_stdout).  Any exception raised while the
    submission executes is re-raised as ImportError.  (Python 2 syntax.)
    """
    module = imp.new_module(name)
    with open(path) as f:
        try:
            # Suppress anything the submission prints at import time.
            sys.stdout = mystdout = StringIO()
            exec f in module.__dict__
        except Exception, e:
            raise ImportError, "import failed: '{0}'".format(e)
        finally:
            # Always restore the real stdout, even on failure.
            sys.stdout = sys.__stdout__
    return module, mystdout.getvalue()
def item2strs(item, max_lines=None):
output = pformat(item)
if max_lines is None or len(output.splitlines()) <= max_lines:
retval = output.splitlines()
else:
if isinstance(item, Mapping):
itemlen = len(item)
retval = ["<{0} of len {1}>".format(type(item),itemlen)]
for i in item.items()[:max_lines-2]:
retval.append(str(i))
retval.append(['... ({0} more items)'.format(itemlen-max_lines+2)])
elif isinstance(item, Sequence):
itemlen = len(item)
retval = ["<{0} of len {1}>".format(type(item),itemlen)]
for i in item[:max_lines-2]:
retval.append(str(i))
retval.append(['... ({0} more items)'.format(itemlen-max_lines+2)])
else:
retval = ["<item with repr len {0}>".format(len(repr(item)))]
# Add the item type to the start
retval[0] = "({0}) {1}".format(type(item), retval[0])
return retval
def split_comments(line):
    """Split *line* into (code_text, comment_text).

    Tokenizes the line and partitions COMMENT tokens from everything else,
    then untokenizes each side.  Only tokens from the first physical line
    are considered.  TokenError (e.g. an unterminated triple-quote that
    continues on a later line) is deliberately ignored - whatever was
    tokenized so far is used.
    """
    code = []
    noncode = []
    try:
        for tk in tokenize.generate_tokens(StringIO(line).readline):
            # Stop at the first token that starts beyond line 1.
            if tk[2][0] != 1:
                break
            if tk[0] == tokenize.COMMENT:
                noncode.append(tk[:2])
            else:
                code.append(tk)
    except tokenize.TokenError:
        pass
    retval = tokenize.untokenize(code).strip(), tokenize.untokenize(noncode).strip()
    #retval = ''.join(c[1] for c in code), ''.join(c[1] for c in noncode)
    return retval
def get_indent(code):
    """Return the leading-whitespace (INDENT) token of *code*, or ''.

    FIX: use the builtin next() instead of the Python-2-only
    generator method .next(), which keeps the code working under
    both Python 2.6+ and Python 3.
    """
    tokens = tokenize.generate_tokens(StringIO(code).readline)
    tk = next(tokens)
    indent = tk[1] if tk[0] == token.INDENT else ''
    return indent
def wrap_comment(line, width, add_indent=2):
    """
    This assumes that line contains a (potentially whitespace-indented)
    comment, and no actual code. It will assume anything before the
    comment marker is padding, and will maintain the indent level
    thereof.
    """
    code, comm = split_comments(line)
    indent = get_indent(line)
    if len(indent) > width // 2:
        # Comment starts really far right, we shift it
        # to start quarter way through the width.
        # BUG FIX: was "' ' * width/4", which parses as
        # "(' ' * width) / 4" and raises TypeError; the intent was
        # a quarter-width indent string.
        indent = ' ' * (width // 4)
    retval = textwrap.wrap(comm, width,
                           initial_indent=indent,
                           subsequent_indent=indent + '#' + ' ' * add_indent,
                           )
    return retval
def wrap_code(code, width, add_indent =' '):
    """
    Attempts to wrap a single line of code, respecting token
    boundaries.

    Each emitted chunk (except the last) ends in a backslash line
    continuation; continuation chunks get the original indent plus
    *add_indent*.
    """
    tokens = tokenize.generate_tokens(StringIO(code).readline)
    indent = get_indent(code)
    chunk_width = width - len(indent)
    chunk_start = 0
    chunk_end = 0
    chunks = []
    first_chunk = True
    try:
        for tk_type, tk_text, tk_start, tk_end, _ in tokens:
            # This wrapper only handles a single physical line.
            if tk_start[0] != tk_end[0]:
                raise ValueError, "token spanning multiple lines"
            tk_len = tk_end[1] - tk_start[1]
            if first_chunk:
                chunk_indent = '' # the indent is part of the tokens
            else:
                chunk_indent = indent + add_indent
            chunk_width = width - len(chunk_indent)
            if tk_end[1]-chunk_start >= chunk_width:
                # this token starts a new chunk; flush what we have with a
                # trailing line continuation.
                chunk = chunk_indent+code[chunk_start:chunk_end]+'\\'
                assert len(chunk) <= width
                chunks.append(chunk)
                chunk_start = tk_start[1]
                first_chunk = False
            chunk_end = tk_end[1]
            assert len(chunk_indent+code[chunk_start:chunk_end]+'\\') <= width
    except tokenize.TokenError:
        # unmatched somethingorother, we don't really care as it
        # may be matched on another line
        pass
    finally:
        # flush remaining chunk
        rest = code[chunk_start:]
        if len(rest) == 1:
            # if the token is only 1 character, it can replace the line continuation
            # NOTE(review): assumes at least one chunk was already flushed
            # (chunks[-1] would raise IndexError otherwise) - confirm callers
            # only hit this path for genuinely long lines.
            chunks[-1] = chunks[-1][:-1] + rest
        else:
            chunk = chunk_indent + rest
            assert len(chunk) <= width
            chunks.append(chunk)
    return chunks
def wrap_line(line, width):
    """
    Attempt to intelligently wrap Python code to width.

    Any comment on the line is emitted first (as its own wrapped comment
    block), followed by the wrapped code.
    """
    if len(line) <= width:
        # Already narrow enough - nothing to do.
        return [line]
    stripped = line.lstrip()
    pad = ' ' * (len(line) - len(stripped))
    code_part, comment_part = split_comments(stripped)
    if not code_part:
        if comment_part:
            # Comment-only line: wrap it as a comment block.
            return wrap_comment(line, width)
        # Neither code nor comment survived tokenization.
        return ['']
    wrapped = wrap_comment(pad + comment_part, width) if comment_part else []
    wrapped.extend(wrap_code(pad + code_part, width))
    return wrapped
def find_submission(path):
    """
    Tries to find a submission in a given path.

    Returns (username, submission_path) on success, else None.
    """
    if os.path.isdir(path):
        # Directory layout: proj2-<username>/<username>.py
        match = RE_DIRNAME.search(path)
        if match is None:
            return None
        username = match.group('dirname')
        candidate = username + '.py'
        if candidate in set(os.listdir(path)):
            return username, os.path.join(path, candidate)
    elif os.path.isfile(path):
        # Flat layout: proj2-<username>.py
        match = RE_FILENAME.search(path)
        if match is not None:
            return match.group('filename'), path
# from http://code.activestate.com/recipes/534166-redirectedio-context-manager-and-redirect_io-decor/
class RedirectedIO(object):
    """Context manager that temporarily redirects sys.stdout.

    *target* may be a filename (opened with *mode*), None (a fresh
    StringIO buffer), or an already-open writable file-like object.
    On exit stdout is restored and, if *close_target* is true, the
    target is closed.
    """

    def __init__(self, target=None, mode='a+', close_target=True):
        try:
            target = open(target, mode)
        except TypeError:
            # Not a path.  None means "use a fresh buffer"; anything
            # else is assumed to already be file-like and used as-is.
            if target is None:
                target = StringIO()
        self.target = target
        self.close_target = close_target

    def __enter__(self):
        """Swap sys.stdout for the target and return the target."""
        self.original_stdout = sys.stdout
        sys.stdout = self.target
        return self.target

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Restore sys.stdout and optionally close the target."""
        sys.stdout = self.original_stdout
        if self.close_target:
            self.target.close()
class ProjectPrinter(object):
    """
    This class wraps a file-like object and provides
    a series of methods for doing relevant output to
    it.
    """
    def __init__(self, target, pagewidth):
        # target: writable file-like object; pagewidth: max output width.
        self.target = target
        self.pagewidth = pagewidth

    def writeln(self, line='', wrap=False):
        """Write *line* (or each of its physical lines) filled to pagewidth."""
        if wrap:
            self.target.write(textwrap.fill(line, width=self.pagewidth) + '\n')
        else:
            # NOTE(review): ''.splitlines() is empty, so writeln('') emits
            # nothing on this branch - confirm that is intended.
            for l in line.splitlines():
                self.target.write(textwrap.fill(l, width=self.pagewidth) + '\n')

    def cwriteln(self, line):
        """
        Write a centered line
        """
        self.writeln("{0:^{1}}".format(line, self.pagewidth))

    def hr(self, symbol='#'):
        """Write a horizontal rule of *symbol* across the page."""
        if len(symbol) != 1:
            raise ValueError, "symbol must be a single character"
        self.writeln(symbol * self.pagewidth)

    def boxed_text(self, text, symbol='+', boxwidth=None, align='c', wrap=False):
        """Write *text* inside a box drawn with *symbol*.

        boxwidth defaults to the page width; a negative value is taken
        relative to the page width.  align is 'c', 'r' or left.
        """
        if boxwidth is None:
            boxwidth = self.pagewidth
        if boxwidth < 0:
            boxwidth = self.pagewidth + boxwidth
        if self.pagewidth < boxwidth:
            raise ValueError, "box wider than page"
        if len(symbol) != 1:
            raise ValueError, "symbol must be a single character"
        if isinstance(text, basestring):
            if wrap:
                lines = textwrap.wrap(text, width=boxwidth-2*(len(symbol)+1))
            else:
                lines = text.splitlines()
        else:
            lines = text
        self.cwriteln(symbol * boxwidth)
        for line in lines:
            if len(line) > boxwidth-2*(len(symbol)+1):
                # line too long!
                _lines = textwrap.wrap(line, width=boxwidth-2*(len(symbol)+1), subsequent_indent = ' ')
            else:
                _lines = [line]
            for _line in _lines:
                if align == 'c':
                    self.cwriteln('{0}{1:^{2}}{0}'.format(symbol, _line, boxwidth-2))
                elif align == 'r':
                    self.cwriteln('{0}{1:>{2}} {0}'.format(symbol, _line, boxwidth-3))
                else:
                    self.cwriteln('{0} {1:<{2}}{0}'.format(symbol, _line, boxwidth-3))
        self.cwriteln(symbol * boxwidth)

    def display_code(self, path):
        """
        Display code with intelligent wrapping
        """
        # Lines that fit are printed as "NNNN: code"; wrapped lines use
        # "NNNN* code" for the first chunk and indentation for the rest.
        with open(path) as f:
            for i, line in enumerate(f):
                if len(line) > self.pagewidth - 6:
                    # Line too wide. Need to cleverly wrap it.
                    #_line = line.lstrip()
                    #indent = len(line) - len(_line)
                    indent = get_indent(line)
                    code, comm = split_comments(line)
                    if code:
                        if comm:
                            # Emit the comment first, then the code.
                            for l in wrap_comment(line, self.pagewidth-6):
                                self.writeln(' {0}'.format(l))
                        clines = wrap_code(indent + code, self.pagewidth - 6)
                        self.writeln('{0:>4}* {1}'.format(i+1, clines[0]))
                        for l in clines[1:]:
                            self.writeln(' {0}'.format(l))
                    else:
                        # only comments on this line
                        c_wrap = wrap_comment(line, self.pagewidth-6)
                        if c_wrap:
                            self.writeln( '{0:>4}: {1}'.format(i+1, c_wrap[0]) )
                            for l in c_wrap[1:]:
                                self.writeln(' {0}'.format(l))
                    # NOTE(review): the triple-quoted block below is dead
                    # code kept from an earlier implementation.
                    """
                    # We splice out comments
                    try:
                        tokens = list(tokenize.generate_tokens(StringIO(line).readline))
                        comments = ''.join(t[1] for t in tokens if t[0] == tokenize.COMMENT)
                        noncomments = [ (t[0],t[1]) for t in tokens if t[0] != tokenize.COMMENT ]
                        ncline = tokenize.untokenize(noncomments).rstrip()
                    except tokenize.TokenError:
                        # This happens with unmatched things - in particular triplequote
                        # we just pretend the line had no comments in this case
                        comments = ''
                        ncline = line
                    if ncline.lstrip():
                        # More than comments on this line
                        # Write the comments first, followed by the code
                        if comments.strip():
                            lead_gap = len(ncline) - len(ncline.lstrip())
                            comments = ' '*lead_gap + comments
                            c_wrap = wrap_comment(comments, self.pagewidth-6)
                            self.writeln(' {0}'.format(c_wrap[0]))
                            for l in c_wrap[1:]:
                                self.writeln(' {0}'.format(l))
                        if (len(ncline) + 6) > self.pagewidth:
                            # code is too long, must break
                            #self.writeln('line:{0} tokens:{1}'.format(len(ncline), len(noncomments)))
                            try:
                                broken = wrap_code(ncline, self.pagewidth-6)
                            except tokenize.TokenError:
                                # Can't tokenize, so we just wrap this with the same wrapping used
                                # for noncode and hope for the best.
                                broken = wrap_comment(ncline, self.pagewidth-6)
                            self.writeln('{0:>4}* {1}'.format(i+1, broken[0]))
                            for l in broken[1:]:
                                self.writeln(' {0}'.format(l))
                        else:
                            self.writeln('{0:>4}: {1}'.format(i+1, ncline))
                    else:
                        # Only comments on this line
                        c_wrap = wrap_comment(line, self.pagewidth-6)
                        self.writeln( '{0:>4}: {1}'.format(i+1, c_wrap[0]) )
                        for l in c_wrap[1:]:
                            self.writeln(' {0}'.format(l))
                    """
                else:
                    # Line fits on page
                    self.writeln( '{0:>4}: {1}'.format(i+1, line.rstrip()) )

    def display_pep8(self, path, summary=True):
        """Run pep8 over *path* and write its report (and optional summary)."""
        pep8_out = StringIO()
        try:
            # Capture pep8's stdout into a buffer.
            with RedirectedIO(target=pep8_out, close_target=False):
                pep8.process_options([path])
                pep8.input_file(path)
                error_stats = pep8.get_error_statistics()
                warning_stats = pep8.get_warning_statistics()
            val = pep8_out.getvalue().splitlines()
            # Strip the leading "path:" prefix from each report line.
            for line in [ x.split(':',1)[1] for x in val if ':' in x]:
                self.writeln(line)
            if summary:
                self.writeln()
                self.writeln("Summary:")
                for e in error_stats:
                    self.writeln(e)
                for w in warning_stats:
                    self.writeln(w)
                self.writeln()
        except tokenize.TokenError:
            self.boxed_text(["PEP8 processing failed - check your source code"], symbol="#")
# adapted from http://code.activestate.com/recipes/473878/
# Raised by timeout() when the worker thread outlives timeout_duration.
class TimeOutExceeded(Exception): pass
class KThread(threading.Thread):
    """A subclass of threading.Thread, with a kill() method.

    kill() works by installing a settrace hook that raises SystemExit on
    the next traced line once self.killed is set.  The target's outcome
    is stored in self.result as (success, value_or_exc_info, stdout).
    """
    def __init__(self, *args, **keywords):
        threading.Thread.__init__(self, *args, **keywords)
        self.killed = False
        self.result = None

    def start(self):
        """Start the thread."""
        # Swap run() so the trace hook gets installed inside the thread.
        self.__run_backup = self.run
        self.run = self.__run # Force the Thread to install our trace.
        threading.Thread.start(self)

    def run(self):
        # TODO: Capture STDOUT, STDERR
        success = True
        outstream = StringIO()
        try:
            with RedirectedIO(target=outstream, close_target=False):
                # _Thread__* are the name-mangled private Thread attributes
                # holding the target callable and its arguments.
                val = self._Thread__target(*self._Thread__args, **self._Thread__kwargs)
        except Exception, e:
            val = sys.exc_info()
            success = False
        output = outstream.getvalue()
        self.result = success, val, output

    def __run(self):
        """Hacked run function, which installs the trace."""
        sys.settrace(self.globaltrace)
        self.__run_backup()
        self.run = self.__run_backup

    def globaltrace(self, frame, why, arg):
        # Install the per-line tracer on every function call.
        if why == 'call':
            return self.localtrace
        else:
            return None

    def localtrace(self, frame, why, arg):
        # Once killed, abort the thread at the next executed line.
        if self.killed:
            if why == 'line':
                raise SystemExit()
        return self.localtrace

    def kill(self):
        self.killed = True
def timeout(func, args=(), kwargs={}, timeout_duration=10, default=None):
    """This function will spawn a thread and run the given function
    using the args, kwargs and return the given default value if the
    timeout_duration is exceeded.

    Raises TimeOutExceeded if the call does not finish in time;
    otherwise returns the KThread result tuple (success, value, stdout).
    """
    # SECURITY: string args/kwargs are eval()'d - only safe because the
    # strings come from the marker's own test specifications, never from
    # untrusted submissions.
    if isinstance(args, basestring):
        args = eval(args)
    if isinstance(kwargs, basestring):
        kwargs = eval(kwargs)
    t = KThread(target=func, args=args, kwargs=kwargs)
    t.start()
    t.join(timeout_duration)
    if t.isAlive():
        # Still running: abort the thread and signal the timeout.
        t.kill()
        raise TimeOutExceeded()
    else:
        return t.result
@contextlib.contextmanager
def working_directory(path):
    """Temporarily chdir into *path*, restoring the previous cwd on exit.

    BUG FIX: the restore is now inside try/finally, so the original
    working directory is restored even when the with-body raises.
    """
    prev_cwd = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(prev_cwd)
| gpl-3.0 |
zerkrx/zerkbox | lib/aiohttp/abc.py | 19 | 2147 | import asyncio
import sys
from abc import ABC, abstractmethod
from collections.abc import Iterable, Sized
# True on Python 3.5+, where the ``await`` syntax / __await__ exist.
PY_35 = sys.version_info >= (3, 5)


class AbstractRouter(ABC):
    """Interface for routers: resolve a request to match info."""

    @asyncio.coroutine  # pragma: no branch
    @abstractmethod
    def resolve(self, request):
        """Return MATCH_INFO for given request"""
class AbstractMatchInfo(ABC):
    """Result of routing: the handler to run plus introspection data."""

    @asyncio.coroutine  # pragma: no branch
    @abstractmethod
    def handler(self, request):
        """Execute matched request handler"""

    @asyncio.coroutine  # pragma: no branch
    @abstractmethod
    def expect_handler(self, request):
        """Expect handler for 100-continue processing"""

    @property  # pragma: no branch
    @abstractmethod
    def http_exception(self):
        """HTTPException instance raised on router's resolving, or None"""

    @abstractmethod  # pragma: no branch
    def get_info(self):
        """Return a dict with additional info useful for introspection"""
class AbstractView(ABC):
    """Base class for class-based request handlers."""

    def __init__(self, request):
        # The request this view instance was created for.
        self._request = request

    @property
    def request(self):
        """The request bound to this view."""
        return self._request

    @asyncio.coroutine  # pragma: no branch
    @abstractmethod
    def __iter__(self):
        while False:  # pragma: no cover
            yield None

    if PY_35:  # pragma: no branch
        # __await__ only exists where the await syntax is available.
        @abstractmethod
        def __await__(self):
            return  # pragma: no cover
class AbstractResolver(ABC):
    """Interface for DNS resolvers."""

    @asyncio.coroutine  # pragma: no branch
    @abstractmethod
    def resolve(self, hostname):
        """Return IP address for given hostname"""

    @asyncio.coroutine  # pragma: no branch
    @abstractmethod
    def close(self):
        """Release resolver"""
class AbstractCookieJar(Sized, Iterable):
    """Interface for cookie storage: sized, iterable, URL-filterable."""

    def __init__(self, *, loop=None):
        # Fall back to the running event loop when none is supplied.
        self._loop = loop or asyncio.get_event_loop()

    @abstractmethod
    def clear(self):
        """Clear all cookies."""

    @abstractmethod
    def update_cookies(self, cookies, response_url=None):
        """Update cookies."""

    @abstractmethod
    def filter_cookies(self, request_url):
        """Return the jar's cookies filtered by their attributes."""
pasberth/autojump | tools/autojump_ipython.py | 29 | 1181 | # This module was contributed by Mario Pastorelli <pastorelli.mario@gmail.com>
# It is released in the public domain
# This tool provides "j" for ipython
# To use it, copy it in your ~/.ipython directory
# and add the following line to ipy_user_conf.py:
# import autojump_ipython
import os
import subprocess as sub
from IPython.ipapi import get
from IPython.iplib import InteractiveShell
ip = get()
def magic_j(self,parameter_s=''):
    """IPython magic %j: cd to the directory autojump suggests."""
    cmd = ['autojump']+parameter_s.split()
    # print 'executing autojump with args %s' % str(cmd)
    newpath=sub.Popen(cmd,stdout=sub.PIPE,shell=False).communicate()[0][:-1] # delete last '\n'
    # print 'Autojump answer: \'%s\'' % newpath
    if newpath:
        # Change directory through IPython so its own state stays in sync.
        ip.magic('cd \'%s\'' % newpath)
def cd_decorator(f):
    """Wrap the %cd magic so every directory change is recorded by autojump."""
    def autojump_cd_monitor(self,parameter_s=''):
        f(self,parameter_s)
        # Tell autojump about the directory we just changed into.
        sub.call(['autojump','-a',os.getcwd()])
    return autojump_cd_monitor
# Add the new magic function to the class dict and decorate magic_cd:
InteractiveShell.magic_j = magic_j
InteractiveShell.magic_cd = cd_decorator(InteractiveShell.magic_cd)
# And remove the global name to keep global namespace clean.
del magic_j
del cd_decorator
| gpl-3.0 |
artificialnull/IshanBoot | aliasbot.py | 1 | 9126 | #!/usr/bin/python3
import requests
import json
import os
import time
import random as rand
import subprocess
#telegram bot stuff
url = "https://api.telegram.org/bot%s/%s"  # filled with (token, method)
token = open("token.txt").read().replace('\n', '')
print(url % (token, "getUpdates"))
path = os.path.dirname(__file__)
#globals
locked = []    # alias names only the privileged uid may change
aliases = {}   # alias name -> expansion text
commands = {}  # filled in below: command prefix -> handler function
chat_id = 0    # chat targeted by sendMessage(); set per update
SCH_CHID = -1001032618176  # schedule chat (reminders, forwarding source)
LOG_CHID = -1001098108881  # log chat (forwarding target; users kicked)
#requests stuff
ConnectionError = requests.exceptions.ConnectionError
def isCommand(text, command):
    """Return True when *text* starts with *command*."""
    return text[:len(command)] == command
def stripCommand(text, command):
    """Return *text* with *command* and the following separator char removed."""
    return text[len(command) + 1:]
def getUpdates():
    """Long-poll Telegram for new updates.

    Returns (updates, error_flag).  Advances getUpdates.offset past the
    newest update so each update is delivered once.
    """
    try:
        r = requests.get(
            url % (token, "getUpdates"),
            data={"offset": getUpdates.offset},
            timeout=60
        )
        try:
            r = json.loads(r.text)
        except ValueError:
            # BUG FIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit.  json.loads raises ValueError
            # (JSONDecodeError) on malformed payloads.
            print("Loading error while getting updates")
            return [], True
        r = r['result']
        if len(r) > 0:
            # Acknowledge everything up to the newest update.
            getUpdates.offset = int(r[-1]['update_id']) + 1
    except ConnectionError:
        print("Connection error while getting updates")
        return [], True
    return r, False
getUpdates.offset = 0
def sendMessage(message, reply_id=False, markdown=True):
    """Send *message* to the current global chat_id.

    Retries once without Markdown if Telegram rejects the parse.
    Returns True on failure, False on success.
    """
    payload = {
        "chat_id": chat_id,
        "text": message,
        "parse_mode": "Markdown",
        "disable_web_page_preview": True
    }
    if reply_id:
        payload['reply_to_message_id'] = reply_id
    if not markdown:
        del payload['parse_mode']
    try:
        tresponse = requests.post(
            url % (token, "sendMessage"),
            data=payload,
            timeout=2
        )
        resp = json.loads(tresponse.text)
        if not resp["ok"]:
            # Markdown parse failures come back ok=False; retry plain.
            return sendMessage(message, reply_id, False)
    except Exception:
        # BUG FIX: was "except KeyboardInterrupt: raise" plus a bare
        # "except:".  Catching Exception lets KeyboardInterrupt (a
        # BaseException) propagate naturally and stops hiding SystemExit.
        print("Connection error while sending message")
        return True
    return False
def loadAliases():
    """Load the alias mapping from aliases.json next to this script.

    FIX: use a with-statement so the file handle is closed (the old
    code leaked it).
    """
    with open(path + "/aliases.json") as aliasFile:
        return json.load(aliasFile)
def saveAliases():
    """Write the global alias mapping back to aliases.json.

    FIX: use a with-statement so the handle is closed even if the
    write fails part-way.
    """
    with open(path + "/aliases.json", "w") as aliasFile:
        aliasFile.write(json.dumps(aliases, indent=4))
def loadLocked():
    """Read locked.txt (one locked alias name per line), skipping blanks.

    FIX: use a with-statement so the file handle is closed (the old
    code leaked it).
    """
    with open(path + "/locked.txt") as lfile:
        return [line for line in lfile.read().split('\n') if line != '']
def logMessage(message):
    """Append each text message to logfile.txt as 'user/mesg/chid' lines."""
    baseLM = "user: %s ; mesg: %s ; chid: %s\n"
    if 'text' in message.keys():
        filledLM = baseLM % (message['from']['first_name'],
                             message['text'],
                             message['chat']['id'])
        logfile = open(path + "/logfile.txt", "a")
        logfile.write(filledLM)
        logfile.close()
        if message['chat']['id'] == SCH_CHID:
            # NOTE(review): forwarding to the log chat is currently
            # disabled (commented out below), so this payload is unused.
            payload = {
                'chat_id': LOG_CHID,
                'from_chat_id': SCH_CHID,
                'disable_notification': True,
                'message_id': message['message_id']
            }
            # try:
            #     tresponse = requests.post(url % (token, "forwardMessage"),
            #         data=payload, timeout=2)
            # except:
            #     return
def alias(content, uid):
    """Handle /alias: *content* is '<name> = <value>'.

    Everything after the first '=' becomes the value; the name is
    trimmed of surrounding spaces and inner spaces become underscores.
    """
    alias = content.split('=')[0]
    # NOTE(review): raises IndexError when the name part is empty or all
    # spaces; currently absorbed by the main loop's blanket handler.
    while alias[0] == ' ':
        alias = alias[1:]
    while alias[-1] == ' ':
        alias = alias[:-1]
    alias = alias.replace(' ', '_')
    value = '='.join(content.split('=')[1:])
    if len(alias.split()) == 1:
        # 204403520 is a hard-coded privileged user id that may edit
        # locked aliases.
        if alias not in locked or uid == 204403520:
            aliases[alias] = value
            print("alias " + alias + "=" + value + " by " + name)
            saveAliases()
            sendMessage("Aliased " + alias + " to " + value, message_id)
        else:
            print("cannot unlock alias")
            sendMessage("Alias is locked, sorry", message_id)
    else:
        print("alias malformed")
        sendMessage("Alias must be a single term", message_id)
def unalias(content, uid):
    """Handle /unalias: clear the expansion of an existing, unlocked alias."""
    alias = content
    if alias not in locked:
        # Only clears the value; the key stays in the mapping.
        if len(alias.split()) == 1 and alias in aliases.keys():
            aliases[alias] = ''
            print("del " + alias)
            saveAliases()
            sendMessage("Unaliased " + alias, message_id)
        else:
            print("unalias malformed")
            sendMessage("Invalid alias", message_id)
    else:
        print("cannot unlock alias")
        sendMessage("Alias is locked, sorry", message_id)
def random(content, uid):
    """Post a randomly chosen alias and its expansion."""
    randomAlias = rand.choice(list(aliases.keys()))
    randomAliasStr = "/%s = %s" % (randomAlias, aliases[randomAlias])
    print(randomAliasStr)
    sendMessage(randomAliasStr)

def uptime(content, uid):
    """Report the host's `uptime` output as monospace text."""
    sendMessage('`' + subprocess.Popen('uptime', stdout=subprocess.PIPE).communicate()[0].decode("utf-8") + '`')

def welp(content, uid):
    sendMessage("gg")

def rip(content, uid):
    # Random canned reply.
    response = rand.choice(["me", "rip is right", "rip is me"])
    sendMessage(response)

def amirite(content, uid):
    # Agrees with probability 1 in 10.
    if rand.randint(1, 10) == 4:
        response = "yep"
    else:
        response = "¬_¬"
    sendMessage(response)

def remind(content, uid):
    """Post the schedule reminder into the schedule chat."""
    global chat_id
    chat_id = SCH_CHID
    sendMessage("heres your periodic schedule reminder!!!\n" + aliases["schedule"])

def newdaypb(content, uid):
    sendMessage(aliases["newdaypb"])

def queue(content, uid):
    # Replies only ~20% of the time.
    print("cue")
    if rand.randint(1, 10) < 3:
        print("Q")
        sendMessage("u wot m8", message_id)

def stan(content, uid):
    sendMessage('no', message_id)
# Dispatch table: message prefix -> handler(content, uid).
commands = {
    '/alias': alias,
    '/unalias': unalias,
    '/random': random,
    '/time': uptime,
    'w/elp': welp,
    '/rip': rip,
    '/amirite': amirite,
    '/remindme': remind,
    '/newdaypb': newdaypb,
    '/q@IshanBot': queue,
    'stan': stan,
    'hi stan': stan
}
if __name__ == "__main__":
    aliases = loadAliases()
    locked = loadLocked()
    print("Started")
    # Drain the pending backlog so old messages are not re-processed.
    loffset = getUpdates.offset - 1
    while getUpdates.offset != loffset:
        loffset = getUpdates.offset
        getUpdates()
        print("Updated to:", getUpdates.offset)

# Main poll loop.
while __name__ == "__main__":
    try:
        r, err = getUpdates()
        if len(r) != 0 and not err:
            print("received updates")
        elif err:
            time.sleep(1)
        for update in r:
            message = update.get('message')
            if message == None:
                continue
            logMessage(message)
            message_id = message['message_id']
            print(message_id)
            chat = message['chat']
            chat_id = chat['id']
            user = message.get('from')
            name = "@" + user.get('username')
            # NOTE(review): the None check below can never fire - name is
            # always the result of "@" + ...; the username should be
            # checked *before* concatenation.
            if name == None:
                name = user.get('first_name')
            uid = user['id']
            if chat_id == LOG_CHID:
                # Anyone posting in the log chat gets kicked.
                try:
                    payload = {
                        'chat_id': LOG_CHID,
                        'user_id': uid
                    }
                    requests.post(
                        url % (token, "kickChatMember"),
                        data=payload,
                        timeout=2
                    )
                    continue
                except ConnectionError:
                    pass
            text = message.get('text', ' ')
            found = False
            # First try the explicit command table.
            for command in commands.keys():
                if isCommand(text, command):
                    content = stripCommand(text, command)
                    found = True
                    commands[command](content, uid)
            if found:
                continue
            # Otherwise expand any /alias terms in the message.
            if "/" in text:
                terms = text.split()
                response = ''
                for term in terms:
                    if '/' == term[0]:
                        alias = ''
                        if '@' in term and term[1:].split('@')[-1] == "IshanBot":
                            alias = term[1:].split('@')[0]
                        else:
                            alias = term[1:]
                        """
                        for key in aliases.keys():
                            if 'legendary' in aliases[key]:
                                print(key)
                                print([ord(c) for c in key])
                                print([ord(c) for c in alias])
                                print(alias == key)
                        """
                        response += aliases.get(alias, '')
                if response != '':
                    sendMessage(response + ' ' + name)
    except KeyboardInterrupt:
        # Ctrl-C opens a tiny operator menu instead of exiting outright.
        print("Control menu:\n 0 - Quit\n 1 - Reload locks")
        choice = int(input("> "))
        if choice == 1:
            locked = loadLocked()
        else:
            saveAliases()
            raise SystemExit
    except BaseException as e:
        # NOTE(review): blanket keep-alive handler; it also swallows
        # SystemExit raised anywhere but the menu above - confirm intended.
        print(str(e))
fnouama/intellij-community | python/helpers/pydev/tests/test_jyserver.py | 54 | 5760 | '''
@author Fabio Zadrozny
'''
import sys
import unittest
import socket
import urllib
# Running under Jython when the platform string mentions java.
IS_JYTHON = sys.platform.find('java') != -1

if IS_JYTHON:
    import os
    #make it as if we were executing from the directory above this one (so that we can use jycompletionserver
    #without the need for it being in the pythonpath)
    sys.argv[0] = os.path.dirname(sys.argv[0])
    #twice the dirname to get the previous level from this file.
    sys.path.insert(1, os.path.join(os.path.dirname(sys.argv[0])))
    import pycompletionserver as jycompletionserver

# Verbose tracing toggle for dbg().
DEBUG = 0
def dbg(s):
    """Print trace output, but only when DEBUG is enabled."""
    if DEBUG:
        sys.stdout.write('TEST %s\n' % s)
class Test(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
def tearDown(self):
unittest.TestCase.tearDown(self)
def testIt(self):
if not IS_JYTHON:
return
dbg('ok')
def testMessage(self):
if not IS_JYTHON:
return
t = jycompletionserver.T(0)
t.exit_process_on_kill = False
l = []
l.append(('Def', 'description' , 'args'))
l.append(('Def1', 'description1', 'args1'))
l.append(('Def2', 'description2', 'args2'))
msg = t.processor.formatCompletionMessage('test_jyserver.py', l)
self.assertEquals('@@COMPLETIONS(test_jyserver.py,(Def,description,args),(Def1,description1,args1),(Def2,description2,args2))END@@', msg)
l = []
l.append(('Def', 'desc,,r,,i()ption', ''))
l.append(('Def(1', 'descriptio(n1', ''))
l.append(('De,f)2', 'de,s,c,ription2', ''))
msg = t.processor.formatCompletionMessage(None, l)
expected = '@@COMPLETIONS(None,(Def,desc%2C%2Cr%2C%2Ci%28%29ption, ),(Def%281,descriptio%28n1, ),(De%2Cf%292,de%2Cs%2Cc%2Cription2, ))END@@'
self.assertEquals(expected, msg)
def testCompletionSocketsAndMessages(self):
if not IS_JYTHON:
return
dbg('testCompletionSocketsAndMessages')
t, socket = self.createConnections()
self.socket = socket
dbg('connections created')
try:
#now that we have the connections all set up, check the code completion messages.
msg = urllib.quote_plus('math')
toWrite = '@@IMPORTS:%sEND@@' % msg
dbg('writing' + str(toWrite))
socket.send(toWrite) #math completions
completions = self.readMsg()
dbg(urllib.unquote_plus(completions))
start = '@@COMPLETIONS('
self.assert_(completions.startswith(start), '%s DOESNT START WITH %s' % (completions, start))
self.assert_(completions.find('@@COMPLETIONS') != -1)
self.assert_(completions.find('END@@') != -1)
msg = urllib.quote_plus('__builtin__.str')
toWrite = '@@IMPORTS:%sEND@@' % msg
dbg('writing' + str(toWrite))
socket.send(toWrite) #math completions
completions = self.readMsg()
dbg(urllib.unquote_plus(completions))
start = '@@COMPLETIONS('
self.assert_(completions.startswith(start), '%s DOESNT START WITH %s' % (completions, start))
self.assert_(completions.find('@@COMPLETIONS') != -1)
self.assert_(completions.find('END@@') != -1)
finally:
try:
self.sendKillMsg(socket)
while not t.ended:
pass #wait until it receives the message and quits.
socket.close()
except:
pass
def createConnections(self, p1=50001):
    '''
    Creates the connections needed for testing.

    Starts the completion-server thread on port ``p1`` and accepts the
    connection it opens back to this process (presumably the server
    connects out to ``p1`` -- TODO confirm against jycompletionserver).

    :return: (t, sock) -- the server thread and the accepted socket.
    '''
    t = jycompletionserver.T(p1)
    t.exit_process_on_kill = False
    t.start()

    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((jycompletionserver.HOST, p1))
    server.listen(1)  # one pending connection is enough for the test
    sock, _addr = server.accept()
    return t, sock
def readMsg(self):
    # Read one complete protocol message from self.socket.
    # '@@PROCESSING...' status notifications are consumed (and logged)
    # until a real payload arrives; the payload is then accumulated until
    # its 'END@@' terminator has been received.
    msg = '@@PROCESSING_END@@'  # sentinel so the first loop always recv()s
    while msg.startswith('@@PROCESSING'):
        msg = self.socket.recv(1024)
        if msg.startswith('@@PROCESSING:'):
            dbg('Status msg:' + str(msg))

    while msg.find('END@@') == -1:
        msg += self.socket.recv(1024)
    return msg
def sendKillMsg(self, socket):
    # Ask the completion server to shut itself down.
    # NOTE(review): the 'socket' parameter shadows the socket module.
    socket.send(jycompletionserver.MSG_KILL_SERVER)
#"C:\Program Files\Java\jdk1.5.0_04\bin\java.exe" -Dpython.path="C:\bin\jython21\Lib";"C:\bin\jython21";"C:\Program Files\Java\jdk1.5.0_04\jre\lib\rt.jar" -classpath C:/bin/jython21/jython.jar org.python.util.jython D:\eclipse_workspace\org.python.pydev\pysrc\pycompletionserver.py 53795 58659
#
#"C:\Program Files\Java\jdk1.5.0_04\bin\java.exe" -Dpython.path="C:\bin\jython21\Lib";"C:\bin\jython21";"C:\Program Files\Java\jdk1.5.0_04\jre\lib\rt.jar" -classpath C:/bin/jython21/jython.jar org.python.util.jython D:\eclipse_workspace\org.python.pydev\pysrc\tests\test_jyserver.py
#
#"C:\Program Files\Java\jdk1.5.0_04\bin\java.exe" -Dpython.path="C:\bin\jython21\Lib";"C:\bin\jython21";"C:\Program Files\Java\jdk1.5.0_04\jre\lib\rt.jar" -classpath C:/bin/jython21/jython.jar org.python.util.jython d:\runtime-workbench-workspace\jython_test\src\test.py
if __name__ == '__main__':
    # The suite only makes sense on Jython; elsewhere just say why we skip.
    if IS_JYTHON:
        unittest.TextTestRunner(verbosity=1).run(unittest.makeSuite(Test))
    else:
        sys.stdout.write('Not running jython tests for non-java platform: %s' % sys.platform)
| apache-2.0 |
persepolisdm/translation-API | pdm_api/views/default.py | 1 | 1257 | from pyramid.response import Response
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPForbidden
from pyramid import request
from sqlalchemy.exc import DBAPIError
from ..models.mymodel import MyModel, request_log, access_log, banlist
from ..settings import get_settings
import datetime
@view_config(route_name='home', renderer='../templates/mytemplate.jinja2')
def my_view(request):
    """Home view: fetch the MyModel row named 'one'.

    Returns the template context dict, or a plain-text 500 response when
    the database cannot be reached.
    """
    try:
        one = (request.dbsession.query(MyModel)
               .filter(MyModel.name == 'one')
               .first())
    except DBAPIError:
        return Response(db_err_msg, content_type='text/plain', status=500)
    return {'one': one, 'project': 'pdm_api'}
# Plain-text body served with a 500 status by my_view when the database
# is unreachable.  Runtime string: content kept verbatim.
db_err_msg = """\
Pyramid is having a problem using your SQL database. The problem
might be caused by one of the following things:
1. You may need to run the "initialize_pdm_api_db" script
to initialize your database tables. Check your virtual
environment's "bin" directory for this script and try to run it.
2. Your database server may not be running. Check that the
database server referred to by the "sqlalchemy.url" setting in
your "development.ini" file is running.
After you fix the problem, please restart the Pyramid application to
try it again.
"""
| gpl-3.0 |
AevumDecessus/fragforce.org | ffdonations/migrations/0004_auto_20181004_1921.py | 3 | 1156 | # Generated by Django 2.1.2 on 2018-10-04 23:21
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
    """Set the ``raw`` JSONField (default=dict, null=True) on four models.

    NOTE: auto-generated by Django's makemigrations -- avoid hand-editing.
    """

    dependencies = [
        # Must be applied after the previous auto-migration of this app.
        ('ffdonations', '0003_auto_20181004_1916'),
    ]

    operations = [
        migrations.AlterField(
            model_name='donationmodel',
            name='raw',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict, null=True, verbose_name='Raw Data'),
        ),
        migrations.AlterField(
            model_name='donormodel',
            name='raw',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict, null=True, verbose_name='Raw Data'),
        ),
        migrations.AlterField(
            model_name='participantmodel',
            name='raw',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict, null=True, verbose_name='Raw Data'),
        ),
        migrations.AlterField(
            model_name='teammodel',
            name='raw',
            field=django.contrib.postgres.fields.jsonb.JSONField(default=dict, null=True, verbose_name='Raw Data'),
        ),
    ]
| gpl-2.0 |
incaser/odoo-odoo | addons/account_analytic_plans/report/__init__.py | 445 | 1084 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crossovered_analytic
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
AbhilashReddyM/GeometricMultigrid | mgd3d.py | 1 | 4921 | """
2017 (c) A. R. Malipeddi
3D geometric multigrid code for poissons equation in a cube.
- Finite difference method
- 7pt operator
- trilinear interpolation
- Two-color Gauss Seidel smoothing
"""
import numpy as np
def _apply_bcs(u):
    # Fill the ghost layer so the solution is anti-symmetric across each
    # face, i.e. homogeneous Dirichlet conditions at the cell faces.
    # (Was duplicated twice inside GSrelax; factored out for consistency.)
    u[0, :, :] = -u[1, :, :]
    u[-1, :, :] = -u[-2, :, :]
    u[:, 0, :] = -u[:, 1, :]
    u[:, -1, :] = -u[:, -2, :]
    u[:, :, 0] = -u[:, :, 1]
    u[:, :, -1] = -u[:, :, -2]


def GSrelax(nx, ny, nz, u, f, iters=1, flag=1):
    '''
    Red-Black Gauss Seidel smoothing for the 7-point Poisson operator.

    Parameters
    ----------
    nx, ny, nz : number of interior cells per direction
    u          : ndarray (nx+2, ny+2, nz+2), solution incl. ghost layer
                 (updated in place and also returned)
    f          : ndarray, same shape, right-hand side
    iters      : number of full red/black sweeps
    flag : 1 = pre-sweep  (residual is computed and returned)
           2 = post-sweep (residual is skipped; None is returned)

    Returns (u, res) where res is None when flag == 2.
    '''
    dx = 1.0 / nx
    dy = 1.0 / ny
    dz = 1.0 / nz

    Ax = 1.0 / dx**2
    Ay = 1.0 / dy**2
    Az = 1.0 / dz**2
    Ap = 1.0 / (2.0 * (1.0 / dx**2 + 1.0 / dy**2 + 1.0 / dz**2))

    _apply_bcs(u)

    for it in range(iters):
        c = 0
        for _ in [1, 2]:  # two half-sweeps: one per checkerboard color
            for i in range(1, nx + 1):
                cs = c
                for j in range(1, ny + 1):
                    # k starts at 1+c so only cells of the current color
                    # (parity alternating with i and j) are touched.
                    for k in range(1 + c, nz + 1, 2):
                        u[i, j, k] = Ap * (Ax * (u[i + 1, j, k] + u[i - 1, j, k])
                                           + Ay * (u[i, j + 1, k] + u[i, j - 1, k])
                                           + Az * (u[i, j, k + 1] + u[i, j, k - 1])
                                           - f[i, j, k])
                    c = 1 - c   # flip starting parity for the next j-row
                c = 1 - cs      # flip starting parity for the next i-plane
            c = 1               # second half-sweep starts on the other color
        _apply_bcs(u)

    # Residual not needed for a post-sweep.
    if flag == 2:
        return u, None

    # Residual of the interior cells.  Vectorized replacement of the former
    # O(nx*ny*nz) Python triple loop: each element is computed by the same
    # expression tree as before, so the result is bit-identical.
    res = np.zeros([nx + 2, ny + 2, nz + 2])
    res[1:-1, 1:-1, 1:-1] = (f[1:-1, 1:-1, 1:-1]
                             - (Ax * (u[2:, 1:-1, 1:-1] + u[:-2, 1:-1, 1:-1])
                                + Ay * (u[1:-1, 2:, 1:-1] + u[1:-1, :-2, 1:-1])
                                + Az * (u[1:-1, 1:-1, 2:] + u[1:-1, 1:-1, :-2])
                                - 2.0 * (Ax + Ay + Az) * u[1:-1, 1:-1, 1:-1]))
    return u, res
def restrict(nx, ny, nz, v):
    '''
    Restrict 'v' to the coarser grid by full weighting: each coarse cell is
    the average of the 2x2x2 block of fine cells it covers.

    v is the fine-grid array (2*nx+2, 2*ny+2, 2*nz+2) incl. ghost layer;
    returns the coarse array (nx+2, ny+2, nz+2) with a zero ghost layer.

    Vectorized replacement of the former O(nx*ny*nz) Python triple loop;
    the eight corners are summed in the original order, so the result is
    bit-identical.
    '''
    v_c = np.zeros([nx + 2, ny + 2, nz + 2])
    # Odd/even fine-grid indices 2*i-1 and 2*i for i = 1..n in each direction.
    xo, xe = slice(1, 2 * nx, 2), slice(2, 2 * nx + 1, 2)
    yo, ye = slice(1, 2 * ny, 2), slice(2, 2 * ny + 1, 2)
    zo, ze = slice(1, 2 * nz, 2), slice(2, 2 * nz + 1, 2)
    v_c[1:nx + 1, 1:ny + 1, 1:nz + 1] = 0.125 * (
        v[xo, yo, zo] + v[xe, yo, zo] + v[xo, ye, zo] + v[xe, ye, zo]
        + v[xo, yo, ze] + v[xe, yo, ze] + v[xo, ye, ze] + v[xe, ye, ze])
    return v_c
def prolong(nx, ny, nz, v):
    '''
    Interpolate the coarse-grid correction 'v' (nx+2, ny+2, nz+2) to the
    fine grid (2*nx+2, 2*ny+2, 2*nz+2) by trilinear interpolation.

    Each coarse cell spawns 8 fine cells; every fine value blends the
    parent cell with its 7 nearest coarse neighbours using the standard
    trilinear weights 27/64, 9/64, 3/64, 1/64 (sum over the 8 taps = 1).
    '''
    v_f = np.zeros([2 * nx + 2, 2 * ny + 2, 2 * nz + 2])

    a = 27.0 / 64  # parent cell
    b = 9.0 / 64   # face neighbours
    c = 3.0 / 64   # edge neighbours
    d = 1.0 / 64   # corner neighbour
    for i in range(1, nx + 1):
        for j in range(1, ny + 1):
            for k in range(1, nz + 1):
                # The sign pattern of the neighbour offsets (i±1, j±1, k±1)
                # selects the coarse octant nearest to each fine child.
                v_f[2*i-1, 2*j-1, 2*k-1] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j-1,k] + v[i,j,k-1]) + c*(v[i-1,j-1,k] + v[i-1,j,k-1] + v[i,j-1,k-1]) + d*v[i-1,j-1,k-1]
                v_f[2*i  , 2*j-1, 2*k-1] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j-1,k] + v[i,j,k-1]) + c*(v[i+1,j-1,k] + v[i+1,j,k-1] + v[i,j-1,k-1]) + d*v[i+1,j-1,k-1]
                v_f[2*i-1, 2*j  , 2*k-1] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j+1,k] + v[i,j,k-1]) + c*(v[i-1,j+1,k] + v[i-1,j,k-1] + v[i,j+1,k-1]) + d*v[i-1,j+1,k-1]
                v_f[2*i  , 2*j  , 2*k-1] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j+1,k] + v[i,j,k-1]) + c*(v[i+1,j+1,k] + v[i+1,j,k-1] + v[i,j+1,k-1]) + d*v[i+1,j+1,k-1]
                v_f[2*i-1, 2*j-1, 2*k  ] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j-1,k] + v[i,j,k+1]) + c*(v[i-1,j-1,k] + v[i-1,j,k+1] + v[i,j-1,k+1]) + d*v[i-1,j-1,k+1]
                v_f[2*i  , 2*j-1, 2*k  ] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j-1,k] + v[i,j,k+1]) + c*(v[i+1,j-1,k] + v[i+1,j,k+1] + v[i,j-1,k+1]) + d*v[i+1,j-1,k+1]
                v_f[2*i-1, 2*j  , 2*k  ] = a*v[i,j,k] + b*(v[i-1,j,k] + v[i,j+1,k] + v[i,j,k+1]) + c*(v[i-1,j+1,k] + v[i-1,j,k+1] + v[i,j+1,k+1]) + d*v[i-1,j+1,k+1]
                v_f[2*i  , 2*j  , 2*k  ] = a*v[i,j,k] + b*(v[i+1,j,k] + v[i,j+1,k] + v[i,j,k+1]) + c*(v[i+1,j+1,k] + v[i+1,j,k+1] + v[i,j+1,k+1]) + d*v[i+1,j+1,k+1]
    return v_f
def V_cycle(nx, ny, nz, num_levels, u, f, level=1):
    '''
    One multigrid V-cycle: pre-smooth, coarse-grid correction (recursive),
    prolong the correction back, post-smooth.
    '''
    if level == num_levels:
        # Coarsest grid: "solve" by many smoothing sweeps.
        return GSrelax(nx, ny, nz, u, f, iters=100)

    cnx, cny, cnz = nx // 2, ny // 2, nz // 2

    # Step 1: relax Au=f on this grid (also yields the residual).
    u, res = GSrelax(nx, ny, nz, u, f, 2)

    # Steps 2-3: restrict the residual and solve the error equation
    # A e_c = res_c on the coarser grid, recursively.
    coarse_rhs = restrict(cnx, cny, cnz, res)
    err = np.zeros_like(coarse_rhs)
    err, _ = V_cycle(cnx, cny, cnz, num_levels, err, coarse_rhs, level + 1)

    # Step 4: interpolate (prolong) the correction and apply it.
    u += prolong(cnx, cny, cnz, err)

    # Step 5: post-smooth; the residual is only needed at the finest level.
    post_flag = 1 if level == 1 else 2
    u, res = GSrelax(nx, ny, nz, u, f, 2, flag=post_flag)
    return u, res
def FMG(nx, ny, nz, num_levels, f, nv=1, level=1):
    '''
    Full Multigrid: build a good initial guess by solving the problem on
    coarser grids first, then polish with 'nv' V-cycles per level.
    NOTE(review): assumes nv >= 1 (with nv=0 'res' would be unbound,
    exactly as in the original).
    '''
    if level == num_levels:
        # Coarsest grid: start from zero and smooth heavily.
        guess = np.zeros([nx + 2, ny + 2, nz + 2])
        return GSrelax(nx, ny, nz, guess, f, iters=100)

    cnx, cny, cnz = nx // 2, ny // 2, nz // 2

    # Solve the restricted problem on the coarser hierarchy first...
    coarse_f = restrict(cnx, cny, cnz, f)
    coarse_u, _ = FMG(cnx, cny, cnz, num_levels, coarse_f, nv, level + 1)

    # ...then use its interpolant as the initial guess on this grid.
    u = prolong(cnx, cny, cnz, coarse_u)

    for _ in range(nv):
        u, res = V_cycle(nx, ny, nz, num_levels - level, u, f)
    return u, res
| mit |
raildo/nova | nova/scheduler/filters/affinity_filter.py | 56 | 4923 | # Copyright 2012, Piston Cloud Computing, Inc.
# Copyright 2012, OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netaddr
from oslo_log import log as logging
from nova.scheduler import filters
from nova.scheduler.filters import utils
LOG = logging.getLogger(__name__)
class DifferentHostFilter(filters.BaseHostFilter):
    """Schedule the instance on a different host from a set of instances."""

    # The hosts the instances are running on don't change within a request.
    run_filter_once_per_request = True

    def host_passes(self, host_state, filter_properties):
        hints = filter_properties.get('scheduler_hints') or {}
        excluded_uuids = hints.get('different_host', [])
        if not excluded_uuids:
            # No 'different_host' hint: nothing to avoid.
            return True
        return not utils.instance_uuids_overlap(host_state, excluded_uuids)
class SameHostFilter(filters.BaseHostFilter):
    """Schedule the instance on the same host as another instance in a set
    of instances.
    """

    # The hosts the instances are running on don't change within a request.
    run_filter_once_per_request = True

    def host_passes(self, host_state, filter_properties):
        hints = filter_properties.get('scheduler_hints') or {}
        wanted_uuids = hints.get('same_host', [])
        if not wanted_uuids or not host_state.instances:
            # No 'same_host' hint, or the host has no instances: don't filter.
            return True
        return utils.instance_uuids_overlap(host_state, wanted_uuids)
class SimpleCIDRAffinityFilter(filters.BaseHostFilter):
    """Schedule the instance on a host with a particular cidr."""

    # The address of a host doesn't change within a request.
    run_filter_once_per_request = True

    def host_passes(self, host_state, filter_properties):
        hints = filter_properties.get('scheduler_hints') or {}
        cidr_suffix = hints.get('cidr', '/24')
        near_host_ip = hints.get('build_near_host_ip')
        if not near_host_ip:
            # We don't have an affinity host address: accept the host.
            return True
        affinity_net = netaddr.IPNetwork(near_host_ip + cidr_suffix)
        return netaddr.IPAddress(host_state.host_ip) in affinity_net
class _GroupAntiAffinityFilter(filters.BaseHostFilter):
    """Schedule the instance on a different host from a set of group
    hosts.

    Base class: concrete subclasses must set ``self.policy_name``.
    """

    def host_passes(self, host_state, filter_properties):
        # Only invoke the filter if 'anti-affinity' is configured
        policies = filter_properties.get('group_policies', [])
        if self.policy_name not in policies:
            return True

        # `or []` also normalizes an explicit None value to an empty list.
        group_hosts = filter_properties.get('group_hosts') or []
        LOG.debug("Group anti affinity: check if %(host)s not "
                  "in %(configured)s", {'host': host_state.host,
                                        'configured': group_hosts})
        if group_hosts:
            return host_state.host not in group_hosts

        # No groups configured
        return True
class ServerGroupAntiAffinityFilter(_GroupAntiAffinityFilter):
    # Concrete filter for the server-group 'anti-affinity' policy.
    def __init__(self):
        self.policy_name = 'anti-affinity'
        super(ServerGroupAntiAffinityFilter, self).__init__()
class _GroupAffinityFilter(filters.BaseHostFilter):
    """Schedule the instance on to host from a set of group hosts.

    Base class: concrete subclasses must set ``self.policy_name``.
    """

    def host_passes(self, host_state, filter_properties):
        # Only invoke the filter if 'affinity' is configured
        policies = filter_properties.get('group_policies', [])
        if self.policy_name not in policies:
            return True

        # Consistency fix: use `.get(...) or []` (like the anti-affinity
        # sibling above) so an explicit None value is normalized to [].
        group_hosts = filter_properties.get('group_hosts') or []
        LOG.debug("Group affinity: check if %(host)s in "
                  "%(configured)s", {'host': host_state.host,
                                     'configured': group_hosts})
        if group_hosts:
            return host_state.host in group_hosts

        # No groups configured
        return True
class ServerGroupAffinityFilter(_GroupAffinityFilter):
    # Concrete filter for the server-group 'affinity' policy.
    def __init__(self):
        self.policy_name = 'affinity'
        super(ServerGroupAffinityFilter, self).__init__()
| apache-2.0 |
kevin8909/xjerp | openerp/addons/pad_project/__openerp__.py | 119 | 1478 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Addon manifest: a bare dict literal read by the OpenERP module loader.
{
    'name': 'Pad on tasks',
    'version': '1.0',
    'category': 'Project Management',
    # Runtime string shown in the addon description -- kept verbatim.
    'description': """
This module adds a PAD in all project kanban views.
===================================================
""",
    'author': 'OpenERP SA',
    'website': 'http://www.openerp.com',
    # Needs both the project and pad addons.
    'depends': ['project', 'pad'],
    'data': ['project_task.xml'],
    'demo': [],
    'installable': True,
    # Installed automatically once all dependencies are present.
    'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Infernoman/Sembro-Token | contrib/bitrpc/bitrpc.py | 2348 | 7835 | from jsonrpc import ServiceProxy
import sys
import string

# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======

# NOTE(review): Python 2 script (print statements / raw_input throughout).
# Connect to the local bitcoind JSON-RPC endpoint; credentials, when given,
# are embedded in the URL.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:8332")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8332")

# First CLI argument selects the RPC command dispatched by the if/elif
# chain below.
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" | mit |
a40423239/2017springcd_hw | plugin/liquid_tags/test_notebook.py | 311 | 3042 | import re
from pelican.tests.support import unittest
from . import notebook
class TestNotebookTagRegex(unittest.TestCase):
    """Tests for notebook.FORMAT, the liquid-tag markup regex.

    Each case feeds a raw tag string through the regex and checks the
    captured src / cell-range / language groups.
    """

    def get_argdict(self, markup):
        # Run the plugin regex and unpack its named groups; returns None
        # when the markup does not match at all.  NOTE(review): callers
        # unpack 4 values, so a non-match surfaces as a TypeError.
        match = notebook.FORMAT.search(markup)
        if match:
            argdict = match.groupdict()
            src = argdict['src']
            start = argdict['start']
            end = argdict['end']
            language = argdict['language']
            return src, start, end, language
        return None

    def test_basic_notebook_tag(self):
        # Plain path: no cell range, no language.
        markup = u'path/to/thing.ipynb'
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertIsNone(start)
        self.assertIsNone(end)
        self.assertIsNone(language)

    def test_basic_notebook_tag_insensitive_to_whitespace(self):
        # Leading/trailing whitespace around the markup must be ignored.
        markup = u'   path/to/thing.ipynb '
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertIsNone(start)
        self.assertIsNone(end)
        self.assertIsNone(language)

    def test_notebook_tag_with_cells(self):
        # cells[a:b] yields the start/end groups as strings.
        markup = u'path/to/thing.ipynb cells[1:5]'
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertEqual(start, u'1')
        self.assertEqual(end, u'5')
        self.assertIsNone(language)

    def test_notebook_tag_with_alphanumeric_language(self):
        markup = u'path/to/thing.ipynb language[python3]'
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertIsNone(start)
        self.assertIsNone(end)
        self.assertEqual(language, u'python3')

    def test_notebook_tag_with_symbol_in_name_language(self):
        # Pygments short names may contain +, - and similar symbols.
        for short_name in [u'c++', u'cpp-objdump', u'c++-objdumb', u'cxx-objdump']:
            markup = u'path/to/thing.ipynb language[{}]'.format(short_name)
            src, start, end, language = self.get_argdict(markup)
            self.assertEqual(src, u'path/to/thing.ipynb')
            self.assertIsNone(start)
            self.assertIsNone(end)
            self.assertEqual(language, short_name)

    def test_notebook_tag_with_language_and_cells(self):
        markup = u'path/to/thing.ipynb cells[1:5] language[julia]'
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertEqual(start, u'1')
        self.assertEqual(end, u'5')
        self.assertEqual(language, u'julia')

    def test_notebook_tag_with_language_and_cells_and_weird_spaces(self):
        markup = u'   path/to/thing.ipynb   cells[1:5]    language[julia]    '
        src, start, end, language = self.get_argdict(markup)
        self.assertEqual(src, u'path/to/thing.ipynb')
        self.assertEqual(start, u'1')
        self.assertEqual(end, u'5')
        self.assertEqual(language, u'julia')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
ol-loginov/intellij-community | python/testData/MockSdk2.7/python_stubs/__builtin__.py | 40 | 174842 | # encoding: utf-8
# module __builtin__
# from (built-in)
# by generator 1.136
from __future__ import print_function
"""
Built-in functions, exceptions, and other objects.
Noteworthy: None is the `nil' object; Ellipsis represents `...' in slices.
"""
# imports
from exceptions import (ArithmeticError, AssertionError, AttributeError,
BaseException, BufferError, BytesWarning, DeprecationWarning, EOFError,
EnvironmentError, Exception, FloatingPointError, FutureWarning,
GeneratorExit, IOError, ImportError, ImportWarning, IndentationError,
IndexError, KeyError, KeyboardInterrupt, LookupError, MemoryError,
NameError, NotImplementedError, OSError, OverflowError,
PendingDeprecationWarning, ReferenceError, RuntimeError, RuntimeWarning,
StandardError, StopIteration, SyntaxError, SyntaxWarning, SystemError,
SystemExit, TabError, TypeError, UnboundLocalError, UnicodeDecodeError,
UnicodeEncodeError, UnicodeError, UnicodeTranslateError, UnicodeWarning,
UserWarning, ValueError, Warning, ZeroDivisionError)
# Variables with simple values
False = False
None = object() # real value of type <type 'NoneType'> replaced
True = True
__debug__ = True
# functions
def abs(number): # real signature unknown; restored from __doc__
    """
    abs(number) -> number
    Return the absolute value of the argument.
    """
    # Auto-generated IDE skeleton stub for the builtin: the body is a
    # placeholder; the real implementation lives in the interpreter.
    return 0
def all(iterable): # real signature unknown; restored from __doc__
"""
all(iterable) -> bool
Return True if bool(x) is True for all values x in the iterable.
If the iterable is empty, return True.
"""
return False
def any(iterable): # real signature unknown; restored from __doc__
"""
any(iterable) -> bool
Return True if bool(x) is True for any x in the iterable.
If the iterable is empty, return False.
"""
return False
def apply(p_object, args=None, kwargs=None): # real signature unknown; restored from __doc__
"""
apply(object[, args[, kwargs]]) -> value
Call a callable object with positional arguments taken from the tuple args,
and keyword arguments taken from the optional dictionary kwargs.
Note that classes are callable, as are instances with a __call__() method.
Deprecated since release 2.3. Instead, use the extended call syntax:
function(*args, **keywords).
"""
pass
def bin(number): # real signature unknown; restored from __doc__
"""
bin(number) -> string
Return the binary representation of an integer or long integer.
"""
return ""
def callable(p_object): # real signature unknown; restored from __doc__
"""
callable(object) -> bool
Return whether the object is callable (i.e., some kind of function).
Note that classes are callable, as are instances with a __call__() method.
"""
return False
def chr(i): # real signature unknown; restored from __doc__
"""
chr(i) -> character
Return a string of one character with ordinal i; 0 <= i < 256.
"""
return ""
def cmp(x, y): # real signature unknown; restored from __doc__
"""
cmp(x, y) -> integer
Return negative if x<y, zero if x==y, positive if x>y.
"""
return 0
def coerce(x, y): # real signature unknown; restored from __doc__
"""
coerce(x, y) -> (x1, y1)
Return a tuple consisting of the two numeric arguments converted to
a common type, using the same rules as used by arithmetic operations.
If coercion is not possible, raise TypeError.
"""
pass
def compile(source, filename, mode, flags=None, dont_inherit=None): # real signature unknown; restored from __doc__
"""
compile(source, filename, mode[, flags[, dont_inherit]]) -> code object
Compile the source string (a Python module, statement or expression)
into a code object that can be executed by the exec statement or eval().
The filename will be used for run-time error messages.
The mode must be 'exec' to compile a module, 'single' to compile a
single (interactive) statement, or 'eval' to compile an expression.
The flags argument, if present, controls which future statements influence
the compilation of the code.
The dont_inherit argument, if non-zero, stops the compilation inheriting
the effects of any future statements in effect in the code calling
compile; if absent or zero these statements do influence the compilation,
in addition to any features explicitly specified.
"""
pass
def copyright(*args, **kwargs): # real signature unknown
"""
interactive prompt objects for printing the license text, a list of
contributors and the copyright notice.
"""
pass
def credits(*args, **kwargs): # real signature unknown
"""
interactive prompt objects for printing the license text, a list of
contributors and the copyright notice.
"""
pass
def delattr(p_object, name): # real signature unknown; restored from __doc__
"""
delattr(object, name)
Delete a named attribute on an object; delattr(x, 'y') is equivalent to
``del x.y''.
"""
pass
def dir(p_object=None): # real signature unknown; restored from __doc__
"""
dir([object]) -> list of strings
If called without an argument, return the names in the current scope.
Else, return an alphabetized list of names comprising (some of) the attributes
of the given object, and of attributes reachable from it.
If the object supplies a method named __dir__, it will be used; otherwise
the default dir() logic is used and returns:
for a module object: the module's attributes.
for a class object: its attributes, and recursively the attributes
of its bases.
for any other object: its attributes, its class's attributes, and
recursively the attributes of its class's base classes.
"""
return []
def divmod(x, y): # known case of __builtin__.divmod
    """
    divmod(x, y) -> (quotient, remainder)
    Return the tuple ((x-x%y)/y, x%y).  Invariant: div*y + mod == x.
    """
    # Auto-generated IDE skeleton stub for the builtin: the body is a
    # placeholder; the real implementation lives in the interpreter.
    return (0, 0)
def eval(source, globals=None, locals=None): # real signature unknown; restored from __doc__
"""
eval(source[, globals[, locals]]) -> value
Evaluate the source in the context of globals and locals.
The source may be a string representing a Python expression
or a code object as returned by compile().
The globals must be a dictionary and locals can be any mapping,
defaulting to the current globals and locals.
If only globals is given, locals defaults to it.
"""
pass
def execfile(filename, globals=None, locals=None): # real signature unknown; restored from __doc__
"""
execfile(filename[, globals[, locals]])
Read and execute a Python script from a file.
The globals and locals are dictionaries, defaulting to the current
globals and locals. If only globals is given, locals defaults to it.
"""
pass
def exit(*args, **kwargs): # real signature unknown
pass
def filter(function_or_none, sequence): # known special case of filter
    """
    filter(function or None, sequence) -> list, tuple, or string
    Return those items of sequence for which function(item) is true. If
    function is None, return the items that are true. If sequence is a tuple
    or string, return the same type, else return a list.
    """
    # Stub: no-op skeleton body.
    pass
def format(value, format_spec=None): # real signature unknown; restored from __doc__
    """
    format(value[, format_spec]) -> string
    Returns value.__format__(format_spec)
    format_spec defaults to ""
    """
    # Stub: placeholder return value.
    return ""
def getattr(object, name, default=None): # known special case of getattr
    """
    getattr(object, name[, default]) -> value
    Get a named attribute from an object; getattr(x, 'y') is equivalent to x.y.
    When a default argument is given, it is returned when the attribute doesn't
    exist; without it, an exception is raised in that case.
    """
    # Stub: no-op skeleton body.
    pass
def globals(): # real signature unknown; restored from __doc__
    """
    globals() -> dictionary
    Return the dictionary containing the current scope's global variables.
    """
    # Stub: placeholder return value.
    return {}
def hasattr(p_object, name): # real signature unknown; restored from __doc__
    """
    hasattr(object, name) -> bool
    Return whether the object has an attribute with the given name.
    (This is done by calling getattr(object, name) and catching exceptions.)
    """
    # Stub: placeholder return value.
    return False
def hash(p_object): # real signature unknown; restored from __doc__
    """
    hash(object) -> integer
    Return a hash value for the object. Two objects with the same value have
    the same hash value. The reverse is not necessarily true, but likely.
    """
    # Stub: placeholder return value.
    return 0
def help(with_a_twist): # real signature unknown; restored from __doc__
    """
    Define the built-in 'help'.
    This is a wrapper around pydoc.help (with a twist).
    """
    # Stub: no-op skeleton body.
    pass
def hex(number): # real signature unknown; restored from __doc__
    """
    hex(number) -> string
    Return the hexadecimal representation of an integer or long integer.
    """
    # Stub: placeholder return value.
    return ""
def id(p_object): # real signature unknown; restored from __doc__
    """
    id(object) -> integer
    Return the identity of an object. This is guaranteed to be unique among
    simultaneously existing objects. (Hint: it's the object's memory address.)
    """
    # Stub: placeholder return value.
    return 0
def input(prompt=None): # real signature unknown; restored from __doc__
    """
    input([prompt]) -> value
    Equivalent to eval(raw_input(prompt)).
    """
    # Stub: no-op skeleton body (Python 2 semantics: eval of raw_input).
    pass
def intern(string): # real signature unknown; restored from __doc__
    """
    intern(string) -> string
    ``Intern'' the given string. This enters the string in the (global)
    table of interned strings whose purpose is to speed up dictionary lookups.
    Return the string itself or the previously interned string object with the
    same value.
    """
    # Stub: placeholder return value (moved to sys.intern in Python 3).
    return ""
def isinstance(p_object, class_or_type_or_tuple): # real signature unknown; restored from __doc__
    """
    isinstance(object, class-or-type-or-tuple) -> bool
    Return whether an object is an instance of a class or of a subclass thereof.
    With a type as second argument, return whether that is the object's type.
    The form using a tuple, isinstance(x, (A, B, ...)), is a shortcut for
    isinstance(x, A) or isinstance(x, B) or ... (etc.).
    """
    # Stub: placeholder return value.
    return False
def issubclass(C, B): # real signature unknown; restored from __doc__
    """
    issubclass(C, B) -> bool
    Return whether class C is a subclass (i.e., a derived class) of class B.
    When using a tuple as the second argument issubclass(X, (A, B, ...)),
    is a shortcut for issubclass(X, A) or issubclass(X, B) or ... (etc.).
    """
    # Stub: placeholder return value.
    return False
def iter(source, sentinel=None): # known special case of iter
    """
    iter(collection) -> iterator
    iter(callable, sentinel) -> iterator
    Get an iterator from an object. In the first form, the argument must
    supply its own iterator, or be a sequence.
    In the second form, the callable is called until it returns the sentinel.
    """
    # Stub: no-op skeleton body.
    pass
def len(p_object): # real signature unknown; restored from __doc__
    """
    len(object) -> integer
    Return the number of items of a sequence or mapping.
    """
    # Stub: placeholder return value.
    return 0
def license(*args, **kwargs): # real signature unknown
    """
    interactive prompt objects for printing the license text, a list of
    contributors and the copyright notice.
    """
    # Stub: no-op skeleton body (site builtin).
    pass
def locals(): # real signature unknown; restored from __doc__
    """
    locals() -> dictionary
    Update and return a dictionary containing the current scope's local variables.
    """
    # Stub: placeholder return value.
    return {}
def map(function, sequence, *sequence_1): # real signature unknown; restored from __doc__
    """
    map(function, sequence[, sequence, ...]) -> list
    Return a list of the results of applying the function to the items of
    the argument sequence(s). If more than one sequence is given, the
    function is called with an argument list consisting of the corresponding
    item of each sequence, substituting None for missing values when not all
    sequences have the same length. If the function is None, return a list of
    the items of the sequence (or a list of tuples if more than one sequence).
    """
    # Stub: placeholder return value (documents Python 2 list-returning map).
    return []
def max(*args, **kwargs): # known special case of max
    """
    max(iterable[, key=func]) -> value
    max(a, b, c, ...[, key=func]) -> value
    With a single iterable argument, return its largest item.
    With two or more arguments, return the largest argument.
    """
    # Stub: no-op skeleton body.
    pass
def min(*args, **kwargs): # known special case of min
    """
    min(iterable[, key=func]) -> value
    min(a, b, c, ...[, key=func]) -> value
    With a single iterable argument, return its smallest item.
    With two or more arguments, return the smallest argument.
    """
    # Stub: no-op skeleton body.
    pass
def next(iterator, default=None): # real signature unknown; restored from __doc__
    """
    next(iterator[, default])
    Return the next item from the iterator. If default is given and the iterator
    is exhausted, it is returned instead of raising StopIteration.
    """
    # Stub: no-op skeleton body.
    pass
def oct(number): # real signature unknown; restored from __doc__
    """
    oct(number) -> string
    Return the octal representation of an integer or long integer.
    """
    # Stub: placeholder return value.
    return ""
def open(name, mode=None, buffering=None): # real signature unknown; restored from __doc__
    """
    open(name[, mode[, buffering]]) -> file object
    Open a file using the file() type, returns a file object. This is the
    preferred way to open a file. See file.__doc__ for further information.
    """
    # NOTE(review): `file` is the Python 2 builtin type; it is not defined in
    # Python 3 and is presumably mocked elsewhere in this skeleton — verify
    # the `file` class exists in this module before relying on this stub.
    return file('/dev/null')
def ord(c): # real signature unknown; restored from __doc__
    """
    ord(c) -> integer
    Return the integer ordinal of a one-character string.
    """
    # Stub: placeholder return value.
    return 0
def pow(x, y, z=None): # real signature unknown; restored from __doc__
    """
    pow(x, y[, z]) -> number
    With two arguments, equivalent to x**y. With three arguments,
    equivalent to (x**y) % z, but may be more efficient (e.g. for longs).
    """
    # Stub: placeholder return value.
    return 0
def print(*args, **kwargs): # known special case of print
    """
    print(value, ..., sep=' ', end='\n', file=sys.stdout)
    Prints the values to a stream, or to sys.stdout by default.
    Optional keyword arguments:
    file: a file-like object (stream); defaults to the current sys.stdout.
    sep: string inserted between values, default a space.
    end: string appended after the last value, default a newline.
    """
    # Stub: no-op skeleton body.
    pass
def quit(*args, **kwargs): # real signature unknown
    """Stub for the site builtin ``quit`` (interactive interpreter quitter)."""
    pass
def range(start=None, stop=None, step=None): # known special case of range
    """
    range(stop) -> list of integers
    range(start, stop[, step]) -> list of integers
    Return a list containing an arithmetic progression of integers.
    range(i, j) returns [i, i+1, i+2, ..., j-1]; start (!) defaults to 0.
    When step is given, it specifies the increment (or decrement).
    For example, range(4) returns [0, 1, 2, 3]. The end point is omitted!
    These are exactly the valid indices for a list of 4 elements.
    """
    # Stub: no-op skeleton body.
    pass
def raw_input(prompt=None): # real signature unknown; restored from __doc__
    """
    raw_input([prompt]) -> string
    Read a string from standard input. The trailing newline is stripped.
    If the user hits EOF (Unix: Ctl-D, Windows: Ctl-Z+Return), raise EOFError.
    On Unix, GNU readline is used if enabled. The prompt string, if given,
    is printed without a trailing newline before reading.
    """
    # Stub: placeholder return value (renamed to input() in Python 3).
    return ""
def reduce(function, sequence, initial=None): # real signature unknown; restored from __doc__
    """
    reduce(function, sequence[, initial]) -> value
    Apply a function of two arguments cumulatively to the items of a sequence,
    from left to right, so as to reduce the sequence to a single value.
    For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates
    ((((1+2)+3)+4)+5). If initial is present, it is placed before the items
    of the sequence in the calculation, and serves as a default when the
    sequence is empty.
    """
    # Stub: no-op skeleton body (moved to functools.reduce in Python 3).
    pass
def reload(module): # real signature unknown; restored from __doc__
    """
    reload(module) -> module
    Reload the module. The module must have been successfully imported before.
    """
    # Stub: no-op skeleton body (moved to importlib.reload in Python 3).
    pass
def repr(p_object): # real signature unknown; restored from __doc__
    """
    repr(object) -> string
    Return the canonical string representation of the object.
    For most object types, eval(repr(object)) == object.
    """
    # Stub: placeholder return value.
    return ""
def round(number, ndigits=None): # real signature unknown; restored from __doc__
    """
    round(number[, ndigits]) -> floating point number
    Round a number to a given precision in decimal digits (default 0 digits).
    This always returns a floating point number. Precision may be negative.
    """
    # Stub: placeholder return value (documents Python 2 float-returning round).
    return 0.0
def setattr(p_object, name, value): # real signature unknown; restored from __doc__
    """
    setattr(object, name, value)
    Set a named attribute on an object; setattr(x, 'y', v) is equivalent to
    ``x.y = v''.
    """
    # Stub: no-op skeleton body.
    pass
def sorted(iterable, cmp=None, key=None, reverse=False): # real signature unknown; restored from __doc__
    """ sorted(iterable, cmp=None, key=None, reverse=False) --> new sorted list """
    # Stub: no-op skeleton body (the `cmp` parameter is Python-2-only).
    pass
def sum(sequence, start=None): # real signature unknown; restored from __doc__
    """
    sum(sequence[, start]) -> value
    Return the sum of a sequence of numbers (NOT strings) plus the value
    of parameter 'start' (which defaults to 0). When the sequence is
    empty, return start.
    """
    # Stub: no-op skeleton body.
    pass
def unichr(i): # real signature unknown; restored from __doc__
    """
    unichr(i) -> Unicode character
    Return a Unicode string of one character with ordinal i; 0 <= i <= 0x10ffff.
    """
    # Stub: placeholder return value (Python 2 builtin; chr() in Python 3).
    return u""
def vars(p_object=None): # real signature unknown; restored from __doc__
    """
    vars([object]) -> dictionary
    Without arguments, equivalent to locals().
    With an argument, equivalent to object.__dict__.
    """
    # Stub: placeholder return value.
    return {}
def zip(seq1, seq2, *more_seqs): # known special case of zip
    """
    zip(seq1 [, seq2 [...]]) -> [(seq1[0], seq2[0] ...), (...)]
    Return a list of tuples, where each tuple contains the i-th element
    from each of the argument sequences. The returned list is truncated
    in length to the length of the shortest argument sequence.
    """
    # Stub: no-op skeleton body.
    pass
def __import__(name, globals={}, locals={}, fromlist=[], level=-1): # real signature unknown; restored from __doc__
    # NOTE: the mutable default arguments mirror the documented CPython
    # signature of __import__; they are never mutated in this stub.
    """
    __import__(name, globals={}, locals={}, fromlist=[], level=-1) -> module
    Import a module. Because this function is meant for use by the Python
    interpreter and not for general use it is better to use
    importlib.import_module() to programmatically import a module.
    The globals argument is only used to determine the context;
    they are not modified. The locals argument is unused. The fromlist
    should be a list of names to emulate ``from name import ...'', or an
    empty list to emulate ``import name''.
    When importing a module from a package, note that __import__('A.B', ...)
    returns package A when fromlist is empty, but its submodule B when
    fromlist is not empty. Level is used to determine whether to perform
    absolute or relative imports. -1 is the original strategy of attempting
    both absolute and relative imports, 0 is absolute, a positive number
    is the number of parent directories to search relative to the current module.
    """
    # Stub: no-op skeleton body.
    pass
# classes
class ___Classobj:
    '''A mock class representing the old style class base.'''
    # Skeleton placeholders for the attributes every old-style class carries.
    __module__ = ''
    __class__ = None
    def __init__(self):
        # Stub constructor: no state to initialize.
        pass
    __dict__ = {}
    __doc__ = ''
class __generator(object):
    '''A mock class representing the generator function type.'''
    def __init__(self):
        # Skeleton placeholders for the generator introspection attributes.
        self.gi_code = None
        self.gi_frame = None
        self.gi_running = 0
    def __iter__(self):
        '''Defined to support iteration over container.'''
        pass
    def next(self):
        '''Return the next item from the container.'''
        pass
    def close(self):
        '''Raises new GeneratorExit exception inside the generator to terminate the iteration.'''
        pass
    def send(self, value):
        '''Resumes the generator and "sends" a value that becomes the result of the current yield-expression.'''
        pass
    def throw(self, type, value=None, traceback=None):
        '''Used to raise an exception inside the generator.'''
        pass
class __function(object):
    '''A mock class representing function type.'''
    def __init__(self):
        # Standard function metadata.
        self.__name__ = ''
        self.__doc__ = ''
        self.__dict__ = ''
        self.__module__ = ''
        # Python 2 function attributes (func_*).
        self.func_defaults = {}
        self.func_globals = {}
        self.func_closure = None
        self.func_code = None
        self.func_name = ''
        self.func_doc = ''
        self.func_dict = ''
        # Python 3 dunder aliases of the same attributes.
        self.__defaults__ = {}
        self.__globals__ = {}
        self.__closure__ = None
        self.__code__ = None
        self.__name__ = ''
class __method(object):
    '''A mock class representing method type.'''
    def __init__(self):
        # Python 2 bound-method attributes.
        self.im_class = None
        self.im_self = None
        self.im_func = None
        # Python 3 dunder aliases.
        self.__func__ = None
        self.__self__ = None
class __namedtuple(tuple):
    '''A mock base class for named tuples.'''
    __slots__ = ()
    _fields = ()
    def __new__(cls, *args, **kwargs):
        'Create a new instance of the named tuple.'
        return tuple.__new__(cls, *args)
    @classmethod
    def _make(cls, iterable, new=tuple.__new__, len=len):
        'Make a new named tuple object from a sequence or iterable.'
        return new(cls, iterable)
    def __repr__(self):
        # Stub: placeholder representation.
        return ''
    def _asdict(self):
        'Return a new dict which maps field types to their values.'
        return {}
    def _replace(self, **kwargs):
        'Return a new named tuple object replacing specified fields with new values.'
        return self
    def __getnewargs__(self):
        # Support copy/pickle by returning the constructor arguments.
        return tuple(self)
class object:
    """ The most base type """
    # Skeleton of the universal base class; every method body is a stub.
    def __delattr__(self, name): # real signature unknown; restored from __doc__
        """ x.__delattr__('name') <==> del x.name """
        pass
    def __format__(self, *args, **kwargs): # real signature unknown
        """ default object formatter """
        pass
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __hash__(self): # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass
    def __init__(self): # known special case of object.__init__
        """ x.__init__(...) initializes x; see help(type(x)) for signature """
        pass
    @staticmethod # known case of __new__
    def __new__(cls, *more): # known special case of object.__new__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __reduce_ex__(self, *args, **kwargs): # real signature unknown
        """ helper for pickle """
        pass
    def __reduce__(self, *args, **kwargs): # real signature unknown
        """ helper for pickle """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __setattr__(self, name, value): # real signature unknown; restored from __doc__
        """ x.__setattr__('name', value) <==> x.name = value """
        pass
    def __sizeof__(self): # real signature unknown; restored from __doc__
        """
        __sizeof__() -> int
        size of object in memory, in bytes
        """
        return 0
    def __str__(self): # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass
    @classmethod # known case
    def __subclasshook__(cls, subclass): # known special case of object.__subclasshook__
        """
        Abstract classes can override this to customize issubclass().
        This is invoked early on by abc.ABCMeta.__subclasscheck__().
        It should return True, False or NotImplemented. If it returns
        NotImplemented, the normal algorithm is used. Otherwise, it
        overrides the normal algorithm (and the outcome is cached).
        """
        pass
    # Class-level attribute placeholders.
    __class__ = None # (!) forward: type, real value is ''
    __dict__ = {}
    __doc__ = ''
    __module__ = ''
class basestring(object):
    """ Type basestring cannot be instantiated; it is the base for str and unicode. """
    # Python 2 abstract string base; removed in Python 3.
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
class int(object):
    """
    int(x=0) -> int or long
    int(x, base=10) -> int or long
    Convert a number or string to an integer, or return 0 if no arguments
    are given. If x is floating point, the conversion truncates towards zero.
    If x is outside the integer range, the function returns a long instead.
    If x is not a number or if base is given, then x must be a string or
    Unicode object representing an integer literal in the given base. The
    literal can be preceded by '+' or '-' and be surrounded by whitespace.
    The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to
    interpret the base from the string as an integer literal.
    >>> int('0b100', base=0)
    4
    """
    # Skeleton of the builtin int; every operator method below is a stub whose
    # docstring states the expression it implements.
    def bit_length(self): # real signature unknown; restored from __doc__
        """
        int.bit_length() -> int
        Number of bits necessary to represent self in binary.
        >>> bin(37)
        '0b100101'
        >>> (37).bit_length()
        6
        """
        return 0
    def conjugate(self, *args, **kwargs): # real signature unknown
        """ Returns self, the complex conjugate of any int. """
        pass
    def __abs__(self): # real signature unknown; restored from __doc__
        """ x.__abs__() <==> abs(x) """
        pass
    def __add__(self, y): # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass
    def __and__(self, y): # real signature unknown; restored from __doc__
        """ x.__and__(y) <==> x&y """
        pass
    def __cmp__(self, y): # real signature unknown; restored from __doc__
        """ x.__cmp__(y) <==> cmp(x,y) """
        pass
    def __coerce__(self, y): # real signature unknown; restored from __doc__
        """ x.__coerce__(y) <==> coerce(x, y) """
        pass
    def __divmod__(self, y): # real signature unknown; restored from __doc__
        """ x.__divmod__(y) <==> divmod(x, y) """
        pass
    def __div__(self, y): # real signature unknown; restored from __doc__
        """ x.__div__(y) <==> x/y """
        pass
    def __float__(self): # real signature unknown; restored from __doc__
        """ x.__float__() <==> float(x) """
        pass
    def __floordiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__floordiv__(y) <==> x//y """
        pass
    def __format__(self, *args, **kwargs): # real signature unknown
        pass
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __getnewargs__(self, *args, **kwargs): # real signature unknown
        pass
    def __hash__(self): # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass
    def __hex__(self): # real signature unknown; restored from __doc__
        """ x.__hex__() <==> hex(x) """
        pass
    def __index__(self): # real signature unknown; restored from __doc__
        """ x[y:z] <==> x[y.__index__():z.__index__()] """
        pass
    def __init__(self, x, base=10): # known special case of int.__init__
        """
        int(x=0) -> int or long
        int(x, base=10) -> int or long
        Convert a number or string to an integer, or return 0 if no arguments
        are given. If x is floating point, the conversion truncates towards zero.
        If x is outside the integer range, the function returns a long instead.
        If x is not a number or if base is given, then x must be a string or
        Unicode object representing an integer literal in the given base. The
        literal can be preceded by '+' or '-' and be surrounded by whitespace.
        The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to
        interpret the base from the string as an integer literal.
        >>> int('0b100', base=0)
        4
        # (copied from class doc)
        """
        pass
    def __int__(self): # real signature unknown; restored from __doc__
        """ x.__int__() <==> int(x) """
        pass
    def __invert__(self): # real signature unknown; restored from __doc__
        """ x.__invert__() <==> ~x """
        pass
    def __long__(self): # real signature unknown; restored from __doc__
        """ x.__long__() <==> long(x) """
        pass
    def __lshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__lshift__(y) <==> x<<y """
        pass
    def __mod__(self, y): # real signature unknown; restored from __doc__
        """ x.__mod__(y) <==> x%y """
        pass
    def __mul__(self, y): # real signature unknown; restored from __doc__
        """ x.__mul__(y) <==> x*y """
        pass
    def __neg__(self): # real signature unknown; restored from __doc__
        """ x.__neg__() <==> -x """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __nonzero__(self): # real signature unknown; restored from __doc__
        """ x.__nonzero__() <==> x != 0 """
        pass
    def __oct__(self): # real signature unknown; restored from __doc__
        """ x.__oct__() <==> oct(x) """
        pass
    def __or__(self, y): # real signature unknown; restored from __doc__
        """ x.__or__(y) <==> x|y """
        pass
    def __pos__(self): # real signature unknown; restored from __doc__
        """ x.__pos__() <==> +x """
        pass
    def __pow__(self, y, z=None): # real signature unknown; restored from __doc__
        """ x.__pow__(y[, z]) <==> pow(x, y[, z]) """
        pass
    def __radd__(self, y): # real signature unknown; restored from __doc__
        """ x.__radd__(y) <==> y+x """
        pass
    def __rand__(self, y): # real signature unknown; restored from __doc__
        """ x.__rand__(y) <==> y&x """
        pass
    def __rdivmod__(self, y): # real signature unknown; restored from __doc__
        """ x.__rdivmod__(y) <==> divmod(y, x) """
        pass
    def __rdiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rdiv__(y) <==> y/x """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __rfloordiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rfloordiv__(y) <==> y//x """
        pass
    def __rlshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__rlshift__(y) <==> y<<x """
        pass
    def __rmod__(self, y): # real signature unknown; restored from __doc__
        """ x.__rmod__(y) <==> y%x """
        pass
    def __rmul__(self, y): # real signature unknown; restored from __doc__
        """ x.__rmul__(y) <==> y*x """
        pass
    def __ror__(self, y): # real signature unknown; restored from __doc__
        """ x.__ror__(y) <==> y|x """
        pass
    def __rpow__(self, x, z=None): # real signature unknown; restored from __doc__
        """ y.__rpow__(x[, z]) <==> pow(x, y[, z]) """
        pass
    def __rrshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__rrshift__(y) <==> y>>x """
        pass
    def __rshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__rshift__(y) <==> x>>y """
        pass
    def __rsub__(self, y): # real signature unknown; restored from __doc__
        """ x.__rsub__(y) <==> y-x """
        pass
    def __rtruediv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rtruediv__(y) <==> y/x """
        pass
    def __rxor__(self, y): # real signature unknown; restored from __doc__
        """ x.__rxor__(y) <==> y^x """
        pass
    def __str__(self): # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass
    def __sub__(self, y): # real signature unknown; restored from __doc__
        """ x.__sub__(y) <==> x-y """
        pass
    def __truediv__(self, y): # real signature unknown; restored from __doc__
        """ x.__truediv__(y) <==> x/y """
        pass
    def __trunc__(self, *args, **kwargs): # real signature unknown
        """ Truncating an Integral returns itself. """
        pass
    def __xor__(self, y): # real signature unknown; restored from __doc__
        """ x.__xor__(y) <==> x^y """
        pass
    # Read-only numeric-tower properties, modeled as mock property objects.
    denominator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """the denominator of a rational number in lowest terms"""
    imag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """the imaginary part of a complex number"""
    numerator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """the numerator of a rational number in lowest terms"""
    real = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """the real part of a complex number"""
class bool(int):
    """
    bool(x) -> bool
    Returns True when the argument x is true, False otherwise.
    The builtins True and False are the only two instances of the class bool.
    The class bool is a subclass of the class int, and cannot be subclassed.
    """
    # Skeleton: only the methods bool overrides from int are re-declared here.
    def __and__(self, y): # real signature unknown; restored from __doc__
        """ x.__and__(y) <==> x&y """
        pass
    def __init__(self, x): # real signature unknown; restored from __doc__
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __or__(self, y): # real signature unknown; restored from __doc__
        """ x.__or__(y) <==> x|y """
        pass
    def __rand__(self, y): # real signature unknown; restored from __doc__
        """ x.__rand__(y) <==> y&x """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __ror__(self, y): # real signature unknown; restored from __doc__
        """ x.__ror__(y) <==> y|x """
        pass
    def __rxor__(self, y): # real signature unknown; restored from __doc__
        """ x.__rxor__(y) <==> y^x """
        pass
    def __str__(self): # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass
    def __xor__(self, y): # real signature unknown; restored from __doc__
        """ x.__xor__(y) <==> x^y """
        pass
class buffer(object):
    """
    buffer(object [, offset[, size]])
    Create a new buffer object which references the given object.
    The buffer will reference a slice of the target object from the
    start of the object (or at the specified offset). The slice will
    extend to the end of the target object (or with the specified size).
    """
    # Skeleton of the Python 2 buffer type (replaced by memoryview in Python 3).
    def __add__(self, y): # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass
    def __cmp__(self, y): # real signature unknown; restored from __doc__
        """ x.__cmp__(y) <==> cmp(x,y) """
        pass
    def __delitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__delitem__(y) <==> del x[y] """
        pass
    def __delslice__(self, i, j): # real signature unknown; restored from __doc__
        """
        x.__delslice__(i, j) <==> del x[i:j]
        Use of negative indices is not supported.
        """
        pass
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __getitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass
    def __getslice__(self, i, j): # real signature unknown; restored from __doc__
        """
        x.__getslice__(i, j) <==> x[i:j]
        Use of negative indices is not supported.
        """
        pass
    def __hash__(self): # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass
    def __init__(self, p_object, offset=None, size=None): # real signature unknown; restored from __doc__
        pass
    def __len__(self): # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        pass
    def __mul__(self, n): # real signature unknown; restored from __doc__
        """ x.__mul__(n) <==> x*n """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __rmul__(self, n): # real signature unknown; restored from __doc__
        """ x.__rmul__(n) <==> n*x """
        pass
    def __setitem__(self, i, y): # real signature unknown; restored from __doc__
        """ x.__setitem__(i, y) <==> x[i]=y """
        pass
    def __setslice__(self, i, j, y): # real signature unknown; restored from __doc__
        """
        x.__setslice__(i, j, y) <==> x[i:j]=y
        Use of negative indices is not supported.
        """
        pass
    def __str__(self): # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass
class bytearray(object):
"""
bytearray(iterable_of_ints) -> bytearray.
bytearray(string, encoding[, errors]) -> bytearray.
bytearray(bytes_or_bytearray) -> mutable copy of bytes_or_bytearray.
bytearray(memory_view) -> bytearray.
Construct an mutable bytearray object from:
- an iterable yielding integers in range(256)
- a text string encoded using the specified encoding
- a bytes or a bytearray object
- any object implementing the buffer API.
bytearray(int) -> bytearray.
Construct a zero-initialized bytearray of the given length.
"""
    def append(self, p_int): # real signature unknown; restored from __doc__
        """
        B.append(int) -> None
        Append a single item to the end of B.
        """
        # Stub: no-op skeleton body.
        pass
    def capitalize(self): # real signature unknown; restored from __doc__
        """
        B.capitalize() -> copy of B
        Return a copy of B with only its first character capitalized (ASCII)
        and the rest lower-cased.
        """
        # Stub: no-op skeleton body.
        pass
    def center(self, width, fillchar=None): # real signature unknown; restored from __doc__
        """
        B.center(width[, fillchar]) -> copy of B
        Return B centered in a string of length width. Padding is
        done using the specified fill character (default is a space).
        """
        # Stub: no-op skeleton body.
        pass
    def count(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
        """
        B.count(sub [,start [,end]]) -> int
        Return the number of non-overlapping occurrences of subsection sub in
        bytes B[start:end]. Optional arguments start and end are interpreted
        as in slice notation.
        """
        # Stub: placeholder return value.
        return 0
    def decode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
        """
        B.decode([encoding[, errors]]) -> unicode object.
        Decodes B using the codec registered for encoding. encoding defaults
        to the default encoding. errors may be given to set a different error
        handling scheme. Default is 'strict' meaning that encoding errors raise
        a UnicodeDecodeError. Other possible values are 'ignore' and 'replace'
        as well as any other name registered with codecs.register_error that is
        able to handle UnicodeDecodeErrors.
        """
        # Stub: placeholder return value.
        return u""
    def endswith(self, suffix, start=None, end=None): # real signature unknown; restored from __doc__
        """
        B.endswith(suffix [,start [,end]]) -> bool
        Return True if B ends with the specified suffix, False otherwise.
        With optional start, test B beginning at that position.
        With optional end, stop comparing B at that position.
        suffix can also be a tuple of strings to try.
        """
        # Stub: placeholder return value.
        return False
    def expandtabs(self, tabsize=None): # real signature unknown; restored from __doc__
        """
        B.expandtabs([tabsize]) -> copy of B
        Return a copy of B where all tab characters are expanded using spaces.
        If tabsize is not given, a tab size of 8 characters is assumed.
        """
        # Stub: no-op skeleton body.
        pass
    def extend(self, iterable_int): # real signature unknown; restored from __doc__
        """
        B.extend(iterable int) -> None
        Append all the elements from the iterator or sequence to the
        end of B.
        """
        # Stub: no-op skeleton body.
        pass
    def find(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
        """
        B.find(sub [,start [,end]]) -> int
        Return the lowest index in B where subsection sub is found,
        such that sub is contained within B[start,end]. Optional
        arguments start and end are interpreted as in slice notation.
        Return -1 on failure.
        """
        # Stub: placeholder return value.
        return 0
    @classmethod # known case
    def fromhex(cls, string): # real signature unknown; restored from __doc__
        """
        bytearray.fromhex(string) -> bytearray
        Create a bytearray object from a string of hexadecimal numbers.
        Spaces between two numbers are accepted.
        Example: bytearray.fromhex('B9 01EF') -> bytearray(b'\xb9\x01\xef').
        """
        # Stub: returns the class itself as a placeholder.
        return bytearray
    def index(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
        """
        B.index(sub [,start [,end]]) -> int
        Like B.find() but raise ValueError when the subsection is not found.
        """
        # Stub: placeholder return value.
        return 0
    def insert(self, index, p_int): # real signature unknown; restored from __doc__
        """
        B.insert(index, int) -> None
        Insert a single item into the bytearray before the given index.
        """
        # Stub: no-op skeleton body.
        pass
    def isalnum(self): # real signature unknown; restored from __doc__
        """
        B.isalnum() -> bool
        Return True if all characters in B are alphanumeric
        and there is at least one character in B, False otherwise.
        """
        # Stub: placeholder return value.
        return False
    def isalpha(self): # real signature unknown; restored from __doc__
        """
        B.isalpha() -> bool
        Return True if all characters in B are alphabetic
        and there is at least one character in B, False otherwise.
        """
        # Stub: placeholder return value.
        return False
    def isdigit(self): # real signature unknown; restored from __doc__
        """
        B.isdigit() -> bool
        Return True if all characters in B are digits
        and there is at least one character in B, False otherwise.
        """
        # Stub: placeholder return value.
        return False
def islower(self): # real signature unknown; restored from __doc__
"""
B.islower() -> bool
Return True if all cased characters in B are lowercase and there is
at least one cased character in B, False otherwise.
"""
return False
def isspace(self): # real signature unknown; restored from __doc__
"""
B.isspace() -> bool
Return True if all characters in B are whitespace
and there is at least one character in B, False otherwise.
"""
return False
def istitle(self): # real signature unknown; restored from __doc__
"""
B.istitle() -> bool
Return True if B is a titlecased string and there is at least one
character in B, i.e. uppercase characters may only follow uncased
characters and lowercase characters only cased ones. Return False
otherwise.
"""
return False
def isupper(self): # real signature unknown; restored from __doc__
"""
B.isupper() -> bool
Return True if all cased characters in B are uppercase and there is
at least one cased character in B, False otherwise.
"""
return False
def join(self, iterable_of_bytes): # real signature unknown; restored from __doc__
    """
    B.join(iterable_of_bytes) -> bytearray
    Concatenates any number of bytearray objects, with B in between each pair.
    """
    # Stub placeholder: sibling bytearray stubs (lstrip, rstrip, strip, ...)
    # return the bytearray type; the previous "-> bytes" / return "" pair was
    # inconsistent with them and with CPython's actual return type.
    return bytearray
def ljust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
B.ljust(width[, fillchar]) -> copy of B
Return B left justified in a string of length width. Padding is
done using the specified fill character (default is a space).
"""
pass
def lower(self): # real signature unknown; restored from __doc__
"""
B.lower() -> copy of B
Return a copy of B with all ASCII characters converted to lowercase.
"""
pass
def lstrip(self, bytes=None): # real signature unknown; restored from __doc__
"""
B.lstrip([bytes]) -> bytearray
Strip leading bytes contained in the argument.
If the argument is omitted, strip leading ASCII whitespace.
"""
return bytearray
def partition(self, sep): # real signature unknown; restored from __doc__
"""
B.partition(sep) -> (head, sep, tail)
Searches for the separator sep in B, and returns the part before it,
the separator itself, and the part after it. If the separator is not
found, returns B and two empty bytearray objects.
"""
pass
def pop(self, index=None): # real signature unknown; restored from __doc__
"""
B.pop([index]) -> int
Remove and return a single item from B. If no index
argument is given, will pop the last value.
"""
return 0
def remove(self, p_int): # real signature unknown; restored from __doc__
    """
    B.remove(int) -> None
    Remove the first occurrence of a value in B.
    """
    pass
def replace(self, old, new, count=None): # real signature unknown; restored from __doc__
    """
    B.replace(old, new[, count]) -> bytearray
    Return a copy of B with all occurrences of subsection
    old replaced by new. If the optional argument count is
    given, only the first count occurrences are replaced.
    """
    # Stub placeholder: bytearray.replace returns a bytearray, not bytes;
    # returning the bytearray type matches the convention of the other
    # bytearray method stubs in this file (lstrip, rstrip, strip, ...).
    return bytearray
def reverse(self): # real signature unknown; restored from __doc__
"""
B.reverse() -> None
Reverse the order of the values in B in place.
"""
pass
def rfind(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
B.rfind(sub [,start [,end]]) -> int
Return the highest index in B where subsection sub is found,
such that sub is contained within B[start,end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return 0
def rindex(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
B.rindex(sub [,start [,end]]) -> int
Like B.rfind() but raise ValueError when the subsection is not found.
"""
return 0
def rjust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
B.rjust(width[, fillchar]) -> copy of B
Return B right justified in a string of length width. Padding is
done using the specified fill character (default is a space)
"""
pass
def rpartition(self, sep): # real signature unknown; restored from __doc__
"""
B.rpartition(sep) -> (head, sep, tail)
Searches for the separator sep in B, starting at the end of B,
and returns the part before it, the separator itself, and the
part after it. If the separator is not found, returns two empty
bytearray objects and B.
"""
pass
def rsplit(self, sep, maxsplit=None): # real signature unknown; restored from __doc__
"""
B.rsplit(sep[, maxsplit]) -> list of bytearray
Return a list of the sections in B, using sep as the delimiter,
starting at the end of B and working to the front.
If sep is not given, B is split on ASCII whitespace characters
(space, tab, return, newline, formfeed, vertical tab).
If maxsplit is given, at most maxsplit splits are done.
"""
return []
def rstrip(self, bytes=None): # real signature unknown; restored from __doc__
"""
B.rstrip([bytes]) -> bytearray
Strip trailing bytes contained in the argument.
If the argument is omitted, strip trailing ASCII whitespace.
"""
return bytearray
def split(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
"""
B.split([sep[, maxsplit]]) -> list of bytearray
Return a list of the sections in B, using sep as the delimiter.
If sep is not given, B is split on ASCII whitespace characters
(space, tab, return, newline, formfeed, vertical tab).
If maxsplit is given, at most maxsplit splits are done.
"""
return []
def splitlines(self, keepends=False): # real signature unknown; restored from __doc__
"""
B.splitlines(keepends=False) -> list of lines
Return a list of the lines in B, breaking at line boundaries.
Line breaks are not included in the resulting list unless keepends
is given and true.
"""
return []
def startswith(self, prefix, start=None, end=None): # real signature unknown; restored from __doc__
"""
B.startswith(prefix [,start [,end]]) -> bool
Return True if B starts with the specified prefix, False otherwise.
With optional start, test B beginning at that position.
With optional end, stop comparing B at that position.
prefix can also be a tuple of strings to try.
"""
return False
def strip(self, bytes=None): # real signature unknown; restored from __doc__
"""
B.strip([bytes]) -> bytearray
Strip leading and trailing bytes contained in the argument.
If the argument is omitted, strip ASCII whitespace.
"""
return bytearray
def swapcase(self): # real signature unknown; restored from __doc__
"""
B.swapcase() -> copy of B
Return a copy of B with uppercase ASCII characters converted
to lowercase ASCII and vice versa.
"""
pass
def title(self): # real signature unknown; restored from __doc__
"""
B.title() -> copy of B
Return a titlecased version of B, i.e. ASCII words start with uppercase
characters, all remaining cased characters have lowercase.
"""
pass
def translate(self, table, deletechars=None): # real signature unknown; restored from __doc__
"""
B.translate(table[, deletechars]) -> bytearray
Return a copy of B, where all characters occurring in the
optional argument deletechars are removed, and the remaining
characters have been mapped through the given translation
table, which must be a bytes object of length 256.
"""
return bytearray
def upper(self): # real signature unknown; restored from __doc__
"""
B.upper() -> copy of B
Return a copy of B with all ASCII characters converted to uppercase.
"""
pass
def zfill(self, width): # real signature unknown; restored from __doc__
"""
B.zfill(width) -> copy of B
Pad a numeric string B with zeros on the left, to fill a field
of the specified width. B is never truncated.
"""
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __alloc__(self): # real signature unknown; restored from __doc__
"""
B.__alloc__() -> int
Returns the number of bytes actually allocated.
"""
return 0
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x """
pass
def __delitem__(self, y): # real signature unknown; restored from __doc__
""" x.__delitem__(y) <==> del x[y] """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __iadd__(self, y): # real signature unknown; restored from __doc__
""" x.__iadd__(y) <==> x+=y """
pass
def __imul__(self, y): # real signature unknown; restored from __doc__
""" x.__imul__(y) <==> x*=y """
pass
def __init__(self, source=None, encoding=None, errors='strict'): # known special case of bytearray.__init__
    """
    bytearray(iterable_of_ints) -> bytearray.
    bytearray(string, encoding[, errors]) -> bytearray.
    bytearray(bytes_or_bytearray) -> mutable copy of bytes_or_bytearray.
    bytearray(memory_view) -> bytearray.
    Construct a mutable bytearray object from:
    - an iterable yielding integers in range(256)
    - a text string encoded using the specified encoding
    - a bytes or a bytearray object
    - any object implementing the buffer API.
    bytearray(int) -> bytearray.
    Construct a zero-initialized bytearray of the given length.
    # (copied from class doc)
    """
    pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mul__(self, n): # real signature unknown; restored from __doc__
""" x.__mul__(n) <==> x*n """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Return state information for pickling. """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmul__(self, n): # real signature unknown; restored from __doc__
""" x.__rmul__(n) <==> n*x """
pass
def __setitem__(self, i, y): # real signature unknown; restored from __doc__
""" x.__setitem__(i, y) <==> x[i]=y """
pass
def __sizeof__(self): # real signature unknown; restored from __doc__
"""
B.__sizeof__() -> int
Returns the size of B in memory, in bytes
"""
return 0
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
class str(basestring):
"""
str(object='') -> string
Return a nice string representation of the object.
If the argument is a string, the return value is the same object.
"""
def capitalize(self): # real signature unknown; restored from __doc__
"""
S.capitalize() -> string
Return a copy of the string S with only its first character
capitalized.
"""
return ""
def center(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.center(width[, fillchar]) -> string
Return S centered in a string of length width. Padding is
done using the specified fill character (default is a space)
"""
return ""
def count(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.count(sub[, start[, end]]) -> int
Return the number of non-overlapping occurrences of substring sub in
string S[start:end]. Optional arguments start and end are interpreted
as in slice notation.
"""
return 0
def decode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
S.decode([encoding[,errors]]) -> object
Decodes S using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeDecodeError. Other possible values are 'ignore' and 'replace'
as well as any other name registered with codecs.register_error that is
able to handle UnicodeDecodeErrors.
"""
return object()
def encode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
S.encode([encoding[,errors]]) -> object
Encodes S using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and
'xmlcharrefreplace' as well as any other name registered with
codecs.register_error that is able to handle UnicodeEncodeErrors.
"""
return object()
def endswith(self, suffix, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.endswith(suffix[, start[, end]]) -> bool
Return True if S ends with the specified suffix, False otherwise.
With optional start, test S beginning at that position.
With optional end, stop comparing S at that position.
suffix can also be a tuple of strings to try.
"""
return False
def expandtabs(self, tabsize=None): # real signature unknown; restored from __doc__
"""
S.expandtabs([tabsize]) -> string
Return a copy of S where all tab characters are expanded using spaces.
If tabsize is not given, a tab size of 8 characters is assumed.
"""
return ""
def find(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.find(sub [,start [,end]]) -> int
Return the lowest index in S where substring sub is found,
such that sub is contained within S[start:end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return 0
def format(*args, **kwargs): # known special case of str.format
"""
S.format(*args, **kwargs) -> string
Return a formatted version of S, using substitutions from args and kwargs.
The substitutions are identified by braces ('{' and '}').
"""
pass
def index(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.index(sub [,start [,end]]) -> int
Like S.find() but raise ValueError when the substring is not found.
"""
return 0
def isalnum(self): # real signature unknown; restored from __doc__
"""
S.isalnum() -> bool
Return True if all characters in S are alphanumeric
and there is at least one character in S, False otherwise.
"""
return False
def isalpha(self): # real signature unknown; restored from __doc__
"""
S.isalpha() -> bool
Return True if all characters in S are alphabetic
and there is at least one character in S, False otherwise.
"""
return False
def isdigit(self): # real signature unknown; restored from __doc__
"""
S.isdigit() -> bool
Return True if all characters in S are digits
and there is at least one character in S, False otherwise.
"""
return False
def islower(self): # real signature unknown; restored from __doc__
"""
S.islower() -> bool
Return True if all cased characters in S are lowercase and there is
at least one cased character in S, False otherwise.
"""
return False
def isspace(self): # real signature unknown; restored from __doc__
"""
S.isspace() -> bool
Return True if all characters in S are whitespace
and there is at least one character in S, False otherwise.
"""
return False
def istitle(self): # real signature unknown; restored from __doc__
"""
S.istitle() -> bool
Return True if S is a titlecased string and there is at least one
character in S, i.e. uppercase characters may only follow uncased
characters and lowercase characters only cased ones. Return False
otherwise.
"""
return False
def isupper(self): # real signature unknown; restored from __doc__
"""
S.isupper() -> bool
Return True if all cased characters in S are uppercase and there is
at least one cased character in S, False otherwise.
"""
return False
def join(self, iterable): # real signature unknown; restored from __doc__
"""
S.join(iterable) -> string
Return a string which is the concatenation of the strings in the
iterable. The separator between elements is S.
"""
return ""
def ljust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.ljust(width[, fillchar]) -> string
Return S left-justified in a string of length width. Padding is
done using the specified fill character (default is a space).
"""
return ""
def lower(self): # real signature unknown; restored from __doc__
"""
S.lower() -> string
Return a copy of the string S converted to lowercase.
"""
return ""
def lstrip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.lstrip([chars]) -> string or unicode
Return a copy of the string S with leading whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is unicode, S will be converted to unicode before stripping
"""
return ""
def partition(self, sep): # real signature unknown; restored from __doc__
"""
S.partition(sep) -> (head, sep, tail)
Search for the separator sep in S, and return the part before it,
the separator itself, and the part after it. If the separator is not
found, return S and two empty strings.
"""
pass
def replace(self, old, new, count=None): # real signature unknown; restored from __doc__
"""
S.replace(old, new[, count]) -> string
Return a copy of string S with all occurrences of substring
old replaced by new. If the optional argument count is
given, only the first count occurrences are replaced.
"""
return ""
def rfind(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.rfind(sub [,start [,end]]) -> int
Return the highest index in S where substring sub is found,
such that sub is contained within S[start:end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return 0
def rindex(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.rindex(sub [,start [,end]]) -> int
Like S.rfind() but raise ValueError when the substring is not found.
"""
return 0
def rjust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.rjust(width[, fillchar]) -> string
Return S right-justified in a string of length width. Padding is
done using the specified fill character (default is a space)
"""
return ""
def rpartition(self, sep): # real signature unknown; restored from __doc__
"""
S.rpartition(sep) -> (head, sep, tail)
Search for the separator sep in S, starting at the end of S, and return
the part before it, the separator itself, and the part after it. If the
separator is not found, return two empty strings and S.
"""
pass
def rsplit(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
"""
S.rsplit([sep [,maxsplit]]) -> list of strings
Return a list of the words in the string S, using sep as the
delimiter string, starting at the end of the string and working
to the front. If maxsplit is given, at most maxsplit splits are
done. If sep is not specified or is None, any whitespace string
is a separator.
"""
return []
def rstrip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.rstrip([chars]) -> string or unicode
Return a copy of the string S with trailing whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is unicode, S will be converted to unicode before stripping
"""
return ""
def split(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
"""
S.split([sep [,maxsplit]]) -> list of strings
Return a list of the words in the string S, using sep as the
delimiter string. If maxsplit is given, at most maxsplit
splits are done. If sep is not specified or is None, any
whitespace string is a separator and empty strings are removed
from the result.
"""
return []
def splitlines(self, keepends=False): # real signature unknown; restored from __doc__
"""
S.splitlines(keepends=False) -> list of strings
Return a list of the lines in S, breaking at line boundaries.
Line breaks are not included in the resulting list unless keepends
is given and true.
"""
return []
def startswith(self, prefix, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.startswith(prefix[, start[, end]]) -> bool
Return True if S starts with the specified prefix, False otherwise.
With optional start, test S beginning at that position.
With optional end, stop comparing S at that position.
prefix can also be a tuple of strings to try.
"""
return False
def strip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.strip([chars]) -> string or unicode
Return a copy of the string S with leading and trailing
whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is unicode, S will be converted to unicode before stripping
"""
return ""
def swapcase(self): # real signature unknown; restored from __doc__
"""
S.swapcase() -> string
Return a copy of the string S with uppercase characters
converted to lowercase and vice versa.
"""
return ""
def title(self): # real signature unknown; restored from __doc__
"""
S.title() -> string
Return a titlecased version of S, i.e. words start with uppercase
characters, all remaining cased characters have lowercase.
"""
return ""
def translate(self, table, deletechars=None): # real signature unknown; restored from __doc__
"""
S.translate(table [,deletechars]) -> string
Return a copy of the string S, where all characters occurring
in the optional argument deletechars are removed, and the
remaining characters have been mapped through the given
translation table, which must be a string of length 256 or None.
If the table argument is None, no translation is applied and
the operation simply removes the characters in deletechars.
"""
return ""
def upper(self): # real signature unknown; restored from __doc__
"""
S.upper() -> string
Return a copy of the string S converted to uppercase.
"""
return ""
def zfill(self, width): # real signature unknown; restored from __doc__
"""
S.zfill(width) -> string
Pad a numeric string S with zeros on the left, to fill a field
of the specified width. The string S is never truncated.
"""
return ""
def _formatter_field_name_split(self, *args, **kwargs): # real signature unknown
pass
def _formatter_parser(self, *args, **kwargs): # real signature unknown
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __format__(self, format_spec): # real signature unknown; restored from __doc__
"""
S.__format__(format_spec) -> string
Return a formatted version of S as described by format_spec.
"""
return ""
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __getslice__(self, i, j): # real signature unknown; restored from __doc__
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, string=''): # known special case of str.__init__
"""
str(object='') -> string
Return a nice string representation of the object.
If the argument is a string, the return value is the same object.
# (copied from class doc)
"""
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mod__(self, y): # real signature unknown; restored from __doc__
""" x.__mod__(y) <==> x%y """
pass
def __mul__(self, n): # real signature unknown; restored from __doc__
""" x.__mul__(n) <==> x*n """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rmod__(y) <==> y%x """
pass
def __rmul__(self, n): # real signature unknown; restored from __doc__
""" x.__rmul__(n) <==> n*x """
pass
def __sizeof__(self): # real signature unknown; restored from __doc__
""" S.__sizeof__() -> size of S in memory, in bytes """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
bytes = str  # Python 2 semantics in these stubs: ``bytes`` is an alias of ``str``
class classmethod(object):
    """
    classmethod(function) -> method
    Convert a function to be a class method.
    A class method receives the class as implicit first argument,
    just like an instance method receives the instance.
    To declare a class method, use this idiom:
    class C:
    def f(cls, arg1, arg2, ...): ...
    f = classmethod(f)
    It can be called either on the class (e.g. C.f()) or on an instance
    (e.g. C().f()). The instance is ignored except for its class.
    If a class method is called for a derived class, the derived class
    object is passed as the implied first argument.
    Class methods are different from C++ or Java static methods.
    If you want those, see the staticmethod builtin.
    """
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __get__(self, obj, type=None): # real signature unknown; restored from __doc__
        """ descr.__get__(obj[, type]) -> value """
        pass
    def __init__(self, function): # real signature unknown; restored from __doc__
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    # the function object wrapped by this classmethod (read-only attribute)
    __func__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class complex(object):
"""
complex(real[, imag]) -> complex number
Create a complex number from a real part and an optional imaginary part.
This is equivalent to (real + imag*1j) where imag defaults to 0.
"""
def conjugate(self): # real signature unknown; restored from __doc__
"""
complex.conjugate() -> complex
Return the complex conjugate of its argument. (3-4j).conjugate() == 3+4j.
"""
return complex
def __abs__(self): # real signature unknown; restored from __doc__
""" x.__abs__() <==> abs(x) """
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __coerce__(self, y): # real signature unknown; restored from __doc__
""" x.__coerce__(y) <==> coerce(x, y) """
pass
def __divmod__(self, y): # real signature unknown; restored from __doc__
""" x.__divmod__(y) <==> divmod(x, y) """
pass
def __div__(self, y): # real signature unknown; restored from __doc__
""" x.__div__(y) <==> x/y """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __float__(self): # real signature unknown; restored from __doc__
""" x.__float__() <==> float(x) """
pass
def __floordiv__(self, y): # real signature unknown; restored from __doc__
""" x.__floordiv__(y) <==> x//y """
pass
def __format__(self): # real signature unknown; restored from __doc__
"""
complex.__format__() -> str
Convert to a string according to format_spec.
"""
return ""
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, real, imag=None): # real signature unknown; restored from __doc__
pass
def __int__(self): # real signature unknown; restored from __doc__
""" x.__int__() <==> int(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __long__(self): # real signature unknown; restored from __doc__
""" x.__long__() <==> long(x) """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mod__(self, y): # real signature unknown; restored from __doc__
""" x.__mod__(y) <==> x%y """
pass
def __mul__(self, y): # real signature unknown; restored from __doc__
""" x.__mul__(y) <==> x*y """
pass
def __neg__(self): # real signature unknown; restored from __doc__
""" x.__neg__() <==> -x """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __nonzero__(self): # real signature unknown; restored from __doc__
""" x.__nonzero__() <==> x != 0 """
pass
def __pos__(self): # real signature unknown; restored from __doc__
""" x.__pos__() <==> +x """
pass
def __pow__(self, y, z=None): # real signature unknown; restored from __doc__
""" x.__pow__(y[, z]) <==> pow(x, y[, z]) """
pass
def __radd__(self, y): # real signature unknown; restored from __doc__
""" x.__radd__(y) <==> y+x """
pass
def __rdivmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rdivmod__(y) <==> divmod(y, x) """
pass
def __rdiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rdiv__(y) <==> y/x """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rfloordiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rfloordiv__(y) <==> y//x """
pass
def __rmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rmod__(y) <==> y%x """
pass
def __rmul__(self, y): # real signature unknown; restored from __doc__
""" x.__rmul__(y) <==> y*x """
pass
def __rpow__(self, x, z=None): # real signature unknown; restored from __doc__
""" y.__rpow__(x[, z]) <==> pow(x, y[, z]) """
pass
def __rsub__(self, y): # real signature unknown; restored from __doc__
""" x.__rsub__(y) <==> y-x """
pass
def __rtruediv__(self, y): # real signature unknown; restored from __doc__
""" x.__rtruediv__(y) <==> y/x """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
def __sub__(self, y): # real signature unknown; restored from __doc__
""" x.__sub__(y) <==> x-y """
pass
def __truediv__(self, y): # real signature unknown; restored from __doc__
""" x.__truediv__(y) <==> x/y """
pass
imag = property(lambda self: 0.0)
"""the imaginary part of a complex number
:type: float
"""
real = property(lambda self: 0.0)
"""the real part of a complex number
:type: float
"""
# NOTE(review): auto-generated Python 2-era skeleton of the builtin ``dict``
# (it still exposes has_key/iteritems/viewkeys, all removed in Python 3).
# Bodies are placeholders; the signatures and docstrings are the payload.
# Because __getattribute__ below is itself a stub returning None, attribute
# access on instances of this class yields None rather than the methods.
class dict(object):
    """
    dict() -> new empty dictionary
    dict(mapping) -> new dictionary initialized from a mapping object's
    (key, value) pairs
    dict(iterable) -> new dictionary initialized as if via:
        d = {}
        for k, v in iterable:
            d[k] = v
    dict(**kwargs) -> new dictionary initialized with the name=value pairs
    in the keyword argument list. For example: dict(one=1, two=2)
    """
    def clear(self): # real signature unknown; restored from __doc__
        """ D.clear() -> None. Remove all items from D. """
        pass
    def copy(self): # real signature unknown; restored from __doc__
        """ D.copy() -> a shallow copy of D """
        pass
    @staticmethod # known case
    def fromkeys(S, v=None): # real signature unknown; restored from __doc__
        """
        dict.fromkeys(S[,v]) -> New dict with keys from S and values equal to v.
        v defaults to None.
        """
        pass
    def get(self, k, d=None): # real signature unknown; restored from __doc__
        """ D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None. """
        pass
    def has_key(self, k): # real signature unknown; restored from __doc__
        """ D.has_key(k) -> True if D has a key k, else False """
        return False
    def items(self): # real signature unknown; restored from __doc__
        """ D.items() -> list of D's (key, value) pairs, as 2-tuples """
        return []
    def iteritems(self): # real signature unknown; restored from __doc__
        """ D.iteritems() -> an iterator over the (key, value) items of D """
        pass
    def iterkeys(self): # real signature unknown; restored from __doc__
        """ D.iterkeys() -> an iterator over the keys of D """
        pass
    def itervalues(self): # real signature unknown; restored from __doc__
        """ D.itervalues() -> an iterator over the values of D """
        pass
    def keys(self): # real signature unknown; restored from __doc__
        """ D.keys() -> list of D's keys """
        return []
    def pop(self, k, d=None): # real signature unknown; restored from __doc__
        """
        D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised
        """
        pass
    def popitem(self): # real signature unknown; restored from __doc__
        """
        D.popitem() -> (k, v), remove and return some (key, value) pair as a
        2-tuple; but raise KeyError if D is empty.
        """
        pass
    def setdefault(self, k, d=None): # real signature unknown; restored from __doc__
        """ D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D """
        pass
    def update(self, E=None, **F): # known special case of dict.update
        """
        D.update([E, ]**F) -> None. Update D from dict/iterable E and F.
        If E present and has a .keys() method, does: for k in E: D[k] = E[k]
        If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v
        In either case, this is followed by: for k in F: D[k] = F[k]
        """
        pass
    def values(self): # real signature unknown; restored from __doc__
        """ D.values() -> list of D's values """
        return []
    def viewitems(self): # real signature unknown; restored from __doc__
        """ D.viewitems() -> a set-like object providing a view on D's items """
        pass
    def viewkeys(self): # real signature unknown; restored from __doc__
        """ D.viewkeys() -> a set-like object providing a view on D's keys """
        pass
    def viewvalues(self): # real signature unknown; restored from __doc__
        """ D.viewvalues() -> an object providing a view on D's values """
        pass
    def __cmp__(self, y): # real signature unknown; restored from __doc__
        """ x.__cmp__(y) <==> cmp(x,y) """
        pass
    def __contains__(self, k): # real signature unknown; restored from __doc__
        """ D.__contains__(k) -> True if D has a key k, else False """
        return False
    def __delitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__delitem__(y) <==> del x[y] """
        pass
    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __getitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass
    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass
    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass
    def __init__(self, seq=None, **kwargs): # known special case of dict.__init__
        """
        dict() -> new empty dictionary
        dict(mapping) -> new dictionary initialized from a mapping object's
        (key, value) pairs
        dict(iterable) -> new dictionary initialized as if via:
            d = {}
            for k, v in iterable:
                d[k] = v
        dict(**kwargs) -> new dictionary initialized with the name=value pairs
        in the keyword argument list. For example: dict(one=1, two=2)
        # (copied from class doc)
        """
        pass
    def __iter__(self): # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        pass
    def __len__(self): # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        pass
    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass
    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __setitem__(self, i, y): # real signature unknown; restored from __doc__
        """ x.__setitem__(i, y) <==> x[i]=y """
        pass
    def __sizeof__(self): # real signature unknown; restored from __doc__
        """ D.__sizeof__() -> size of D in memory, in bytes """
        pass
    __hash__ = None  # mutable mapping: instances are explicitly unhashable
# Skeleton stub of the Python 2 builtin ``enumerate``: every body is a
# placeholder that simply yields None; only the signatures and docstrings
# carry information for introspection tools.
class enumerate(object):
    """
    enumerate(iterable[, start]) -> iterator for index, value of iterable
    Return an enumerate object. iterable must be another object that supports
    iteration. The enumerate object yields pairs containing a count (from
    start, which defaults to zero) and a value yielded by the iterable argument.
    enumerate is useful for obtaining an indexed list:
    (0, seq[0]), (1, seq[1]), (2, seq[2]), ...
    """

    def next(self):
        """ x.next() -> the next value, or raise StopIteration """
        return None

    def __getattribute__(self, name):
        """ x.__getattribute__('name') <==> x.name """
        return None

    def __init__(self, iterable, start=0):
        """ x.__init__(...) initializes x; see help(type(x)) for signature """
        return None

    def __iter__(self):
        """ x.__iter__() <==> iter(x) """
        return None

    @staticmethod
    def __new__(S, *more):
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        return None
# NOTE(review): skeleton of the Python 2 ``file`` type (removed in Python 3,
# superseded by the ``io`` hierarchy). Bodies are placeholders; signatures
# and docstrings are the payload.
class file(object):
    """
    file(name[, mode[, buffering]]) -> file object
    Open a file. The mode can be 'r', 'w' or 'a' for reading (default),
    writing or appending. The file will be created if it doesn't exist
    when opened for writing or appending; it will be truncated when
    opened for writing. Add a 'b' to the mode for binary files.
    Add a '+' to the mode to allow simultaneous reading and writing.
    If the buffering argument is given, 0 means unbuffered, 1 means line
    buffered, and larger numbers specify the buffer size. The preferred way
    to open a file is with the builtin open() function.
    Add a 'U' to mode to open the file for input with universal newline
    support. Any line ending in the input file will be seen as a '\n'
    in Python. Also, a file so opened gains the attribute 'newlines';
    the value for this attribute is one of None (no newline read yet),
    '\r', '\n', '\r\n' or a tuple containing all the newline types seen.
    'U' cannot be combined with 'w' or '+' mode.
    """
    def close(self): # real signature unknown; restored from __doc__
        """
        close() -> None or (perhaps) an integer. Close the file.
        Sets data attribute .closed to True. A closed file cannot be used for
        further I/O operations. close() may be called more than once without
        error. Some kinds of file objects (for example, opened by popen())
        may return an exit status upon closing.
        """
        pass
    def fileno(self): # real signature unknown; restored from __doc__
        """
        fileno() -> integer "file descriptor".
        This is needed for lower-level file interfaces, such os.read().
        """
        return 0
    def flush(self): # real signature unknown; restored from __doc__
        """ flush() -> None. Flush the internal I/O buffer. """
        pass
    def isatty(self): # real signature unknown; restored from __doc__
        """ isatty() -> true or false. True if the file is connected to a tty device. """
        return False
    def next(self): # real signature unknown; restored from __doc__
        """ x.next() -> the next value, or raise StopIteration """
        pass
    def read(self, size=None): # real signature unknown; restored from __doc__
        """
        read([size]) -> read at most size bytes, returned as a string.
        If the size argument is negative or omitted, read until EOF is reached.
        Notice that when in non-blocking mode, less data than what was requested
        may be returned, even if no size parameter was given.
        """
        pass
    def readinto(self): # real signature unknown; restored from __doc__
        """ readinto() -> Undocumented. Don't use this; it may go away. """
        pass
    def readline(self, size=None): # real signature unknown; restored from __doc__
        """
        readline([size]) -> next line from the file, as a string.
        Retain newline. A non-negative size argument limits the maximum
        number of bytes to return (an incomplete line may be returned then).
        Return an empty string at EOF.
        """
        pass
    def readlines(self, size=None): # real signature unknown; restored from __doc__
        """
        readlines([size]) -> list of strings, each a line from the file.
        Call readline() repeatedly and return a list of the lines so read.
        The optional size argument, if given, is an approximate bound on the
        total number of bytes in the lines returned.
        """
        return []
    def seek(self, offset, whence=None): # real signature unknown; restored from __doc__
        """
        seek(offset[, whence]) -> None. Move to new file position.
        Argument offset is a byte count. Optional argument whence defaults to
        0 (offset from start of file, offset should be >= 0); other values are 1
        (move relative to current position, positive or negative), and 2 (move
        relative to end of file, usually negative, although many platforms allow
        seeking beyond the end of a file). If the file is opened in text mode,
        only offsets returned by tell() are legal. Use of other offsets causes
        undefined behavior.
        Note that not all file objects are seekable.
        """
        pass
    def tell(self): # real signature unknown; restored from __doc__
        """ tell() -> current file position, an integer (may be a long integer). """
        pass
    def truncate(self, size=None): # real signature unknown; restored from __doc__
        """
        truncate([size]) -> None. Truncate the file to at most size bytes.
        Size defaults to the current file position, as returned by tell().
        """
        pass
    def write(self, p_str): # real signature unknown; restored from __doc__
        """
        write(str) -> None. Write string str to file.
        Note that due to buffering, flush() or close() may be needed before
        the file on disk reflects the data written.
        """
        pass
    def writelines(self, sequence_of_strings): # real signature unknown; restored from __doc__
        """
        writelines(sequence_of_strings) -> None. Write the strings to the file.
        Note that newlines are not added. The sequence can be any iterable object
        producing strings. This is equivalent to calling write() for each string.
        """
        pass
    def xreadlines(self): # real signature unknown; restored from __doc__
        """
        xreadlines() -> returns self.
        For backward compatibility. File objects now include the performance
        optimizations previously implemented in the xreadlines module.
        """
        pass
    def __delattr__(self, name): # real signature unknown; restored from __doc__
        """ x.__delattr__('name') <==> del x.name """
        pass
    def __enter__(self): # real signature unknown; restored from __doc__
        """ __enter__() -> self. """
        return self
    def __exit__(self, *excinfo): # real signature unknown; restored from __doc__
        """ __exit__(*excinfo) -> None. Closes the file. """
        pass
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __init__(self, name, mode=None, buffering=None): # real signature unknown; restored from __doc__
        pass
    def __iter__(self): # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __setattr__(self, name, value): # real signature unknown; restored from __doc__
        """ x.__setattr__('name', value) <==> x.name = value """
        pass
    # Read-only data attributes are modelled as properties with constant
    # placeholder values; the bare strings after each one are generator-style
    # attribute docstrings (no-op statements at runtime).
    closed = property(lambda self: True)
    """True if the file is closed
    :type: bool
    """
    encoding = property(lambda self: '')
    """file encoding
    :type: string
    """
    # getter, setter and deleter placeholders, in property() argument order
    errors = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """Unicode error handler"""
    mode = property(lambda self: '')
    """file mode ('r', 'U', 'w', 'a', possibly with 'b' or '+' added)
    :type: string
    """
    name = property(lambda self: '')
    """file name
    :type: string
    """
    newlines = property(lambda self: '')
    """end-of-line convention used in this file
    :type: string
    """
    softspace = property(lambda self: True)
    """flag indicating that a space needs to be printed; used by print
    :type: bool
    """
# NOTE(review): auto-generated Python 2-era skeleton of the builtin ``float``
# (includes __coerce__/__div__/__long__/__nonzero__, all removed in Python 3).
# Bodies are placeholders; signatures and docstrings are the payload.
class float(object):
    """
    float(x) -> floating point number
    Convert a string or number to a floating point number, if possible.
    """
    def as_integer_ratio(self): # real signature unknown; restored from __doc__
        """
        float.as_integer_ratio() -> (int, int)
        Return a pair of integers, whose ratio is exactly equal to the original
        float and with a positive denominator.
        Raise OverflowError on infinities and a ValueError on NaNs.
        >>> (10.0).as_integer_ratio()
        (10, 1)
        >>> (0.0).as_integer_ratio()
        (0, 1)
        >>> (-.25).as_integer_ratio()
        (-1, 4)
        """
        pass
    def conjugate(self, *args, **kwargs): # real signature unknown
        """ Return self, the complex conjugate of any float. """
        pass
    # NOTE(review): the real float.fromhex is a classmethod (docstring says
    # ``float.fromhex(string)``); the generator modelled it as an instance
    # method -- confirm whether downstream tooling cares.
    def fromhex(self, string): # real signature unknown; restored from __doc__
        """
        float.fromhex(string) -> float
        Create a floating-point number from a hexadecimal string.
        >>> float.fromhex('0x1.ffffp10')
        2047.984375
        >>> float.fromhex('-0x1p-1074')
        -4.9406564584124654e-324
        """
        return 0.0
    def hex(self): # real signature unknown; restored from __doc__
        """
        float.hex() -> string
        Return a hexadecimal representation of a floating-point number.
        >>> (-0.1).hex()
        '-0x1.999999999999ap-4'
        >>> 3.14159.hex()
        '0x1.921f9f01b866ep+1'
        """
        return ""
    def is_integer(self, *args, **kwargs): # real signature unknown
        """ Return True if the float is an integer. """
        pass
    def __abs__(self): # real signature unknown; restored from __doc__
        """ x.__abs__() <==> abs(x) """
        pass
    def __add__(self, y): # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass
    def __coerce__(self, y): # real signature unknown; restored from __doc__
        """ x.__coerce__(y) <==> coerce(x, y) """
        pass
    def __divmod__(self, y): # real signature unknown; restored from __doc__
        """ x.__divmod__(y) <==> divmod(x, y) """
        pass
    def __div__(self, y): # real signature unknown; restored from __doc__
        """ x.__div__(y) <==> x/y """
        pass
    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass
    def __float__(self): # real signature unknown; restored from __doc__
        """ x.__float__() <==> float(x) """
        pass
    def __floordiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__floordiv__(y) <==> x//y """
        pass
    def __format__(self, format_spec): # real signature unknown; restored from __doc__
        """
        float.__format__(format_spec) -> string
        Formats the float according to format_spec.
        """
        return ""
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __getformat__(self, typestr): # real signature unknown; restored from __doc__
        """
        float.__getformat__(typestr) -> string
        You probably don't want to use this function. It exists mainly to be
        used in Python's test suite.
        typestr must be 'double' or 'float'. This function returns whichever of
        'unknown', 'IEEE, big-endian' or 'IEEE, little-endian' best describes the
        format of floating point numbers used by the C type named by typestr.
        """
        return ""
    def __getnewargs__(self, *args, **kwargs): # real signature unknown
        pass
    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass
    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass
    def __hash__(self): # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass
    def __init__(self, x): # real signature unknown; restored from __doc__
        pass
    def __int__(self): # real signature unknown; restored from __doc__
        """ x.__int__() <==> int(x) """
        pass
    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass
    def __long__(self): # real signature unknown; restored from __doc__
        """ x.__long__() <==> long(x) """
        pass
    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass
    def __mod__(self, y): # real signature unknown; restored from __doc__
        """ x.__mod__(y) <==> x%y """
        pass
    def __mul__(self, y): # real signature unknown; restored from __doc__
        """ x.__mul__(y) <==> x*y """
        pass
    def __neg__(self): # real signature unknown; restored from __doc__
        """ x.__neg__() <==> -x """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass
    def __nonzero__(self): # real signature unknown; restored from __doc__
        """ x.__nonzero__() <==> x != 0 """
        pass
    def __pos__(self): # real signature unknown; restored from __doc__
        """ x.__pos__() <==> +x """
        pass
    def __pow__(self, y, z=None): # real signature unknown; restored from __doc__
        """ x.__pow__(y[, z]) <==> pow(x, y[, z]) """
        pass
    def __radd__(self, y): # real signature unknown; restored from __doc__
        """ x.__radd__(y) <==> y+x """
        pass
    def __rdivmod__(self, y): # real signature unknown; restored from __doc__
        """ x.__rdivmod__(y) <==> divmod(y, x) """
        pass
    def __rdiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rdiv__(y) <==> y/x """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __rfloordiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rfloordiv__(y) <==> y//x """
        pass
    def __rmod__(self, y): # real signature unknown; restored from __doc__
        """ x.__rmod__(y) <==> y%x """
        pass
    def __rmul__(self, y): # real signature unknown; restored from __doc__
        """ x.__rmul__(y) <==> y*x """
        pass
    def __rpow__(self, x, z=None): # real signature unknown; restored from __doc__
        """ y.__rpow__(x[, z]) <==> pow(x, y[, z]) """
        pass
    def __rsub__(self, y): # real signature unknown; restored from __doc__
        """ x.__rsub__(y) <==> y-x """
        pass
    def __rtruediv__(self, y): # real signature unknown; restored from __doc__
        """ x.__rtruediv__(y) <==> y/x """
        pass
    def __setformat__(self, typestr, fmt): # real signature unknown; restored from __doc__
        """
        float.__setformat__(typestr, fmt) -> None
        You probably don't want to use this function. It exists mainly to be
        used in Python's test suite.
        typestr must be 'double' or 'float'. fmt must be one of 'unknown',
        'IEEE, big-endian' or 'IEEE, little-endian', and in addition can only be
        one of the latter two if it appears to match the underlying C reality.
        Override the automatic determination of C-level floating point type.
        This affects how floats are converted to and from binary strings.
        """
        pass
    def __str__(self): # real signature unknown; restored from __doc__
        """ x.__str__() <==> str(x) """
        pass
    def __sub__(self, y): # real signature unknown; restored from __doc__
        """ x.__sub__(y) <==> x-y """
        pass
    def __truediv__(self, y): # real signature unknown; restored from __doc__
        """ x.__truediv__(y) <==> x/y """
        pass
    def __trunc__(self, *args, **kwargs): # real signature unknown
        """ Return the Integral closest to x between 0 and x. """
        pass
    # getter, setter and deleter placeholders, in property() argument order
    imag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """the imaginary part of a complex number"""
    real = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """the real part of a complex number"""
# Skeleton stub of the builtin ``frozenset``: every body is a placeholder
# that simply yields None; only the signatures and docstrings carry
# information for introspection tools.
class frozenset(object):
    """
    frozenset() -> empty frozenset object
    frozenset(iterable) -> frozenset object
    Build an immutable unordered collection of unique elements.
    """

    def copy(self, *args, **kwargs):
        """ Return a shallow copy of a set. """
        return None

    def difference(self, *args, **kwargs):
        """
        Return the difference of two or more sets as a new set.
        (i.e. all elements that are in this set but not the others.)
        """
        return None

    def intersection(self, *args, **kwargs):
        """
        Return the intersection of two or more sets as a new set.
        (i.e. elements that are common to all of the sets.)
        """
        return None

    def isdisjoint(self, *args, **kwargs):
        """ Return True if two sets have a null intersection. """
        return None

    def issubset(self, *args, **kwargs):
        """ Report whether another set contains this set. """
        return None

    def issuperset(self, *args, **kwargs):
        """ Report whether this set contains another set. """
        return None

    def symmetric_difference(self, *args, **kwargs):
        """
        Return the symmetric difference of two sets as a new set.
        (i.e. all elements that are in exactly one of the sets.)
        """
        return None

    def union(self, *args, **kwargs):
        """
        Return the union of sets as a new set.
        (i.e. all elements that are in either set.)
        """
        return None

    def __and__(self, y):
        """ x.__and__(y) <==> x&y """
        return None

    def __cmp__(self, y):
        """ x.__cmp__(y) <==> cmp(x,y) """
        return None

    def __contains__(self, y):
        """ x.__contains__(y) <==> y in x. """
        return None

    def __eq__(self, y):
        """ x.__eq__(y) <==> x==y """
        return None

    def __getattribute__(self, name):
        """ x.__getattribute__('name') <==> x.name """
        return None

    def __ge__(self, y):
        """ x.__ge__(y) <==> x>=y """
        return None

    def __gt__(self, y):
        """ x.__gt__(y) <==> x>y """
        return None

    def __hash__(self):
        """ x.__hash__() <==> hash(x) """
        return None

    def __init__(self, seq=()):
        """ x.__init__(...) initializes x; see help(type(x)) for signature """
        return None

    def __iter__(self):
        """ x.__iter__() <==> iter(x) """
        return None

    def __len__(self):
        """ x.__len__() <==> len(x) """
        return None

    def __le__(self, y):
        """ x.__le__(y) <==> x<=y """
        return None

    def __lt__(self, y):
        """ x.__lt__(y) <==> x<y """
        return None

    @staticmethod
    def __new__(S, *more):
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        return None

    def __ne__(self, y):
        """ x.__ne__(y) <==> x!=y """
        return None

    def __or__(self, y):
        """ x.__or__(y) <==> x|y """
        return None

    def __rand__(self, y):
        """ x.__rand__(y) <==> y&x """
        return None

    def __reduce__(self, *args, **kwargs):
        """ Return state information for pickling. """
        return None

    def __repr__(self):
        """ x.__repr__() <==> repr(x) """
        return None

    def __ror__(self, y):
        """ x.__ror__(y) <==> y|x """
        return None

    def __rsub__(self, y):
        """ x.__rsub__(y) <==> y-x """
        return None

    def __rxor__(self, y):
        """ x.__rxor__(y) <==> y^x """
        return None

    def __sizeof__(self):
        """ S.__sizeof__() -> size of S in memory, in bytes """
        return None

    def __sub__(self, y):
        """ x.__sub__(y) <==> x-y """
        return None

    def __xor__(self, y):
        """ x.__xor__(y) <==> x^y """
        return None
# NOTE(review): auto-generated Python 2-era skeleton of the builtin ``list``
# (it still exposes __getslice__/__setslice__/__delslice__ and sort(cmp=...),
# all removed in Python 3). Bodies are placeholders; signatures and
# docstrings are the payload.
class list(object):
    """
    list() -> new empty list
    list(iterable) -> new list initialized from iterable's items
    """
    def append(self, p_object): # real signature unknown; restored from __doc__
        """ L.append(object) -- append object to end """
        pass
    def count(self, value): # real signature unknown; restored from __doc__
        """ L.count(value) -> integer -- return number of occurrences of value """
        return 0
    def extend(self, iterable): # real signature unknown; restored from __doc__
        """ L.extend(iterable) -- extend list by appending elements from the iterable """
        pass
    def index(self, value, start=None, stop=None): # real signature unknown; restored from __doc__
        """
        L.index(value, [start, [stop]]) -> integer -- return first index of value.
        Raises ValueError if the value is not present.
        """
        return 0
    def insert(self, index, p_object): # real signature unknown; restored from __doc__
        """ L.insert(index, object) -- insert object before index """
        pass
    def pop(self, index=None): # real signature unknown; restored from __doc__
        """
        L.pop([index]) -> item -- remove and return item at index (default last).
        Raises IndexError if list is empty or index is out of range.
        """
        pass
    def remove(self, value): # real signature unknown; restored from __doc__
        """
        L.remove(value) -- remove first occurrence of value.
        Raises ValueError if the value is not present.
        """
        pass
    def reverse(self): # real signature unknown; restored from __doc__
        """ L.reverse() -- reverse *IN PLACE* """
        pass
    def sort(self, cmp=None, key=None, reverse=False): # real signature unknown; restored from __doc__
        """
        L.sort(cmp=None, key=None, reverse=False) -- stable sort *IN PLACE*;
        cmp(x, y) -> -1, 0, 1
        """
        pass
    def __add__(self, y): # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass
    def __contains__(self, y): # real signature unknown; restored from __doc__
        """ x.__contains__(y) <==> y in x """
        pass
    def __delitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__delitem__(y) <==> del x[y] """
        pass
    def __delslice__(self, i, j): # real signature unknown; restored from __doc__
        """
        x.__delslice__(i, j) <==> del x[i:j]
        Use of negative indices is not supported.
        """
        pass
    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __getitem__(self, y): # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        pass
    def __getslice__(self, i, j): # real signature unknown; restored from __doc__
        """
        x.__getslice__(i, j) <==> x[i:j]
        Use of negative indices is not supported.
        """
        pass
    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass
    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass
    def __iadd__(self, y): # real signature unknown; restored from __doc__
        """ x.__iadd__(y) <==> x+=y """
        pass
    def __imul__(self, y): # real signature unknown; restored from __doc__
        """ x.__imul__(y) <==> x*=y """
        pass
    def __init__(self, seq=()): # known special case of list.__init__
        """
        list() -> new empty list
        list(iterable) -> new list initialized from iterable's items
        # (copied from class doc)
        """
        pass
    def __iter__(self): # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        pass
    def __len__(self): # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        pass
    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass
    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass
    def __mul__(self, n): # real signature unknown; restored from __doc__
        """ x.__mul__(n) <==> x*n """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass
    def __repr__(self): # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        pass
    def __reversed__(self): # real signature unknown; restored from __doc__
        """ L.__reversed__() -- return a reverse iterator over the list """
        pass
    def __rmul__(self, n): # real signature unknown; restored from __doc__
        """ x.__rmul__(n) <==> n*x """
        pass
    def __setitem__(self, i, y): # real signature unknown; restored from __doc__
        """ x.__setitem__(i, y) <==> x[i]=y """
        pass
    def __setslice__(self, i, j, y): # real signature unknown; restored from __doc__
        """
        x.__setslice__(i, j, y) <==> x[i:j]=y
        Use of negative indices is not supported.
        """
        pass
    def __sizeof__(self): # real signature unknown; restored from __doc__
        """ L.__sizeof__() -- size of L in memory, in bytes """
        pass
    __hash__ = None  # mutable sequence: instances are explicitly unhashable
# NOTE(review): skeleton of the Python 2 ``long`` type (folded into ``int``
# in Python 3). The class body continues past the end of this excerpt;
# only the members visible here are annotated. Bodies are placeholders.
class long(object):
    """
    long(x=0) -> long
    long(x, base=10) -> long
    Convert a number or string to a long integer, or return 0L if no arguments
    are given. If x is floating point, the conversion truncates towards zero.
    If x is not a number or if base is given, then x must be a string or
    Unicode object representing an integer literal in the given base. The
    literal can be preceded by '+' or '-' and be surrounded by whitespace.
    The base defaults to 10. Valid bases are 0 and 2-36. Base 0 means to
    interpret the base from the string as an integer literal.
    >>> int('0b100', base=0)
    4L
    """
    def bit_length(self): # real signature unknown; restored from __doc__
        """
        long.bit_length() -> int or long
        Number of bits necessary to represent self in binary.
        >>> bin(37L)
        '0b100101'
        >>> (37L).bit_length()
        6
        """
        return 0
    def conjugate(self, *args, **kwargs): # real signature unknown
        """ Returns self, the complex conjugate of any long. """
        pass
    def __abs__(self): # real signature unknown; restored from __doc__
        """ x.__abs__() <==> abs(x) """
        pass
    def __add__(self, y): # real signature unknown; restored from __doc__
        """ x.__add__(y) <==> x+y """
        pass
    def __and__(self, y): # real signature unknown; restored from __doc__
        """ x.__and__(y) <==> x&y """
        pass
    def __cmp__(self, y): # real signature unknown; restored from __doc__
        """ x.__cmp__(y) <==> cmp(x,y) """
        pass
    def __coerce__(self, y): # real signature unknown; restored from __doc__
        """ x.__coerce__(y) <==> coerce(x, y) """
        pass
    def __divmod__(self, y): # real signature unknown; restored from __doc__
        """ x.__divmod__(y) <==> divmod(x, y) """
        pass
    def __div__(self, y): # real signature unknown; restored from __doc__
        """ x.__div__(y) <==> x/y """
        pass
    def __float__(self): # real signature unknown; restored from __doc__
        """ x.__float__() <==> float(x) """
        pass
    def __floordiv__(self, y): # real signature unknown; restored from __doc__
        """ x.__floordiv__(y) <==> x//y """
        pass
    def __format__(self, *args, **kwargs): # real signature unknown
        pass
    def __getattribute__(self, name): # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        pass
    def __getnewargs__(self, *args, **kwargs): # real signature unknown
        pass
    def __hash__(self): # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        pass
    def __hex__(self): # real signature unknown; restored from __doc__
        """ x.__hex__() <==> hex(x) """
        pass
    def __index__(self): # real signature unknown; restored from __doc__
        """ x[y:z] <==> x[y.__index__():z.__index__()] """
        pass
    def __init__(self, x=0): # real signature unknown; restored from __doc__
        pass
    def __int__(self): # real signature unknown; restored from __doc__
        """ x.__int__() <==> int(x) """
        pass
    def __invert__(self): # real signature unknown; restored from __doc__
        """ x.__invert__() <==> ~x """
        pass
    def __long__(self): # real signature unknown; restored from __doc__
        """ x.__long__() <==> long(x) """
        pass
    def __lshift__(self, y): # real signature unknown; restored from __doc__
        """ x.__lshift__(y) <==> x<<y """
        pass
    def __mod__(self, y): # real signature unknown; restored from __doc__
        """ x.__mod__(y) <==> x%y """
        pass
    def __mul__(self, y): # real signature unknown; restored from __doc__
        """ x.__mul__(y) <==> x*y """
        pass
    def __neg__(self): # real signature unknown; restored from __doc__
        """ x.__neg__() <==> -x """
        pass
    @staticmethod # known case of __new__
    def __new__(S, *more): # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        pass
    def __nonzero__(self): # real signature unknown; restored from __doc__
        """ x.__nonzero__() <==> x != 0 """
        pass
    def __oct__(self): # real signature unknown; restored from __doc__
        """ x.__oct__() <==> oct(x) """
        pass
    def __or__(self, y): # real signature unknown; restored from __doc__
        """ x.__or__(y) <==> x|y """
        pass
    def __pos__(self): # real signature unknown; restored from __doc__
        """ x.__pos__() <==> +x """
        pass
    def __pow__(self, y, z=None): # real signature unknown; restored from __doc__
        """ x.__pow__(y[, z]) <==> pow(x, y[, z]) """
        pass
    def __radd__(self, y): # real signature unknown; restored from __doc__
        """ x.__radd__(y) <==> y+x """
        pass
    def __rand__(self, y): # real signature unknown; restored from __doc__
        """ x.__rand__(y) <==> y&x """
        pass
def __rdivmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rdivmod__(y) <==> divmod(y, x) """
pass
def __rdiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rdiv__(y) <==> y/x """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rfloordiv__(self, y): # real signature unknown; restored from __doc__
""" x.__rfloordiv__(y) <==> y//x """
pass
def __rlshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rlshift__(y) <==> y<<x """
pass
def __rmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rmod__(y) <==> y%x """
pass
def __rmul__(self, y): # real signature unknown; restored from __doc__
""" x.__rmul__(y) <==> y*x """
pass
def __ror__(self, y): # real signature unknown; restored from __doc__
""" x.__ror__(y) <==> y|x """
pass
def __rpow__(self, x, z=None): # real signature unknown; restored from __doc__
""" y.__rpow__(x[, z]) <==> pow(x, y[, z]) """
pass
def __rrshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rrshift__(y) <==> y>>x """
pass
def __rshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rshift__(y) <==> x>>y """
pass
def __rsub__(self, y): # real signature unknown; restored from __doc__
""" x.__rsub__(y) <==> y-x """
pass
def __rtruediv__(self, y): # real signature unknown; restored from __doc__
""" x.__rtruediv__(y) <==> y/x """
pass
def __rxor__(self, y): # real signature unknown; restored from __doc__
""" x.__rxor__(y) <==> y^x """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Returns size in memory, in bytes """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
def __sub__(self, y): # real signature unknown; restored from __doc__
""" x.__sub__(y) <==> x-y """
pass
def __truediv__(self, y): # real signature unknown; restored from __doc__
""" x.__truediv__(y) <==> x/y """
pass
def __trunc__(self, *args, **kwargs): # real signature unknown
""" Truncating an Integral returns itself. """
pass
def __xor__(self, y): # real signature unknown; restored from __doc__
""" x.__xor__(y) <==> x^y """
pass
denominator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the denominator of a rational number in lowest terms"""
imag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the imaginary part of a complex number"""
numerator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the numerator of a rational number in lowest terms"""
real = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the real part of a complex number"""
class memoryview(object):
    """
    memoryview(object)
    Create a new memoryview object which references the given object.
    """
    # Auto-generated stub: every body below is a placeholder that yields None.

    def tobytes(self, *args, **kwargs):  # real signature unknown
        return None

    def tolist(self, *args, **kwargs):  # real signature unknown
        return None

    def __delitem__(self, y):  # real signature unknown; restored from __doc__
        """ x.__delitem__(y) <==> del x[y] """
        return None

    def __eq__(self, y):  # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        return None

    def __getattribute__(self, name):  # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        return None

    def __getitem__(self, y):  # real signature unknown; restored from __doc__
        """ x.__getitem__(y) <==> x[y] """
        return None

    def __ge__(self, y):  # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        return None

    def __gt__(self, y):  # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        return None

    def __init__(self, p_object):  # real signature unknown; restored from __doc__
        return None

    def __len__(self):  # real signature unknown; restored from __doc__
        """ x.__len__() <==> len(x) """
        return None

    def __le__(self, y):  # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        return None

    def __lt__(self, y):  # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        return None

    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        return None

    def __ne__(self, y):  # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        return None

    def __repr__(self):  # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        return None

    def __setitem__(self, i, y):  # real signature unknown; restored from __doc__
        """ x.__setitem__(i, y) <==> x[i]=y """
        return None

    # Buffer metadata attributes, stubbed as generic property placeholders.
    format = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    itemsize = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    ndim = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    readonly = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    shape = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    strides = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    suboffsets = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
# NOTE(review): auto-generated stub for the `property` builtin; the duplicated
# class docstring inside __init__ ("copied from class doc") is generator
# output and is intentionally preserved verbatim. All bodies are placeholders.
class property(object):
"""
property(fget=None, fset=None, fdel=None, doc=None) -> property attribute
fget is a function to be used for getting an attribute value, and likewise
fset is a function for setting, and fdel a function for del'ing, an
attribute. Typical use is to define a managed attribute x:
class C(object):
def getx(self): return self._x
def setx(self, value): self._x = value
def delx(self): del self._x
x = property(getx, setx, delx, "I'm the 'x' property.")
Decorators make defining new properties or modifying existing ones easy:
class C(object):
@property
def x(self):
"I am the 'x' property."
return self._x
@x.setter
def x(self, value):
self._x = value
@x.deleter
def x(self):
del self._x
"""
def deleter(self, *args, **kwargs): # real signature unknown
""" Descriptor to change the deleter on a property. """
pass
def getter(self, *args, **kwargs): # real signature unknown
""" Descriptor to change the getter on a property. """
pass
def setter(self, *args, **kwargs): # real signature unknown
""" Descriptor to change the setter on a property. """
pass
def __delete__(self, obj): # real signature unknown; restored from __doc__
""" descr.__delete__(obj) """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __get__(self, obj, type=None): # real signature unknown; restored from __doc__
""" descr.__get__(obj[, type]) -> value """
pass
def __init__(self, fget=None, fset=None, fdel=None, doc=None): # known special case of property.__init__
"""
property(fget=None, fset=None, fdel=None, doc=None) -> property attribute
fget is a function to be used for getting an attribute value, and likewise
fset is a function for setting, and fdel a function for del'ing, an
attribute. Typical use is to define a managed attribute x:
class C(object):
def getx(self): return self._x
def setx(self, value): self._x = value
def delx(self): del self._x
x = property(getx, setx, delx, "I'm the 'x' property.")
Decorators make defining new properties or modifying existing ones easy:
class C(object):
@property
def x(self):
"I am the 'x' property."
return self._x
@x.setter
def x(self, value):
self._x = value
@x.deleter
def x(self):
del self._x
# (copied from class doc)
"""
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __set__(self, obj, value): # real signature unknown; restored from __doc__
""" descr.__set__(obj, value) """
pass
# The three accessor slots, stubbed as generic property placeholders.
fdel = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
fget = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
fset = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
class reversed(object):
    """
    reversed(sequence) -> reverse iterator over values of the sequence
    Return a reverse iterator
    """
    # Auto-generated stub: placeholder bodies only; `next` is the Python 2
    # iterator protocol method kept by the generator.

    def next(self):  # real signature unknown; restored from __doc__
        """ x.next() -> the next value, or raise StopIteration """
        return None

    def __getattribute__(self, name):  # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        return None

    def __init__(self, sequence):  # real signature unknown; restored from __doc__
        return None

    def __iter__(self):  # real signature unknown; restored from __doc__
        """ x.__iter__() <==> iter(x) """
        return None

    def __length_hint__(self, *args, **kwargs):  # real signature unknown
        """ Private method returning an estimate of len(list(it)). """
        return None

    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        return None
# NOTE(review): auto-generated stub for the `set` builtin; bodies are
# placeholders. The trailing `__hash__ = None` mirrors CPython, which marks
# set unhashable because it defines `__eq__`.
class set(object):
"""
set() -> new empty set object
set(iterable) -> new set object
Build an unordered collection of unique elements.
"""
def add(self, *args, **kwargs): # real signature unknown
"""
Add an element to a set.
This has no effect if the element is already present.
"""
pass
def clear(self, *args, **kwargs): # real signature unknown
""" Remove all elements from this set. """
pass
def copy(self, *args, **kwargs): # real signature unknown
""" Return a shallow copy of a set. """
pass
def difference(self, *args, **kwargs): # real signature unknown
"""
Return the difference of two or more sets as a new set.
(i.e. all elements that are in this set but not the others.)
"""
pass
def difference_update(self, *args, **kwargs): # real signature unknown
""" Remove all elements of another set from this set. """
pass
def discard(self, *args, **kwargs): # real signature unknown
"""
Remove an element from a set if it is a member.
If the element is not a member, do nothing.
"""
pass
def intersection(self, *args, **kwargs): # real signature unknown
"""
Return the intersection of two or more sets as a new set.
(i.e. elements that are common to all of the sets.)
"""
pass
def intersection_update(self, *args, **kwargs): # real signature unknown
""" Update a set with the intersection of itself and another. """
pass
def isdisjoint(self, *args, **kwargs): # real signature unknown
""" Return True if two sets have a null intersection. """
pass
def issubset(self, *args, **kwargs): # real signature unknown
""" Report whether another set contains this set. """
pass
def issuperset(self, *args, **kwargs): # real signature unknown
""" Report whether this set contains another set. """
pass
def pop(self, *args, **kwargs): # real signature unknown
"""
Remove and return an arbitrary set element.
Raises KeyError if the set is empty.
"""
pass
def remove(self, *args, **kwargs): # real signature unknown
"""
Remove an element from a set; it must be a member.
If the element is not a member, raise a KeyError.
"""
pass
def symmetric_difference(self, *args, **kwargs): # real signature unknown
"""
Return the symmetric difference of two sets as a new set.
(i.e. all elements that are in exactly one of the sets.)
"""
pass
def symmetric_difference_update(self, *args, **kwargs): # real signature unknown
""" Update a set with the symmetric difference of itself and another. """
pass
def union(self, *args, **kwargs): # real signature unknown
"""
Return the union of sets as a new set.
(i.e. all elements that are in either set.)
"""
pass
def update(self, *args, **kwargs): # real signature unknown
""" Update a set with the union of itself and others. """
pass
def __and__(self, y): # real signature unknown; restored from __doc__
""" x.__and__(y) <==> x&y """
pass
def __cmp__(self, y): # real signature unknown; restored from __doc__
""" x.__cmp__(y) <==> cmp(x,y) """
pass
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x. """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __iand__(self, y): # real signature unknown; restored from __doc__
""" x.__iand__(y) <==> x&=y """
pass
def __init__(self, seq=()): # known special case of set.__init__
"""
set() -> new empty set object
set(iterable) -> new set object
Build an unordered collection of unique elements.
# (copied from class doc)
"""
pass
def __ior__(self, y): # real signature unknown; restored from __doc__
""" x.__ior__(y) <==> x|=y """
pass
def __isub__(self, y): # real signature unknown; restored from __doc__
""" x.__isub__(y) <==> x-=y """
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
def __ixor__(self, y): # real signature unknown; restored from __doc__
""" x.__ixor__(y) <==> x^=y """
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __or__(self, y): # real signature unknown; restored from __doc__
""" x.__or__(y) <==> x|y """
pass
def __rand__(self, y): # real signature unknown; restored from __doc__
""" x.__rand__(y) <==> y&x """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Return state information for pickling. """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __ror__(self, y): # real signature unknown; restored from __doc__
""" x.__ror__(y) <==> y|x """
pass
def __rsub__(self, y): # real signature unknown; restored from __doc__
""" x.__rsub__(y) <==> y-x """
pass
def __rxor__(self, y): # real signature unknown; restored from __doc__
""" x.__rxor__(y) <==> y^x """
pass
def __sizeof__(self): # real signature unknown; restored from __doc__
""" S.__sizeof__() -> size of S in memory, in bytes """
pass
def __sub__(self, y): # real signature unknown; restored from __doc__
""" x.__sub__(y) <==> x-y """
pass
def __xor__(self, y): # real signature unknown; restored from __doc__
""" x.__xor__(y) <==> x^y """
pass
# Sets are mutable and therefore unhashable.
__hash__ = None
class slice(object):
    """
    slice(stop)
    slice(start, stop[, step])
    Create a slice object. This is used for extended slicing (e.g. a[0:10:2]).
    """
    # Auto-generated stub: placeholder bodies only.

    def indices(self, len):  # real signature unknown; restored from __doc__
        """
        S.indices(len) -> (start, stop, stride)
        Assuming a sequence of length len, calculate the start and stop
        indices, and the stride length of the extended slice described by
        S. Out of bounds indices are clipped in a manner consistent with the
        handling of normal slices.
        """
        return None

    def __cmp__(self, y):  # real signature unknown; restored from __doc__
        """ x.__cmp__(y) <==> cmp(x,y) """
        return None

    def __getattribute__(self, name):  # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        return None

    def __hash__(self):  # real signature unknown; restored from __doc__
        """ x.__hash__() <==> hash(x) """
        return None

    def __init__(self, stop):  # real signature unknown; restored from __doc__
        return None

    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        return None

    def __reduce__(self, *args, **kwargs):  # real signature unknown
        """ Return state information for pickling. """
        return None

    def __repr__(self):  # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        return None

    # The three slice fields (int-typed), stubbed as constant-0 properties.
    start = property(lambda self: 0)
    step = property(lambda self: 0)
    stop = property(lambda self: 0)
class staticmethod(object):
    """
    staticmethod(function) -> method
    Convert a function to be a static method.
    A static method does not receive an implicit first argument.
    To declare a static method, use this idiom:
    class C:
    def f(arg1, arg2, ...): ...
    f = staticmethod(f)
    It can be called either on the class (e.g. C.f()) or on an instance
    (e.g. C().f()). The instance is ignored except for its class.
    Static methods in Python are similar to those found in Java or C++.
    For a more advanced concept, see the classmethod builtin.
    """
    # Auto-generated stub: placeholder bodies only. The @staticmethod below
    # resolves to the *builtin* of that name, since this class is not yet
    # bound while its body executes.

    def __getattribute__(self, name):  # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        return None

    def __get__(self, obj, type=None):  # real signature unknown; restored from __doc__
        """ descr.__get__(obj[, type]) -> value """
        return None

    def __init__(self, function):  # real signature unknown; restored from __doc__
        return None

    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        return None

    # The wrapped callable, stubbed as a generic property placeholder.
    __func__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
class super(object):
    """
    super(type, obj) -> bound super object; requires isinstance(obj, type)
    super(type) -> unbound super object
    super(type, type2) -> bound super object; requires issubclass(type2, type)
    Typical use to call a cooperative superclass method:
    class C(B):
    def meth(self, arg):
    super(C, self).meth(arg)
    """
    # Auto-generated stub: placeholder bodies only.

    def __getattribute__(self, name):  # real signature unknown; restored from __doc__
        """ x.__getattribute__('name') <==> x.name """
        return None

    def __get__(self, obj, type=None):  # real signature unknown; restored from __doc__
        """ descr.__get__(obj[, type]) -> value """
        return None

    def __init__(self, type1, type2=None):  # known special case of super.__init__
        """
        super(type, obj) -> bound super object; requires isinstance(obj, type)
        super(type) -> unbound super object
        super(type, type2) -> bound super object; requires issubclass(type2, type)
        Typical use to call a cooperative superclass method:
        class C(B):
        def meth(self, arg):
        super(C, self).meth(arg)
        # (copied from class doc)
        """
        return None

    @staticmethod  # known case of __new__
    def __new__(S, *more):  # real signature unknown; restored from __doc__
        """ T.__new__(S, ...) -> a new object with type S, a subtype of T """
        return None

    def __repr__(self):  # real signature unknown; restored from __doc__
        """ x.__repr__() <==> repr(x) """
        return None

    # Introspection attributes of a bound super object, stubbed as properties.
    __self_class__ = property(lambda self: type(object))
    __self__ = property(lambda self: type(object))
    __thisclass__ = property(lambda self: type(object))
# NOTE(review): auto-generated stub for the `tuple` builtin; bodies are
# placeholders. `__getslice__` is a Python 2-only protocol method preserved
# verbatim from the generator output.
class tuple(object):
"""
tuple() -> empty tuple
tuple(iterable) -> tuple initialized from iterable's items
If the argument is a tuple, the return value is the same object.
"""
def count(self, value): # real signature unknown; restored from __doc__
""" T.count(value) -> integer -- return number of occurrences of value """
return 0
def index(self, value, start=None, stop=None): # real signature unknown; restored from __doc__
"""
T.index(value, [start, [stop]]) -> integer -- return first index of value.
Raises ValueError if the value is not present.
"""
return 0
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __getslice__(self, i, j): # real signature unknown; restored from __doc__
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, seq=()): # known special case of tuple.__init__
"""
tuple() -> empty tuple
tuple(iterable) -> tuple initialized from iterable's items
If the argument is a tuple, the return value is the same object.
# (copied from class doc)
"""
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mul__(self, n): # real signature unknown; restored from __doc__
""" x.__mul__(n) <==> x*n """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmul__(self, n): # real signature unknown; restored from __doc__
""" x.__rmul__(n) <==> n*x """
pass
def __sizeof__(self): # real signature unknown; restored from __doc__
""" T.__sizeof__() -- size of T in memory, in bytes """
pass
# NOTE(review): auto-generated stub for the `type` builtin. Bodies are
# placeholders; the class-level assignments at the bottom are the generator's
# snapshot of type's introspection attributes (sizes/flags are
# interpreter-specific captured values, not meaningful constants). The
# "# (!)" markers are generator annotations for values it could not emit.
class type(object):
"""
type(object) -> the object's type
type(name, bases, dict) -> a new type
"""
def mro(self): # real signature unknown; restored from __doc__
"""
mro() -> list
return a type's method resolution order
"""
return []
def __call__(self, *more): # real signature unknown; restored from __doc__
""" x.__call__(...) <==> x(...) """
pass
def __delattr__(self, name): # real signature unknown; restored from __doc__
""" x.__delattr__('name') <==> del x.name """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(cls, what, bases=None, dict=None): # known special case of type.__init__
"""
type(object) -> the object's type
type(name, bases, dict) -> a new type
# (copied from class doc)
"""
pass
def __instancecheck__(self): # real signature unknown; restored from __doc__
"""
__instancecheck__() -> bool
check if an object is an instance
"""
return False
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __setattr__(self, name, value): # real signature unknown; restored from __doc__
""" x.__setattr__('name', value) <==> x.name = value """
pass
def __subclasscheck__(self): # real signature unknown; restored from __doc__
"""
__subclasscheck__() -> bool
check if a class is a subclass
"""
return False
def __subclasses__(self): # real signature unknown; restored from __doc__
""" __subclasses__() -> list of immediate subclasses """
return []
__abstractmethods__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
# Snapshot of type's own introspection attributes (captured placeholder
# values; see NOTE above). Left byte-identical — assigning these dunders at
# class-creation time is sensitive and must not be restyled.
__bases__ = (
object,
)
__base__ = object
__basicsize__ = 872
__dictoffset__ = 264
__dict__ = None # (!) real value is ''
__flags__ = 2148423147
__itemsize__ = 40
__mro__ = (
None, # (!) forward: type, real value is ''
object,
)
__name__ = 'type'
__weakrefoffset__ = 368
class unicode(basestring):
"""
unicode(object='') -> unicode object
unicode(string[, encoding[, errors]]) -> unicode object
Create a new Unicode object from the given encoded string.
encoding defaults to the current default string encoding.
errors can be 'strict', 'replace' or 'ignore' and defaults to 'strict'.
"""
def capitalize(self): # real signature unknown; restored from __doc__
"""
S.capitalize() -> unicode
Return a capitalized version of S, i.e. make the first character
have upper case and the rest lower case.
"""
return u""
def center(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.center(width[, fillchar]) -> unicode
Return S centered in a Unicode string of length width. Padding is
done using the specified fill character (default is a space)
"""
return u""
def count(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.count(sub[, start[, end]]) -> int
Return the number of non-overlapping occurrences of substring sub in
Unicode string S[start:end]. Optional arguments start and end are
interpreted as in slice notation.
"""
return 0
def decode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
S.decode([encoding[,errors]]) -> string or unicode
Decodes S using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeDecodeError. Other possible values are 'ignore' and 'replace'
as well as any other name registered with codecs.register_error that is
able to handle UnicodeDecodeErrors.
"""
return ""
def encode(self, encoding=None, errors=None): # real signature unknown; restored from __doc__
"""
S.encode([encoding[,errors]]) -> string or unicode
Encodes S using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a UnicodeEncodeError. Other possible values are 'ignore', 'replace' and
'xmlcharrefreplace' as well as any other name registered with
codecs.register_error that can handle UnicodeEncodeErrors.
"""
return ""
def endswith(self, suffix, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.endswith(suffix[, start[, end]]) -> bool
Return True if S ends with the specified suffix, False otherwise.
With optional start, test S beginning at that position.
With optional end, stop comparing S at that position.
suffix can also be a tuple of strings to try.
"""
return False
def expandtabs(self, tabsize=None): # real signature unknown; restored from __doc__
"""
S.expandtabs([tabsize]) -> unicode
Return a copy of S where all tab characters are expanded using spaces.
If tabsize is not given, a tab size of 8 characters is assumed.
"""
return u""
def find(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.find(sub [,start [,end]]) -> int
Return the lowest index in S where substring sub is found,
such that sub is contained within S[start:end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return 0
def format(*args, **kwargs): # known special case of unicode.format
"""
S.format(*args, **kwargs) -> unicode
Return a formatted version of S, using substitutions from args and kwargs.
The substitutions are identified by braces ('{' and '}').
"""
pass
def index(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.index(sub [,start [,end]]) -> int
Like S.find() but raise ValueError when the substring is not found.
"""
return 0
def isalnum(self): # real signature unknown; restored from __doc__
"""
S.isalnum() -> bool
Return True if all characters in S are alphanumeric
and there is at least one character in S, False otherwise.
"""
return False
def isalpha(self): # real signature unknown; restored from __doc__
"""
S.isalpha() -> bool
Return True if all characters in S are alphabetic
and there is at least one character in S, False otherwise.
"""
return False
def isdecimal(self): # real signature unknown; restored from __doc__
"""
S.isdecimal() -> bool
Return True if there are only decimal characters in S,
False otherwise.
"""
return False
def isdigit(self): # real signature unknown; restored from __doc__
"""
S.isdigit() -> bool
Return True if all characters in S are digits
and there is at least one character in S, False otherwise.
"""
return False
def islower(self): # real signature unknown; restored from __doc__
"""
S.islower() -> bool
Return True if all cased characters in S are lowercase and there is
at least one cased character in S, False otherwise.
"""
return False
def isnumeric(self): # real signature unknown; restored from __doc__
"""
S.isnumeric() -> bool
Return True if there are only numeric characters in S,
False otherwise.
"""
return False
def isspace(self): # real signature unknown; restored from __doc__
"""
S.isspace() -> bool
Return True if all characters in S are whitespace
and there is at least one character in S, False otherwise.
"""
return False
def istitle(self): # real signature unknown; restored from __doc__
"""
S.istitle() -> bool
Return True if S is a titlecased string and there is at least one
character in S, i.e. upper- and titlecase characters may only
follow uncased characters and lowercase characters only cased ones.
Return False otherwise.
"""
return False
def isupper(self): # real signature unknown; restored from __doc__
"""
S.isupper() -> bool
Return True if all cased characters in S are uppercase and there is
at least one cased character in S, False otherwise.
"""
return False
def join(self, iterable): # real signature unknown; restored from __doc__
"""
S.join(iterable) -> unicode
Return a string which is the concatenation of the strings in the
iterable. The separator between elements is S.
"""
return u""
def ljust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.ljust(width[, fillchar]) -> int
Return S left-justified in a Unicode string of length width. Padding is
done using the specified fill character (default is a space).
"""
return 0
def lower(self): # real signature unknown; restored from __doc__
"""
S.lower() -> unicode
Return a copy of the string S converted to lowercase.
"""
return u""
def lstrip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.lstrip([chars]) -> unicode
Return a copy of the string S with leading whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is a str, it will be converted to unicode before stripping
"""
return u""
def partition(self, sep): # real signature unknown; restored from __doc__
"""
S.partition(sep) -> (head, sep, tail)
Search for the separator sep in S, and return the part before it,
the separator itself, and the part after it. If the separator is not
found, return S and two empty strings.
"""
pass
def replace(self, old, new, count=None): # real signature unknown; restored from __doc__
"""
S.replace(old, new[, count]) -> unicode
Return a copy of S with all occurrences of substring
old replaced by new. If the optional argument count is
given, only the first count occurrences are replaced.
"""
return u""
def rfind(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.rfind(sub [,start [,end]]) -> int
Return the highest index in S where substring sub is found,
such that sub is contained within S[start:end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return 0
def rindex(self, sub, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.rindex(sub [,start [,end]]) -> int
Like S.rfind() but raise ValueError when the substring is not found.
"""
return 0
def rjust(self, width, fillchar=None): # real signature unknown; restored from __doc__
"""
S.rjust(width[, fillchar]) -> unicode
Return S right-justified in a Unicode string of length width. Padding is
done using the specified fill character (default is a space).
"""
return u""
def rpartition(self, sep): # real signature unknown; restored from __doc__
"""
S.rpartition(sep) -> (head, sep, tail)
Search for the separator sep in S, starting at the end of S, and return
the part before it, the separator itself, and the part after it. If the
separator is not found, return two empty strings and S.
"""
pass
def rsplit(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
"""
S.rsplit([sep [,maxsplit]]) -> list of strings
Return a list of the words in S, using sep as the
delimiter string, starting at the end of the string and
working to the front. If maxsplit is given, at most maxsplit
splits are done. If sep is not specified, any whitespace string
is a separator.
"""
return []
def rstrip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.rstrip([chars]) -> unicode
Return a copy of the string S with trailing whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is a str, it will be converted to unicode before stripping
"""
return u""
def split(self, sep=None, maxsplit=None): # real signature unknown; restored from __doc__
"""
S.split([sep [,maxsplit]]) -> list of strings
Return a list of the words in S, using sep as the
delimiter string. If maxsplit is given, at most maxsplit
splits are done. If sep is not specified or is None, any
whitespace string is a separator and empty strings are
removed from the result.
"""
return []
def splitlines(self, keepends=False): # real signature unknown; restored from __doc__
"""
S.splitlines(keepends=False) -> list of strings
Return a list of the lines in S, breaking at line boundaries.
Line breaks are not included in the resulting list unless keepends
is given and true.
"""
return []
def startswith(self, prefix, start=None, end=None): # real signature unknown; restored from __doc__
"""
S.startswith(prefix[, start[, end]]) -> bool
Return True if S starts with the specified prefix, False otherwise.
With optional start, test S beginning at that position.
With optional end, stop comparing S at that position.
prefix can also be a tuple of strings to try.
"""
return False
def strip(self, chars=None): # real signature unknown; restored from __doc__
"""
S.strip([chars]) -> unicode
Return a copy of the string S with leading and trailing
whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is a str, it will be converted to unicode before stripping
"""
return u""
def swapcase(self): # real signature unknown; restored from __doc__
"""
S.swapcase() -> unicode
Return a copy of S with uppercase characters converted to lowercase
and vice versa.
"""
return u""
def title(self): # real signature unknown; restored from __doc__
"""
S.title() -> unicode
Return a titlecased version of S, i.e. words start with title case
characters, all remaining cased characters have lower case.
"""
return u""
def translate(self, table): # real signature unknown; restored from __doc__
"""
S.translate(table) -> unicode
Return a copy of the string S, where all characters have been mapped
through the given translation table, which must be a mapping of
Unicode ordinals to Unicode ordinals, Unicode strings or None.
Unmapped characters are left untouched. Characters mapped to None
are deleted.
"""
return u""
def upper(self): # real signature unknown; restored from __doc__
"""
S.upper() -> unicode
Return a copy of S converted to uppercase.
"""
return u""
def zfill(self, width): # real signature unknown; restored from __doc__
"""
S.zfill(width) -> unicode
Pad a numeric string S with zeros on the left, to fill a field
of the specified width. The string S is never truncated.
"""
return u""
def _formatter_field_name_split(self, *args, **kwargs): # real signature unknown
pass
def _formatter_parser(self, *args, **kwargs): # real signature unknown
pass
def __add__(self, y): # real signature unknown; restored from __doc__
""" x.__add__(y) <==> x+y """
pass
def __contains__(self, y): # real signature unknown; restored from __doc__
""" x.__contains__(y) <==> y in x """
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __format__(self, format_spec): # real signature unknown; restored from __doc__
"""
S.__format__(format_spec) -> unicode
Return a formatted version of S as described by format_spec.
"""
return u""
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __getslice__(self, i, j): # real signature unknown; restored from __doc__
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __hash__(self): # real signature unknown; restored from __doc__
""" x.__hash__() <==> hash(x) """
pass
def __init__(self, string=u'', encoding=None, errors='strict'): # known special case of unicode.__init__
"""
unicode(object='') -> unicode object
unicode(string[, encoding[, errors]]) -> unicode object
Create a new Unicode object from the given encoded string.
encoding defaults to the current default string encoding.
errors can be 'strict', 'replace' or 'ignore' and defaults to 'strict'.
# (copied from class doc)
"""
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __mod__(self, y): # real signature unknown; restored from __doc__
""" x.__mod__(y) <==> x%y """
pass
def __mul__(self, n): # real signature unknown; restored from __doc__
""" x.__mul__(n) <==> x*n """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __rmod__(self, y): # real signature unknown; restored from __doc__
""" x.__rmod__(y) <==> y%x """
pass
def __rmul__(self, n): # real signature unknown; restored from __doc__
""" x.__rmul__(n) <==> n*x """
pass
def __sizeof__(self): # real signature unknown; restored from __doc__
""" S.__sizeof__() -> size of S in memory, in bytes """
pass
def __str__(self): # real signature unknown; restored from __doc__
""" x.__str__() <==> str(x) """
pass
class xrange(object):
"""
xrange(stop) -> xrange object
xrange(start, stop[, step]) -> xrange object
Like range(), but instead of returning a list, returns an object that
generates the numbers in the range on demand. For looping, this is
slightly faster than range() and more memory efficient.
"""
def __getattribute__(self, name): # real signature unknown; restored from __doc__
""" x.__getattribute__('name') <==> x.name """
pass
def __getitem__(self, y): # real signature unknown; restored from __doc__
""" x.__getitem__(y) <==> x[y] """
pass
def __init__(self, stop): # real signature unknown; restored from __doc__
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
def __len__(self): # real signature unknown; restored from __doc__
""" x.__len__() <==> len(x) """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self): # real signature unknown; restored from __doc__
""" x.__repr__() <==> repr(x) """
pass
def __reversed__(self, *args, **kwargs): # real signature unknown
""" Returns a reverse iterator. """
pass
# variables with complex values
Ellipsis = None # (!) real value is ''
NotImplemented = None # (!) real value is ''
| apache-2.0 |
takis/django | django/middleware/locale.py | 358 | 2983 | "This is the locale selecting middleware that will look at accept headers"
from django.conf import settings
from django.core.urlresolvers import (
LocaleRegexURLResolver, get_resolver, get_script_prefix, is_valid_path,
)
from django.http import HttpResponseRedirect
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.utils.functional import cached_property
class LocaleMiddleware(object):
"""
This is a very simple middleware that parses a request
and decides what translation object to install in the current
thread context. This allows pages to be dynamically
translated to the language the user desires (if the language
is available, of course).
"""
response_redirect_class = HttpResponseRedirect
def process_request(self, request):
language = translation.get_language_from_request(
request, check_path=self.is_language_prefix_patterns_used)
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
def process_response(self, request, response):
language = translation.get_language()
language_from_path = translation.get_language_from_path(request.path_info)
if (response.status_code == 404 and not language_from_path
and self.is_language_prefix_patterns_used):
urlconf = getattr(request, 'urlconf', None)
language_path = '/%s%s' % (language, request.path_info)
path_valid = is_valid_path(language_path, urlconf)
path_needs_slash = (
not path_valid and (
settings.APPEND_SLASH and not language_path.endswith('/')
and is_valid_path('%s/' % language_path, urlconf)
)
)
if path_valid or path_needs_slash:
script_prefix = get_script_prefix()
# Insert language after the script prefix and before the
# rest of the URL
language_url = request.get_full_path(force_append_slash=path_needs_slash).replace(
script_prefix,
'%s%s/' % (script_prefix, language),
1
)
return self.response_redirect_class(language_url)
if not (self.is_language_prefix_patterns_used
and language_from_path):
patch_vary_headers(response, ('Accept-Language',))
if 'Content-Language' not in response:
response['Content-Language'] = language
return response
@cached_property
def is_language_prefix_patterns_used(self):
"""
Returns `True` if the `LocaleRegexURLResolver` is used
at root level of the urlpatterns, else it returns `False`.
"""
for url_pattern in get_resolver(None).url_patterns:
if isinstance(url_pattern, LocaleRegexURLResolver):
return True
return False
| bsd-3-clause |
aalien/mib | mib.py | 1 | 7386 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# mib: Modular irc bot
# Copyright Antti Laine <antti.a.laine@tut.fi>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from ircsocket import IrcSocket
from ircutils import regexpify
from parser import parse, IRCMsg
import config
import os
import re
import sys
class Mib:
""" Main class which handles most of the core functionality.
"""
def __init__(self):
""" Initialize variables and read config.
"""
sys.path.append('plugins')
self.loaded_plugins = {} # plugin name : module
self.cmd_callbacks = {} # command : set(function)
self.privmsg_cmd_callbacks = {} # command : set(function)
self.command_masks = {} # command : list(regexp)
self.plugins = set(config.LOAD_PLUGINS)
self.cmd_prefixes = set(config.CMD_PREFIXES)
self.nick = config.NICK
self.username = config.USERNAME
self.realname = config.REALNAME
self.server, self.port = config.SERVER
self.channels = config.CHANNELS
self.socket = IrcSocket(self.server, self.port, self.nick,
self.username, self.realname)
self.socket.register_readline_cb(self.parse_line)
for channel in self.channels:
self.socket.join(channel)
for plugin in self.plugins:
print self.load_plugin(plugin)[1]
def run(self):
""" Start socket's main loop.
"""
self.socket.run()
def clean(self):
for plugin in self.loaded_plugins.itervalues():
plugin.clean()
def parse_line(self, line):
""" Parse line and call callbacks registered for command.
"""
print line
parsed = parse(line)
if not parsed:
print 'Unable to parse line: "%s"' %(line)
return
# call registered functions
for function in self.cmd_callbacks.get(parsed.cmd, ()):
try:
function(parsed)
except Exception, e:
print 'Error from function', repr(function), ':', e
# call registered privmsg functions with pre-parsed line
if parsed.cmd == 'PRIVMSG':
cmd_prefix = parsed.postfix.split(' ', 1)[0]
postfix = parsed.postfix[len(cmd_prefix):].lstrip()
if cmd_prefix in self.cmd_prefixes:
print 'Found command prefix', cmd_prefix
cmd = postfix.lstrip().split(' ', 1)[0]
postfix = postfix[len(cmd):].lstrip()
stripped_parsed = IRCMsg(parsed.prefix, parsed.cmd,
parsed.params, postfix)
print "stripped_parsed = ", stripped_parsed
print 'Searching for command', cmd
for function in self.privmsg_cmd_callbacks.get(cmd, ()):
run = False
if cmd not in self.command_masks:
run = True
else:
print 'There are limitations for this command'
for regexp in self.command_masks[cmd]:
print 'Matching %s to %s' % (parsed.prefix,
regexp.pattern)
if regexp.match(parsed.prefix):
run = True
break
if run:
try:
print 'Executing command %s' % cmd
function(stripped_parsed)
except Exception, e:
print 'Error from function', repr(function), ':', e
def load_plugin(self, plugin, params=None):
""" str, ([]) -> (bool, str)
Loads plugin from plugins/<plugin>.py
Params will be given to plugin's constructor.
Returns a tuple with a boolean stating if the plugin
was loaded properly and a message telling what happened.
"""
if plugin in self.loaded_plugins:
return (False, 'Plugin %s already loaded' %(plugin))
if not os.path.exists(os.path.join('plugins', plugin + '.py')):
return (False, 'Plugin %s does not exists' %(plugin))
try:
module = __import__(plugin)
if params:
obj = module.init(self, params)
else:
obj = module.init(self)
success = True
except Exception, err:
success = False
print err
if success:
self.loaded_plugins[plugin] = obj
return (True, 'Loaded plugin %s' %(plugin))
else:
return (False, 'Failed to load plugin %s' %(plugin))
def register_cmd(self, cmd, function):
""" Registers a function to be called when a line with
cmd is seen. Function must take one named tuple parameter.
Tuple contains line in parsed form with fields
(prefix, cmd, params, postfix)
"""
self.cmd_callbacks.setdefault(cmd, set()).add(function)
def register_privmsg_cmd(self, cmd, function):
""" Registers a function to be called when a PRIVMSG with
cmd is seen. Function must take one named tuple parameter.
Tuple contains line in parsed form with fields
(prefix, cmd, params,
postfix stripped from one of CMD_PREFIXES and cmd)
"""
self.privmsg_cmd_callbacks.setdefault(cmd, set()).add(function)
def add_cmd_permission(self, cmd, mask, regexpify=True):
""" Creates a regular expression from the mask and adds it
to the list of allowed regexps for the cmd.
mask is an IRC mask, and will be changed into a corresponding
regular expression.
"""
mask = regexpify(mask)
m = re.compile(mask)
self.command_masks.setdefault(cmd, []).append(m)
def rm_cmd_permission(self, cmd, mask):
""" Creates a regular expression from the mask, and removes
the permission for that expression from cmd's list.
mask is an IRC mask, and will be changed into a corresponding
regular expression.
"""
mask = regexpify(mask)
if cmd in self.command_masks:
for index, regexp in enumerate(self.command_masks[cmd]):
if regexp.pattern == mask:
del self.command_masks[cmd][index]
break
if __name__ == "__main__":
mib = Mib()
try:
mib.run()
except Exception, e:
print 'ERROR: ', e
except:
pass
mib.clean()
print 'Quiting!'
| mit |
daigotanaka/kawaraban | wsgi.py | 1 | 1445 | """
WSGI config for the website project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit |
yugang/crosswalk-test-suite | webapi/tct-websetting-tizen-tests/inst.xpk.py | 176 | 6857 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
# Do not need handle timeout in this short script, let tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code != None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user )
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
pkg_infos = line.split()
if len(pkg_infos) == 4:
continue
name = pkg_infos[5]
name = name.lstrip('[').rstrip(']')
print "name is: %s" % name
if pkg_name == name:
test_pkg_id = pkg_infos[3]
test_pkg_id = test_pkg_id.lstrip('[').rstrip(']')
print test_pkg_id
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t xpk -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t xpk -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0 :
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
else:
print "[Error] cmd commands error : %s"%str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| bsd-3-clause |
0x010C/Pywikibot-scripts | notif-bot.py | 2 | 1806 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
#Autor: Antoine "0x010C" Lamielle
#Creation date: 28 October 2015
#Last modification: 10 February 2017
#License: GNU GPL v3
import sys
import time
import pywiki
#Paramètres
version = "2.0"
reply = {
"section_title": "Bonjour $1 !",
"content": "J'ai bien reçu votre notification, je vous notifie en retour : {{Notif|$1}} {{clin}} ~~~~",
"summary": "Notification",
}
def get_notifications_list(self):
response = self.request( {
"action": "query",
"format": "json",
"meta": "notifications",
"notfilter": "!read",
"notprop": "list",
"notsections": "alert",
"notformat": "model",
"notlimit": "50"
} )
IDs = []
names = []
for notification in response["query"]["notifications"]["list"]:
IDs.append(notification["id"])
if notification["type"] == "mention":
names.append(notification["agent"]["name"])
return (IDs, names)
pywiki.Pywiki.get_notifications_list = get_notifications_list
def clean_notifications(self, notificationsIDs):
self.request( {
"action":"echomarkread",
"list":"|".join(notificationsIDs),
"token":self.get_csrf_token(),
"assert":self.assertion,
"format":"json"
} )
pywiki.Pywiki.clean_notifications = clean_notifications
def main():
reload(sys)
sys.setdefaultencoding('utf8')
pw = pywiki.Pywiki("frwiki-NotifBot")
pw.login()
while True:
(IDs, names) = pw.get_notifications_list()
for name in names:
print name
title = "\n== " + name.join(reply["section_title"].split("$1")) + " ==\n"
text = name.join(reply["content"].split("$1"))
summary = name.join(reply["summary"].split("$1"))
pw.append('User_talk:' + pw.basic_user_name, title + text, summary, nocreate=True)
pw.clean_notifications(IDs)
time.sleep(180)
main()
| gpl-3.0 |
fiji-flo/servo | components/script/dom/bindings/codegen/parser/tests/test_method.py | 83 | 7795 | import WebIDL
def WebIDLTest(parser, harness):
    """Exercise WebIDL method parsing.

    Parses an interface covering the method grammar (static, optional,
    variadic, nullable and sequence arguments, object/any/float types),
    verifies the resulting IDLMethod/IDLArgument objects, then checks that
    attribute-only extended attributes are rejected on methods.

    Fix: the Python-2-only ``except Exception, x:`` spelling is replaced by
    ``except Exception:`` — valid on Python 2.6+ and Python 3, and the bound
    exception object was never used anyway.
    """
    parser.parse("""
      interface TestMethods {
        void basic();
        static void basicStatic();
        void basicWithSimpleArgs(boolean arg1, byte arg2, unsigned long arg3);
        boolean basicBoolean();
        static boolean basicStaticBoolean();
        boolean basicBooleanWithSimpleArgs(boolean arg1, byte arg2, unsigned long arg3);
        void optionalArg(optional byte? arg1, optional sequence<byte> arg2);
        void variadicArg(byte?... arg1);
        object getObject();
        void setObject(object arg1);
        void setAny(any arg1);
        float doFloats(float arg1);
      };
    """)

    results = parser.finish()

    harness.ok(True, "TestMethods interface parsed without error.")
    harness.check(len(results), 1, "Should be one production.")
    iface = results[0]
    harness.ok(isinstance(iface, WebIDL.IDLInterface),
               "Should be an IDLInterface")
    harness.check(iface.identifier.QName(), "::TestMethods", "Interface has the right QName")
    harness.check(iface.identifier.name, "TestMethods", "Interface has the right name")
    harness.check(len(iface.members), 12, "Expect 12 members")

    methods = iface.members

    def checkArgument(argument, QName, name, type, optional, variadic):
        # Verify a single IDLArgument against the expected
        # (QName, name, type, optional, variadic) tuple.
        harness.ok(isinstance(argument, WebIDL.IDLArgument),
                   "Should be an IDLArgument")
        harness.check(argument.identifier.QName(), QName, "Argument has the right QName")
        harness.check(argument.identifier.name, name, "Argument has the right name")
        harness.check(str(argument.type), type, "Argument has the right return type")
        harness.check(argument.optional, optional, "Argument has the right optional value")
        harness.check(argument.variadic, variadic, "Argument has the right variadic value")

    def checkMethod(method, QName, name, signatures,
                    static=False, getter=False, setter=False, creator=False,
                    deleter=False, legacycaller=False, stringifier=False):
        # Verify an IDLMethod's identity, its boolean flags, and every
        # expected (returnType, [argTuples]) signature.
        harness.ok(isinstance(method, WebIDL.IDLMethod),
                   "Should be an IDLMethod")
        harness.ok(method.isMethod(), "Method is a method")
        harness.ok(not method.isAttr(), "Method is not an attr")
        harness.ok(not method.isConst(), "Method is not a const")
        harness.check(method.identifier.QName(), QName, "Method has the right QName")
        harness.check(method.identifier.name, name, "Method has the right name")
        harness.check(method.isStatic(), static, "Method has the correct static value")
        harness.check(method.isGetter(), getter, "Method has the correct getter value")
        harness.check(method.isSetter(), setter, "Method has the correct setter value")
        harness.check(method.isCreator(), creator, "Method has the correct creator value")
        harness.check(method.isDeleter(), deleter, "Method has the correct deleter value")
        harness.check(method.isLegacycaller(), legacycaller, "Method has the correct legacycaller value")
        harness.check(method.isStringifier(), stringifier, "Method has the correct stringifier value")
        harness.check(len(method.signatures()), len(signatures), "Method has the correct number of signatures")

        sigpairs = zip(method.signatures(), signatures)
        for (gotSignature, expectedSignature) in sigpairs:
            (gotRetType, gotArgs) = gotSignature
            (expectedRetType, expectedArgs) = expectedSignature

            harness.check(str(gotRetType), expectedRetType,
                          "Method has the expected return type.")

            for i in range(0, len(gotArgs)):
                (QName, name, type, optional, variadic) = expectedArgs[i]
                checkArgument(gotArgs[i], QName, name, type, optional, variadic)

    checkMethod(methods[0], "::TestMethods::basic", "basic", [("Void", [])])
    checkMethod(methods[1], "::TestMethods::basicStatic", "basicStatic",
                [("Void", [])], static=True)
    checkMethod(methods[2], "::TestMethods::basicWithSimpleArgs",
                "basicWithSimpleArgs",
                [("Void",
                  [("::TestMethods::basicWithSimpleArgs::arg1", "arg1", "Boolean", False, False),
                   ("::TestMethods::basicWithSimpleArgs::arg2", "arg2", "Byte", False, False),
                   ("::TestMethods::basicWithSimpleArgs::arg3", "arg3", "UnsignedLong", False, False)])])
    checkMethod(methods[3], "::TestMethods::basicBoolean", "basicBoolean", [("Boolean", [])])
    checkMethod(methods[4], "::TestMethods::basicStaticBoolean", "basicStaticBoolean", [("Boolean", [])], static=True)
    checkMethod(methods[5], "::TestMethods::basicBooleanWithSimpleArgs",
                "basicBooleanWithSimpleArgs",
                [("Boolean",
                  [("::TestMethods::basicBooleanWithSimpleArgs::arg1", "arg1", "Boolean", False, False),
                   ("::TestMethods::basicBooleanWithSimpleArgs::arg2", "arg2", "Byte", False, False),
                   ("::TestMethods::basicBooleanWithSimpleArgs::arg3", "arg3", "UnsignedLong", False, False)])])
    checkMethod(methods[6], "::TestMethods::optionalArg",
                "optionalArg",
                [("Void",
                  [("::TestMethods::optionalArg::arg1", "arg1", "ByteOrNull", True, False),
                   ("::TestMethods::optionalArg::arg2", "arg2", "ByteSequence", True, False)])])
    checkMethod(methods[7], "::TestMethods::variadicArg",
                "variadicArg",
                [("Void",
                  [("::TestMethods::variadicArg::arg1", "arg1", "ByteOrNull", True, True)])])
    checkMethod(methods[8], "::TestMethods::getObject",
                "getObject", [("Object", [])])
    checkMethod(methods[9], "::TestMethods::setObject",
                "setObject",
                [("Void",
                  [("::TestMethods::setObject::arg1", "arg1", "Object", False, False)])])
    checkMethod(methods[10], "::TestMethods::setAny",
                "setAny",
                [("Void",
                  [("::TestMethods::setAny::arg1", "arg1", "Any", False, False)])])
    checkMethod(methods[11], "::TestMethods::doFloats",
                "doFloats",
                [("Float",
                  [("::TestMethods::doFloats::arg1", "arg1", "Float", False, False)])])

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
          interface A {
            void foo(optional float bar = 1);
          };
        """)
        results = parser.finish()
    except Exception:
        threw = True
    harness.ok(not threw, "Should allow integer to float type corecion")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
          interface A {
            [GetterThrows] void foo();
          };
        """)
        results = parser.finish()
    except Exception:
        threw = True
    harness.ok(threw, "Should not allow [GetterThrows] on methods")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
          interface A {
            [SetterThrows] void foo();
          };
        """)
        results = parser.finish()
    except Exception:
        threw = True
    harness.ok(threw, "Should not allow [SetterThrows] on methods")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
          interface A {
            [Throw] void foo();
          };
        """)
        results = parser.finish()
    except Exception:
        threw = True
    harness.ok(threw, "Should spell [Throws] correctly on methods")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
          interface A {
            void __noSuchMethod__();
          };
        """)
        results = parser.finish()
    except Exception:
        threw = True
    harness.ok(threw, "Should not allow __noSuchMethod__ methods")
| mpl-2.0 |
Simage/shinken | shinken/objects/realm.py | 3 | 13800 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
import copy
from item import Item
from itemgroup import Itemgroup, Itemgroups
from shinken.property import BoolProp, IntegerProp, StringProp, DictProp, ListProp
from shinken.log import logger
# It change from hostgroup Class because there is no members
# properties, just the realm_members that we rewrite on it.
# It change from hostgroup Class because there is no members
# properties, just the realm_members that we rewrite on it.
class Realm(Itemgroup):
    """A Realm is an independent scheduling zone grouping schedulers and
    satellites (pollers, reactionners, brokers, receivers).

    Unlike Hostgroup there is no generic ``members`` handling here:
    membership is expressed through ``realm_members`` (sub-realms) and
    ``higher_realms`` (parents), which are resolved by Realms.linkify().
    """
    id = 1  # zero is always a little bit special... like in database
    my_type = 'realm'

    properties = Itemgroup.properties.copy()
    properties.update({
        'id': IntegerProp(default=0, fill_brok=['full_status']),
        'realm_name': StringProp(fill_brok=['full_status']),
        # No status_broker_name because it put hosts, not host_name
        'realm_members': ListProp(default=[], split_on_coma=True),
        'higher_realms': ListProp(default=[], split_on_coma=True),
        'default': BoolProp(default=False),
        'broker_complete_links': BoolProp(default=False),
        # 'alias': {'required': True, 'fill_brok': ['full_status']},
        # 'notes': {'required': False, 'default':'', 'fill_brok': ['full_status']},
        # 'notes_url': {'required': False, 'default':'', 'fill_brok': ['full_status']},
        # 'action_url': {'required': False, 'default':'', 'fill_brok': ['full_status']},
    })

    running_properties = Item.running_properties.copy()
    running_properties.update({
        # per-scheduler serialized configuration packages
        'serialized_confs': DictProp(default={}),
    })

    macros = {
        'REALMNAME': 'realm_name',
        'REALMMEMBERS': 'members',
    }

    def get_name(self):
        """Return the unique name of this realm."""
        return self.realm_name

    def get_realms(self):
        """Return the direct sub-realms (names before linkify, Realm
        objects after)."""
        return self.realm_members

    def add_string_member(self, member):
        # NOTE(review): appends a comma-joined *string* although
        # realm_members is declared as a ListProp; this appears to rely on
        # realm_members still being raw text during explosion -- confirm
        # before refactoring.
        self.realm_members += ',' + member

    def get_realm_members(self):
        """Return the list of member names, whitespace-stripped, or []
        when the property is not set."""
        if self.has('realm_members'):
            return [r.strip() for r in self.realm_members]
        else:
            return []

    # We fillfull properties with template ones if need
    # Because hostgroup we call may not have it's members
    # we call get_hosts_by_explosion on it
    def get_realms_by_explosion(self, realms):
        """Recursively expand sub-realm membership.

        Uses ``rec_tag`` (reset by Realms.explode()) to detect definition
        loops; a loop is recorded in ``configuration_errors``.
        """
        # First we tag the hg so it will not be explode
        # if a son of it already call it
        self.already_explode = True

        # Now the recursive part
        # rec_tag is set to False every HG we explode
        # so if True here, it must be a loop in HG
        # calls... not GOOD!
        if self.rec_tag:
            err = "Error: we've got a loop in realm definition %s" % self.get_name()
            self.configuration_errors.append(err)
            if self.has('members'):
                return self.members
            else:
                return ''

        # Ok, not a loop, we tag it and continue
        self.rec_tag = True

        p_mbrs = self.get_realm_members()
        for p_mbr in p_mbrs:
            p = realms.find_by_name(p_mbr.strip())
            if p is not None:
                value = p.get_realms_by_explosion(realms)
                if value is not None:
                    self.add_string_member(value)

        if self.has('members'):
            return self.members
        else:
            return ''

    def get_all_subs_satellites_by_type(self, sat_type):
        """Return this realm's satellites of ``sat_type`` plus those of all
        sub-realms, recursively (e.g. sat_type='pollers')."""
        r = copy.copy(getattr(self, sat_type))
        for p in self.realm_members:
            tmps = p.get_all_subs_satellites_by_type(sat_type)
            for s in tmps:
                r.append(s)
        return r

    def count_reactionners(self):
        """Set self.nb_reactionners: non-spare reactionners of this realm
        plus non-spare ones of higher realms that manage sub-realms."""
        self.nb_reactionners = 0
        for reactionner in self.reactionners:
            if not reactionner.spare:
                self.nb_reactionners += 1
        for realm in self.higher_realms:
            for reactionner in realm.reactionners:
                if not reactionner.spare and reactionner.manage_sub_realms:
                    self.nb_reactionners += 1

    def count_pollers(self):
        """Set self.nb_pollers (same counting rule as count_reactionners)."""
        self.nb_pollers = 0
        for poller in self.pollers:
            if not poller.spare:
                self.nb_pollers += 1
        for realm in self.higher_realms:
            for poller in realm.pollers:
                if not poller.spare and poller.manage_sub_realms:
                    self.nb_pollers += 1

    def count_brokers(self):
        """Set self.nb_brokers (same counting rule as count_reactionners)."""
        self.nb_brokers = 0
        for broker in self.brokers:
            if not broker.spare:
                self.nb_brokers += 1
        for realm in self.higher_realms:
            for broker in realm.brokers:
                if not broker.spare and broker.manage_sub_realms:
                    self.nb_brokers += 1

    def count_receivers(self):
        """Set self.nb_receivers (same counting rule as count_reactionners)."""
        self.nb_receivers = 0
        for receiver in self.receivers:
            if not receiver.spare:
                self.nb_receivers += 1
        for realm in self.higher_realms:
            for receiver in realm.receivers:
                if not receiver.spare and receiver.manage_sub_realms:
                    self.nb_receivers += 1

    # Return the list of satellites of a certain type
    # like reactionner -> self.reactionners
    # NOTE(review): method name misspells "satellites"; kept as-is because
    # external callers use this exact name.
    def get_satellties_by_type(self, type):
        if hasattr(self, type + 's'):
            return getattr(self, type + 's')
        else:
            logger.debug("[realm] do not have this kind of satellites: %s", type)
            return []

    def fill_potential_satellites_by_type(self, sat_type):
        """Build self.potential_<sat_type>: own satellites plus those of
        higher realms that manage sub-realms."""
        setattr(self, 'potential_%s' % sat_type, [])
        for satellite in getattr(self, sat_type):
            getattr(self, 'potential_%s' % sat_type).append(satellite)
        for realm in self.higher_realms:
            for satellite in getattr(realm, sat_type):
                if satellite.manage_sub_realms:
                    getattr(self, 'potential_%s' % sat_type).append(satellite)

    # Return the list of potentials satellites of a certain type
    # like reactionner -> self.potential_reactionners
    def get_potential_satellites_by_type(self, type):
        if hasattr(self, 'potential_' + type + 's'):
            return getattr(self, 'potential_' + type + 's')
        else:
            logger.debug("[realm] do not have this kind of satellites: %s", type)
            return []

    # Return the list of potentials satellites of a certain type
    # like reactionner -> self.nb_reactionners
    def get_nb_of_must_have_satellites(self, type):
        if hasattr(self, 'nb_' + type + 's'):
            return getattr(self, 'nb_' + type + 's')
        else:
            logger.debug("[realm] do not have this kind of satellites: %s", type)
            return 0

    # Fill dict of realms for managing the satellites confs
    def prepare_for_satellites_conf(self):
        """Initialise the dispatch bookkeeping dicts and compute satellite
        counts / potential lists for every satellite type, then log a
        summary line."""
        self.to_satellites = {}
        self.to_satellites['reactionner'] = {}
        self.to_satellites['poller'] = {}
        self.to_satellites['broker'] = {}
        self.to_satellites['receiver'] = {}

        self.to_satellites_need_dispatch = {}
        self.to_satellites_need_dispatch['reactionner'] = {}
        self.to_satellites_need_dispatch['poller'] = {}
        self.to_satellites_need_dispatch['broker'] = {}
        self.to_satellites_need_dispatch['receiver'] = {}

        self.to_satellites_managed_by = {}
        self.to_satellites_managed_by['reactionner'] = {}
        self.to_satellites_managed_by['poller'] = {}
        self.to_satellites_managed_by['broker'] = {}
        self.to_satellites_managed_by['receiver'] = {}

        self.count_reactionners()
        self.fill_potential_satellites_by_type('reactionners')
        self.count_pollers()
        self.fill_potential_satellites_by_type('pollers')
        self.count_brokers()
        self.fill_potential_satellites_by_type('brokers')
        self.count_receivers()
        self.fill_potential_satellites_by_type('receivers')

        s = "%s: (in/potential) (schedulers:%d) (pollers:%d/%d) (reactionners:%d/%d) (brokers:%d/%d) (receivers:%d/%d)" % \
            (self.get_name(),
             len(self.schedulers),
             self.nb_pollers, len(self.potential_pollers),
             self.nb_reactionners, len(self.potential_reactionners),
             self.nb_brokers, len(self.potential_brokers),
             self.nb_receivers, len(self.potential_receivers)
             )
        logger.info(s)

    # TODO: find a better name...
    # TODO: and if he goes active?
    def fill_broker_with_poller_reactionner_links(self, broker):
        """Populate broker.cfg with the satellite configurations (pollers,
        reactionners, receivers) of this realm, and of all sub-realms when
        the broker manages them."""
        # First we create/void theses links
        broker.cfg['pollers'] = {}
        broker.cfg['reactionners'] = {}
        broker.cfg['receivers'] = {}

        # First our own level
        for p in self.pollers:
            cfg = p.give_satellite_cfg()
            broker.cfg['pollers'][p.id] = cfg

        for r in self.reactionners:
            cfg = r.give_satellite_cfg()
            broker.cfg['reactionners'][r.id] = cfg

        for b in self.receivers:
            cfg = b.give_satellite_cfg()
            broker.cfg['receivers'][b.id] = cfg

        # Then sub if we must to it
        if broker.manage_sub_realms:
            # Now pollers
            for p in self.get_all_subs_satellites_by_type('pollers'):
                cfg = p.give_satellite_cfg()
                broker.cfg['pollers'][p.id] = cfg

            # Now reactionners
            for r in self.get_all_subs_satellites_by_type('reactionners'):
                cfg = r.give_satellite_cfg()
                broker.cfg['reactionners'][r.id] = cfg

            # Now receivers
            for r in self.get_all_subs_satellites_by_type('receivers'):
                cfg = r.give_satellite_cfg()
                broker.cfg['receivers'][r.id] = cfg

    # Get a conf package of satellites links that can be useful for
    # a scheduler
    def get_satellites_links_for_scheduler(self):
        """Return a dict {'pollers': {id: cfg}, 'reactionners': {id: cfg}}
        for this realm's own satellites (no sub-realm recursion)."""
        cfg = {}

        # First we create/void theses links
        cfg['pollers'] = {}
        cfg['reactionners'] = {}

        # First our own level
        for p in self.pollers:
            c = p.give_satellite_cfg()
            cfg['pollers'][p.id] = c

        for r in self.reactionners:
            c = r.give_satellite_cfg()
            cfg['reactionners'][r.id] = c

        # print "***** Preparing a satellites conf for a scheduler", cfg
        return cfg
class Realms(Itemgroups):
    """Collection of Realm objects; handles name resolution, member
    explosion and the higher/lower realm cross-links."""
    name_property = "realm_name"  # is used for finding hostgroups
    inner_class = Realm

    def get_members_by_name(self, pname):
        """Return the members of the realm named ``pname``, or [] if no
        such realm exists."""
        realm = self.find_by_name(pname)
        if realm is None:
            return []
        return realm.get_realms()

    def linkify(self):
        """Resolve realm member names to Realm objects and initialise the
        per-realm satellite/conf containers."""
        self.linkify_p_by_p()

        # prepare list of satellites and confs
        for p in self:
            p.pollers = []
            p.schedulers = []
            p.reactionners = []
            p.brokers = []
            p.receivers = []
            p.packs = []
            p.confs = {}

    # We just search for each realm the others realms
    # and replace the name by the realm
    def linkify_p_by_p(self):
        """Replace realm_members name strings by Realm objects, then build
        each realm's higher_realms list recursively."""
        for p in self.items.values():
            mbrs = p.get_realm_members()
            # The new member list, in id
            new_mbrs = []
            for mbr in mbrs:
                new_mbr = self.find_by_name(mbr)
                if new_mbr is not None:
                    new_mbrs.append(new_mbr)
            # We find the id, we replace the names
            p.realm_members = new_mbrs

        # Now put higher realm in sub realms
        # So after they can
        for p in self.items.values():
            p.higher_realms = []

        for p in self.items.values():
            self.recur_higer_realms(p, p.realm_members)

    # I add the R realm in the sons.higer_realms, and
    # also in the son.sons and so on
    def recur_higer_realms(self, r, sons):
        """Append realm ``r`` to higher_realms of every realm in ``sons``
        and, recursively, of their sub-realms."""
        for sub_p in sons:
            sub_p.higher_realms.append(r)
            # and call for our sons too
            self.recur_higer_realms(r, sub_p.realm_members)

    # Use to fill members with hostgroup_members
    def explode(self):
        """Expand nested realm membership for every realm, using the
        already_explode/rec_tag flags to avoid re-work and detect loops."""
        # We do not want a same hg to be explode again and again
        # so we tag it
        for tmp_p in self.items.values():
            tmp_p.already_explode = False

        for p in self:
            if p.has('realm_members') and not p.already_explode:
                # get_hosts_by_explosion is a recursive
                # function, so we must tag hg so we do not loop
                for tmp_p in self:
                    tmp_p.rec_tag = False
                p.get_realms_by_explosion(self)

        # We clean the tags
        for tmp_p in self.items.values():
            if hasattr(tmp_p, 'rec_tag'):
                del tmp_p.rec_tag
            del tmp_p.already_explode

    def get_default(self):
        """Return the realm flagged as default, or None."""
        for r in self:
            if getattr(r, 'default', False):
                return r
        return None

    def prepare_for_satellites_conf(self):
        """Run Realm.prepare_for_satellites_conf() on every realm."""
        for r in self:
            r.prepare_for_satellites_conf()
| agpl-3.0 |
dynaryu/inasafe | safe/engine/test/test_interpolation_qgis.py | 6 | 5184 | # coding=utf-8
"""Tests for interpolation functionality done with QGIS API."""
import unittest
from qgis.core import QgsFeatureRequest, QgsVectorLayer
from safe.test.utilities import get_qgis_app, TESTDATA
from safe.gis.qgis_vector_tools import create_layer
from safe.engine.interpolation_qgis import interpolate_polygon_polygon
QGIS_APP, CANVAS, IFACE, PARENT = get_qgis_app()
class TestInterpolationQGIS(unittest.TestCase):
    """Test for interpolation functionality done with QGIS API"""

    def test_interpolation_from_polygons_one_poly(self):
        """Point interpolation using one polygon from Maumere works

        There's a test with the same name in test_engine.py not using QGIS API.
        This one deals correctly with holes in polygons,
        so the resulting numbers are a bit different.
        """
        # Name file names for hazard level and exposure
        hazard_filename = ('%s/tsunami_polygon_WGS84.shp' % TESTDATA)
        exposure_filename = ('%s/building_Maumere.shp' % TESTDATA)

        # Read input data
        H_all = QgsVectorLayer(hazard_filename, 'Hazard', 'ogr')
        # Cut down to make test quick
        # Polygon #799 is the one used in separate test
        H = create_layer(H_all)
        polygon799 = H_all.getFeatures(QgsFeatureRequest(799)).next()
        H.dataProvider().addFeatures([polygon799])

        E = QgsVectorLayer(exposure_filename, 'Exposure', 'ogr')

        # Test interpolation function
        I = interpolate_polygon_polygon(H, E, E.extent())

        N = I.dataProvider().featureCount()
        # BUGFIX: the original line asserted N against I's own feature
        # count, which is a tautology (always true).  Mirror the
        # multiple-polygon test below: the interpolated layer must keep one
        # feature per exposure feature.
        assert N == E.dataProvider().featureCount()

        # Assert that expected attribute names exist
        I_names = [field.name() for field in I.dataProvider().fields()]
        for field in H.dataProvider().fields():
            name = field.name()
            msg = 'Did not find hazard name "%s" in %s' % (name, I_names)
            assert name in I_names, msg
        for field in E.dataProvider().fields():
            name = field.name()
            msg = 'Did not find exposure name "%s" in %s' % (name, I_names)
            assert name in I_names, msg

        # Verify interpolated values with test result
        count = 0
        for f in I.getFeatures():
            category = f['Category']
            if category is not None:
                count += 1

        msg = ('Expected 453 points tagged with category, '
               'but got only %i' % count)
        assert count == 453, msg

    def test_interpolation_from_polygons_multiple(self):
        """Point interpolation using multiple polygons from Maumere works

        There's a test with the same name in test_engine.py not using QGIS API.
        This one deals correctly with holes in polygons,
        so the resulting numbers are a bit different.
        """
        # Name file names for hazard and exposure
        hazard_filename = ('%s/tsunami_polygon_WGS84.shp' % TESTDATA)
        exposure_filename = ('%s/building_Maumere.shp' % TESTDATA)

        # Read input data
        H = QgsVectorLayer(hazard_filename, 'Hazard', 'ogr')
        E = QgsVectorLayer(exposure_filename, 'Exposure', 'ogr')

        # Test interpolation function
        I = interpolate_polygon_polygon(H, E, E.extent())

        N = I.dataProvider().featureCount()
        assert N == E.dataProvider().featureCount()

        # Assert that expected attribute names exist
        I_names = [field.name() for field in I.dataProvider().fields()]
        for field in H.dataProvider().fields():
            name = field.name()
            msg = 'Did not find hazard name "%s" in %s' % (name, I_names)
            assert name in I_names, msg
        for field in E.dataProvider().fields():
            name = field.name()
            msg = 'Did not find exposure name "%s" in %s' % (name, I_names)
            assert name in I_names, msg

        # Verify interpolated values with test result
        counts = {}
        for f in I.getFeatures():
            # Count items in each specific category
            category = f['Category']
            if category not in counts:
                counts[category] = 0
            counts[category] += 1

        assert H.dataProvider().featureCount() == 1032
        assert I.dataProvider().featureCount() == 3528

        # The full version
        msg = ('Expected 2267 points tagged with category "High", '
               'but got only %i' % counts['High'])
        assert counts['High'] == 2267, msg

        msg = ('Expected 1179 points tagged with category "Very High", '
               'but got only %i' % counts['Very High'])
        assert counts['Very High'] == 1179, msg

        msg = ('Expected 2 points tagged with category "Medium" '
               'but got only %i' % counts['Medium'])
        assert counts['Medium'] == 2, msg

        msg = ('Expected 4 points tagged with category "Low" '
               'but got only %i' % counts['Low'])
        assert counts['Low'] == 4, msg

        msg = ('Expected 76 points tagged with no category '
               'but got only %i' % counts[None])
        assert counts[None] == 76, msg
    test_interpolation_from_polygons_multiple.slow = True
| gpl-3.0 |
joshuajan/odoo | openerp/addons/base/ir/ir_model.py | 3 | 59396 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import re
import time
import types
import openerp
import openerp.modules.registry
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields,osv
from openerp.osv.orm import Model, browse_null
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools import config
from openerp.tools.translate import _
from openerp.osv.orm import except_orm, browse_record, MAGIC_COLUMNS
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
    """Return the selectable ORM field types as sorted (name, name) pairs.

    Skips the abstract base column class, deprecated columns and
    function fields.
    """
    # Avoid too many nested `if`s below, as RedHat's Python 2.6
    # break on it. See bug 939653.
    candidates = []
    for attr_name, attr in fields.__dict__.iteritems():
        if type(attr) != types.TypeType:
            continue
        if not issubclass(attr, fields._column) or attr == fields._column:
            continue
        if attr._deprecated or issubclass(attr, fields.function):
            continue
        candidates.append((attr_name, attr_name))
    return sorted(candidates)
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
    #pseudo-method used by fields.function in ir.model/ir.model.fields
    # For each record id, return a comma-separated, sorted list of the
    # *installed* modules whose external ids reference that record.
    module_obj = self.pool["ir.module.module"]
    module_ids = module_obj.search(cr, uid, [('state', '=', 'installed')])
    name_rows = module_obj.read(cr, uid, module_ids, ['name'], context=context)
    installed = set(row['name'] for row in name_rows)
    res = {}
    for rec_id, ext_ids in osv.osv._get_xml_ids(self, cr, uid, ids).iteritems():
        defining = installed & set(ext_id.split('.')[0] for ext_id in ext_ids)
        res[rec_id] = ', '.join(sorted(defining))
    return res
class ir_model(osv.osv):
    """Registry of all ORM models; also backs user-defined ("manual")
    custom models created through the UI."""
    _name = 'ir.model'
    _description = "Models"
    _order = 'model'

    def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
        """fields.function getter: True when the model is transient
        (osv_memory)."""
        models = self.browse(cr, uid, ids, context=context)
        res = dict.fromkeys(ids)
        for model in models:
            if model.model in self.pool:
                res[model.id] = self.pool[model.model].is_transient()
            else:
                _logger.error('Missing model %s' % (model.model, ))
        return res

    def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
        """fields.function search on osv_memory; only '=' and '!=' are
        supported."""
        if not domain:
            return []
        __, operator, value = domain[0]
        if operator not in ['=', '!=']:
            raise osv.except_osv(_("Invalid Search Criteria"), _('The osv_memory field can only be compared with = and != operator.'))
        value = bool(value) if operator == '=' else not bool(value)
        all_model_ids = self.search(cr, uid, [], context=context)
        is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
        return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]

    def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
        """fields.function getter: ids of ir.ui.view records attached to
        each model."""
        models = self.browse(cr, uid, ids)
        res = {}
        for model in models:
            res[model.id] = self.pool["ir.ui.view"].search(cr, uid, [('model', '=', model.model)])
        return res

    _columns = {
        'name': fields.char('Model Description', translate=True, required=True),
        'model': fields.char('Model', required=True, select=1),
        'info': fields.text('Information'),
        'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True),
        'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type',readonly=True),
        'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
        'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
            fnct_search=_search_osv_memory,
            help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
        'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the object is defined or inherited'),
        'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
    }

    _defaults = {
        'model': 'x_',
        # 'manual' when created from the UI (context['manual']), else 'base'
        'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
    }

    def _check_model_name(self, cr, uid, ids, context=None):
        """Constraint: manual models must be named x_* and contain only
        [a-zA-Z0-9._] characters."""
        for model in self.browse(cr, uid, ids, context=context):
            if model.state=='manual':
                if not model.model.startswith('x_'):
                    return False
            if not re.match('^[a-z_A-Z0-9.]+$',model.model):
                return False
        return True

    def _model_name_msg(self, cr, uid, ids, context=None):
        # Error message for the _check_model_name constraint.
        return _('The Object name must start with x_ and not contain any special character !')

    _constraints = [
        (_check_model_name, _model_name_msg, ['model']),
    ]

    _sql_constraints = [
        ('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
    ]

    # overridden to allow searching both on model name (model field)
    # and model description (name field)
    def _name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
        if args is None:
            args = []
        domain = args + ['|', ('model', operator, name), ('name', operator, name)]
        return self.name_get(cr, name_get_uid or uid,
                             super(ir_model, self).search(cr, uid, domain, limit=limit, context=context),
                             context=context)

    def _drop_table(self, cr, uid, ids, context=None):
        """Drop the backing database table (or view) of each model."""
        for model in self.browse(cr, uid, ids, context):
            model_pool = self.pool[model.model]
            cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
            result = cr.fetchone()
            # relkind 'v' = view, 'r' = ordinary table
            if result and result[0] == 'v':
                cr.execute('DROP view %s' % (model_pool._table,))
            elif result and result[0] == 'r':
                cr.execute('DROP TABLE %s' % (model_pool._table,))
        return True

    def unlink(self, cr, user, ids, context=None):
        """Delete models; only 'manual' ones may be removed outside of a
        module uninstall (MODULE_UNINSTALL_FLAG in context)."""
        # Prevent manual deletion of module tables
        if context is None: context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        if not context.get(MODULE_UNINSTALL_FLAG):
            for model in self.browse(cr, user, ids, context):
                if model.state != 'manual':
                    raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))

        self._drop_table(cr, user, ids, context)
        res = super(ir_model, self).unlink(cr, user, ids, context)
        if not context.get(MODULE_UNINSTALL_FLAG):
            # only reload pool for normal unlink. For module uninstall the
            # reload is done independently in openerp.modules.loading
            cr.commit() # must be committed before reloading registry in new cursor
            openerp.modules.registry.RegistryManager.new(cr.dbname)
            openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)

        return res

    def write(self, cr, user, ids, vals, context=None):
        if context:
            context.pop('__last_update', None)
        # Filter out operations 4 link from field id, because openerp-web
        # always write (4,id,False) even for non dirty items
        if 'field_id' in vals:
            vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
        return super(ir_model,self).write(cr, user, ids, vals, context)

    def create(self, cr, user, vals, context=None):
        """Create a model record; for manual models also instantiate the
        ORM class and create its database table."""
        if context is None:
            context = {}
        if context and context.get('manual'):
            vals['state']='manual'
        res = super(ir_model,self).create(cr, user, vals, context)
        if vals.get('state','base')=='manual':
            self.instanciate(cr, user, vals['model'], context)
            ctx = dict(context,
                field_name=vals['name'],
                field_state='manual',
                select=vals.get('select_level', '0'),
                update_custom_fields=True)
            self.pool[vals['model']]._auto_init(cr, ctx)
            self.pool[vals['model']]._auto_end(cr, ctx) # actually create FKs!
            openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
        return res

    def instanciate(self, cr, user, model, context=None):
        """Build and register a new osv class for a manual model and pick
        its record-name field (x_name if present, else first column)."""
        class x_custom_model(osv.osv):
            _custom = True
        x_custom_model._name = model
        x_custom_model._module = False
        a = x_custom_model.create_instance(self.pool, cr)
        if not a._columns:
            x_name = 'id'
        elif 'x_name' in a._columns.keys():
            x_name = 'x_name'
        else:
            x_name = a._columns.keys()[0]
        x_custom_model._rec_name = x_name
        a._rec_name = x_name
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'
_description = "Fields"
_rec_name = 'field_description'
_columns = {
'name': fields.char('Name', required=True, select=1),
'complete_name': fields.char('Complete Name', select=1),
'model': fields.char('Object Name', required=True, select=1,
help="The technical name of the model this field belongs to"),
'relation': fields.char('Object Relation',
help="For relationship fields, the technical name of the target model"),
'relation_field': fields.char('Relation Field',
help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
help="The model this field belongs to"),
'field_description': fields.char('Field Label', required=True),
'ttype': fields.selection(_get_fields_type, 'Field Type', required=True),
'selection': fields.char('Selection Options', help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]"),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'select_level': fields.selection([('0','Not Searchable'),('1','Always Searchable'),('2','Advanced Search (deprecated)')],'Searchable', required=True),
'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
'size': fields.integer('Size'),
'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
'on_delete': fields.selection([('cascade','Cascade'),('set null','Set NULL')], 'On Delete', help='On delete property for many2one fields'),
'domain': fields.char('Domain', help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]"),
'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
'selectable': fields.boolean('Selectable'),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the field is defined'),
'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation."),
}
_rec_name='field_description'
_defaults = {
'selection': "",
'domain': "[]",
'name': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
'on_delete': 'set null',
'select_level': '0',
'field_description': '',
'selectable': 1,
}
_order = "name"
def _check_selection(self, cr, uid, selection, context=None):
try:
selection_list = eval(selection)
except Exception:
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
raise except_orm(_('Error'),
_("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
check = True
if not (isinstance(selection_list, list) and selection_list):
check = False
else:
for item in selection_list:
if not (isinstance(item, (tuple,list)) and len(item) == 2):
check = False
break
if not check:
raise except_orm(_('Error'),
_("The Selection Options expression is must be in the [('key','Label'), ...] format!"))
return True
    def _size_gt_zero_msg(self, cr, user, ids, context=None):
        # Error message used by the 'size_gt_zero' SQL constraint below.
        return _('Size of the field can never be less than 0 !')

    # Database-level check: a field's size may never be negative.
    _sql_constraints = [
        ('size_gt_zero', 'CHECK (size>=0)',_size_gt_zero_msg ),
    ]
def _drop_column(self, cr, uid, ids, context=None):
for field in self.browse(cr, uid, ids, context):
if field.name in MAGIC_COLUMNS:
continue
model = self.pool[field.model]
cr.execute('select relkind from pg_class where relname=%s', (model._table,))
result = cr.fetchone()
cr.execute("SELECT column_name FROM information_schema.columns WHERE table_name ='%s' and column_name='%s'" %(model._table, field.name))
column_name = cr.fetchone()
if column_name and (result and result[0] == 'r'):
cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
model._columns.pop(field.name, None)
return True
def unlink(self, cr, user, ids, context=None):
# Prevent manual deletion of module columns
if context is None: context = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not context.get(MODULE_UNINSTALL_FLAG) and \
any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
raise except_orm(_('Error'), _("This column contains module data and cannot be removed!"))
self._drop_column(cr, user, ids, context)
res = super(ir_model_fields, self).unlink(cr, user, ids, context)
if not context.get(MODULE_UNINSTALL_FLAG):
cr.commit()
openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
return res
    def create(self, cr, user, vals, context=None):
        """Create an ir.model.fields record; for manual fields, also add the
        corresponding column to the owning model's database table.

        :raise except_orm: missing/invalid selection, bad custom-field name
                           (must start with 'x_'), or unknown relation model
        """
        if 'model_id' in vals:
            # denormalize the model's technical name onto the field record
            model_data = self.pool['ir.model'].browse(cr, user, vals['model_id'])
            vals['model'] = model_data.model
        if context is None:
            context = {}
        if context and context.get('manual',False):
            vals['state'] = 'manual'
        if vals.get('ttype', False) == 'selection':
            if not vals.get('selection',False):
                raise except_orm(_('Error'), _('For selection fields, the Selection Options must be given!'))
            self._check_selection(cr, user, vals['selection'], context=context)
        res = super(ir_model_fields,self).create(cr, user, vals, context)
        if vals.get('state','base') == 'manual':
            if not vals['name'].startswith('x_'):
                raise except_orm(_('Error'), _("Custom fields must have a name that starts with 'x_' !"))

            if vals.get('relation',False) and not self.pool['ir.model'].search(cr, user, [('model','=',vals['relation'])]):
                raise except_orm(_('Error'), _("Model %s does not exist!") % vals['relation'])

            if vals['model'] in self.pool:
                if vals['model'].startswith('x_') and vals['name'] == 'x_name':
                    # first custom field named x_name becomes the record label
                    self.pool[vals['model']]._rec_name = 'x_name'
                # re-instantiate the model so it picks up the new column
                self.pool[vals['model']].__init__(self.pool, cr)
                #Added context to _auto_init for special treatment to custom field for select_level
                ctx = dict(context,
                    field_name=vals['name'],
                    field_state='manual',
                    select=vals.get('select_level', '0'),
                    update_custom_fields=True)
                self.pool[vals['model']]._auto_init(cr, ctx)
                self.pool[vals['model']]._auto_end(cr, ctx) # actually create FKs!
                openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
        return res
    def write(self, cr, user, ids, vals, context=None):
        """Update ir.model.fields records and propagate the changes to the
        live model objects and the database schema.

        Only 'manual' fields may be modified this way. At most one column
        rename is allowed per call; renames are applied with ALTER TABLE and
        mirrored into the model's in-memory _columns dict. Other property
        changes are collected per model and synchronized via _auto_init.
        """
        if context is None:
            context = {}
        if context and context.get('manual',False):
            vals['state'] = 'manual'

        #For the moment renaming a sparse field or changing the storing system is not allowed. This may be done later
        if 'serialization_field_id' in vals or 'name' in vals:
            for field in self.browse(cr, user, ids, context=context):
                if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
                    raise except_orm(_('Error!'), _('Changing the storing system for field "%s" is not allowed.')%field.name)
                if field.serialization_field_id and (field.name != vals['name']):
                    raise except_orm(_('Error!'), _('Renaming sparse field "%s" is not allowed')%field.name)

        column_rename = None # if set, *one* column can be renamed here
        models_patch = {}    # structs of (obj, [(field, prop, change_to),..])
                             # data to be updated on the orm model

        # static table of properties: (our-name, fields.prop, set_fn)
        # set_fn converts the written value into the type the column
        # attribute expects (note: 'domain' and 'selection' are eval'ed)
        model_props = [ # (our-name, fields.prop, set_fn)
            ('field_description', 'string', tools.ustr),
            ('required', 'required', bool),
            ('readonly', 'readonly', bool),
            ('domain', '_domain', eval),
            ('size', 'size', int),
            ('on_delete', 'ondelete', str),
            ('translate', 'translate', bool),
            ('selectable', 'selectable', bool),
            ('select_level', 'select', int),
            ('selection', 'selection', eval),
            ]

        if vals and ids:
            checked_selection = False # need only check it once, so defer

            for item in self.browse(cr, user, ids, context=context):
                obj = self.pool.get(item.model)

                if item.state != 'manual':
                    raise except_orm(_('Error!'),
                        _('Properties of base fields cannot be altered in this manner! '
                          'Please modify them through Python code, '
                          'preferably through a custom addon!'))

                if item.ttype == 'selection' and 'selection' in vals \
                        and not checked_selection:
                    self._check_selection(cr, user, vals['selection'], context=context)
                    checked_selection = True

                final_name = item.name
                if 'name' in vals and vals['name'] != item.name:
                    # We need to rename the column
                    if column_rename:
                        raise except_orm(_('Error!'), _('Can only rename one column at a time!'))
                    if vals['name'] in obj._columns:
                        raise except_orm(_('Error!'), _('Cannot rename column to %s, because that column already exists!') % vals['name'])
                    if vals.get('state', 'base') == 'manual' and not vals['name'].startswith('x_'):
                        raise except_orm(_('Error!'), _('New column name must still start with x_ , because it is a custom field!'))
                    # crude guard against SQL injection through the new name,
                    # since it is interpolated into ALTER TABLE below
                    if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
                        raise ValueError('Invalid character in column name')
                    column_rename = (obj, (obj._table, item.name, vals['name']))
                    final_name = vals['name']

                if 'model_id' in vals and vals['model_id'] != item.model_id:
                    raise except_orm(_("Error!"), _("Changing the model of a field is forbidden!"))

                if 'ttype' in vals and vals['ttype'] != item.ttype:
                    raise except_orm(_("Error!"), _("Changing the type of a column is not yet supported. "
                                "Please drop it and create it again!"))

                # We don't check the 'state', because it might come from the context
                # (thus be set for multiple fields) and will be ignored anyway.
                if obj is not None:
                    models_patch.setdefault(obj._name, (obj,[]))
                    # find out which properties (per model) we need to update
                    for field_name, field_property, set_fn in model_props:
                        if field_name in vals:
                            property_value = set_fn(vals[field_name])
                            if getattr(obj._columns[item.name], field_property) != property_value:
                                models_patch[obj._name][1].append((final_name, field_property, property_value))
                    # our dict is ready here, but no properties are changed so far

            # These shall never be written (modified)
            for column_name in ('model_id', 'model', 'state'):
                if column_name in vals:
                    del vals[column_name]

        res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)

        if column_rename:
            cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % column_rename[1])
            # This is VERY risky, but let us have this feature:
            # we want to change the key of column in obj._columns dict
            col = column_rename[0]._columns.pop(column_rename[1][1]) # take object out, w/o copy
            column_rename[0]._columns[column_rename[1][2]] = col

        if models_patch:
            # We have to update _columns of the model(s) and then call their
            # _auto_init to sync the db with the model. Hopefully, since write()
            # was called earlier, they will be in-sync before the _auto_init.
            # Anything we don't update in _columns now will be reset from
            # the model into ir.model.fields (db).
            ctx = dict(context, select=vals.get('select_level', '0'),
                       update_custom_fields=True)
            for __, patch_struct in models_patch.items():
                obj = patch_struct[0]
                for col_name, col_prop, val in patch_struct[1]:
                    setattr(obj._columns[col_name], col_prop, val)
                obj._auto_init(cr, ctx)
                obj._auto_end(cr, ctx) # actually create FKs!
        openerp.modules.registry.RegistryManager.signal_registry_change(cr.dbname)
        return res
class ir_model_constraint(Model):
    """
    This model tracks PostgreSQL foreign keys and constraints used by OpenERP
    models.
    """
    _name = 'ir.model.constraint'
    _columns = {
        'name': fields.char('Constraint', required=True, select=1,
            help="PostgreSQL constraint or foreign key name."),
        'model': fields.many2one('ir.model', string='Model',
            required=True, select=1),
        'module': fields.many2one('ir.module.module', string='Module',
            required=True, select=1),
        'type': fields.char('Constraint Type', required=True, size=1, select=1,
            help="Type of the constraint: `f` for a foreign key, "
                "`u` for other constraints."),
        'date_update': fields.datetime('Update Date'),
        'date_init': fields.datetime('Initialization Date')
    }

    _sql_constraints = [
        ('module_name_uniq', 'unique(name, module)',
            'Constraints with the same name are unique per module.'),
    ]

    def _module_data_uninstall(self, cr, uid, ids, context=None):
        """
        Delete PostgreSQL foreign keys and constraints tracked by this model.

        Only the superuser or members of base.group_system may call this.
        A constraint is dropped only when *all* modules owning it are in the
        given ids; otherwise it is left in place for the remaining owners.
        """
        if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
            raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))

        context = dict(context or {})

        ids_set = set(ids)
        # process newest records first (highest ids last created)
        ids.sort()
        ids.reverse()
        for data in self.browse(cr, uid, ids, context):
            model = data.model.model
            model_obj = self.pool[model]
            name = openerp.tools.ustr(data.name)
            typ = data.type

            # double-check we are really going to delete all the owners of this schema element
            cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
            external_ids = [x[0] for x in cr.fetchall()]
            if set(external_ids)-ids_set:
                # as installed modules have defined this element we must not delete it!
                continue

            if typ == 'f':
                # test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
                cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
                              WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, model_obj._table))
                if cr.fetchone():
                    cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
                    _logger.info('Dropped FK CONSTRAINT %s@%s', name, model)

            if typ == 'u':
                # test if constraint exists
                cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
                              WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, model_obj._table))
                if cr.fetchone():
                    cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
                    _logger.info('Dropped CONSTRAINT %s@%s', name, model)

        self.unlink(cr, uid, ids, context)
class ir_model_relation(Model):
    """
    This model tracks PostgreSQL tables used to implement OpenERP many2many
    relations.
    """
    _name = 'ir.model.relation'
    _columns = {
        'name': fields.char('Relation Name', required=True, select=1,
            help="PostgreSQL table name implementing a many2many relation."),
        'model': fields.many2one('ir.model', string='Model',
            required=True, select=1),
        'module': fields.many2one('ir.module.module', string='Module',
            required=True, select=1),
        'date_update': fields.datetime('Update Date'),
        'date_init': fields.datetime('Initialization Date')
    }

    def _module_data_uninstall(self, cr, uid, ids, context=None):
        """
        Delete PostgreSQL many2many relations tracked by this model.

        Only the superuser or members of base.group_system may call this.
        A relation table is dropped only when no module outside ``ids``
        still declares it.
        """
        if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
            raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))

        ids_set = set(ids)
        to_drop_table = []
        # process newest records first
        ids.sort()
        ids.reverse()
        for data in self.browse(cr, uid, ids, context):
            model = data.model
            name = openerp.tools.ustr(data.name)

            # double-check we are really going to delete all the owners of this schema element
            cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
            external_ids = [x[0] for x in cr.fetchall()]
            if set(external_ids)-ids_set:
                # as installed modules have defined this element we must not delete it!
                continue

            cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
            if cr.fetchone() and not name in to_drop_table:
                to_drop_table.append(name)

        self.unlink(cr, uid, ids, context)

        # drop m2m relation tables
        # NOTE(review): the table name is interpolated unquoted; safe only
        # as long as relation names are plain lowercase identifiers — verify
        for table in to_drop_table:
            cr.execute('DROP TABLE %s CASCADE'% table,)
            _logger.info('Dropped table %s', table)

        cr.commit()
class ir_model_access(osv.osv):
    """Model-level access control lists (ACLs).

    Each record grants read/write/create/unlink permission on one model,
    either to one res.groups record or — when group_id is NULL — to every
    user (the "global" ACL).
    """
    _name = 'ir.model.access'
    _columns = {
        'name': fields.char('Name', required=True, select=True),
        'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module.'),
        'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('osv_memory','=', False)], select=True, ondelete='cascade'),
        'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
        'perm_read': fields.boolean('Read Access'),
        'perm_write': fields.boolean('Write Access'),
        'perm_create': fields.boolean('Create Access'),
        'perm_unlink': fields.boolean('Delete Access'),
    }
    _defaults = {
        'active': True,
    }

    def check_groups(self, cr, uid, group):
        """Return True if user ``uid`` belongs to the group whose external
        id is ``group`` (given as "module.xml_id").

        Robustness fix: an identifier without a dot previously raised
        IndexError (the old ``if not grouparr`` guard was dead code, since
        str.split always returns at least one element); it now simply
        reports no membership.
        """
        grouparr = group.split('.')
        if len(grouparr) < 2:
            return False
        cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
        return bool(cr.fetchone())

    def check_group(self, cr, uid, model, mode, group_ids):
        """ Check if a specific group has the access mode to the specified model"""
        # `mode` is whitelisted here because it is concatenated into SQL below
        assert mode in ['read','write','create','unlink'], 'Invalid access mode'

        if isinstance(model, browse_record):
            assert model._table_name == 'ir.model', 'Invalid model object'
            model_name = model.name
        else:
            model_name = model

        if isinstance(group_ids, (int, long)):
            group_ids = [group_ids]
        for group_id in group_ids:
            # group-specific ACL first; fall back to the global (NULL group) ACL
            cr.execute("SELECT perm_" + mode + " "
                       "  FROM ir_model_access a "
                       "  JOIN ir_model m ON (m.id = a.model_id) "
                       " WHERE m.model = %s AND a.active IS True "
                       "   AND a.group_id = %s", (model_name, group_id)
                       )
            r = cr.fetchone()
            if r is None:
                cr.execute("SELECT perm_" + mode + " "
                           "  FROM ir_model_access a "
                           "  JOIN ir_model m ON (m.id = a.model_id) "
                           " WHERE m.model = %s AND a.active IS True "
                           "   AND a.group_id IS NULL", (model_name, )
                           )
                r = cr.fetchone()

            access = bool(r and r[0])
            if access:
                return True
        # pass no groups -> no access
        return False

    def group_names_with_access(self, cr, model_name, access_mode):
        """Returns the names of visible groups which have been granted ``access_mode`` on
        the model ``model_name``.
        :rtype: list
        """
        # `access_mode` is whitelisted because it is concatenated into SQL
        assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
        cr.execute('''SELECT
                        c.name, g.name
                      FROM
                        ir_model_access a
                        JOIN ir_model m ON (a.model_id=m.id)
                        JOIN res_groups g ON (a.group_id=g.id)
                        LEFT JOIN ir_module_category c ON (c.id=g.category_id)
                      WHERE
                        m.model=%s AND
                        a.active IS True AND
                        a.perm_''' + access_mode, (model_name,))
        return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]

    @tools.ormcache()
    def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
        """Check whether `uid` may perform `mode` on `model`.

        Cached per (uid, model, mode, raise_exception). Returns the access
        flag, or raises AccessError when denied and raise_exception is True.
        """
        if uid == SUPERUSER_ID:
            # User root have all accesses (was hard-coded `uid==1`; use the
            # module-wide SUPERUSER_ID constant for consistency)
            # TODO: exclude xml-rpc requests
            return True

        assert mode in ['read','write','create','unlink'], 'Invalid access mode'

        if isinstance(model, browse_record):
            assert model._table_name == 'ir.model', 'Invalid model object'
            model_name = model.model
        else:
            model_name = model

        # TransientModel records have no access rights, only an implicit access rule
        if model_name not in self.pool:
            _logger.error('Missing model %s' % (model_name, ))
        elif self.pool[model_name].is_transient():
            return True

        # We check if a specific rule exists
        cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
                   '  FROM ir_model_access a '
                   '  JOIN ir_model m ON (m.id = a.model_id) '
                   '  JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
                   ' WHERE m.model = %s '
                   '   AND gu.uid = %s '
                   '   AND a.active IS True '
                   , (model_name, uid,)
                   )
        r = cr.fetchone()[0]

        if r is None:
            # there is no specific rule. We check the generic rule
            cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
                       '  FROM ir_model_access a '
                       '  JOIN ir_model m ON (m.id = a.model_id) '
                       ' WHERE a.group_id IS NULL '
                       '   AND m.model = %s '
                       '   AND a.active IS True '
                       , (model_name,)
                       )
            r = cr.fetchone()[0]

        if not r and raise_exception:
            groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
            msg_heads = {
                # Messages are declared in extenso so they are properly exported in translation terms
                'read': _("Sorry, you are not allowed to access this document."),
                'write':  _("Sorry, you are not allowed to modify this document."),
                'create': _("Sorry, you are not allowed to create this kind of document."),
                'unlink': _("Sorry, you are not allowed to delete this document."),
            }
            if groups:
                msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
                msg_params = (groups, model_name)
            else:
                msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
                msg_params = (model_name,)
            _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
            msg = '%s %s' % (msg_heads[mode], msg_tail)
            raise openerp.exceptions.AccessError(msg % msg_params)

        return r or False

    # (model, method-name) pairs whose caches must be flushed whenever the
    # ACL table changes
    __cache_clearing_methods = []

    def register_cache_clearing_method(self, model, method):
        self.__cache_clearing_methods.append((model, method))

    def unregister_cache_clearing_method(self, model, method):
        try:
            i = self.__cache_clearing_methods.index((model, method))
            del self.__cache_clearing_methods[i]
        except ValueError:
            pass

    def call_cache_clearing_methods(self, cr):
        self.check.clear_cache(self)    # clear the cache of check function
        for model, method in self.__cache_clearing_methods:
            if model in self.pool:
                getattr(self.pool[model], method)()

    #
    # Check rights on actions
    #
    def write(self, cr, uid, *args, **argv):
        self.call_cache_clearing_methods(cr)
        res = super(ir_model_access, self).write(cr, uid, *args, **argv)
        return res

    def create(self, cr, uid, *args, **argv):
        self.call_cache_clearing_methods(cr)
        res = super(ir_model_access, self).create(cr, uid, *args, **argv)
        return res

    def unlink(self, cr, uid, *args, **argv):
        self.call_cache_clearing_methods(cr)
        res = super(ir_model_access, self).unlink(cr, uid, *args, **argv)
        return res
class ir_model_data(osv.osv):
    """Holds external identifier keys for records in the database.
       This has two main uses:
           * allows easy data integration with third-party systems,
             making import/export/sync of data possible, as records
             can be uniquely identified across multiple systems
           * allows tracking the origin of data installed by OpenERP
             modules themselves, thus making it possible to later
             update them seamlessly.
    """
    _name = 'ir.model.data'
    # group entries by owning module, then by target model
    _order = 'module,model,name'
def _display_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
result2 = {}
for res in self.browse(cr, uid, ids, context=context):
if res.id:
result.setdefault(res.model, {})
result[res.model][res.res_id] = res.id
result2[res.id] = False
for model in result:
try:
r = dict(self.pool[model].name_get(cr, uid, result[model].keys(), context=context))
for key,val in result[model].items():
result2[val] = r.get(key, False)
except:
# some object have no valid name_get implemented, we accept this
pass
return result2
def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
result = {}
for res in self.browse(cr, uid, ids, context=context):
result[res.id] = (res.module and (res.module + '.') or '')+res.name
return result
_columns = {
'name': fields.char('External Identifier', required=True, select=1,
help="External Key/Identifier that can be used for "
"data integration with third-party systems"),
'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
'display_name': fields.function(_display_name_get, type='char', string='Record Name'),
'model': fields.char('Model Name', required=True, select=1),
'module': fields.char('Module', required=True, select=1),
'res_id': fields.integer('Record ID', select=1,
help="ID of the target record in the database"),
'noupdate': fields.boolean('Non Updatable'),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Init Date')
}
_defaults = {
'date_init': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'date_update': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'noupdate': False,
'module': ''
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)', 'You cannot have multiple records with the same external ID in the same module!'),
]
    def __init__(self, pool, cr):
        """Initialize the osv and attach the shared xmlid load-tracking dict.

        self.loads maps (module, xml_id) -> (model, res_id) for records
        processed during module loading.
        """
        osv.osv.__init__(self, pool, cr)
        self.doinit = True
        # also stored in pool to avoid being discarded along with this osv instance
        if getattr(pool, 'model_data_reference_ids', None) is None:
            self.pool.model_data_reference_ids = {}
        self.loads = self.pool.model_data_reference_ids
    def _auto_init(self, cr, context=None):
        """Create the table as usual, plus a composite (module, name) index
        used by the xmlid lookups; created only if it does not exist yet.
        """
        super(ir_model_data, self)._auto_init(cr, context)
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_model_data_module_name_index\'')
        if not cr.fetchone():
            cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')
# NEW V8 API
@tools.ormcache(skiparg=3)
def xmlid_lookup(self, cr, uid, xmlid):
"""Low level xmlid lookup
Return (id, res_model, res_id) or raise ValueError if not found
"""
module, name = xmlid.split('.', 1)
ids = self.search(cr, uid, [('module','=',module), ('name','=', name)])
if not ids:
raise ValueError('External ID not found in the system: %s' % (xmlid))
# the sql constraints ensure us we have only one result
res = self.read(cr, uid, ids[0], ['model', 'res_id'])
if not res['res_id']:
raise ValueError('External ID not found in the system: %s' % (xmlid))
return ids[0], res['model'], res['res_id']
def xmlid_to_res_model_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Return (res_model, res_id)"""
try:
return self.xmlid_lookup(cr, uid, xmlid)[1:3]
except ValueError:
if raise_if_not_found:
raise
return (False, False)
def xmlid_to_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Returns res_id """
return self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)[1]
def xmlid_to_object(self, cr, uid, xmlid, raise_if_not_found=False, context=None):
""" Return a browse_record
if not found and raise_if_not_found is True return the browse_null
"""
t = self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)
res_model, res_id = t
if res_model and res_id:
record = self.pool[res_model].browse(cr, uid, res_id, context=context)
if record.exists():
return record
if raise_if_not_found:
raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xml_id))
return browse_null()
# OLD API
def _get_id(self, cr, uid, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[0]
def get_object_reference(self, cr, uid, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[1:3]
def check_object_reference(self, cr, uid, module, xml_id, raise_on_access_error=False):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
to see that object, otherwise raise a ValueError if raise_on_access_error is True or returns a tuple (model found, False)"""
model, res_id = self.get_object_reference(cr, uid, module, xml_id)
#search on id found in result to check if current user has read access right
check_right = self.pool.get(model).search(cr, uid, [('id', '=', res_id)])
if check_right:
return model, res_id
if raise_on_access_error:
raise ValueError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
return model, False
    def get_object(self, cr, uid, module, xml_id, context=None):
        """ Returns a browsable record for the given module name and xml_id.

        Raises ValueError when the record is not found: this wrapper always
        passes raise_if_not_found=True. (The previous docstring referred to
        a `raise_exception` parameter that does not exist.)
        """
        return self.xmlid_to_object(cr, uid, "%s.%s" % (module, xml_id), raise_if_not_found=True, context=context)
def _update_dummy(self,cr, uid, model, module, xml_id=False, store=True):
if not xml_id:
return False
try:
id = self.read(cr, uid, [self._get_id(cr, uid, module, xml_id)], ['res_id'])[0]['res_id']
self.loads[(module,xml_id)] = (model,id)
except:
id = False
return id
    def clear_caches(self):
        """ Clears all orm caches on the object's methods

        Currently this only invalidates the xmlid_lookup ormcache.
        :returns: itself
        """
        self.xmlid_lookup.clear_cache(self)
        return self
def unlink(self, cr, uid, ids, context=None):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
    def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
        """Create or update the record identified by (module, xml_id),
        keeping the ir.model.data table in sync.

        Reconciles the xml_id against existing ir_model_data rows: stale
        entries (pointing to deleted records) are purged, conflicting model
        types are rejected, and noupdate entries are left untouched in
        'update' mode. New records get ir.model.data entries, including one
        per _inherits parent. The (module, xml_id) mapping is recorded in
        self.loads.

        :return: the res_id of the written/created record (False when
                 nothing was done)
        """
        model_obj = self.pool[model]
        if not context:
            context = {}
        # records created during module install should not display the messages of OpenChatter
        context = dict(context, install_mode=True)
        if xml_id and ('.' in xml_id):
            assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots ! These are used to refer to other modules data, as in module.reference_id") % xml_id
            module, xml_id = xml_id.split('.')
        if (not xml_id) and (not self.doinit):
            return False
        action_id = False

        if xml_id:
            # fetch the existing ir_model_data entry, outer-joined with the
            # target table to detect dangling references
            cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
                          FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
                          WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
                          (module, xml_id))
            results = cr.fetchall()
            for imd_id2,res_id2,real_id2,real_model,noupdate_imd in results:
                # In update mode, do not update a record if it's ir.model.data is flagged as noupdate
                if mode == 'update' and noupdate_imd:
                    return res_id2
                if not real_id2:
                    # target record was deleted: purge the stale entry
                    self.clear_caches()
                    cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
                    res_id = False
                else:
                    assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
                        " you can't define a `%s` record with this ID." % (xml_id, real_model, model)
                    res_id,action_id = res_id2,imd_id2

        if action_id and res_id:
            # existing record with a tracked xml_id: update both
            model_obj.write(cr, uid, [res_id], values, context=context)
            self.write(cr, uid, [action_id], {
                'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
                },context=context)
        elif res_id:
            # explicit res_id without a tracked xml_id entry
            model_obj.write(cr, uid, [res_id], values, context=context)
            if xml_id:
                if model_obj._inherits:
                    # register one xml_id per _inherits parent record as well
                    for table in model_obj._inherits:
                        inherit_id = model_obj.browse(cr, uid,
                                res_id,context=context)[model_obj._inherits[table]]
                        self.create(cr, uid, {
                            'name': xml_id + '_' + table.replace('.', '_'),
                            'model': table,
                            'module': module,
                            'res_id': inherit_id.id,
                            'noupdate': noupdate,
                            },context=context)
                self.create(cr, uid, {
                    'name': xml_id,
                    'model': model,
                    'module':module,
                    'res_id':res_id,
                    'noupdate': noupdate,
                    },context=context)
        else:
            # no record yet: create it only on first install, or on update
            # when it carries an xml_id
            if mode=='init' or (mode=='update' and xml_id):
                res_id = model_obj.create(cr, uid, values, context=context)
                if xml_id:
                    if model_obj._inherits:
                        for table in model_obj._inherits:
                            inherit_id = model_obj.browse(cr, uid,
                                    res_id,context=context)[model_obj._inherits[table]]
                            self.create(cr, uid, {
                                'name': xml_id + '_' + table.replace('.', '_'),
                                'model': table,
                                'module': module,
                                'res_id': inherit_id.id,
                                'noupdate': noupdate,
                                },context=context)
                    self.create(cr, uid, {
                        'name': xml_id,
                        'model': model,
                        'module': module,
                        'res_id': res_id,
                        'noupdate': noupdate
                        },context=context)
        if xml_id and res_id:
            # remember what was loaded, for this xmlid and its parents
            self.loads[(module, xml_id)] = (model, res_id)
            for table, inherit_field in model_obj._inherits.iteritems():
                inherit_id = model_obj.read(cr, uid, res_id,
                        [inherit_field])[inherit_field]
                self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)
        return res_id
def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
if isinstance(models[0], (list, tuple)):
model,res_id = models[0]
else:
res_id=None
model = models[0]
if res_id:
where = ' and res_id=%s' % (res_id,)
else:
where = ' and (res_id is null)'
if key2:
where += ' and key2=\'%s\'' % (key2,)
else:
where += ' and (key2 is null)'
cr.execute('select * from ir_values where model=%s and key=%s and name=%s'+where,(model, key, name))
res = cr.fetchone()
if not res:
ir_values_obj = openerp.registry(cr.dbname)['ir.values']
ir_values_obj.set(cr, uid, key, key2, name, models, value, replace, isobject, meta)
elif xml_id:
cr.execute('UPDATE ir_values set value=%s WHERE model=%s and key=%s and name=%s'+where,(value, model, key, name))
return True
def _module_data_uninstall(self, cr, uid, modules_to_remove, context=None):
"""Deletes all the records referenced by the ir.model.data entries
``ids`` along with their corresponding database backed (including
dropping tables, columns, FKs, etc, as long as there is no other
ir.model.data entry holding a reference to them (which indicates that
they are still owned by another module).
Attempts to perform the deletion in an appropriate order to maximize
the chance of gracefully deleting all records.
This step is performed as part of the full uninstallation of a module.
"""
ids = self.search(cr, uid, [('module', 'in', modules_to_remove)])
if uid != 1 and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
context[MODULE_UNINSTALL_FLAG] = True # enable model/field deletion
ids_set = set(ids)
wkf_todo = []
to_unlink = []
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
res_id = data.res_id
pair_to_unlink = (model, res_id)
if pair_to_unlink not in to_unlink:
to_unlink.append(pair_to_unlink)
if model == 'workflow.activity':
# Special treatment for workflow activities: temporarily revert their
# incoming transition and trigger an update to force all workflow items
# to move out before deleting them
cr.execute('select res_type,res_id from wkf_instance where id IN (select inst_id from wkf_workitem where act_id=%s)', (res_id,))
wkf_todo.extend(cr.fetchall())
cr.execute("update wkf_transition set condition='True', group_id=NULL, signal=NULL,act_to=act_from,act_from=%s where act_to=%s", (res_id,res_id))
for model,res_id in wkf_todo:
try:
openerp.workflow.trg_write(uid, model, res_id, cr)
except Exception:
_logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)
def unlink_if_refcount(to_unlink):
for model, res_id in to_unlink:
external_ids = self.search(cr, uid, [('model', '=', model),('res_id', '=', res_id)])
if set(external_ids)-ids_set:
# if other modules have defined this record, we must not delete it
continue
if model == 'ir.model.fields':
# Don't remove the LOG_ACCESS_COLUMNS unless _log_access
# has been turned off on the model.
field = self.pool[model].browse(cr, uid, [res_id], context=context)[0]
if not field.exists():
_logger.info('Deleting orphan external_ids %s', external_ids)
self.unlink(cr, uid, external_ids)
continue
if field.name in openerp.osv.orm.LOG_ACCESS_COLUMNS and self.pool[field.model]._log_access:
continue
if field.name == 'id':
continue
_logger.info('Deleting %s@%s', res_id, model)
try:
cr.execute('SAVEPOINT record_unlink_save')
self.pool[model].unlink(cr, uid, [res_id], context=context)
except Exception:
_logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
else:
cr.execute('RELEASE SAVEPOINT record_unlink_save')
# Remove non-model records first, then model fields, and finish with models
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model not in ('ir.model','ir.model.fields','ir.model.constraint'))
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.constraint')
ir_module_module = self.pool['ir.module.module']
ir_model_constraint = self.pool['ir.model.constraint']
modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)], context=context)
constraint_ids = ir_model_constraint.search(cr, uid, [('module', 'in', modules_to_remove_ids)], context=context)
ir_model_constraint._module_data_uninstall(cr, uid, constraint_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.fields')
ir_model_relation = self.pool['ir.model.relation']
relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])
ir_model_relation._module_data_uninstall(cr, uid, relation_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model')
cr.commit()
self.unlink(cr, uid, ids, context)
def _process_end(self, cr, uid, modules):
""" Clear records removed from updated module data.
This method is called at the end of the module loading process.
It is meant to removed records that are no longer present in the
updated data. Such records are recognised as the one with an xml id
and a module in ir_model_data and noupdate set to false, but not
present in self.loads.
"""
if not modules:
return True
to_unlink = []
cr.execute("""SELECT id,name,model,res_id,module FROM ir_model_data
WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC""",
(tuple(modules), False))
for (id, name, model, res_id, module) in cr.fetchall():
if (module,name) not in self.loads:
to_unlink.append((model,res_id))
if not config.get('import_partial'):
for (model, res_id) in to_unlink:
if model in self.pool:
_logger.info('Deleting %s@%s', res_id, model)
self.pool[model].unlink(cr, uid, [res_id])
class wizard_model_menu(osv.osv_memory):
_name = 'wizard.ir.model.menu.create'
_columns = {
'menu_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
'name': fields.char('Menu Name', size=64, required=True),
}
def menu_create(self, cr, uid, ids, context=None):
if not context:
context = {}
model_pool = self.pool.get('ir.model')
for menu in self.browse(cr, uid, ids, context):
model = model_pool.browse(cr, uid, context.get('model_id'), context=context)
val = {
'name': menu.name,
'res_model': model.model,
'view_type': 'form',
'view_mode': 'tree,form'
}
action_id = self.pool.get('ir.actions.act_window').create(cr, uid, val)
self.pool.get('ir.ui.menu').create(cr, uid, {
'name': menu.name,
'parent_id': menu.menu_id.id,
'action': 'ir.actions.act_window,%d' % (action_id,),
'icon': 'STOCK_INDENT'
}, context)
return {'type':'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
PaulWay/spacewalk | backend/server/apacheUploadServer.py | 2 | 5758 | #
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
import string
from spacewalk.common import apache
import rhnSession
from spacewalk.common import rhnFlags
from spacewalk.common.rhnLog import log_debug, log_error, log_setreq, initLOG
from spacewalk.common.rhnConfig import CFG, initCFG
from spacewalk.common.rhnException import rhnFault
from spacewalk.common.rhnTB import Traceback
from spacewalk.server import rhnImport
class UploadHandler:
def __init__(self):
self.servers = {}
self.server = None
def headerParserHandler(self, req):
log_setreq(req)
# init configuration options with proper component
options = req.get_options()
# if we are initializing out of a <Location> handler don't
# freak out
if not options.has_key("RHNComponentType"):
# clearly nothing to do
return apache.OK
initCFG(options["RHNComponentType"])
initLOG(CFG.LOG_FILE, CFG.DEBUG)
if req.method == 'GET':
# This is the ping method
return apache.OK
self.servers = rhnImport.load("upload_server/handlers",
interface_signature='upload_class')
if not options.has_key('SERVER'):
log_error("SERVER not set in the apache config files!")
return apache.HTTP_INTERNAL_SERVER_ERROR
server_name = options['SERVER']
if not self.servers.has_key(server_name):
log_error("Unable to load server %s from available servers %s" %
(server_name, self.servers))
return apache.HTTP_INTERNAL_SERVER_ERROR
server_class = self.servers[server_name]
self.server = server_class(req)
return self._wrapper(req, "headerParserHandler")
def handler(self, req):
if req.method == 'GET':
# This is the ping method
log_debug(1, "GET method received, returning")
req.headers_out['Content-Length'] = '0'
# pkilambi:check for new version of rhnpush to differentiate
# new sats from old satellites.
req.headers_out['X-RHN-Check-Package-Exists'] = '1'
req.send_http_header()
return apache.OK
return self._wrapper(req, "handler")
def cleanupHandler(self, req):
if req.method == 'GET':
# This is the ping method
return apache.OK
retval = self._wrapper(req, "cleanupHandler")
# Reset the logger to stderr
initLOG()
self.server = None
return retval
def logHandler(self, req):
if req.method == 'GET':
# This is the ping method
return apache.OK
retval = self._wrapper(req, "logHandler")
return retval
def _wrapper(self, req, function_name):
#log_debug(1, "_wrapper", req, function_name)
if not hasattr(self.server, function_name):
log_error("%s doesn't have a %s function" %
(self.server, function_name))
return apache.HTTP_NOT_FOUND
function = getattr(self.server, function_name)
try:
log_debug(5, "Calling", function)
ret = function(req)
except rhnFault, e:
log_debug(4, "rhnFault caught: %s" % (e, ))
error_string = self._exception_to_text(e)
error_code = e.code
self._error_to_headers(req.err_headers_out, error_code, error_string)
ret = rhnFlags.get("apache-return-code")
if not ret:
ret = apache.HTTP_INTERNAL_SERVER_ERROR
req.status = ret
log_debug(4, "_wrapper %s exited with apache code %s" %
(function_name, ret))
except rhnSession.ExpiredSessionError, e:
# if session expires we catch here and return a forbidden
# abd make it re-authenticate
log_debug(4, "Expire Session Error Caught: %s" % (e, ))
return 403
except:
Traceback("server.apacheUploadServer._wrapper", req=req)
log_error("Unhandled exception")
return apache.HTTP_INTERNAL_SERVER_ERROR
return ret
# Adds an error code and error string to the headers passed in
def _error_to_headers(self, headers, error_code, error_string):
error_string = string.strip(error_string)
import base64
error_string = string.strip(base64.encodestring(error_string))
for line in map(string.strip, string.split(error_string, '\n')):
headers.add(self.server.error_header_prefix + '-String', line)
headers[self.server.error_header_prefix + '-Code'] = str(error_code)
def _exception_to_text(self, exception):
return """\
Error Message:
%s
Error Class Code: %s
Error Class Info: %s
""" % (string.strip(exception.text), exception.code,
string.rstrip(exception.arrayText))
# Instantiate external entry points:
apache_server = UploadHandler()
HeaderParserHandler = apache_server.headerParserHandler
Handler = apache_server.handler
CleanupHandler = apache_server.cleanupHandler
LogHandler = apache_server.logHandler
| gpl-2.0 |
team-vigir/vigir_behaviors | vigir_flexbe_states/src/vigir_flexbe_states/read_dynamic_parameter_state.py | 1 | 2586 | #!/usr/bin/env python
from flexbe_core import EventState, Logger
import rospy
from dynamic_reconfigure.client import Client
"""
Created on 11/03/2014
@author: Philipp Schillinger
"""
class ReadDynamicParameterState(EventState):
"""
Reads a given trajectory controller parameter.
"""
LEFT_ARM_WRX = ['left_arm_traj_controller', 'l_arm_wrx']
LEFT_ARM_WRY = ['left_arm_traj_controller', 'l_arm_wry']
LEFT_ARM_ELX = ['left_arm_traj_controller', 'l_arm_elx']
LEFT_ARM_ELY = ['left_arm_traj_controller', 'l_arm_ely']
LEFT_ARM_SHX = ['left_arm_traj_controller', 'l_arm_shx']
LEFT_ARM_SHZ = ['left_arm_traj_controller', 'l_arm_shz']
RIGHT_ARM_WRX = ['right_arm_traj_controller', 'r_arm_wrx']
RIGHT_ARM_WRY = ['right_arm_traj_controller', 'r_arm_wry']
RIGHT_ARM_ELX = ['right_arm_traj_controller', 'r_arm_elx']
RIGHT_ARM_ELY = ['right_arm_traj_controller', 'r_arm_ely']
RIGHT_ARM_SHX = ['right_arm_traj_controller', 'r_arm_shx']
RIGHT_ARM_SHZ = ['right_arm_traj_controller', 'r_arm_shz']
def __init__(self, param):
"""Constructor"""
super(ReadDynamicParameterState, self).__init__(outcomes=['read', 'failed'],
input_keys=['traj_controller'],
output_keys=['parameter_value'])
self._param = param
self._failed = False
self._clients = {}
self._waiting_for_response = []
self._parameter_value_list = []
def execute(self, userdata):
if self._failed:
return 'failed'
value_offset = 0
for i in range(len(self._clients.keys())):
if self._waiting_for_response[i]:
param_dict = self._clients.values()[i].get_configuration(0.1)
if param_dict is not None:
self._waiting_for_response[i] = False
value_list = []
for j in range(len(self._param.values()[i])):
value_list.append(param_dict[self._param.values()[i][j]])
self._parameter_value_list[value_offset:value_offset+len(value_list)] = value_list
value_offset += len(self._param.values()[i])
if all(not waiting for waiting in self._waiting_for_response):
userdata.parameter_value = self._parameter_value_list
return 'read'
def on_enter(self, userdata):
self._clients = {}
self._waiting_for_response = [True] * len(self._param.keys())
self._parameter_value_list = [None] * sum(map(len, self._param.values()))
try:
for server in self._param.keys():
self._clients[server] = Client("/trajectory_controllers/" + userdata.traj_controller[0] + "/" + server + "/" + userdata.traj_controller[1])
except Exception as e:
Logger.logwarn('Was unable to reach parameter server:\n%s' % str(e))
self._failed = True
| bsd-3-clause |
amyvmiwei/kbengine | kbe/res/scripts/common/Lib/test/test_bz2.py | 72 | 32972 | from test import support
from test.support import bigmemtest, _4G
import unittest
from io import BytesIO
import os
import pickle
import random
import subprocess
import sys
from test.support import unlink
try:
import threading
except ImportError:
threading = None
# Skip tests if the bz2 module doesn't exist.
bz2 = support.import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
class BaseTest(unittest.TestCase):
"Base for other testcases."
TEXT_LINES = [
b'root:x:0:0:root:/root:/bin/bash\n',
b'bin:x:1:1:bin:/bin:\n',
b'daemon:x:2:2:daemon:/sbin:\n',
b'adm:x:3:4:adm:/var/adm:\n',
b'lp:x:4:7:lp:/var/spool/lpd:\n',
b'sync:x:5:0:sync:/sbin:/bin/sync\n',
b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n',
b'halt:x:7:0:halt:/sbin:/sbin/halt\n',
b'mail:x:8:12:mail:/var/spool/mail:\n',
b'news:x:9:13:news:/var/spool/news:\n',
b'uucp:x:10:14:uucp:/var/spool/uucp:\n',
b'operator:x:11:0:operator:/root:\n',
b'games:x:12:100:games:/usr/games:\n',
b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n',
b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n',
b'nobody:x:65534:65534:Nobody:/home:\n',
b'postfix:x:100:101:postfix:/var/spool/postfix:\n',
b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n',
b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n',
b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n',
b'www:x:103:104::/var/www:/bin/false\n',
]
TEXT = b''.join(TEXT_LINES)
DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
EMPTY_DATA = b'BZh9\x17rE8P\x90\x00\x00\x00\x00'
BAD_DATA = b'this is not a valid bzip2 file'
def setUp(self):
self.filename = support.TESTFN
def tearDown(self):
if os.path.isfile(self.filename):
os.unlink(self.filename)
if sys.platform == "win32":
# bunzip2 isn't available to run on Windows.
def decompress(self, data):
return bz2.decompress(data)
else:
def decompress(self, data):
pop = subprocess.Popen("bunzip2", shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pop.stdin.write(data)
pop.stdin.close()
ret = pop.stdout.read()
pop.stdout.close()
if pop.wait() != 0:
ret = bz2.decompress(data)
return ret
class BZ2FileTest(BaseTest):
"Test the BZ2File class."
def createTempFile(self, streams=1, suffix=b""):
with open(self.filename, "wb") as f:
f.write(self.DATA * streams)
f.write(suffix)
def testBadArgs(self):
self.assertRaises(TypeError, BZ2File, 123.456)
self.assertRaises(ValueError, BZ2File, "/dev/null", "z")
self.assertRaises(ValueError, BZ2File, "/dev/null", "rx")
self.assertRaises(ValueError, BZ2File, "/dev/null", "rbt")
self.assertRaises(ValueError, BZ2File, "/dev/null", compresslevel=0)
self.assertRaises(ValueError, BZ2File, "/dev/null", compresslevel=10)
def testRead(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(), self.TEXT)
def testReadBadFile(self):
self.createTempFile(streams=0, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertRaises(OSError, bz2f.read)
def testReadMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testReadMonkeyMultiStream(self):
# Test BZ2File.read() on a multi-stream archive where a stream
# boundary coincides with the end of the raw read buffer.
buffer_size = bz2._BUFFER_SIZE
bz2._BUFFER_SIZE = len(self.DATA)
try:
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(), self.TEXT * 5)
finally:
bz2._BUFFER_SIZE = buffer_size
def testReadTrailingJunk(self):
self.createTempFile(suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT)
def testReadMultiStreamTrailingJunk(self):
self.createTempFile(streams=5, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testRead0(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(0), b"")
def testReadChunk10(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT)
def testReadChunk10MultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT * 5)
def testRead100(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(100), self.TEXT[:100])
def testPeek(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testReadInto(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
n = 128
b = bytearray(n)
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b, self.TEXT[:n])
n = len(self.TEXT) - n
b = bytearray(len(self.TEXT))
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b[:n], self.TEXT[-n:])
def testReadLine(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES:
self.assertEqual(bz2f.readline(), line)
def testReadLineMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES * 5:
self.assertEqual(bz2f.readline(), line)
def testReadLines(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES)
def testReadLinesMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5)
def testIterator(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES)
def testIteratorMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5)
def testClosedIteratorDeadlock(self):
# Issue #3309: Iteration on a closed BZ2File should release the lock.
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.close()
self.assertRaises(ValueError, next, bz2f)
# This call will deadlock if the above call failed to release the lock.
self.assertRaises(ValueError, bz2f.readlines)
def testWrite(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT)
def testWriteChunks10(self):
with BZ2File(self.filename, "w") as bz2f:
n = 0
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
bz2f.write(str)
n += 1
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT)
def testWriteNonDefaultCompressLevel(self):
expected = bz2.compress(self.TEXT, compresslevel=5)
with BZ2File(self.filename, "w", compresslevel=5) as bz2f:
bz2f.write(self.TEXT)
with open(self.filename, "rb") as f:
self.assertEqual(f.read(), expected)
def testWriteLines(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.writelines)
bz2f.writelines(self.TEXT_LINES)
# Issue #1535500: Calling writelines() on a closed BZ2File
# should raise an exception.
self.assertRaises(ValueError, bz2f.writelines, ["a"])
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT)
def testWriteMethodsOnReadOnlyFile(self):
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(b"abc")
with BZ2File(self.filename, "r") as bz2f:
self.assertRaises(OSError, bz2f.write, b"a")
self.assertRaises(OSError, bz2f.writelines, [b"a"])
def testAppend(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with BZ2File(self.filename, "a") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(self.decompress(f.read()), self.TEXT * 2)
def testSeekForward(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekForwardAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(len(self.TEXT) + 150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwards(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def testSeekBackwardsAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
readto = len(self.TEXT) + 100
while readto > 0:
readto -= len(bz2f.read(readto))
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT)
def testSeekBackwardsFromEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150, 2)
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
def testSeekBackwardsFromEndAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-1000, 2)
self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:])
def testSeekPostEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwice(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwiceMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPreStart(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT)
def testSeekPreStartMultiStream(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT * 2)
def testFileno(self):
self.createTempFile()
with open(self.filename, 'rb') as rawf:
bz2f = BZ2File(rawf)
try:
self.assertEqual(bz2f.fileno(), rawf.fileno())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.fileno)
def testSeekable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.seekable())
bz2f.read()
self.assertTrue(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
src = BytesIO(self.DATA)
src.seekable = lambda: False
bz2f = BZ2File(src)
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
def testReadable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.readable())
bz2f.read()
self.assertTrue(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
def testWritable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertFalse(bz2f.writable())
bz2f.read()
self.assertFalse(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertTrue(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
def testOpenDel(self):
self.createTempFile()
for i in range(10000):
o = BZ2File(self.filename)
del o
def testOpenNonexistent(self):
self.assertRaises(OSError, BZ2File, "/non/existent")
def testReadlinesNoNewline(self):
# Issue #1191043: readlines() fails on a file containing no newline.
data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
with open(self.filename, "wb") as f:
f.write(data)
with BZ2File(self.filename) as bz2f:
lines = bz2f.readlines()
self.assertEqual(lines, [b'Test'])
with BZ2File(self.filename) as bz2f:
xlines = list(bz2f.readlines())
self.assertEqual(xlines, [b'Test'])
def testContextProtocol(self):
f = None
with BZ2File(self.filename, "wb") as f:
f.write(b"xxx")
f = BZ2File(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with BZ2File(self.filename, "wb") as f:
1/0
except ZeroDivisionError:
pass
else:
self.fail("1/0 didn't raise an exception")
@unittest.skipUnless(threading, 'Threading required for this test.')
def testThreading(self):
# Issue #7205: Using a BZ2File from several threads shouldn't deadlock.
data = b"1" * 2**20
nthreads = 10
with BZ2File(self.filename, 'wb') as f:
def comp():
for i in range(5):
f.write(data)
threads = [threading.Thread(target=comp) for i in range(nthreads)]
for t in threads:
t.start()
for t in threads:
t.join()
def testWithoutThreading(self):
module = support.import_fresh_module("bz2", blocked=("threading",))
with module.BZ2File(self.filename, "wb") as f:
f.write(b"abc")
with module.BZ2File(self.filename, "rb") as f:
self.assertEqual(f.read(), b"abc")
def testMixedIterationAndReads(self):
self.createTempFile()
linelen = len(self.TEXT_LINES[0])
halflen = linelen // 2
with BZ2File(self.filename) as bz2f:
bz2f.read(halflen)
self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
self.assertEqual(bz2f.read(), self.TEXT[linelen:])
with BZ2File(self.filename) as bz2f:
bz2f.readline()
self.assertEqual(next(bz2f), self.TEXT_LINES[1])
self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
with BZ2File(self.filename) as bz2f:
bz2f.readlines()
self.assertRaises(StopIteration, next, bz2f)
self.assertEqual(bz2f.readlines(), [])
def testMultiStreamOrdering(self):
# Test the ordering of streams when reading a multi-stream archive.
data1 = b"foo" * 1000
data2 = b"bar" * 1000
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(data1)
with BZ2File(self.filename, "a") as bz2f:
bz2f.write(data2)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), data1 + data2)
def testOpenBytesFilename(self):
str_filename = self.filename
try:
bytes_filename = str_filename.encode("ascii")
except UnicodeEncodeError:
self.skipTest("Temporary file name needs to be ASCII")
with BZ2File(bytes_filename, "wb") as f:
f.write(self.DATA)
with BZ2File(bytes_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
# Sanity check that we are actually operating on the right file.
with BZ2File(str_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
# Tests for a BZ2File wrapping another file object:
def testReadBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.read, None)
self.assertEqual(bz2f.read(), self.TEXT)
self.assertFalse(bio.closed)
def testPeekBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testWriteBytesIO(self):
with BytesIO() as bio:
with BZ2File(bio, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
self.assertEqual(self.decompress(bio.getvalue()), self.TEXT)
self.assertFalse(bio.closed)
def testSeekForwardBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwardsBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def test_read_truncated(self):
# Drop the eos_magic field (6 bytes) and CRC (4 bytes).
truncated = self.DATA[:-10]
with BZ2File(BytesIO(truncated)) as f:
self.assertRaises(EOFError, f.read)
with BZ2File(BytesIO(truncated)) as f:
self.assertEqual(f.read(len(self.TEXT)), self.TEXT)
self.assertRaises(EOFError, f.read, 1)
# Incomplete 4-byte file header, and block header of at least 146 bits.
for i in range(22):
with BZ2File(BytesIO(truncated[:i])) as f:
self.assertRaises(EOFError, f.read, 1)
class BZ2CompressorTest(BaseTest):
def testCompress(self):
bz2c = BZ2Compressor()
self.assertRaises(TypeError, bz2c.compress)
data = bz2c.compress(self.TEXT)
data += bz2c.flush()
self.assertEqual(self.decompress(data), self.TEXT)
def testCompressEmptyString(self):
bz2c = BZ2Compressor()
data = bz2c.compress(b'')
data += bz2c.flush()
self.assertEqual(data, self.EMPTY_DATA)
def testCompressChunks10(self):
bz2c = BZ2Compressor()
n = 0
data = b''
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
data += bz2c.compress(str)
n += 1
data += bz2c.flush()
self.assertEqual(self.decompress(data), self.TEXT)
@bigmemtest(size=_4G + 100, memuse=2)
def testCompress4G(self, size):
# "Test BZ2Compressor.compress()/flush() with >4GiB input"
bz2c = BZ2Compressor()
data = b"x" * size
try:
compressed = bz2c.compress(data)
compressed += bz2c.flush()
finally:
data = None # Release memory
data = bz2.decompress(compressed)
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip(b"x")), 0)
finally:
data = None
def testPickle(self):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Compressor())
class BZ2DecompressorTest(BaseTest):
    """Tests for the incremental BZ2Decompressor object."""

    def test_Constructor(self):
        # The constructor takes no positional arguments.
        self.assertRaises(TypeError, BZ2Decompressor, 42)

    def testDecompress(self):
        bz2d = BZ2Decompressor()
        # decompress() requires a data argument.
        self.assertRaises(TypeError, bz2d.decompress)
        text = bz2d.decompress(self.DATA)
        self.assertEqual(text, self.TEXT)

    def testDecompressChunks10(self):
        # Feed the compressed stream in 10-byte slices; the incremental
        # decompressor must produce the same output as one-shot decompression.
        bz2d = BZ2Decompressor()
        text = b''
        n = 0
        while True:
            # NOTE: `str` shadows the builtin here (kept for byte-compatibility).
            str = self.DATA[n*10:(n+1)*10]
            if not str:
                break
            text += bz2d.decompress(str)
            n += 1
        self.assertEqual(text, self.TEXT)

    def testDecompressUnusedData(self):
        # Bytes past the end of the stream are exposed via .unused_data.
        bz2d = BZ2Decompressor()
        unused_data = b"this is unused data"
        text = bz2d.decompress(self.DATA+unused_data)
        self.assertEqual(text, self.TEXT)
        self.assertEqual(bz2d.unused_data, unused_data)

    def testEOFError(self):
        # Any decompress() call after end-of-stream raises EOFError,
        # even for empty input.
        bz2d = BZ2Decompressor()
        text = bz2d.decompress(self.DATA)
        self.assertRaises(EOFError, bz2d.decompress, b"anything")
        self.assertRaises(EOFError, bz2d.decompress, b"")

    @bigmemtest(size=_4G + 100, memuse=3.3)
    def testDecompress4G(self, size):
        # "Test BZ2Decompressor.decompress() with >4GiB input"
        blocksize = 10 * 1024 * 1024
        block = random.getrandbits(blocksize * 8).to_bytes(blocksize, 'little')
        try:
            data = block * (size // blocksize + 1)
            compressed = bz2.compress(data)
            bz2d = BZ2Decompressor()
            decompressed = bz2d.decompress(compressed)
            self.assertTrue(decompressed == data)
        finally:
            # Release the multi-GiB buffers so the bigmem harness can reclaim them.
            data = None
            compressed = None
            decompressed = None

    def testPickle(self):
        # Like the compressor, the decompressor cannot be pickled.
        with self.assertRaises(TypeError):
            pickle.dumps(BZ2Decompressor())
class CompressDecompressTest(BaseTest):
    """One-shot bz2.compress()/bz2.decompress() round-trip tests."""

    def testCompress(self):
        """Compressing TEXT must produce data that decompresses back to TEXT."""
        compressed = bz2.compress(self.TEXT)
        self.assertEqual(self.decompress(compressed), self.TEXT)

    def testCompressEmptyString(self):
        """Compressing an empty input yields the canonical empty stream."""
        compressed = bz2.compress(b'')
        self.assertEqual(compressed, self.EMPTY_DATA)

    def testDecompress(self):
        """A known-good stream decompresses to the reference text."""
        decompressed = bz2.decompress(self.DATA)
        self.assertEqual(decompressed, self.TEXT)

    def testDecompressEmpty(self):
        """Decompressing zero bytes yields zero bytes."""
        decompressed = bz2.decompress(b"")
        self.assertEqual(decompressed, b"")

    def testDecompressToEmptyString(self):
        """The empty-stream encoding decompresses to zero bytes."""
        decompressed = bz2.decompress(self.EMPTY_DATA)
        self.assertEqual(decompressed, b'')

    def testDecompressIncomplete(self):
        """A truncated stream is rejected with ValueError."""
        self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])

    def testDecompressBadData(self):
        """A corrupted stream is rejected with OSError."""
        self.assertRaises(OSError, bz2.decompress, self.BAD_DATA)

    def testDecompressMultiStream(self):
        """Concatenated streams decompress to concatenated outputs."""
        decompressed = bz2.decompress(self.DATA * 5)
        self.assertEqual(decompressed, self.TEXT * 5)

    def testDecompressTrailingJunk(self):
        """Junk after a valid stream is ignored."""
        decompressed = bz2.decompress(self.DATA + self.BAD_DATA)
        self.assertEqual(decompressed, self.TEXT)

    def testDecompressMultiStreamTrailingJunk(self):
        """Junk after several valid streams is ignored."""
        decompressed = bz2.decompress(self.DATA * 5 + self.BAD_DATA)
        self.assertEqual(decompressed, self.TEXT * 5)
class OpenTest(BaseTest):
    "Test the open function."

    def open(self, *args, **kwargs):
        # Indirection point so subclasses/tests all go through bz2.open().
        return bz2.open(*args, **kwargs)

    def test_binary_modes(self):
        # "wb" truncates; "xb" requires the file to be absent first.
        for mode in ("wb", "xb"):
            if mode == "xb":
                unlink(self.filename)
            with self.open(self.filename, mode) as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = self.decompress(f.read())
                self.assertEqual(file_data, self.TEXT)
            with self.open(self.filename, "rb") as f:
                self.assertEqual(f.read(), self.TEXT)
            # Appending writes a second bz2 stream after the first.
            with self.open(self.filename, "ab") as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = self.decompress(f.read())
                self.assertEqual(file_data, self.TEXT * 2)

    def test_implicit_binary_modes(self):
        # Test implicit binary modes (no "b" or "t" in mode string).
        for mode in ("w", "x"):
            if mode == "x":
                unlink(self.filename)
            with self.open(self.filename, mode) as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = self.decompress(f.read())
                self.assertEqual(file_data, self.TEXT)
            with self.open(self.filename, "r") as f:
                self.assertEqual(f.read(), self.TEXT)
            with self.open(self.filename, "a") as f:
                f.write(self.TEXT)
            with open(self.filename, "rb") as f:
                file_data = self.decompress(f.read())
                self.assertEqual(file_data, self.TEXT * 2)

    def test_text_modes(self):
        # Text mode translates "\n" to the platform line separator on write.
        text = self.TEXT.decode("ascii")
        text_native_eol = text.replace("\n", os.linesep)
        for mode in ("wt", "xt"):
            if mode == "xt":
                unlink(self.filename)
            with self.open(self.filename, mode) as f:
                f.write(text)
            with open(self.filename, "rb") as f:
                file_data = self.decompress(f.read()).decode("ascii")
                self.assertEqual(file_data, text_native_eol)
            with self.open(self.filename, "rt") as f:
                self.assertEqual(f.read(), text)
            with self.open(self.filename, "at") as f:
                f.write(text)
            with open(self.filename, "rb") as f:
                file_data = self.decompress(f.read()).decode("ascii")
                self.assertEqual(file_data, text_native_eol * 2)

    def test_x_mode(self):
        # Exclusive-creation modes must fail if the file already exists.
        for mode in ("x", "xb", "xt"):
            unlink(self.filename)
            with self.open(self.filename, mode) as f:
                pass
            with self.assertRaises(FileExistsError):
                with self.open(self.filename, mode) as f:
                    pass

    def test_fileobj(self):
        # bz2.open() also accepts an existing binary file object.
        with self.open(BytesIO(self.DATA), "r") as f:
            self.assertEqual(f.read(), self.TEXT)
        with self.open(BytesIO(self.DATA), "rb") as f:
            self.assertEqual(f.read(), self.TEXT)
        text = self.TEXT.decode("ascii")
        with self.open(BytesIO(self.DATA), "rt") as f:
            self.assertEqual(f.read(), text)

    def test_bad_params(self):
        # Test invalid parameter combinations.
        # "b" and "t" are mutually exclusive; text-only arguments are
        # rejected in binary mode.
        self.assertRaises(ValueError,
                          self.open, self.filename, "wbt")
        self.assertRaises(ValueError,
                          self.open, self.filename, "xbt")
        self.assertRaises(ValueError,
                          self.open, self.filename, "rb", encoding="utf-8")
        self.assertRaises(ValueError,
                          self.open, self.filename, "rb", errors="ignore")
        self.assertRaises(ValueError,
                          self.open, self.filename, "rb", newline="\n")

    def test_encoding(self):
        # Test non-default encoding.
        text = self.TEXT.decode("ascii")
        text_native_eol = text.replace("\n", os.linesep)
        with self.open(self.filename, "wt", encoding="utf-16-le") as f:
            f.write(text)
        with open(self.filename, "rb") as f:
            file_data = self.decompress(f.read()).decode("utf-16-le")
            self.assertEqual(file_data, text_native_eol)
        with self.open(self.filename, "rt", encoding="utf-16-le") as f:
            self.assertEqual(f.read(), text)

    def test_encoding_error_handler(self):
        # Test with non-default encoding error handler.
        # The undecodable 0xff byte is dropped by errors="ignore".
        with self.open(self.filename, "wb") as f:
            f.write(b"foo\xffbar")
        with self.open(self.filename, "rt", encoding="ascii", errors="ignore") as f:
            self.assertEqual(f.read(), "foobar")

    def test_newline(self):
        # Test with explicit newline (universal newline mode disabled).
        text = self.TEXT.decode("ascii")
        with self.open(self.filename, "wt", newline="\n") as f:
            f.write(text)
        # Reading with newline="\r" means "\n" is not a line separator, so
        # the whole text comes back as a single line.
        with self.open(self.filename, "rt", newline="\r") as f:
            self.assertEqual(f.readlines(), [text])
def test_main():
    """Run every bz2 test case in this module, then reap child processes."""
    test_cases = (
        BZ2FileTest,
        BZ2CompressorTest,
        BZ2DecompressorTest,
        CompressDecompressTest,
        OpenTest,
    )
    support.run_unittest(*test_cases)
    support.reap_children()


if __name__ == '__main__':
    test_main()
| lgpl-3.0 |
digital-abyss/ansible-modules-extras | cloud/vmware/vmware_vmkernel_ip_config.py | 45 | 4192 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_vmkernel_ip_config
short_description: Configure the VMkernel IP Address
description:
- Configure the VMkernel IP Address
version_added: 2.0
author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
hostname:
description:
- The hostname or IP address of the ESXi server
required: True
username:
description:
- The username of the ESXi server
required: True
aliases: ['user', 'admin']
password:
description:
- The password of the ESXi server
required: True
aliases: ['pass', 'pwd']
vmk_name:
description:
- VMkernel interface name
required: True
ip_address:
description:
- IP address to assign to VMkernel interface
required: True
subnet_mask:
description:
- Subnet Mask to assign to VMkernel interface
required: True
'''
EXAMPLES = '''
# Example command from Ansible Playbook
- name: Configure IP address on ESX host
local_action:
module: vmware_vmkernel_ip_config
hostname: esxi_hostname
username: esxi_username
password: esxi_password
vmk_name: vmk0
ip_address: 10.0.0.10
subnet_mask: 255.255.255.0
'''
# pyVmomi is an optional third-party dependency; record its availability so
# main() can fail with a clear message instead of an ImportError traceback.
try:
    from pyVmomi import vim, vmodl
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False
def configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask):
    """Assign a static IP address/netmask to the named VMkernel interface.

    Returns True when the interface was reconfigured, False when no matching
    interface required a change.
    """
    network_system = host_system.configManager.networkSystem
    for virtual_nic in network_system.networkConfig.vnic:
        if virtual_nic.device != vmk_name:
            continue
        nic_spec = virtual_nic.spec
        if nic_spec.ip.ipAddress != ip_address:
            # Switch the NIC to a static configuration and push it to the host.
            nic_spec.ip.dhcp = False
            nic_spec.ip.ipAddress = ip_address
            nic_spec.ip.subnetMask = subnet_mask
            network_system.UpdateVirtualNic(vmk_name, nic_spec)
            return True
    return False
def main():
    """Ansible entry point: set a static IP on a VMkernel NIC of an ESXi host."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(vmk_name=dict(required=True, type='str'),
                              ip_address=dict(required=True, type='str'),
                              subnet_mask=dict(required=True, type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    vmk_name = module.params['vmk_name']
    ip_address = module.params['ip_address']
    subnet_mask = module.params['subnet_mask']

    try:
        content = connect_to_api(module, False)
        host = get_all_objs(content, [vim.HostSystem])
        if not host:
            module.fail_json(msg="Unable to locate Physical Host.")
        # BUG FIX: get_all_objs() returns a dict and dict.keys() is not
        # subscriptable on Python 3; materialize the view before indexing
        # (works on both Python 2 and 3).
        host_system = list(host.keys())[0]
        changed = configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask)
        module.exit_json(changed=changed)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
# Standard Ansible module entry point (the wildcard utils imports above must
# already have executed by the time main() runs).
if __name__ == '__main__':
    main()
| gpl-3.0 |
will-iam/Variant | script/process/ergodicity_scaling.py | 1 | 4083 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
import __future__
import parser
import sys
import matplotlib.pyplot as plt
#plt.style.use('ggplot')
import numpy as np
import operator
from collections import *
# Problem size (cells in X and Y) for the scaling study.
caseSize = (8192, 8192)

# Number of nodes available at once; overridable from the command line.
if parser.args.res:
    maxAvailableNode = parser.args.res
else:
    maxAvailableNode = 8

# Collect one data set per power-of-two resource count (R = 64, 128, ...).
sizeDataDict = []
for p in range(0, int(np.log2(maxAvailableNode)) + 1):
    filterDict = {'nSizeX': caseSize[0], 'nSizeY': caseSize[1], 'R': 64 * 2**p}
    # BUG FIX: this script declares #!/usr/bin/python3 but used the Python 2
    # print statement here ("print filterDict"), which is a SyntaxError under
    # Python 3 (every other print in the file already uses the function form).
    print(filterDict)
    data = parser.getData(filterDict)
    if len(data):
        sizeDataDict.append(data)

if len(sizeDataDict) == 0:
    print("No data found.")
    sys.exit(1)
# Group normalized loop times by resource count R, keeping only the
# configurations known to perform best for that R.
loopTimeDict = dict()
for data in sizeDataDict:
    for key, value in data.items():
        keyDict = parser.extractKey(key)
        Nt = keyDict['Nt']
        R = keyDict['R']
        # Sanity check: every record must match the requested case size.
        # NOTE(review): Ny is compared to caseSize[0] and Nx to caseSize[1];
        # the case is square here so the order is moot, but confirm the
        # intended axis mapping before reusing this with non-square cases.
        if keyDict['Ny'] != caseSize[0] or keyDict['Nx'] != caseSize[1]:
            print("Error in collected data")
            sys.exit(1)
        for run in value:
            # Total number of SDD subdomains in this run.
            nSDD = run['point'][0] * run['point'][1]
            # On several nodes, select only pure SDD, which is the best result.
            if R > 64 and nSDD < R:
                continue
            # Don't remove HyperThreading.
            # We assume that hyperthreading with SDD leads to same results as with SDS.
            #if R > 64 and nSDD == R and Nt > 1.0:
            #    continue
            # On a single node, select only pure SDS
            if R == 64 and nSDD > 1:
                continue
            # Normalize by cell count and iteration count (Ni), scaled to ms.
            loopT = run['loopTime'] * caseSize[0] * caseSize[1] * keyDict['Ni'] / 1000.
            if R not in loopTimeDict.keys():
                loopTimeDict[R] = list()
            loopTimeDict[R].append(loopT)
# And now, we must plot that
fig = plt.figure(0, figsize=(9, 6))
ax = fig.add_subplot(111)
#ax = fig.add_subplot(211)
#ax.set_xscale('log', basex=2)
#ax.set_yscale('log')
# Number of simulations whose cumulative batch time is projected on x.
maxSimulationNumber = 42
xArray = range(1, maxSimulationNumber + 1)
'''
#Perfect Scale
loopTimeDict[128] = [k / 2. for k in loopTimeDict[64]]
loopTimeDict[256] = [k / 4. for k in loopTimeDict[64]]
loopTimeDict[512] = [k / 8. for k in loopTimeDict[64]]
'''
for r in sorted(loopTimeDict):
    # R counts cores; 64 cores per node, so r // 64 nodes per simulation.
    nodeNeeded = r // 64
    minT = np.min(loopTimeDict[r])
    print("Min Time %s node(s) = %s" % (nodeNeeded, minT))
    totalTimeArray = np.zeros(maxSimulationNumber)
    for i in xArray:
        # ceil(i * nodeNeeded / maxAvailableNode) rounds are needed when only
        # maxAvailableNode nodes are free at a time; each round costs minT.
        totalTimeArray[i-1] = minT * (1 + (i * nodeNeeded - 1) // maxAvailableNode)
    ax.plot(xArray, totalTimeArray, '-', label="Batch Size %s" % (r // 64))
    parser.outputCurve("ergodicity_scaling-%s.dat" % (r//64), xArray, totalTimeArray)
'''
minSize = int(np.sqrt(np.min(syncTimeDict.keys())))
maxSize = int(np.sqrt(np.max(syncTimeDict.keys())))
nodeNumber = (caseSize[0] * caseSize[1] / (maxSize * maxSize))
'''
plt.title('%sx%s batch time with %s node(s) available at the same time.' % (caseSize[0], caseSize[1], maxAvailableNode))
plt.xlabel('Total number of simulation to run')
plt.ylabel('Loop Time')
plt.legend()
# The weak-scaling subplot below is disabled (wrapped in a bare string
# literal) and references names that are never defined in this script
# (sdsWeakDict, sddWeakDict, firstValueSDD, firstValueSDS, initSize,
# maxPower) — it cannot be re-enabled as-is.
'''
bx = fig.add_subplot(212)
bx.set_xscale('log', basex=2)
bx.plot(sorted(sdsWeakDict), [np.min(v) for k, v in sorted(sdsWeakDict.items(), key=operator.itemgetter(0))], 'g+-', label="SDS scaling")
bx.plot(sorted(sddWeakDict), [np.min(v) for k, v in sorted(sddWeakDict.items())], 'b+-', label="SDD scaling")
#bx.plot(sorted(hybridWeakDict), [np.min(v) for k, v in sorted(hybridWeakDict.items())], 'y+-', label="Hybrid scaling")
bx.plot(sorted(sddWeakDict), [firstValueSDD for k in sorted(sddWeakDict.keys())], 'b--', label="SDD ideal")
bx.plot(sorted(sdsWeakDict), [firstValueSDS for k in sorted(sdsWeakDict.keys())], 'g--', label="SDS ideal")
for k in sdsWeakDict:
    bx.plot(np.full(len(sdsWeakDict[k]), k), sdsWeakDict[k], 'g+')
for k in sddWeakDict:
    bx.plot(np.full(len(sddWeakDict[k]), k), sddWeakDict[k], 'b+')
plt.title('Weak Scaling from %sx%s to %sx%s' % (initSize, initSize, initSize * 2**((maxPower-1) / 2), initSize * 2**((maxPower-1) / 2)) )
plt.xlabel('Core(s)')
plt.ylabel('Loop Time / iteration')
plt.legend()
'''
plt.show()
| mit |
rfleschenberg/django-shop | example/myshop/migrations/polymorphic/0003_add_polymorphic.py | 1 | 9751 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import cms.models.fields
import djangocms_text_ckeditor.fields
class Migration(migrations.Migration):
    # Auto-generated schema migration: introduces a polymorphic Product base
    # model, converts SmartCard into a Product subclass, and adds the
    # Commodity / SmartPhoneModel / SmartPhone models. Edit with care —
    # operation order matters (fields are removed from SmartCard before its
    # product_ptr link is added).

    dependencies = [
        ('cms', '0013_urlconfrevision'),
        ('contenttypes', '0002_remove_content_type_name'),
        ('filer', '0002_auto_20150606_2003'),
        ('myshop', '0002_add_i18n'),
    ]

    operations = [
        migrations.CreateModel(
            name='OperatingSystem',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=50, verbose_name='Name')),
            ],
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
                ('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated at')),
                ('active', models.BooleanField(default=True, help_text='Is this product publicly visible.', verbose_name='Active')),
                ('product_name', models.CharField(max_length=255, verbose_name='Product Name')),
                ('slug', models.SlugField(unique=True, verbose_name='Slug')),
                ('order', models.PositiveIntegerField(verbose_name='Sort by', db_index=True)),
            ],
            options={
                'ordering': ('order',),
            },
        ),
        migrations.CreateModel(
            name='ProductTranslation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('language_code', models.CharField(max_length=15, verbose_name='Language', db_index=True)),
                ('description', djangocms_text_ckeditor.fields.HTMLField(help_text='Description for the list view of products.', verbose_name='Description')),
            ],
        ),
        migrations.CreateModel(
            name='SmartPhone',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('product_code', models.CharField(unique=True, max_length=255, verbose_name='Product code')),
                ('unit_price', models.DecimalField(default='0', help_text='Net price for this product', max_digits=30, decimal_places=3)),
                ('storage', models.PositiveIntegerField(help_text='Internal storage in MB', verbose_name='Internal Storage')),
            ],
        ),
        # Strip SmartCard down to subclass-only fields; the shared columns now
        # live on Product.
        migrations.AlterUniqueTogether(
            name='smartcardtranslation',
            unique_together=set([]),
        ),
        migrations.RemoveField(
            model_name='smartcardtranslation',
            name='master',
        ),
        migrations.AlterModelOptions(
            name='smartcard',
            options={'verbose_name': 'Smart Card', 'verbose_name_plural': 'Smart Cards'},
        ),
        migrations.RemoveField(
            model_name='smartcard',
            name='active',
        ),
        migrations.RemoveField(
            model_name='smartcard',
            name='cms_pages',
        ),
        migrations.RemoveField(
            model_name='smartcard',
            name='created_at',
        ),
        migrations.RemoveField(
            model_name='smartcard',
            name='id',
        ),
        migrations.RemoveField(
            model_name='smartcard',
            name='images',
        ),
        migrations.RemoveField(
            model_name='smartcard',
            name='manufacturer',
        ),
        migrations.RemoveField(
            model_name='smartcard',
            name='order',
        ),
        migrations.RemoveField(
            model_name='smartcard',
            name='polymorphic_ctype',
        ),
        migrations.RemoveField(
            model_name='smartcard',
            name='product_name',
        ),
        migrations.RemoveField(
            model_name='smartcard',
            name='slug',
        ),
        migrations.RemoveField(
            model_name='smartcard',
            name='updated_at',
        ),
        # Repoint relations from the old SmartCard model to Product.
        migrations.AlterField(
            model_name='cartitem',
            name='product',
            field=models.ForeignKey(to='myshop.Product'),
        ),
        migrations.AlterField(
            model_name='orderitem',
            name='product',
            field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, verbose_name='Product', blank=True, to='myshop.Product', null=True),
        ),
        migrations.AlterField(
            model_name='productimage',
            name='product',
            field=models.ForeignKey(to='myshop.Product'),
        ),
        migrations.AlterField(
            model_name='productpage',
            name='product',
            field=models.ForeignKey(to='myshop.Product'),
        ),
        migrations.CreateModel(
            name='Commodity',
            fields=[
                ('product_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='myshop.Product')),
                ('unit_price', models.DecimalField(default='0', help_text='Net price for this product', max_digits=30, decimal_places=3)),
                ('product_code', models.CharField(unique=True, max_length=255, verbose_name='Product code')),
                ('placeholder', cms.models.fields.PlaceholderField(slotname='Commodity Details', editable=False, to='cms.Placeholder', null=True)),
            ],
            options={
                'verbose_name': 'Commodity',
                'verbose_name_plural': 'Commodities',
            },
            bases=('myshop.product',),
        ),
        migrations.CreateModel(
            name='SmartPhoneModel',
            fields=[
                ('product_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='myshop.Product')),
                ('battery_type', models.PositiveSmallIntegerField(verbose_name='Battery type', choices=[(1, 'Lithium Polymer (Li-Poly)'), (2, 'Lithium Ion (Li-Ion)')])),
                ('battery_capacity', models.PositiveIntegerField(help_text='Battery capacity in mAh', verbose_name='Capacity')),
                ('ram_storage', models.PositiveIntegerField(help_text='RAM storage in MB', verbose_name='RAM')),
                ('wifi_connectivity', models.PositiveIntegerField(help_text='WiFi Connectivity', verbose_name='WiFi', choices=[(1, '802.11 b/g/n')])),
                ('bluetooth', models.PositiveIntegerField(help_text='Bluetooth Connectivity', verbose_name='Bluetooth', choices=[(1, 'Bluetooth 4.0')])),
                ('gps', models.BooleanField(default=False, help_text='GPS integrated', verbose_name='GPS')),
                ('width', models.DecimalField(help_text='Width in mm', verbose_name='Width', max_digits=4, decimal_places=1)),
                ('height', models.DecimalField(help_text='Height in mm', verbose_name='Height', max_digits=4, decimal_places=1)),
                ('weight', models.DecimalField(help_text='Weight in gram', verbose_name='Weight', max_digits=5, decimal_places=1)),
                ('screen_size', models.DecimalField(help_text='Diagonal screen size in inch', verbose_name='Screen size', max_digits=4, decimal_places=2)),
                ('operating_system', models.ForeignKey(verbose_name='Operating System', to='myshop.OperatingSystem')),
            ],
            options={
                'verbose_name': 'Smart Phone',
                'verbose_name_plural': 'Smart Phones',
            },
            bases=('myshop.product',),
        ),
        migrations.DeleteModel(
            name='SmartCardTranslation',
        ),
        migrations.AddField(
            model_name='producttranslation',
            name='master',
            field=models.ForeignKey(related_name='translations', to='myshop.Product', null=True),
        ),
        migrations.AddField(
            model_name='product',
            name='cms_pages',
            field=models.ManyToManyField(help_text='Choose list view this product shall appear on.', to='cms.Page', through='myshop.ProductPage'),
        ),
        migrations.AddField(
            model_name='product',
            name='images',
            field=models.ManyToManyField(to='filer.Image', through='myshop.ProductImage'),
        ),
        migrations.AddField(
            model_name='product',
            name='manufacturer',
            field=models.ForeignKey(verbose_name='Manufacturer', to='myshop.Manufacturer'),
        ),
        migrations.AddField(
            model_name='product',
            name='polymorphic_ctype',
            field=models.ForeignKey(related_name='polymorphic_myshop.product_set+', editable=False, to='contenttypes.ContentType', null=True),
        ),
        migrations.AddField(
            model_name='smartcard',
            name='product_ptr',
            field=models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, default=None, serialize=False, to='myshop.Product'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='smartphone',
            name='product',
            field=models.ForeignKey(verbose_name='Smart-Phone Model', to='myshop.SmartPhoneModel'),
        ),
        migrations.AlterUniqueTogether(
            name='producttranslation',
            unique_together=set([('language_code', 'master')]),
        ),
    ]
| bsd-3-clause |
Seedstars/django-react-redux-jwt-base | src/accounts/migrations/0002_clean_user_model.py | 7 | 1653 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.utils.timezone import utc
import datetime
class Migration(migrations.Migration):
    # Auto-generated migration: drops the username/groups/permissions fields
    # from the custom User model and tightens the remaining flag fields.

    dependencies = [
        ('accounts', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='user',
            name='groups',
        ),
        migrations.RemoveField(
            model_name='user',
            name='user_permissions',
        ),
        migrations.RemoveField(
            model_name='user',
            name='username',
        ),
        migrations.AddField(
            model_name='user',
            name='date_updated',
            # The hard-coded datetime is the usual one-off default Django
            # captures for existing rows; auto_now takes over afterwards
            # (hence preserve_default=False).
            field=models.DateTimeField(default=datetime.datetime(2017, 3, 17, 14, 31, 13, 924508, tzinfo=utc), auto_now=True, verbose_name='date updated'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='user',
            name='date_joined',
            field=models.DateTimeField(auto_now_add=True, verbose_name='date joined'),
        ),
        migrations.AlterField(
            model_name='user',
            name='is_active',
            field=models.BooleanField(default=True, verbose_name='active'),
        ),
        migrations.AlterField(
            model_name='user',
            name='is_staff',
            field=models.BooleanField(default=False, verbose_name='staff status'),
        ),
        migrations.AlterField(
            model_name='user',
            name='is_superuser',
            field=models.BooleanField(default=False, verbose_name='superuser status'),
        ),
    ]
| mit |
gymnasium/edx-platform | common/test/acceptance/pages/common/auto_auth.py | 17 | 3483 | """
Auto-auth page (used to automatically log in during testing).
"""
import json
import os
import urllib
from bok_choy.page_object import PageObject, unguarded
# The URL used for user auth in testing
HOSTNAME = os.environ.get('BOK_CHOY_HOSTNAME', 'localhost')
# NOTE(review): the fallback is the int 8031 while an env override would be a
# str; both work in str.format below, but confirm nothing compares the types.
CMS_PORT = os.environ.get('BOK_CHOY_CMS_PORT', 8031)
AUTH_BASE_URL = os.environ.get('test_url', 'http://{}:{}'.format(HOSTNAME, CMS_PORT))
# Default profile full name used when the caller does not supply one.
FULL_NAME = 'Test'
class AutoAuthPage(PageObject):
    """
    The automatic authorization page.
    When enabled via the Django settings file, visiting this url will create a user and log them in.
    """

    # Internal cache for parsed user info.
    _user_info = None

    def __init__(self, browser, username=None, email=None, password=None, full_name=FULL_NAME, staff=False, superuser=None,
                 course_id=None, enrollment_mode=None, roles=None, no_login=False, is_active=True, course_access_roles=None):
        """
        Auto-auth is an end-point for HTTP GET requests.
        By default, it will create accounts with random user credentials,
        but you can also specify credentials using querystring parameters.
        `username`, `email`, and `password` are the user's credentials (strings)
        'full_name' is the profile full name value
        `staff` is a boolean indicating whether the user is global staff.
        `superuser` is a boolean indicating whether the user is a super user.
        `course_id` is the ID of the course to enroll the student in.
        Currently, this has the form "org/number/run"
        Note that "global staff" is NOT the same as course staff.
        """
        super(AutoAuthPage, self).__init__(browser)

        # This will eventually hold the details about the user account
        self._user_info = None

        # The endpoint expects a comma-separated string of roles.
        course_access_roles = course_access_roles or []
        course_access_roles = ','.join(course_access_roles)

        # Always-sent parameters; the optional ones below are only added when
        # explicitly provided so the endpoint applies its own defaults.
        self._params = {
            'full_name': full_name,
            'staff': staff,
            'superuser': superuser,
            'is_active': is_active,
            'course_access_roles': course_access_roles,
        }
        if username:
            self._params['username'] = username
        if email:
            self._params['email'] = email
        if password:
            self._params['password'] = password
        if superuser is not None:
            # Serialized explicitly because querystrings carry only text.
            self._params['superuser'] = "true" if superuser else "false"
        if course_id:
            self._params['course_id'] = course_id
        if enrollment_mode:
            self._params['enrollment_mode'] = enrollment_mode
        if roles:
            self._params['roles'] = roles
        if no_login:
            self._params['no_login'] = True

    @property
    def url(self):
        """
        Construct the URL.
        """
        url = AUTH_BASE_URL + "/auto_auth"
        query_str = urllib.urlencode(self._params)
        if query_str:
            url += "?" + query_str
        return url

    def is_browser_on_page(self):
        # The endpoint responds with a JSON body; the page is "loaded" once
        # that body parses to a non-empty dict.
        return bool(self.user_info)

    @property
    @unguarded
    def user_info(self):
        """A dictionary containing details about the user account."""
        if not self._user_info:
            body = self.q(css='BODY').text[0]
            self._user_info = json.loads(body)
        return self._user_info

    def get_user_id(self):
        """
        Finds and returns the user_id
        """
        return self.user_info['user_id']
| agpl-3.0 |
anryko/ansible | test/units/modules/network/check_point/test_cp_mgmt_run_script.py | 19 | 2549 | # Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_run_script
# Module arguments sent to cp_mgmt_run_script in the tests below.
PAYLOAD = {
    "script": "ls -l /",
    "targets": [
        "corporate-gateway"
    ],
    "script_name": "Script Example: List files under / dir",
    "wait_for_task": False
}

# Canned API response returned by the mocked connection on success.
RETURN_PAYLOAD = {
    "task-id": "53de74b7-8f19-4cbe-99fc-a81ef0759bad"
}

# Key under which the module reports the API result, and the error body used
# in the failure test.
command = 'run-script'
failure_msg = '{command failed}'
class TestCheckpointRunScript(object):
    """Unit tests for the cp_mgmt_run_script Ansible module."""

    module = cp_mgmt_run_script

    @pytest.fixture(autouse=True)
    def module_mock(self, mocker):
        """Patch exit_json/fail_json so module termination raises instead of exiting."""
        return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)

    @pytest.fixture
    def connection_mock(self, mocker):
        """Mock the HTTPAPI connection used to reach the management server."""
        patched = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
        return patched.return_value

    def test_command(self, mocker, connection_mock):
        """A 200 response is reported as changed, with the API payload attached."""
        connection_mock.send_request.return_value = (200, RETURN_PAYLOAD)
        outcome = self._run_module(PAYLOAD)
        assert outcome['changed']
        assert outcome[command] == RETURN_PAYLOAD

    def test_command_fail(self, mocker, connection_mock):
        """A non-2xx response surfaces as a failure message with the status code."""
        connection_mock.send_request.return_value = (404, failure_msg)
        try:
            outcome = self._run_module(PAYLOAD)
        except Exception as exc:
            outcome = exc.args[0]
        expected = 'Checkpoint device returned error 404 with message ' + failure_msg
        assert outcome['msg'] == expected

    def _run_module(self, module_args):
        """Invoke the module's main() with the given args and return its exit payload."""
        set_module_args(module_args)
        with pytest.raises(AnsibleExitJson) as ex:
            self.module.main()
        return ex.value.args[0]
| gpl-3.0 |
pipermerriam/django | django/db/backends/oracle/utils.py | 539 | 1252 | import datetime
from django.utils.encoding import force_bytes, force_text
from .base import Database
# Check whether cx_Oracle was compiled with the WITH_UNICODE option if cx_Oracle is pre-5.1. This will
# also be True for cx_Oracle 5.1 and in Python 3.0. See #19606
# The chosen converter is used wherever text must be passed to the driver:
# force_text for Unicode-capable builds, force_bytes otherwise.
if int(Database.version.split('.', 1)[0]) >= 5 and \
        (int(Database.version.split('.', 2)[1]) >= 1 or
         not hasattr(Database, 'UNICODE')):
    convert_unicode = force_text
else:
    convert_unicode = force_bytes
class InsertIdVar(object):
    """
    A late-binding cursor variable that can be passed to Cursor.execute
    as a parameter, in order to receive the id of the row created by an
    insert statement.
    """

    def bind_parameter(self, cursor):
        # Allocate a NUMBER output variable on the underlying cx_Oracle cursor
        # and stash it so the backend can read the generated id back later.
        id_var = cursor.cursor.var(Database.NUMBER)
        cursor._insert_id_var = id_var
        return id_var
class Oracle_datetime(datetime.datetime):
    """
    A datetime subclass carrying a class attribute that tells cx_Oracle
    to preserve the microseconds when binding the value.
    """

    # Read by cx_Oracle when the value is used as a bind parameter.
    input_size = Database.TIMESTAMP

    @classmethod
    def from_datetime(cls, dt):
        """Copy every field of *dt*, including microseconds."""
        fields = (dt.year, dt.month, dt.day,
                  dt.hour, dt.minute, dt.second, dt.microsecond)
        return Oracle_datetime(*fields)
| bsd-3-clause |
v1bri/gnuradio | gr-digital/python/digital/qa_correlate_access_code.py | 37 | 3173 | #!/usr/bin/env python
#
# Copyright 2006,2007,2010,2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, digital, blocks
default_access_code = '\xAC\xDD\xA4\xE2\xF2\x8C\x20\xFC'
def string_to_1_0_list(s):
    """Expand each character of *s* into its 8 bits, least-significant first."""
    bits = []
    for ch in s:
        byte = ord(ch)
        bits.extend((byte >> position) & 0x1 for position in range(8))
    return bits
def to_1_0_string(L):
    """Render a list of 0/1 integers as a string of '0'/'1' characters."""
    return ''.join(chr(bit + ord('0')) for bit in L)
class test_correlate_access_code(gr_unittest.TestCase):
    """Flowgraph tests for the correlate_access_code blocks."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_001(self):
        # Short 4-bit access code "1011"; the correlator flags the match by
        # setting bit 1 of the output byte at the end of the code.
        pad = (0,) * 64
        # 0 0 0 1 0 0 0 1
        src_data = (1, 0, 1, 1, 1, 1, 0, 1, 1) + pad + (0,) * 7
        expected_result = pad + (1, 0, 1, 1, 3, 1, 0, 1, 1, 2) + (0,) * 6
        src = blocks.vector_source_b(src_data)
        op = digital.correlate_access_code_bb("1011", 0)
        dst = blocks.vector_sink_b()
        self.tb.connect(src, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def test_002(self):
        # Full 64-bit default access code, zero allowed bit errors.
        code = tuple(string_to_1_0_list(default_access_code))
        access_code = to_1_0_string(code)
        pad = (0,) * 64
        #print code
        #print access_code
        src_data = code + (1, 0, 1, 1) + pad
        expected_result = pad + code + (3, 0, 1, 1)
        src = blocks.vector_source_b(src_data)
        op = digital.correlate_access_code_bb(access_code, 0)
        dst = blocks.vector_sink_b()
        self.tb.connect(src, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)

    def test_003(self):
        # Tagging variant: the stream itself passes through unchanged
        # (matches are reported via stream tags, not flag bits).
        code = tuple(string_to_1_0_list(default_access_code))
        access_code = to_1_0_string(code)
        pad = (0,) * 64
        #print code
        #print access_code
        src_data = code + (1, 0, 1, 1) + pad
        expected_result = code + (1, 0, 1, 1) + pad
        src = blocks.vector_source_b(src_data)
        op = digital.correlate_access_code_tag_bb(access_code, 0, "test")
        dst = blocks.vector_sink_b()
        self.tb.connect(src, op, dst)
        self.tb.run()
        result_data = dst.data()
        self.assertEqual(expected_result, result_data)
# GNU Radio test runner; the XML file collects the results for the build system.
if __name__ == '__main__':
    gr_unittest.run(test_correlate_access_code, "test_correlate_access_code.xml")
| gpl-3.0 |
kamalx/edx-platform | lms/djangoapps/instructor/features/bulk_email.py | 116 | 6813 | """
Define steps for bulk email acceptance test.
"""
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from lettuce.django import mail
from nose.tools import assert_in, assert_equal # pylint: disable=no-name-in-module
from django.core.management import call_command
from django.conf import settings
from courseware.tests.factories import StaffFactory, InstructorFactory
@step(u'Given there is a course with a staff, instructor and student')
def make_populated_course(step):  # pylint: disable=unused-argument
    ## This is different than the function defined in common.py because it enrolls
    ## a staff, instructor, and student member regardless of what `role` is, then
    ## logs `role` in. This is to ensure we have 3 class participants to email.

    # Clear existing courses to avoid conflicts
    world.clear_courses()

    # Create a new course
    course = world.CourseFactory.create(
        org='edx',
        number='888',
        display_name='Bulk Email Test Course'
    )
    world.bulk_email_course_key = course.id

    try:
        # See if we've defined the instructor & staff user yet
        world.bulk_email_instructor
    except AttributeError:
        # Make & register an instructor for the course
        world.bulk_email_instructor = InstructorFactory(course_key=world.bulk_email_course_key)
        world.enroll_user(world.bulk_email_instructor, world.bulk_email_course_key)

        # Make & register a staff member
        world.bulk_email_staff = StaffFactory(course_key=course.id)
        world.enroll_user(world.bulk_email_staff, world.bulk_email_course_key)

    # Make & register a student
    world.register_by_course_key(
        course.id,
        username='student',
        password='test',
        is_staff=False
    )

    # Store the expected recipients
    # given each "send to" option
    staff_emails = [world.bulk_email_staff.email, world.bulk_email_instructor.email]
    world.expected_addresses = {
        'course staff': staff_emails,
        'students, staff, and instructors': staff_emails + ['student@edx.org']
    }
# Dictionary mapping a description of the email recipient
# to the corresponding <option> value in the UI.
# NOTE: the keys must match the keys of world.expected_addresses set up in
# make_populated_course / log_into_the_course.
SEND_TO_OPTIONS = {
    'myself': 'myself',
    'course staff': 'staff',
    'students, staff, and instructors': 'all'
}
@step(u'I am logged in to the course as "([^"]*)"')
def log_into_the_course(step, role):  # pylint: disable=unused-argument
    """Log in as the course's instructor or staff member, per *role*."""
    assert_in(role, ['instructor', 'staff'])

    # Pick the user matching the requested role, then share the login call.
    if role == 'instructor':
        user = world.bulk_email_instructor
    else:
        user = world.bulk_email_staff

    my_email = user.email
    world.log_in(
        username=user.username,
        password='test',
        email=my_email,
        name=user.profile.name
    )

    # "myself" resolves to whoever just logged in
    world.expected_addresses['myself'] = [my_email]
@step(u'I send email to "([^"]*)"')
def when_i_send_an_email(step, recipient):  # pylint: disable=unused-argument
    """Drive the instructor dashboard UI to queue a bulk email to *recipient*."""
    # Check that the recipient is valid
    assert_in(
        recipient, SEND_TO_OPTIONS,
        msg="Invalid recipient: {}".format(recipient)
    )

    # Clear the queue of existing emails
    while not mail.queue.empty():  # pylint: disable=no-member
        mail.queue.get()  # pylint: disable=no-member

    # Because we flush the database before each run,
    # we need to ensure that the email template fixture
    # is re-loaded into the database
    call_command('loaddata', 'course_email_template.json')

    # Go to the email section of the instructor dash
    url = '/courses/{}'.format(world.bulk_email_course_key)
    world.visit(url)
    world.css_click('a[href="{}/instructor"]'.format(url))
    world.css_click('a[data-section="send_email"]')

    # Select the recipient
    world.select_option('send_to', SEND_TO_OPTIONS[recipient])

    # Enter subject and message
    world.css_fill('input#id_subject', 'Hello')
    # The message body lives inside a TinyMCE iframe, not a plain input.
    with world.browser.get_iframe('mce_0_ifr') as iframe:
        editor = iframe.find_by_id('tinymce')[0]
        editor.fill('test message')

    # Click send
    world.css_click('input[name="send"]', dismiss_alert=True)

    # Expect to see a message that the email was sent
    expected_msg = "Your email was successfully queued for sending."
    world.wait_for_visible('#request-response')
    assert_in(
        expected_msg, world.css_text('#request-response'),
        msg="Could not find email success message."
    )
# Footer text every bulk email is expected to contain (opt-out notice).
UNSUBSCRIBE_MSG = 'To stop receiving email like this'


@step(u'Email is sent to "([^"]*)"')
def then_the_email_is_sent(step, recipient):  # pylint: disable=unused-argument
    """Verify count, content and addresses of the emails queued for *recipient*."""
    # Check that the recipient is valid
    assert_in(
        recipient, SEND_TO_OPTIONS,
        msg="Invalid recipient: {}".format(recipient)
    )

    # Retrieve messages. Because we are using celery in "always eager"
    # mode, we expect all messages to be sent by this point.
    messages = []
    while not mail.queue.empty():  # pylint: disable=no-member
        messages.append(mail.queue.get())  # pylint: disable=no-member

    # Check that we got the right number of messages
    assert_equal(
        len(messages), len(world.expected_addresses[recipient]),
        msg="Received {0} instead of {1} messages for {2}".format(
            len(messages), len(world.expected_addresses[recipient]), recipient
        )
    )

    # Check that the message properties were correct
    recipients = []
    for msg in messages:
        assert_in('Hello', msg.subject)
        assert_in(settings.BULK_EMAIL_DEFAULT_FROM_EMAIL, msg.from_email)

        # Message body should have the message we sent
        # and an unsubscribe message
        assert_in('test message', msg.body)
        assert_in(UNSUBSCRIBE_MSG, msg.body)

        # Should have alternative HTML form
        assert_equal(len(msg.alternatives), 1)
        content, mime_type = msg.alternatives[0]
        assert_equal(mime_type, 'text/html')
        assert_in('test message', content)
        assert_in(UNSUBSCRIBE_MSG, content)

        # Store the recipient address so we can verify later
        recipients.extend(msg.recipients())

    # Check that the messages were sent to the right people
    # Because "myself" can vary based on who sent the message,
    # we use the world.expected_addresses dict we configured
    # in an earlier step.
    for addr in world.expected_addresses[recipient]:
        assert_in(addr, recipients)
| agpl-3.0 |
ahhda/sympy | sympy/functions/special/elliptic_integrals.py | 75 | 11975 | """ Elliptic integrals. """
from __future__ import print_function, division
from sympy.core import S, pi, I
from sympy.core.function import Function, ArgumentIndexError
from sympy.functions.elementary.hyperbolic import atanh
from sympy.functions.elementary.trigonometric import sin, tan
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.functions.elementary.complexes import sign
from sympy.functions.special.hyper import hyper, meijerg
from sympy.functions.special.gamma_functions import gamma
class elliptic_k(Function):
    r"""
    The complete elliptic integral of the first kind, defined by

    .. math:: K(z) = F\left(\tfrac{\pi}{2}\middle| z\right)

    where `F\left(z\middle| m\right)` is the Legendre incomplete
    elliptic integral of the first kind.

    The function `K(z)` is a single-valued function on the complex
    plane with branch cut along the interval `(1, \infty)`.

    Examples
    ========

    >>> from sympy import elliptic_k, I, pi
    >>> from sympy.abc import z
    >>> elliptic_k(0)
    pi/2
    >>> elliptic_k(1.0 + I)
    1.50923695405127 + 0.625146415202697*I
    >>> elliptic_k(z).series(z, n=3)
    pi/2 + pi*z/8 + 9*pi*z**2/128 + O(z**3)

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Elliptic_integrals
    .. [2] http://functions.wolfram.com/EllipticIntegrals/EllipticK

    See Also
    ========

    elliptic_f
    """

    @classmethod
    def eval(cls, z):
        # Closed forms at special points; fall through (return None) to leave
        # the expression unevaluated otherwise.
        if z is S.Zero:
            return pi/2
        elif z is S.Half:
            return 8*pi**(S(3)/2)/gamma(-S(1)/4)**2
        elif z is S.One:
            return S.ComplexInfinity
        elif z is S.NegativeOne:
            return gamma(S(1)/4)**2/(4*sqrt(2*pi))
        elif z in (S.Infinity, S.NegativeInfinity, I*S.Infinity,
                   I*S.NegativeInfinity, S.ComplexInfinity):
            return S.Zero

    def fdiff(self, argindex=1):
        # dK/dz expressed through K and E.
        z = self.args[0]
        return (elliptic_e(z) - (1 - z)*elliptic_k(z))/(2*z*(1 - z))

    def _eval_conjugate(self):
        # conj(K(z)) == K(conj(z)) away from the branch cut (1, oo).
        z = self.args[0]
        if (z.is_real and (z - 1).is_positive) is False:
            return self.func(z.conjugate())

    def _eval_nseries(self, x, n, logx):
        from sympy.simplify import hyperexpand
        return hyperexpand(self.rewrite(hyper)._eval_nseries(x, n=n, logx=logx))

    def _eval_rewrite_as_hyper(self, z):
        # K(z) = (pi/2) * 2F1(1/2, 1/2; 1; z)
        return (pi/2)*hyper((S.Half, S.Half), (S.One,), z)

    def _eval_rewrite_as_meijerg(self, z):
        return meijerg(((S.Half, S.Half), []), ((S.Zero,), (S.Zero,)), -z)/2

    def _sage_(self):
        import sage.all as sage
        return sage.elliptic_kc(self.args[0]._sage_())
class elliptic_f(Function):
    r"""
    The Legendre incomplete elliptic integral of the first
    kind, defined by

    .. math:: F\left(z\middle| m\right) =
              \int_0^z \frac{dt}{\sqrt{1 - m \sin^2 t}}

    This function reduces to a complete elliptic integral of
    the first kind, `K(m)`, when `z = \pi/2`.

    Examples
    ========

    >>> from sympy import elliptic_f, I, O
    >>> from sympy.abc import z, m
    >>> elliptic_f(z, m).series(z)
    z + z**5*(3*m**2/40 - m/30) + m*z**3/6 + O(z**6)
    >>> elliptic_f(3.0 + I/2, 1.0 + I)
    2.909449841483 + 1.74720545502474*I

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Elliptic_integrals
    .. [2] http://functions.wolfram.com/EllipticIntegrals/EllipticF

    See Also
    ========

    elliptic_k
    """

    @classmethod
    def eval(cls, z, m):
        k = 2*z/pi
        if m.is_zero:
            return z
        elif z.is_zero:
            return S.Zero
        elif k.is_integer:
            # F(k*pi/2 | m) = k*K(m)
            return k*elliptic_k(m)
        elif m in (S.Infinity, S.NegativeInfinity):
            return S.Zero
        elif z.could_extract_minus_sign():
            # F is odd in z
            return -elliptic_f(-z, m)

    def fdiff(self, argindex=1):
        z, m = self.args
        fm = sqrt(1 - m*sin(z)**2)
        if argindex == 1:
            # d/dz F(z|m) = 1/sqrt(1 - m sin^2 z)
            return 1/fm
        elif argindex == 2:
            return (elliptic_e(z, m)/(2*m*(1 - m)) - elliptic_f(z, m)/(2*m) -
                    sin(2*z)/(4*(1 - m)*fm))
        raise ArgumentIndexError(self, argindex)

    def _eval_conjugate(self):
        # Conjugation commutes with F away from the branch cut m in (1, oo).
        z, m = self.args
        if (m.is_real and (m - 1).is_positive) is False:
            return self.func(z.conjugate(), m.conjugate())
class elliptic_e(Function):
    r"""
    Called with two arguments `z` and `m`, evaluates the
    incomplete elliptic integral of the second kind, defined by

    .. math:: E\left(z\middle| m\right) = \int_0^z \sqrt{1 - m \sin^2 t} dt

    Called with a single argument `z`, evaluates the Legendre complete
    elliptic integral of the second kind

    .. math:: E(z) = E\left(\tfrac{\pi}{2}\middle| z\right)

    The function `E(z)` is a single-valued function on the complex
    plane with branch cut along the interval `(1, \infty)`.

    Examples
    ========

    >>> from sympy import elliptic_e, I, pi, O
    >>> from sympy.abc import z, m
    >>> elliptic_e(z, m).series(z)
    z + z**5*(-m**2/40 + m/30) - m*z**3/6 + O(z**6)
    >>> elliptic_e(z).series(z, n=4)
    pi/2 - pi*z/8 - 3*pi*z**2/128 - 5*pi*z**3/512 + O(z**4)
    >>> elliptic_e(1 + I, 2 - I/2).n()
    1.55203744279187 + 0.290764986058437*I
    >>> elliptic_e(0)
    pi/2
    >>> elliptic_e(2.0 - I)
    0.991052601328069 + 0.81879421395609*I

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Elliptic_integrals
    .. [2] http://functions.wolfram.com/EllipticIntegrals/EllipticE2
    .. [3] http://functions.wolfram.com/EllipticIntegrals/EllipticE
    """

    @classmethod
    def eval(cls, z, m=None):
        # Two-argument form: incomplete integral E(z|m).
        if m is not None:
            k = 2*z/pi
            if m.is_zero:
                return z
            if z.is_zero:
                return S.Zero
            elif k.is_integer:
                # E(k*pi/2 | m) = k*E(m)
                return k*elliptic_e(m)
            elif m in (S.Infinity, S.NegativeInfinity):
                return S.ComplexInfinity
            elif z.could_extract_minus_sign():
                # E is odd in z
                return -elliptic_e(-z, m)
        # One-argument form: complete integral E(z).
        else:
            if z.is_zero:
                return pi/2
            elif z is S.One:
                return S.One
            elif z is S.Infinity:
                return I*S.Infinity
            elif z is S.NegativeInfinity:
                return S.Infinity
            elif z is S.ComplexInfinity:
                return S.ComplexInfinity

    def fdiff(self, argindex=1):
        if len(self.args) == 2:
            z, m = self.args
            if argindex == 1:
                # d/dz E(z|m) = sqrt(1 - m sin^2 z)
                return sqrt(1 - m*sin(z)**2)
            elif argindex == 2:
                return (elliptic_e(z, m) - elliptic_f(z, m))/(2*m)
        else:
            z = self.args[0]
            if argindex == 1:
                return (elliptic_e(z) - elliptic_k(z))/(2*z)
        raise ArgumentIndexError(self, argindex)

    def _eval_conjugate(self):
        if len(self.args) == 2:
            z, m = self.args
            if (m.is_real and (m - 1).is_positive) is False:
                return self.func(z.conjugate(), m.conjugate())
        else:
            z = self.args[0]
            if (z.is_real and (z - 1).is_positive) is False:
                return self.func(z.conjugate())

    def _eval_nseries(self, x, n, logx):
        from sympy.simplify import hyperexpand
        # Only the complete integral has a hypergeometric rewrite to expand.
        if len(self.args) == 1:
            return hyperexpand(self.rewrite(hyper)._eval_nseries(x, n=n, logx=logx))
        return super(elliptic_e, self)._eval_nseries(x, n=n, logx=logx)

    def _eval_rewrite_as_hyper(self, *args):
        if len(args) == 1:
            z = args[0]
            # E(z) = (pi/2) * 2F1(-1/2, 1/2; 1; z)
            return (pi/2)*hyper((-S.Half, S.Half), (S.One,), z)

    def _eval_rewrite_as_meijerg(self, *args):
        if len(args) == 1:
            z = args[0]
            return -meijerg(((S.Half, S(3)/2), []), \
                            ((S.Zero,), (S.Zero,)), -z)/4
class elliptic_pi(Function):
    r"""
    Called with three arguments `n`, `z` and `m`, evaluates the
    Legendre incomplete elliptic integral of the third kind, defined by

    .. math:: \Pi\left(n; z\middle| m\right) = \int_0^z \frac{dt}
              {\left(1 - n \sin^2 t\right) \sqrt{1 - m \sin^2 t}}

    Called with two arguments `n` and `m`, evaluates the complete
    elliptic integral of the third kind:

    .. math:: \Pi\left(n\middle| m\right) =
              \Pi\left(n; \tfrac{\pi}{2}\middle| m\right)

    Examples
    ========

    >>> from sympy import elliptic_pi, I, pi, O, S
    >>> from sympy.abc import z, n, m
    >>> elliptic_pi(n, z, m).series(z, n=4)
    z + z**3*(m/6 + n/3) + O(z**4)
    >>> elliptic_pi(0.5 + I, 1.0 - I, 1.2)
    2.50232379629182 - 0.760939574180767*I
    >>> elliptic_pi(0, 0)
    pi/2
    >>> elliptic_pi(1.0 - I/3, 2.0 + I)
    3.29136443417283 + 0.32555634906645*I

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Elliptic_integrals
    .. [2] http://functions.wolfram.com/EllipticIntegrals/EllipticPi3
    .. [3] http://functions.wolfram.com/EllipticIntegrals/EllipticPi
    """

    @classmethod
    def eval(cls, n, m, z=None):
        # Three-argument call arrives as eval(n, z, m) positionally; rebind so
        # locals read (n; z | m) in the conventional order.
        if z is not None:
            n, z, m = n, m, z
            k = 2*z/pi
            if n == S.Zero:
                # Pi(0; z | m) = F(z | m)
                return elliptic_f(z, m)
            elif n == S.One:
                return (elliptic_f(z, m) +
                        (sqrt(1 - m*sin(z)**2)*tan(z) -
                         elliptic_e(z, m))/(1 - m))
            elif k.is_integer:
                # Pi(n; k*pi/2 | m) = k*Pi(n | m)
                return k*elliptic_pi(n, m)
            elif m == S.Zero:
                return atanh(sqrt(n - 1)*tan(z))/sqrt(n - 1)
            elif n == m:
                return (elliptic_f(z, n) - elliptic_pi(1, z, n) +
                        tan(z)/sqrt(1 - n*sin(z)**2))
            elif n in (S.Infinity, S.NegativeInfinity):
                return S.Zero
            elif m in (S.Infinity, S.NegativeInfinity):
                return S.Zero
            elif z.could_extract_minus_sign():
                # Pi is odd in z
                return -elliptic_pi(n, -z, m)
        # Two-argument form: complete integral Pi(n | m).
        else:
            if n == S.Zero:
                return elliptic_k(m)
            elif n == S.One:
                return S.ComplexInfinity
            elif m == S.Zero:
                return pi/(2*sqrt(1 - n))
            elif m == S.One:
                return -S.Infinity/sign(n - 1)
            elif n == m:
                return elliptic_e(n)/(1 - n)
            elif n in (S.Infinity, S.NegativeInfinity):
                return S.Zero
            elif m in (S.Infinity, S.NegativeInfinity):
                return S.Zero

    def _eval_conjugate(self):
        if len(self.args) == 3:
            n, z, m = self.args
            # Safe to push conjugation inside away from the cuts n, m in (1, oo).
            if (n.is_real and (n - 1).is_positive) is False and \
               (m.is_real and (m - 1).is_positive) is False:
                return self.func(n.conjugate(), z.conjugate(), m.conjugate())
        else:
            n, m = self.args
            return self.func(n.conjugate(), m.conjugate())

    def fdiff(self, argindex=1):
        if len(self.args) == 3:
            n, z, m = self.args
            fm, fn = sqrt(1 - m*sin(z)**2), 1 - n*sin(z)**2
            if argindex == 1:
                return (elliptic_e(z, m) + (m - n)*elliptic_f(z, m)/n +
                        (n**2 - m)*elliptic_pi(n, z, m)/n -
                        n*fm*sin(2*z)/(2*fn))/(2*(m - n)*(n - 1))
            elif argindex == 2:
                return 1/(fm*fn)
            elif argindex == 3:
                return (elliptic_e(z, m)/(m - 1) +
                        elliptic_pi(n, z, m) -
                        m*sin(2*z)/(2*(m - 1)*fm))/(2*(n - m))
        else:
            n, m = self.args
            if argindex == 1:
                return (elliptic_e(m) + (m - n)*elliptic_k(m)/n +
                        (n**2 - m)*elliptic_pi(n, m)/n)/(2*(m - n)*(n - 1))
            elif argindex == 2:
                return (elliptic_e(m)/(m - 1) + elliptic_pi(n, m))/(2*(n - m))
        raise ArgumentIndexError(self, argindex)
| bsd-3-clause |
ajose01/rethinkdb | test/rql_test/connections/http_support/werkzeug/testsuite/contrib/cache.py | 145 | 7212 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.cache
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the cache system
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import time
import unittest
import tempfile
import shutil
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib import cache
# Optional backends: set the module name to None when the library is missing
# or (for redis) the server is unreachable, so suite() can skip those tests.
try:
    import redis
    try:
        from redis.exceptions import ConnectionError as RedisConnectionError
        # Probe the server up front; an unreachable redis disables its tests.
        cache.RedisCache(key_prefix='werkzeug-test-case:')._client.set('test','connection')
    except RedisConnectionError:
        redis = None
except ImportError:
    redis = None

# Memcached clients in preference order: pylibmc, App Engine, python-memcache.
try:
    import pylibmc as memcache
except ImportError:
    try:
        from google.appengine.api import memcache
    except ImportError:
        try:
            import memcache
        except ImportError:
            memcache = None
class SimpleCacheTestCase(WerkzeugTestCase):
    """Tests for the in-memory SimpleCache backend."""

    def test_get_dict(self):
        """get_dict returns every requested key with its stored value."""
        c = cache.SimpleCache()
        c.set('a', 'a')
        c.set('b', 'b')
        fetched = c.get_dict('a', 'b')
        for key in ('a', 'b'):
            assert key in fetched
            assert fetched[key] == key

    def test_set_many(self):
        """set_many accepts both a mapping and an iterable of pairs."""
        c = cache.SimpleCache()
        c.set_many({0: 0, 1: 1, 2: 4})
        assert c.get(2) == 4
        c.set_many((i, i*i) for i in range(3))
        assert c.get(2) == 4
class FileSystemCacheTestCase(WerkzeugTestCase):
    """Tests for the FileSystemCache backend; each test owns a temp directory."""

    def test_set_get(self):
        """Values survive a set/get round trip through the filesystem."""
        tmp_dir = tempfile.mkdtemp()
        try:
            c = cache.FileSystemCache(cache_dir=tmp_dir)
            for i in range(3):
                c.set(str(i), i * i)
            for i in range(3):
                result = c.get(str(i))
                assert result == i * i
        finally:
            shutil.rmtree(tmp_dir)

    def test_filesystemcache_prune(self):
        """Writing past the threshold prunes the cache to at most threshold files."""
        THRESHOLD = 13
        tmp_dir = tempfile.mkdtemp()
        c = cache.FileSystemCache(cache_dir=tmp_dir, threshold=THRESHOLD)
        for i in range(2 * THRESHOLD):
            c.set(str(i), i)
        # Snapshot the directory listing before cleanup so the assertion can
        # still run after the directory is removed.
        cache_files = os.listdir(tmp_dir)
        shutil.rmtree(tmp_dir)
        assert len(cache_files) <= THRESHOLD

    def test_filesystemcache_clear(self):
        """clear() removes every cache file from the directory."""
        tmp_dir = tempfile.mkdtemp()
        c = cache.FileSystemCache(cache_dir=tmp_dir)
        c.set('foo', 'bar')
        cache_files = os.listdir(tmp_dir)
        assert len(cache_files) == 1
        c.clear()
        cache_files = os.listdir(tmp_dir)
        assert len(cache_files) == 0
        shutil.rmtree(tmp_dir)
class RedisCacheTestCase(WerkzeugTestCase):
    """Tests for RedisCache; requires a live redis server (see module probe)."""

    def make_cache(self):
        # A key prefix isolates these tests from other data in the server.
        return cache.RedisCache(key_prefix='werkzeug-test-case:')

    def teardown(self):
        self.make_cache().clear()

    def test_compat(self):
        """Raw values written directly to redis are readable through the cache."""
        c = self.make_cache()
        c._client.set(c.key_prefix + 'foo', b'Awesome')
        self.assert_equal(c.get('foo'), b'Awesome')
        # Numeric-looking strings come back as ints.
        c._client.set(c.key_prefix + 'foo', b'42')
        self.assert_equal(c.get('foo'), 42)

    def test_get_set(self):
        c = self.make_cache()
        c.set('foo', ['bar'])
        assert c.get('foo') == ['bar']

    def test_get_many(self):
        c = self.make_cache()
        c.set('foo', ['bar'])
        c.set('spam', 'eggs')
        assert c.get_many('foo', 'spam') == [['bar'], 'eggs']

    def test_set_many(self):
        c = self.make_cache()
        c.set_many({'foo': 'bar', 'spam': ['eggs']})
        assert c.get('foo') == 'bar'
        assert c.get('spam') == ['eggs']

    def test_expire(self):
        """A 1-second timeout makes the entry vanish after sleeping past it."""
        c = self.make_cache()
        c.set('foo', 'bar', 1)
        time.sleep(2)
        assert c.get('foo') is None

    def test_add(self):
        """add() sets a missing key but never overwrites an existing one."""
        c = self.make_cache()
        # sanity check that add() works like set()
        c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        c.add('foo', 'qux')
        assert c.get('foo') == 'bar'

    def test_delete(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        assert c.get('foo') == 'bar'
        c.delete('foo')
        assert c.get('foo') is None

    def test_delete_many(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        c.add('spam', 'eggs')
        c.delete_many('foo', 'spam')
        assert c.get('foo') is None
        assert c.get('spam') is None

    def test_inc_dec(self):
        """inc()/dec() return the new value for redis."""
        c = self.make_cache()
        c.set('foo', 1)
        self.assert_equal(c.inc('foo'), 2)
        self.assert_equal(c.dec('foo'), 1)
        c.delete('foo')

    def test_true_false(self):
        """Booleans round-trip without degrading to ints/strings."""
        c = self.make_cache()
        c.set('foo', True)
        assert c.get('foo') == True
        c.set('bar', False)
        assert c.get('bar') == False
class MemcachedCacheTestCase(WerkzeugTestCase):
    """Tests for MemcachedCache; requires a live memcached server."""

    def make_cache(self):
        # A key prefix isolates these tests from other data in the server.
        return cache.MemcachedCache(key_prefix='werkzeug-test-case:')

    def teardown(self):
        self.make_cache().clear()

    def test_compat(self):
        """Values written through the raw client are readable via the cache."""
        c = self.make_cache()
        c._client.set(c.key_prefix + b'foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')

    def test_get_set(self):
        c = self.make_cache()
        c.set('foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')

    def test_get_many(self):
        c = self.make_cache()
        c.set('foo', 'bar')
        c.set('spam', 'eggs')
        self.assert_equal(c.get_many('foo', 'spam'), ['bar', 'eggs'])

    def test_set_many(self):
        c = self.make_cache()
        c.set_many({'foo': 'bar', 'spam': 'eggs'})
        self.assert_equal(c.get('foo'), 'bar')
        self.assert_equal(c.get('spam'), 'eggs')

    def test_expire(self):
        """A 1-second timeout makes the entry vanish after sleeping past it."""
        c = self.make_cache()
        c.set('foo', 'bar', 1)
        time.sleep(2)
        self.assert_is_none(c.get('foo'))

    def test_add(self):
        """add() sets a missing key but never overwrites an existing one."""
        c = self.make_cache()
        c.add('foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')
        c.add('foo', 'baz')
        self.assert_equal(c.get('foo'), 'bar')

    def test_delete(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        self.assert_equal(c.get('foo'), 'bar')
        c.delete('foo')
        self.assert_is_none(c.get('foo'))

    def test_delete_many(self):
        c = self.make_cache()
        c.add('foo', 'bar')
        c.add('spam', 'eggs')
        c.delete_many('foo', 'spam')
        self.assert_is_none(c.get('foo'))
        self.assert_is_none(c.get('spam'))

    def test_inc_dec(self):
        """Unlike redis, inc()/dec() here are checked via a follow-up get()."""
        c = self.make_cache()
        c.set('foo', 1)
        # XXX: Is this an intended difference?
        c.inc('foo')
        self.assert_equal(c.get('foo'), 2)
        c.dec('foo')
        self.assert_equal(c.get('foo'), 1)

    def test_true_false(self):
        """Booleans round-trip without degrading to ints/strings."""
        c = self.make_cache()
        c.set('foo', True)
        self.assert_equal(c.get('foo'), True)
        c.set('bar', False)
        self.assert_equal(c.get('bar'), False)
def suite():
    """Assemble the cache test suite, adding backend tests only when the
    corresponding client library (and server) is available."""
    cases = [SimpleCacheTestCase, FileSystemCacheTestCase]
    if redis is not None:
        cases.append(RedisCacheTestCase)
    if memcache is not None:
        cases.append(MemcachedCacheTestCase)
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
| agpl-3.0 |
oandrew/home-assistant | homeassistant/components/notify/llamalab_automate.py | 11 | 1880 | """
LlamaLab Automate notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.llamalab_automate/
"""
import logging
import requests
import voluptuous as vol
from homeassistant.components.notify import (BaseNotificationService,
PLATFORM_SCHEMA)
from homeassistant.const import CONF_API_KEY
from homeassistant.helpers import config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Configuration keys specific to this platform.
CONF_TO = 'to'
CONF_DEVICE = 'device'

# Endpoint of the LlamaLab Automate cloud messaging service.
_RESOURCE = 'https://llamalab.com/automate/cloud/message'

# API key and recipient are required; the target device is optional
# (omitting it broadcasts to all of the recipient's devices -- TODO confirm).
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Required(CONF_TO): cv.string,
    vol.Optional(CONF_DEVICE): cv.string,
})
def get_service(hass, config):
    """Get the LlamaLab Automate notification service."""
    return AutomateNotificationService(
        config.get(CONF_API_KEY),
        config.get(CONF_TO),
        config.get(CONF_DEVICE),
    )
class AutomateNotificationService(BaseNotificationService):
    """Implement the notification service for LlamaLab Automate."""

    def __init__(self, secret, recipient, device=None):
        """Initialize the service.

        secret: account secret (API key) for the Automate cloud.
        recipient: address of the receiving Automate account.
        device: optional target device name; None targets all devices.
        """
        self._secret = secret
        self._recipient = recipient
        self._device = device

    def send_message(self, message="", **kwargs):
        """Send a message to a user via the Automate cloud endpoint."""
        _LOGGER.debug("Sending to: %s, %s", self._recipient, str(self._device))
        data = {
            "secret": self._secret,
            "to": self._recipient,
            "device": self._device,
            "payload": message,
        }
        # A timeout keeps a stalled cloud endpoint from blocking the caller
        # indefinitely; a timeout surfaces as requests.exceptions.Timeout.
        response = requests.post(_RESOURCE, json=data, timeout=10)
        if response.status_code != 200:
            # Lazy %-formatting: the string is only built if the record is emitted.
            _LOGGER.error("Error sending message: %s", response)
| mit |
elastic/elasticsearch-py | test_elasticsearch/test_types/async_types.py | 1 | 3095 | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Any, AsyncGenerator, Dict
from elasticsearch import (
AIOHttpConnection,
AsyncElasticsearch,
AsyncTransport,
ConnectionPool,
)
from elasticsearch.helpers import (
async_bulk,
async_reindex,
async_scan,
async_streaming_bulk,
)
# Client and transport instances constructed with a broad spread of keyword
# arguments so the type checker exercises every annotated parameter; this
# module is type-checked, not executed.
es = AsyncElasticsearch(
    [{"host": "localhost", "port": 9443}],
    transport_class=AsyncTransport,
)
t = AsyncTransport(
    [{}],
    connection_class=AIOHttpConnection,
    connection_pool_class=ConnectionPool,
    sniff_on_start=True,
    sniffer_timeout=0.1,
    sniff_timeout=1,
    sniff_on_connection_fail=False,
    max_retries=1,
    retry_on_status={100, 400, 503},
    retry_on_timeout=True,
    send_get_body_as="source",
)
async def async_gen() -> AsyncGenerator[Dict[Any, Any], None]:
    """Yield a single empty dict; used to type-check async-iterable inputs."""
    empty: Dict[Any, Any] = {}
    yield empty
async def async_scan_types() -> None:
    """Exercise the keyword-argument overloads of async_scan for the type checker."""
    async for _ in async_scan(
        es,
        query={"query": {"match_all": {}}},
        request_timeout=10,
        clear_scroll=True,
        scroll_kwargs={"request_timeout": 10},
    ):
        pass
    async for _ in async_scan(
        es,
        raise_on_error=False,
        preserve_order=False,
        scroll="10m",
        size=10,
        request_timeout=10.0,
    ):
        pass


async def async_streaming_bulk_types() -> None:
    """Check that async_streaming_bulk accepts async generators/iterators and
    plain sync iterables alike."""
    async for _ in async_streaming_bulk(es, async_gen()):
        pass
    async for _ in async_streaming_bulk(es, async_gen().__aiter__()):
        pass
    async for _ in async_streaming_bulk(es, [{}]):
        pass
    async for _ in async_streaming_bulk(es, ({},)):
        pass


async def async_bulk_types() -> None:
    """Check that async_bulk accepts the same input shapes and returns a 2-tuple."""
    _, _ = await async_bulk(es, async_gen())
    _, _ = await async_bulk(es, async_gen().__aiter__())
    _, _ = await async_bulk(es, [{}])
    _, _ = await async_bulk(es, ({},))


async def async_reindex_types() -> None:
    """Check the positional/keyword combinations accepted by async_reindex."""
    _, _ = await async_reindex(
        es, "src-index", "target-index", query={"query": {"match": {"key": "val"}}}
    )
    _, _ = await async_reindex(
        es, source_index="src-index", target_index="target-index", target_client=es
    )
    _, _ = await async_reindex(
        es,
        "src-index",
        "target-index",
        chunk_size=1,
        scroll="10m",
        scan_kwargs={"request_timeout": 10},
        bulk_kwargs={"request_timeout": 10},
    )
| apache-2.0 |
gemagomez/keepnote | keepnote/gui/main_window.py | 1 | 52638 | """
KeepNote
Graphical User Interface for KeepNote Application
"""
#
# KeepNote
# Copyright (c) 2008-2009 Matt Rasmussen
# Author: Matt Rasmussen <rasmus@mit.edu>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
#
# python imports
import mimetypes
import os
import shutil
import subprocess
import sys
import time
import thread
import threading
import uuid
# pygtk imports
import pygtk
pygtk.require('2.0')
import gtk
import gobject
# keepnote imports
import keepnote
from keepnote import \
KeepNoteError, \
ensure_unicode, \
unicode_gtk, \
FS_ENCODING
from keepnote.notebook import \
NoteBookError, \
NoteBookVersionError
from keepnote import notebook as notebooklib
from keepnote import tasklib
from keepnote.gui import \
get_resource, \
get_resource_image, \
get_resource_pixbuf, \
Action, \
ToggleAction, \
add_actions, \
CONTEXT_MENU_ACCEL_PATH, \
FileChooserDialog, \
init_key_shortcuts, \
UIManager
from keepnote.gui.icons import \
lookup_icon_filename
from keepnote.gui import richtext
from keepnote.gui import \
dialog_image_new, \
dialog_drag_drop_test, \
dialog_wait, \
update_file_preview
from keepnote.gui.icon_menu import IconMenu
from keepnote.gui.three_pane_viewer import ThreePaneViewer
from keepnote.gui.tabbed_viewer import TabbedViewer
# Shorthand for the translation function used throughout the GUI.
_ = keepnote.translate

# Name of the GTK clipboard used for copy/paste operations.
CLIPBOARD_NAME = "CLIPBOARD"
class KeepNoteWindow (gtk.Window):
"""Main windows for KeepNote"""
    def __init__(self, app, winid=None):
        """Create a main window for *app*; *winid* defaults to a fresh UUID."""
        gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)

        self._app = app  # application object
        self._winid = winid if winid else unicode(uuid.uuid4())
        self._viewers = []

        # window state
        self._maximized = False      # True if window is maximized
        self._was_maximized = False  # True if iconified and was maximized
        self._iconified = False      # True if window is minimized
        self._tray_icon = None       # gtk.StatusIcon when present, else None
        self._recent_notebooks = []

        self._uimanager = UIManager()
        self._accel_group = self._uimanager.get_accel_group()
        self.add_accel_group(self._accel_group)

        init_key_shortcuts()
        self.init_layout()
        self.setup_systray()

        # load preferences for the first time
        self.load_preferences(True)

    def get_id(self):
        """Return this window's unique identifier string."""
        return self._winid
    def init_layout(self):
        """Build the widget hierarchy: menubar, toolbar, viewer, status bars."""
        # init main window
        self.set_title(keepnote.PROGRAM_NAME)
        self.set_default_size(*keepnote.DEFAULT_WINDOW_SIZE)
        self.set_icon_list(get_resource_pixbuf("keepnote-16x16.png"),
                           get_resource_pixbuf("keepnote-32x32.png"),
                           get_resource_pixbuf("keepnote-64x64.png"))

        # main window signals
        self.connect("error", lambda w,m,e,t: self.error(m,e,t))
        self.connect("delete-event", lambda w,e: self._on_close())
        self.connect("window-state-event", self._on_window_state)
        self.connect("size-allocate", self._on_window_size)
        #self._app.pref.changed.add(self._on_app_options_changed)

        #====================================
        # Dialogs

        self.drag_test = dialog_drag_drop_test.DragDropTestDialog(self)

        self.viewer = self.new_viewer()

        #====================================
        # Layout

        # vertical box
        main_vbox = gtk.VBox(False, 0)
        self.add(main_vbox)

        # menu bar
        main_vbox.set_border_width(0)
        self.menubar = self.make_menubar()
        main_vbox.pack_start(self.menubar, False, True, 0)

        # toolbar
        main_vbox.pack_start(self.make_toolbar(), False, True, 0)

        main_vbox2 = gtk.VBox(False, 0)
        main_vbox2.set_border_width(1)
        main_vbox.pack_start(main_vbox2, True, True, 0)

        # viewer
        self.viewer_box = gtk.VBox(False, 0)
        main_vbox2.pack_start(self.viewer_box, True, True, 0)

        # status bar
        status_hbox = gtk.HBox(False, 0)
        main_vbox.pack_start(status_hbox, False, True, 0)

        # message bar (fixed width, left side)
        self.status_bar = gtk.Statusbar()
        status_hbox.pack_start(self.status_bar, False, True, 0)
        self.status_bar.set_property("has-resize-grip", False)
        self.status_bar.set_size_request(300, -1)

        # stats bar (takes the remaining width)
        self.stats_bar = gtk.Statusbar()
        status_hbox.pack_start(self.stats_bar, True, True, 0)

        #====================================================
        # viewer
        self.viewer_box.pack_start(self.viewer, True, True, 0)

        # add viewer menus
        self.viewer.add_ui(self)
    def setup_systray(self):
        """Setup systray for window"""
        # system tray icon (gtk.StatusIcon requires GTK > 2.10)
        if gtk.gtk_version > (2, 10):
            if not self._tray_icon:
                self._tray_icon = gtk.StatusIcon()
                self._tray_icon.set_from_pixbuf(
                    get_resource_pixbuf("keepnote-32x32.png"))
                self._tray_icon.set_tooltip(keepnote.PROGRAM_NAME)
                self._statusicon_menu = self.make_statusicon_menu()
                self._tray_icon.connect("activate", self._on_tray_icon_activate)
                self._tray_icon.connect('popup-menu',
                                        self._on_systray_popup_menu)

            # honor the user preference for showing the tray icon
            self._tray_icon.set_property(
                "visible", self._app.pref.get("window", "use_systray",
                                              default=True))

        else:
            self._tray_icon = None

    def _on_systray_popup_menu(self, status, button, time):
        # show the tray icon's context menu at the pointer position
        self._statusicon_menu.popup(None, None, None, button, time)
    #==============================================
    # viewers

    def new_viewer(self):
        """Creates a new viewer for this window"""
        #viewer = ThreePaneViewer(self._app, self)
        viewer = TabbedViewer(self._app, self)
        # forward viewer events to the window's handlers
        viewer.connect("error", lambda w,m,e: self.error(m, e, None))
        viewer.connect("status", lambda w,m,b: self.set_status(m, b))
        viewer.connect("window-request", self._on_window_request)
        viewer.connect("current-node", self._on_current_node)
        viewer.connect("modified", self._on_viewer_modified)
        return viewer

    def add_viewer(self, viewer):
        """Adds a viewer to the window"""
        self._viewers.append(viewer)

    def remove_viewer(self, viewer):
        """Removes a viewer from the window"""
        self._viewers.remove(viewer)

    def get_all_viewers(self):
        """Returns list of all viewers associated with window"""
        return self._viewers

    def get_all_notebooks(self):
        """Returns all notebooks loaded by all viewers"""
        # a viewer with no notebook yields None, which is filtered out
        return set(filter(lambda n: n is not None,
                          (v.get_notebook() for v in self._viewers)))
#===============================================
# accessors
def get_app(self):
"""Returns application object"""
return self._app
def get_uimanager(self):
"""Returns the UIManager for the window"""
return self._uimanager
def get_viewer(self):
"""Returns window's viewer"""
return self.viewer
def get_accel_group(self):
"""Returns the accel group for the window"""
return self._accel_group
    def get_notebook(self):
        """Returns the currently loaded notebook (or None)"""
        return self.viewer.get_notebook()
    def get_current_page(self):
        """Returns the currently selected page (or None)"""
        return self.viewer.get_current_page()
#=========================================================
# main window gui callbacks
    def _on_window_state(self, window, event):
        """Callback for window state changes.

        Tracks iconified/maximized state and, on de-iconification,
        re-applies maximization (MS Windows GTK workaround).
        """
        # previous iconified state, for edge detection below
        iconified = self._iconified

        # keep track of maximized and minimized state
        self._iconified = bool(event.new_window_state &
                               gtk.gdk.WINDOW_STATE_ICONIFIED)

        # detect recent iconification
        if not iconified and self._iconified:
            # save maximized state before iconification
            self._was_maximized = self._maximized

        self._maximized = bool(event.new_window_state &
                               gtk.gdk.WINDOW_STATE_MAXIMIZED)

        # detect recent de-iconification
        if iconified and not self._iconified:
            # explicitly maximize if not maximized
            # NOTE: this is needed to work around a MS windows GTK bug
            if self._was_maximized:
                gobject.idle_add(self.maximize)
    def _on_window_size(self, window, event):
        """Callback for resize events"""
        # record window size if it is not maximized or minimized
        # (writes directly into the mutable "window" pref dict)
        if not self._maximized and not self._iconified:
            self._app.pref.get("window")["window_size"] = self.get_size()
#def _on_app_options_changed(self):
# self.load_preferences()
    def _on_tray_icon_activate(self, icon):
        """Tray icon has been clicked in system tray.

        Toggles visibility: minimize if the window is active, otherwise
        restore it.
        """
        if self.is_active():
            self.minimize_window()
        else:
            self.restore_window()
#=============================================================
# viewer callbacks
def _on_window_request(self, viewer, action):
"""Callback for requesting an action from the main window"""
if action == "minimize":
self.minimize_window()
elif action == "restore":
self.restore_window()
else:
raise Exception("unknown window request: " + str(action))
#=================================================
# Window manipulation
    def minimize_window(self):
        """Minimize the window (blocks until the window is iconified)."""

        if self._iconified:
            return

        # TODO: add timer in case minimize fails
        # Run a nested gtk main loop that quits once the iconified state
        # change arrives, so this call is synchronous.
        def on_window_state(window, event):
            if event.new_window_state & gtk.gdk.WINDOW_STATE_ICONIFIED:
                gtk.main_quit()
        sig = self.connect("window-state-event", on_window_state)
        self.iconify()

        # pause until complete
        gtk.main()
        self.disconnect(sig)
    def restore_window(self):
        """Restore the window from minimization and raise it"""
        self.deiconify()
        self.present()
    def on_new_window(self):
        """Open a new window showing the same notebook (if any)"""
        win = self._app.new_window()
        notebook = self.get_notebook()
        if notebook:
            # take an extra reference so the notebook stays open while
            # shared between windows
            self._app.ref_notebook(notebook)
            win.set_notebook(notebook)
#==============================================
# Application preferences
    def load_preferences(self, first_open=False):
        """Load preferences.

        first_open -- True when the window is being shown for the first
        time; only then are size/maximize/minimize-on-start applied.
        """
        p = self._app.pref

        # notebook
        window_size = p.get("window", "window_size",
                            default=keepnote.DEFAULT_WINDOW_SIZE)
        window_maximized = p.get("window", "window_maximized", default=True)

        self.setup_systray()
        use_systray = p.get("window", "use_systray", default=True)

        # window config for first open
        if first_open:
            self.resize(*window_size)
            if window_maximized:
                self.maximize()

            minimize = p.get("window", "minimize_on_start", default=False)
            if use_systray and minimize:
                self.iconify()

        # config window
        # skip-taskbar only makes sense when a tray icon is available
        skip = p.get("window", "skip_taskbar", default=False)
        if use_systray:
            self.set_property("skip-taskbar-hint", skip)

        self.set_keep_above(p.get("window", "keep_above", default=False))

        if p.get("window", "stick", default=False):
            self.stick()
        else:
            self.unstick()

        # other window wide properties
        self._recent_notebooks = p.get("recent_notebooks", default=[])
        self.set_recent_notebooks_menu(self._recent_notebooks)

        self._uimanager.set_force_stock(
            p.get("look_and_feel", "use_stock_icons", default=False))

        # let the viewer apply its own preferences
        self.viewer.load_preferences(self._app.pref, first_open)
    def save_preferences(self):
        """Save preferences"""
        p = self._app.pref

        # save window preferences
        p.set("window", "window_maximized", self._maximized)
        p.set("recent_notebooks", self._recent_notebooks)

        # let viewer save preferences
        self.viewer.save_preferences(self._app.pref)
    def set_recent_notebooks_menu(self, recent_notebooks):
        """Set the recent notebooks in the file menu"""
        menu = self._uimanager.get_widget("/main_menu_bar/File/Open Recent Notebook")

        # init menu
        if menu.get_submenu() is None:
            submenu = gtk.Menu()
            submenu.show()
            menu.set_submenu(submenu)
        menu = menu.get_submenu()

        # clear menu
        menu.foreach(lambda x: menu.remove(x))

        def make_filename(filename, maxsize=30):
            # shorten long paths to "<prefix>.../<basename>"
            if len(filename) > maxsize:
                base = os.path.basename(filename)
                pre = max(maxsize - len(base), 10)
                return os.path.join(filename[:pre] + u"...", base)
            else:
                return filename

        def make_func(filename):
            # bind 'filename' now; a bare lambda in the loop would
            # late-bind to the last notebook
            return lambda w: self.open_notebook(filename)

        # populate menu
        for i, notebook in enumerate(recent_notebooks):
            item = gtk.MenuItem(u"%d. %s" % (i+1, make_filename(notebook)))
            item.connect("activate", make_func(notebook))
            item.show()
            menu.append(item)
def add_recent_notebook(self, filename):
"""Add recent notebook"""
if filename in self._recent_notebooks:
self._recent_notebooks.remove(filename)
self._recent_notebooks = [filename] + \
self._recent_notebooks[:keepnote.gui.MAX_RECENT_NOTEBOOKS]
self.set_recent_notebooks_menu(self._recent_notebooks)
#=============================================
# Notebook open/save/close UI
    def on_new_notebook(self):
        """Launches New NoteBook dialog"""
        dialog = FileChooserDialog(
            _("New Notebook"), self,
            action=gtk.FILE_CHOOSER_ACTION_SAVE,
            buttons=(_("Cancel"), gtk.RESPONSE_CANCEL,
                     _("New"), gtk.RESPONSE_OK),
            app=self._app,
            persistent_path="new_notebook_path")
        response = dialog.run()

        if response == gtk.RESPONSE_OK:
            # create new notebook
            if dialog.get_filename():
                self.new_notebook(unicode_gtk(dialog.get_filename()))

        dialog.destroy()
    def on_open_notebook(self):
        """Launches Open NoteBook dialog"""

        dialog = gtk.FileChooserDialog(
            _("Open Notebook"), self,
            action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
            buttons=(_("Cancel"), gtk.RESPONSE_CANCEL,
                     _("Open"), gtk.RESPONSE_OK))

        def on_folder_changed(filechooser):
            # auto-accept as soon as the user enters a notebook folder
            # (identified by the presence of the notebook pref file)
            folder = unicode_gtk(filechooser.get_current_folder())
            if os.path.exists(os.path.join(folder, notebooklib.PREF_FILE)):
                filechooser.response(gtk.RESPONSE_OK)

        dialog.connect("current-folder-changed", on_folder_changed)

        path = self._app.get_default_path("new_notebook_path")
        if os.path.exists(path):
            dialog.set_current_folder(path)

        file_filter = gtk.FileFilter()
        file_filter.add_pattern("*.nbk")
        file_filter.set_name(_("Notebook (*.nbk)"))
        dialog.add_filter(file_filter)

        file_filter = gtk.FileFilter()
        file_filter.add_pattern("*")
        file_filter.set_name(_("All files (*.*)"))
        dialog.add_filter(file_filter)

        response = dialog.run()

        if response == gtk.RESPONSE_OK:
            # remember the containing directory as the default path
            path = dialog.get_current_folder()
            if path:
                self._app.pref.set("default_paths", "new_notebook_path",
                                   os.path.dirname(path))

            notebook_file = unicode_gtk(dialog.get_filename())
            if notebook_file:
                self.open_notebook(notebook_file)

        dialog.destroy()
    def _on_close(self):
        """Callback for window close.

        Saves application state, closes the notebook and hides the tray
        icon.  Returns False so it can double as a delete-event handler.
        """
        try:
            # TODO: decide if a clipboard action is needed before
            # closing down.
            #clipboard = self.get_clipboard(selection=CLIPBOARD_NAME)
            #clipboard.set_can_store(None)
            #clipboard.store()

            self._app.save()
            self.close_notebook()
            if self._tray_icon:
                # turn off tray icon
                self._tray_icon.set_property("visible", False)

        except Exception, e:
            self.error("Error while closing", e, sys.exc_info()[2])

        return False
    def close(self):
        """Close the window"""
        # run shutdown logic first, then tear down the widget
        self._on_close()
        self.emit("delete-event", None)
        self.destroy()
    def on_quit(self):
        """Quit the application"""
        # persist state before exiting
        self._app.save()
        self._app.quit()
#===============================================
# Notebook actions
    def save_notebook(self, silent=False):
        """Saves the current notebook.

        silent -- if True, suppress the error dialog on failure.
        """
        try:
            # save window information for all notebooks associated with this
            # window
            for notebook in self.get_all_notebooks():
                p = notebook.pref.get("windows", "ids", define=True)
                p[self._winid] = {
                    "viewer_type": self.viewer.get_name(),
                    "viewerid": self.viewer.get_id()}

            # let the viewer save its information
            self.viewer.save()
            self.set_status(_("Notebook saved"))

        except Exception, e:
            if not silent:
                self.error(_("Could not save notebook."), e, sys.exc_info()[2])
                self.set_status(_("Error saving notebook"))
                return
    def reload_notebook(self):
        """Reload the current NoteBook"""
        notebook = self.viewer.get_notebook()
        if notebook is None:
            self.error(_("Reloading only works when a notebook is open."))
            return

        # close everywhere, then reopen from disk
        filename = notebook.get_path()
        self._app.close_all_notebook(notebook, False)
        self.open_notebook(filename)

        self.set_status(_("Notebook reloaded"))
    def new_notebook(self, filename):
        """Creates and opens a new NoteBook at 'filename'.

        Returns the opened notebook, or None on failure.
        """
        if self.viewer.get_notebook() is not None:
            self.close_notebook()

        try:
            # make sure filename is unicode
            filename = ensure_unicode(filename, FS_ENCODING)
            notebook = notebooklib.NoteBook(filename)
            notebook.create()
            notebook.close()
            self.set_status(_("Created '%s'") % notebook.get_title())
        except NoteBookError, e:
            self.error(_("Could not create new notebook."), e, sys.exc_info()[2])
            self.set_status("")
            return None

        # reopen through the normal path so windows/prefs are restored
        return self.open_notebook(filename, new=True)
    def _load_notebook(self, filename):
        """Loads notebook in background with progress bar.

        Returns the notebook, or None if it could not be obtained.
        """
        notebook = self._app.get_notebook(filename, self)
        if notebook is None:
            return None

        # check for indexing
        # TODO: is this the best place for checking?
        # There is a difference between normal incremental indexing
        # and indexing due version updating.
        # incremental updating (checking a few files that have changed on
        # disk) should be done within notebook.load().
        # Whole notebook re-indexing, triggered by version upgrade
        # should be done separately, and with a different wait dialog
        # clearly indicating that notebook loading is going to take
        # longer than usual.
        if notebook.index_needed():
            self.update_index(notebook)

        return notebook
    def _restore_windows(self, notebook, open_here=True):
        """
        Restore multiple windows for notebook

        open_here -- if True, will open notebook in this window

        Cases:
        1. if notebook has no saved windows, just open notebook in this window
        2. if notebook has 1 saved window
            if open_here:
                open it in this window
            else:
                if this window has no opened notebooks,
                    reassign its ids to the notebook and open it here
                else
                    reassign notebooks saved ids to this window and viewer
        3. if notebook has >1 saved windows, open them in their own windows
           if this window has no notebook, reassign its id to one of the
           saved ids.
        """

        # init window lookup: winid -> live window object
        win_lookup = dict((w.get_id(), w) for w in
                          self._app.get_windows())

        def open_in_window(winid, viewerid, notebook):
            # open 'notebook' in the window identified by 'winid',
            # creating a new window if none exists with that id
            win = win_lookup.get(winid, None)
            if win is None:
                # open new window
                win = self._app.new_window()
                win_lookup[winid] = win
                win._winid = winid
                if viewerid:
                    win.get_viewer().set_id(viewerid)

            # set notebook
            self._app.ref_notebook(notebook)
            win.set_notebook(notebook)

        # find out how many windows this notebook had last time
        # init viewer if needed
        windows = notebook.pref.get("windows", "ids", define=True)
        notebook.pref.get("viewers", "ids", define=True)

        if len(windows) == 0:
            # no presistence info found, just open notebook in this window
            self.set_notebook(notebook)

        elif len(windows) == 1:
            # restore a single window
            # NOTE(review): dict.items()[0] is Python 2 only; Python 3
            # would need list(windows.items())[0]
            winid, winpref = windows.items()[0]
            viewerid = winpref.get("viewerid", None)

            if viewerid is not None:
                if len(self.get_all_notebooks()) == 0:
                    # no notebooks are open, so it is ok to reassign
                    # the viewer's id to match the notebook pref
                    self._winid = winid
                    self.viewer.set_id(viewerid)
                    self.set_notebook(notebook)

                elif open_here:
                    # TODO: needs more testing
                    # notebooks are open, so reassign the notebook's pref to
                    # match the existing viewer
                    notebook.pref.set("windows", "ids",
                                      {self._winid:
                                       {"viewerid": self.viewer.get_id(),
                                        "viewer_type": self.viewer.get_name()}})
                    notebook.pref.set(
                        "viewers", "ids", self.viewer.get_id(),
                        notebook.pref.get("viewers", "ids", viewerid,
                                          define=True))
                    del notebook.pref.get("viewers", "ids")[viewerid]
                    self.set_notebook(notebook)

                else:
                    # open in whatever window the notebook wants
                    open_in_window(winid, viewerid, notebook)
                    self._app.unref_notebook(notebook)

        elif len(windows) > 1:
            # get different kinds of window ids
            restoring_ids = set(windows.keys())
            # NOTE(review): new_ids is computed but never used
            new_ids = restoring_ids - set(win_lookup.keys())

            if len(self.get_all_notebooks()) == 0:
                # special case: if no notebooks opened, then make sure
                # to reuse this window
                if self._winid not in restoring_ids:
                    # NOTE(review): iterator .next() is Python 2 only
                    self._winid = iter(restoring_ids).next()

                restoring_ids.remove(self._winid)
                viewerid = windows[self._winid].get("viewerid", None)
                if viewerid:
                    self.viewer.set_id(viewerid)
                self.set_notebook(notebook)

            # restore remaining windows
            while len(restoring_ids) > 0:
                winid = restoring_ids.pop()
                viewerid = windows[winid].get("viewerid", None)
                open_in_window(winid, viewerid, notebook)
                self._app.unref_notebook(notebook)
def open_notebook(self, filename, new=False, open_here=True):
"""Opens a new notebook"""
try:
filename = notebooklib.normalize_notebook_dirname(
filename, longpath=False)
except Exception, e:
self.error(_("Could note find notebook '%s'.") % filename, e,
sys.exc_info()[2])
notebook = None
else:
notebook = self._load_notebook(filename)
if notebook is None:
return
# setup notebook
self._restore_windows(notebook, open_here=open_here)
if not new:
self.set_status(_("Loaded '%s'") % notebook.get_title())
self.update_title()
# save notebook to recent notebooks
self.add_recent_notebook(filename)
return notebook
    def close_notebook(self, notebook=None):
        """Close the NoteBook (defaults to the currently open one)"""
        if notebook is None:
            notebook = self.get_notebook()

        self.viewer.close_notebook(notebook)
        self.set_status(_("Notebook closed"))
    def _on_close_notebook(self, notebook):
        """Callback when notebook is closing"""
        # intentionally a no-op
        pass
    def set_notebook(self, notebook):
        """Set the NoteBook for the window"""
        # delegate to the viewer, which owns notebook display state
        self.viewer.set_notebook(notebook)
    def update_index(self, notebook=None, clear=False):
        """Update notebook index.

        notebook -- notebook to index (defaults to the current one)
        clear -- if True, wipe the index first (recovers from corrupt
                 databases)
        """
        if notebook is None:
            notebook = self.viewer.get_notebook()
        if notebook is None:
            return

        def update(task):
            # runs in the wait-dialog task, not the gui thread
            # erase database first
            # NOTE: I do this right now so that corrupt databases can be
            # cleared out of the way.
            if clear:
                notebook.clear_index()

            try:
                for node in notebook.index_all():
                    # terminate if search is canceled
                    if task.aborted():
                        break
            except Exception, e:
                self.error(_("Error during index"), e, sys.exc_info()[2])
            task.finish()

        # launch task
        self.wait_dialog(_("Indexing notebook"), _("Indexing..."),
                         tasklib.Task(update))
#=====================================================
# viewer callbacks
def update_title(self, node=None):
"""Set the modification state of the notebook"""
notebook = self.viewer.get_notebook()
if notebook is None:
self.set_title(keepnote.PROGRAM_NAME)
else:
title = notebook.get_attr("title", u"")
if node is None:
node = self.get_current_page()
if node is not None:
title += u": " + node.get_attr("title", "")
modified = notebook.save_needed()
if modified:
self.set_title(u"* %s" % title)
self.set_status(_("Notebook modified"))
else:
self.set_title(title)
    def _on_current_node(self, viewer, node):
        """Callback for when viewer changes the current node"""
        # reflect the new selection in the window title
        self.update_title(node)
    def _on_viewer_modified(self, viewer, modified):
        """Callback for when viewer has a modified notebook"""
        # title shows "* " prefix while unsaved changes exist
        self.update_title()
#===========================================================
# page and folder actions
    def get_selected_nodes(self):
        """
        Returns list of selected nodes
        """
        return self.viewer.get_selected_nodes()
def confirm_delete_nodes(self, nodes):
"""Confirm whether nodes should be deleted"""
# TODO: move to app?
# TODO: add note names to dialog
# TODO: assume one node is selected
# could make this a stand alone function/dialog box
for node in nodes:
if node.get_attr("content_type") == notebooklib.CONTENT_TYPE_TRASH:
self.error(_("The Trash folder cannot be deleted."), None)
return False
if node.get_parent() == None:
self.error(_("The top-level folder cannot be deleted."), None)
return False
if len(nodes) > 1 or len(nodes[0].get_children()) > 0:
message = _("Do you want to delete this note and all of its children?")
else:
message = _("Do you want to delete this note?")
return self._app.ask_yes_no(message, _("Delete Note"),
parent=self.get_toplevel())
    def on_empty_trash(self):
        """Empty Trash folder in NoteBook"""
        if self.get_notebook() is None:
            return

        try:
            self.get_notebook().empty_trash()
        except NoteBookError, e:
            self.error(_("Could not empty trash."), e, sys.exc_info()[2])
#=================================================
# action callbacks
    def on_view_node_external_app(self, app, node=None, kind=None):
        """View a node with an external app.

        app -- external application key (e.g. "file_explorer")
        node -- node to view; defaults to the first selected node
        kind -- passed through to the app launcher (e.g. "dir", "page")
        """
        self._app.save()

        # determine node to view
        if node is None:
            nodes = self.get_selected_nodes()
            if len(nodes) == 0:
                self.emit("error", _("No notes are selected."), None, None)
                return
            node = nodes[0]

        try:
            self._app.run_external_app_node(app, node, kind)
        except KeepNoteError, e:
            self.emit("error", e.msg, e, sys.exc_info()[2])
#=====================================================
# Cut/copy/paste
# forward cut/copy/paste to the correct widget
    def on_cut(self):
        """Cut callback"""
        # forward to the focused widget, if it supports the signal
        widget = self.get_focus()
        if gobject.signal_lookup("cut-clipboard", widget) != 0:
            widget.emit("cut-clipboard")
    def on_copy(self):
        """Copy callback"""
        # forward to the focused widget, if it supports the signal
        widget = self.get_focus()
        if gobject.signal_lookup("copy-clipboard", widget) != 0:
            widget.emit("copy-clipboard")
    def on_copy_tree(self):
        """Copy tree callback"""
        # forward to the focused widget, if it supports the signal
        widget = self.get_focus()
        if gobject.signal_lookup("copy-tree-clipboard", widget) != 0:
            widget.emit("copy-tree-clipboard")
    def on_paste(self):
        """Paste callback"""
        # forward to the focused widget, if it supports the signal
        widget = self.get_focus()
        if gobject.signal_lookup("paste-clipboard", widget) != 0:
            widget.emit("paste-clipboard")
    def on_undo(self):
        """Undo callback (delegates to the viewer)"""
        self.viewer.undo()
    def on_redo(self):
        """Redo callback (delegates to the viewer)"""
        self.viewer.redo()
#===================================================
# Misc.
    def view_error_log(self):
        """View the error log in a text editor"""

        # windows locks open files
        # therefore we should copy error log before viewing it
        try:
            filename = os.path.realpath(keepnote.get_user_error_log())
            filename2 = filename + u".bak"
            shutil.copy(filename, filename2)

            # use text editor to view error log
            self._app.run_external_app("text_editor", filename2)
        except Exception, e:
            self.error(_("Could not open error log") + ":\n" + str(e),
                       e, sys.exc_info()[2])
def view_config_files(self):
"""View config folder in a file explorer"""
try:
# use text editor to view error log
filename = keepnote.get_user_pref_dir()
self._app.run_external_app("file_explorer", filename)
except Exception, e:
self.error(_("Could not open error log") + ":\n" + str(e),
e, sys.exc_info()[2])
#==================================================
# Help/about dialog
    def on_about(self):
        """Display about dialog"""

        def func(dialog, link, data):
            # open clicked hyperlinks in the user's web browser
            try:
                self._app.open_webpage(link)
            except KeepNoteError, e:
                self.error(e.msg, e, sys.exc_info()[2])
        gtk.about_dialog_set_url_hook(func, None)

        about = gtk.AboutDialog()
        about.set_name(keepnote.PROGRAM_NAME)
        about.set_version(keepnote.PROGRAM_VERSION_TEXT)
        about.set_copyright(keepnote.COPYRIGHT)
        about.set_logo(get_resource_pixbuf("keepnote-icon.png"))
        about.set_website(keepnote.WEBSITE)
        about.set_license(keepnote.LICENSE_NAME)
        about.set_translator_credits(keepnote.TRANSLATOR_CREDITS)

        # show the full license text when the bundled COPYING file exists
        license_file = keepnote.get_resource(u"rc", u"COPYING")
        if os.path.exists(license_file):
            about.set_license(open(license_file).read())

        #about.set_authors(["Matt Rasmussen <rasmus@mit.edu>"])

        about.set_transient_for(self)
        about.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
        about.connect("response", lambda d,r: about.destroy())
        about.show()
#===========================================
# Messages, warnings, errors UI/dialogs
def set_status(self, text, bar="status"):
"""Sets a status message in the status bar"""
if bar == "status":
self.status_bar.pop(0)
self.status_bar.push(0, text)
elif bar == "stats":
self.stats_bar.pop(0)
self.stats_bar.push(0, text)
else:
raise Exception("unknown bar '%s'" % bar)
    def error(self, text, error=None, tracebk=None):
        """Display an error message (delegates to the application)"""
        self._app.error(text, error, tracebk)
def wait_dialog(self, title, text, task, cancel=True):
"""Display a wait dialog"""
# NOTE: pause autosave while performing long action
self._app.pause_auto_save(True)
dialog = dialog_wait.WaitDialog(self)
dialog.show(title, text, task, cancel=cancel)
self._app.pause_auto_save(False)
#================================================
# Menus
    def get_actions(self):
        """Return the list of Action objects for the main window UI.

        NOTE(review): relies on Python 2 map() returning a list (it is
        concatenated with '+' below); Python 3 would need list(map(...)).
        """
        actions = map(lambda x: Action(*x),
                      [
            ("File", None, _("_File")),

            ("New Notebook", gtk.STOCK_NEW, _("_New Notebook..."),
             "", _("Start a new notebook"),
             lambda w: self.on_new_notebook()),

            ("Open Notebook", gtk.STOCK_OPEN, _("_Open Notebook..."),
             "<control>O", _("Open an existing notebook"),
             lambda w: self.on_open_notebook()),

            ("Open Recent Notebook", gtk.STOCK_OPEN,
             _("Open Re_cent Notebook")),

            ("Reload Notebook", gtk.STOCK_REVERT_TO_SAVED,
             _("_Reload Notebook"),
             "", _("Reload the current notebook"),
             lambda w: self.reload_notebook()),

            ("Save Notebook", gtk.STOCK_SAVE, _("_Save Notebook"),
             "<control>S", _("Save the current notebook"),
             lambda w: self._app.save()),

            ("Close Notebook", gtk.STOCK_CLOSE, _("_Close Notebook"),
             "", _("Close the current notebook"),
             lambda w: self._app.close_all_notebook(self.get_notebook())),

            ("Export", None, _("_Export Notebook")),

            ("Import", None, _("_Import Notebook")),

            ("Quit", gtk.STOCK_QUIT, _("_Quit"),
             "<control>Q", _("Quit KeepNote"),
             lambda w: self.on_quit()),

            #=======================================
            ("Edit", None, _("_Edit")),

            ("Undo", gtk.STOCK_UNDO, None,
             "<control>Z", None,
             lambda w: self.on_undo()),

            ("Redo", gtk.STOCK_REDO, None,
             "<control><shift>Z", None,
             lambda w: self.on_redo()),

            ("Cut", gtk.STOCK_CUT, None,
             "<control>X", None,
             lambda w: self.on_cut()),

            ("Copy", gtk.STOCK_COPY, None,
             "<control>C", None,
             lambda w: self.on_copy()),

            ("Copy Tree", gtk.STOCK_COPY, None,
             "<control><shift>C", None,
             lambda w: self.on_copy_tree()),

            ("Paste", gtk.STOCK_PASTE, None,
             "<control>V", None,
             lambda w: self.on_paste()),

            ("Empty Trash", gtk.STOCK_DELETE, _("Empty _Trash"),
             "", None,
             lambda w: self.on_empty_trash()),

            #========================================
            ("Search", None, _("_Search")),

            ("Search All Notes", gtk.STOCK_FIND, _("_Search All Notes"),
             "<control>K", None,
             lambda w: self.search_box.grab_focus()),

            #=======================================
            ("Go", None, _("_Go")),

            #========================================
            ("View", None, _("_View")),

            ("View Note in File Explorer", gtk.STOCK_OPEN,
             _("View Note in File Explorer"),
             "", None,
             lambda w: self.on_view_node_external_app("file_explorer",
                                                      kind="dir")),

            ("View Note in Text Editor", gtk.STOCK_OPEN,
             _("View Note in Text Editor"),
             "", None,
             lambda w: self.on_view_node_external_app("text_editor",
                                                      kind="page")),

            ("View Note in Web Browser", gtk.STOCK_OPEN,
             _("View Note in Web Browser"),
             "", None,
             lambda w: self.on_view_node_external_app("web_browser",
                                                      kind="page")),

            ("Open File", gtk.STOCK_OPEN,
             _("_Open File"),
             "", None,
             lambda w: self.on_view_node_external_app("file_launcher",
                                                      kind="file")),

            #=========================================
            ("Tools", None, _("_Tools")),

            ("Update Notebook Index", None, _("_Update Notebook Index"),
             "", None,
             lambda w: self.update_index(clear=True)),

            ("KeepNote Preferences", gtk.STOCK_PREFERENCES, _("_Preferences"),
             "", None,
             lambda w: self._app.app_options_dialog.show(self)),

            #=========================================
            ("Window", None, _("Window")),

            ("New Window", None, _("New Window"),
             "", _("Open a new window"),
             lambda w: self.on_new_window()),

            ("Close Window", None, _("Close Window"),
             "", _("Close window"),
             lambda w: self.close()),

            #=========================================
            ("Help", None, _("_Help")),

            ("View Error Log...", gtk.STOCK_DIALOG_ERROR, _("View _Error Log..."),
             "", None,
             lambda w: self.view_error_log()),

            ("View Preference Files...", None, _("View Preference Files..."), "", None,
             lambda w: self.view_config_files()),

            ("Drag and Drop Test...", None, _("Drag and Drop Test..."),
             "", None,
             lambda w: self.drag_test.on_drag_and_drop_test()),

            ("About", gtk.STOCK_ABOUT, _("_About"),
             "", None,
             lambda w: self.on_about())
        ]) + [
            Action("Main Spacer Tool"),
            Action("Search Box Tool", None, None, "", _("Search All Notes")),
            Action("Search Button Tool", gtk.STOCK_FIND, None, "",
                   _("Search All Notes"),
                   lambda w: self.search_box.on_search_nodes())]

        # make sure recent notebooks is always visible
        recent = [x for x in actions
                  if x.get_property("name") == "Open Recent Notebook"][0]
        recent.set_property("is-important", True)

        return actions
    def setup_menus(self, uimanager):
        """Post-build menu setup; no-op by default.

        NOTE(review): called after UI construction in make_menubar and
        make_statusicon_menu — presumably an extension/subclass hook.
        """
        pass
    def get_ui(self):
        """Return the UIManager XML definitions for menus and toolbar.

        Action names referenced here must match those in get_actions().
        """
        return ["""
<ui>

<!-- main window menu bar -->
<menubar name="main_menu_bar">
  <menu action="File">
     <menuitem action="New Notebook"/>
     <placeholder name="Viewer"/>
     <placeholder name="New"/>
     <separator/>
     <menuitem action="Open Notebook"/>
     <menuitem action="Open Recent Notebook"/>
     <menuitem action="Save Notebook"/>
     <menuitem action="Close Notebook"/>
     <menuitem action="Reload Notebook"/>
     <menuitem action="Empty Trash"/>
     <separator/>
     <menu action="Export" />
     <menu action="Import" />
     <separator/>
     <placeholder name="Extensions"/>
     <separator/>
     <menuitem action="Quit"/>
  </menu>

  <menu action="Edit">
    <menuitem action="Undo"/>
    <menuitem action="Redo"/>
    <separator/>
    <menuitem action="Cut"/>
    <menuitem action="Copy"/>
    <menuitem action="Copy Tree"/>
    <menuitem action="Paste"/>
    <separator/>
    <placeholder name="Viewer"/>
    <separator/>
    <menuitem action="KeepNote Preferences"/>
  </menu>

  <menu action="Search">
    <menuitem action="Search All Notes"/>
    <placeholder name="Viewer"/>
  </menu>

  <placeholder name="Viewer"/>

  <menu action="Go">
    <placeholder name="Viewer"/>
  </menu>

  <menu action="Tools">
    <placeholder name="Viewer"/>
    <menuitem action="Update Notebook Index"/>
    <placeholder name="Extensions"/>
  </menu>

  <menu action="Window">
     <menuitem action="New Window"/>
     <menuitem action="Close Window"/>
     <placeholder name="Viewer Window"/>
  </menu>

  <menu action="Help">
    <menuitem action="View Error Log..."/>
    <menuitem action="View Preference Files..."/>
    <menuitem action="Drag and Drop Test..."/>
    <separator/>
    <menuitem action="About"/>
  </menu>
</menubar>

<!-- main window tool bar -->
<toolbar name="main_tool_bar">
  <placeholder name="Viewer"/>
  <toolitem action="Main Spacer Tool"/>
  <toolitem action="Search Box Tool"/>
  <toolitem action="Search Button Tool"/>
</toolbar>

<!-- popup menus -->
<menubar name="popup_menus">
</menubar>

</ui>
"""]
    def get_actions_statusicon(self):
        """Set actions for StatusIcon menu and return.

        NOTE(review): relies on Python 2 map() returning a list.
        """
        actions = map(lambda x: Action(*x),
                      [
            ("KeepNote Preferences", gtk.STOCK_PREFERENCES, _("_Preferences"),
             "", None,
             lambda w: self._app.app_options_dialog.show(self)),

            ("Quit", gtk.STOCK_QUIT, _("_Quit"),
             "<control>Q", _("Quit KeepNote"),
             lambda w: self.close()),

            ("About", gtk.STOCK_ABOUT, _("_About"),
             "", None,
             lambda w: self.on_about())
        ])
        return actions
    def get_ui_statusicon(self):
        """Create UI xml-definition for StatusIcon menu and return."""
        return ["""
<ui>
  <!-- statusicon_menu -->
  <popup name="statusicon_menu">
    <menuitem action="KeepNote Preferences"/>
    <menuitem action="About"/>
    <separator/>
    <menuitem action="Quit"/>
  </popup>
</ui>
"""]
    def make_menubar(self):
        """Initialize the menu bar"""

        #===============================
        # ui manager
        self._actiongroup = gtk.ActionGroup('MainWindow')
        self._uimanager.insert_action_group(self._actiongroup, 0)

        # setup menus
        add_actions(self._actiongroup, self.get_actions())
        for s in self.get_ui():
            self._uimanager.add_ui_from_string(s)
        # give hooks a chance to adjust the built menus
        self.setup_menus(self._uimanager)

        # return menu bar
        menubar = self._uimanager.get_widget('/main_menu_bar')
        return menubar
    def make_toolbar(self):
        """Build and configure the main toolbar; returns the widget."""

        # configure toolbar
        toolbar = self._uimanager.get_widget('/main_tool_bar')
        toolbar.set_orientation(gtk.ORIENTATION_HORIZONTAL)
        toolbar.set_style(gtk.TOOLBAR_ICONS)
        toolbar.set_border_width(0)

        try:
            # NOTE: if this version of GTK doesn't have this size, then
            # ignore it
            # NOTE(review): deliberately broad bare except; best-effort
            toolbar.set_property("icon-size", gtk.ICON_SIZE_SMALL_TOOLBAR)
        except:
            pass

        # separator (is there a better way to do this?)
        spacer = self._uimanager.get_widget("/main_tool_bar/Main Spacer Tool")
        spacer.remove(spacer.child)
        spacer.set_expand(True)

        # search box
        self.search_box = SearchBox(self)
        self.search_box.show()
        w = self._uimanager.get_widget("/main_tool_bar/Search Box Tool")
        w.remove(w.child)
        w.add(self.search_box)

        return toolbar
    def make_statusicon_menu(self):
        """Initialize the StatusIcon menu."""

        #===============================
        # ui manager
        # the tray icon gets its own UIManager, separate from the window's
        self._actiongroup_statusicon = gtk.ActionGroup('StatusIcon')
        self._tray_icon.uimanager = gtk.UIManager()
        self._tray_icon.uimanager.insert_action_group(
            self._actiongroup_statusicon, 0)

        # setup menu
        add_actions(self._actiongroup_statusicon,
                    self.get_actions_statusicon())
        for s in self.get_ui_statusicon():
            self._tray_icon.uimanager.add_ui_from_string(s)
        self.setup_menus(self._tray_icon.uimanager)

        # return menu
        statusicon_menu = self._tray_icon.uimanager.get_widget(
            '/statusicon_menu')
        return statusicon_menu
# register KeepNoteWindow with the gobject type system and declare its
# custom "error" signal with signature (message, exception, traceback)
gobject.type_register(KeepNoteWindow)
gobject.signal_new("error", KeepNoteWindow, gobject.SIGNAL_RUN_LAST,
                   gobject.TYPE_NONE, (str, object, object))
class SearchBox (gtk.Entry):
def __init__(self, window):
gtk.Entry.__init__(self)
self._window = window
self.connect("changed", self._on_search_box_text_changed)
self.connect("activate", lambda w: self.on_search_nodes())
self.search_box_list = gtk.ListStore(gobject.TYPE_STRING,
gobject.TYPE_STRING)
self.search_box_completion = gtk.EntryCompletion()
self.search_box_completion.connect("match-selected",
self._on_search_box_completion_match)
self.search_box_completion.set_match_func(lambda c, k, i: True)
self.search_box_completion.set_model(self.search_box_list)
self.search_box_completion.set_text_column(0)
self.set_completion(self.search_box_completion)
def on_search_nodes(self):
"""Search nodes"""
# do nothing if notebook is not defined
if not self._window.get_notebook():
return
# TODO: add parsing grammar
# get words
words = [x.lower() for x in
unicode_gtk(self.get_text()).strip().split()]
# clear listview
self._window.get_viewer().start_search_result()
# queue for sending results between threads
from threading import Lock
from Queue import Queue
queue = Queue()
lock = Lock() # a mutex for the notebook (protect sqlite)
# update gui with search result
def search(task):
alldone = Lock() # ensure gui and background sync up at end
alldone.acquire()
def gui_update():
lock.acquire()
more = True
try:
maxstep = 20
for i in xrange(maxstep):
# check if search is aborted
if task.aborted():
more = False
break
# skip if queue is empty
if queue.empty():
break
node = queue.get()
# no more nodes left, finish
if node is None:
more = False
break
# add result to gui
self._window.get_viewer().add_search_result(node)
except Exception, e:
self._window.error(_("Unexpected error"), e)
more = False
finally:
lock.release()
if not more:
alldone.release()
return more
gobject.idle_add(gui_update)
# init search
notebook = self._window.get_notebook()
try:
nodes = (notebook.get_node_by_id(nodeid)
for nodeid in
notebook.search_node_contents(" ".join(words)))
except:
keepnote.log_error()
# do search in thread
try:
lock.acquire()
for node in nodes:
if task.aborted():
break
lock.release()
if node:
queue.put(node)
lock.acquire()
lock.release()
queue.put(None)
except Exception, e:
self.error(_("Unexpected error"), e)
# wait for gui thread to finish
# NOTE: if task is aborted, then gui_update stops itself for
# some reason, thus no need to acquire alldone.
if not task.aborted():
alldone.acquire()
# launch task
task = tasklib.Task(search)
self._window.wait_dialog(
_("Searching notebook"), _("Searching..."), task)
if task.exc_info()[0]:
e, t, tr = task.exc_info()
keepnote.log_error(e, tr)
self._window.get_viewer().end_search_result()
def focus_on_search_box(self):
"""Place cursor in search box"""
self.grab_focus()
def _on_search_box_text_changed(self, url_text):
self.search_box_update_completion()
def search_box_update_completion(self):
if not self._window.get_notebook():
return
text = unicode_gtk(self.get_text())
self.search_box_list.clear()
if len(text) > 0:
results = self._window.get_notebook().search_node_titles(text)[:10]
for nodeid, title in results:
self.search_box_list.append([title, nodeid])
    def _on_search_box_completion_match(self, completion, model, iter):
        # Callback for a selected autocomplete entry: navigate the viewer
        # to the chosen node.
        if not self._window.get_notebook():
            return
        # Column 1 of the completion model holds the node id
        # (column 0 holds the displayed title).
        nodeid = model[iter][1]
        node = self._window.get_notebook().get_node_by_id(nodeid)
        if node:
            self._window.get_viewer().goto_node(node, False)
| gpl-2.0 |
shakamunyi/neutron-dvr | neutron/openstack/common/processutils.py | 28 | 9401 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import logging as stdlib_logging
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
from neutron.openstack.common.gettextutils import _
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
    """Raised when a caller passes an argument that is not supported."""
    def __init__(self, message=None):
        super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception):
    """Raised when execute() receives keyword arguments it does not know."""
    def __init__(self, message=None):
        super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
    """Raised when an executed command exits with an unexpected status.

    Carries the command, exit code and captured stdout/stderr so callers
    can inspect exactly what failed.
    """

    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        self.exit_code = exit_code
        self.stderr = stderr
        self.stdout = stdout
        self.cmd = cmd
        self.description = description

        # Substitute generic placeholders for missing details so the
        # formatted message is always complete.  Note the attributes above
        # keep the values exactly as passed in.
        message = ("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r"
                   % (description if description is not None
                      else "Unexpected error while running command.",
                      cmd,
                      exit_code if exit_code is not None else '-',
                      stdout,
                      stderr))
        super(ProcessExecutionError, self).__init__(message)
class NoRootWrapSpecified(Exception):
    """Raised when run_as_root is requested without a root_helper command."""
    def __init__(self, message=None):
        super(NoRootWrapSpecified, self).__init__(message)
def _subprocess_setup():
    """Restore default SIGPIPE handling in the child (Popen preexec_fn)."""
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.
    Allows optional retry.
    :param cmd:             Passed to subprocess.Popen.
    :type cmd:              string
    :param process_input:   Send to opened process.
    :type process_input:    string
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes. Defaults to [0]. Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these code.
    :type check_exit_code:  boolean, int, or [int]
    :param delay_on_retry:  True | False. Defaults to True. If set to True,
                            wait a short amount of time before retrying.
    :type delay_on_retry:   boolean
    :param attempts:        How many times to retry cmd.
    :type attempts:         int
    :param run_as_root:     True | False. Defaults to False. If set to True,
                            the command is prefixed by the command specified
                            in the root_helper kwarg.
    :type run_as_root:      boolean
    :param root_helper:     command to prefix to commands called with
                            run_as_root=True
    :type root_helper:      string
    :param shell:           whether or not there should be a shell used to
                            execute this command. Defaults to false.
    :type shell:            boolean
    :param loglevel:        log level for execute commands.
    :type loglevel:         int. (Should be stdlib_logging.DEBUG or
                            stdlib_logging.INFO)
    :returns: (stdout, stderr) from process execution
    :raises: :class:`UnknownArgumentError` on
             receiving unknown arguments
    :raises: :class:`ProcessExecutionError`
    """
    process_input = kwargs.pop('process_input', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)
    # Normalize check_exit_code to a list of allowed codes; a bare bool
    # means "check against [0]" (True) or "ignore the exit code" (False).
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]
    # Anything left in kwargs was not a recognized option.
    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args '
                                     'to utils.execute: %r') % kwargs)
    # Prefix the command with the configured root helper (e.g. sudo/rootwrap)
    # when root was requested and we are not already running as root.
    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=('Command requested root, but did not specify a root '
                         'helper.'))
        cmd = shlex.split(root_helper) + list(cmd)
    # NOTE: relies on Python 2 map() returning a list; cmd is re-joined for
    # logging below and iterated on each retry.
    cmd = map(str, cmd)
    while attempts > 0:
        attempts -= 1
        try:
            LOG.log(loglevel, _('Running cmd (subprocess): %s'), ' '.join(cmd))
            _PIPE = subprocess.PIPE # pylint: disable=E1101
            # Windows has neither preexec_fn nor reliable close_fds support.
            if os.name == 'nt':
                preexec_fn = None
                close_fds = False
            else:
                preexec_fn = _subprocess_setup
                close_fds = True
            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=preexec_fn,
                                   shell=shell)
            result = None
            if process_input is not None:
                result = obj.communicate(process_input)
            else:
                result = obj.communicate()
            obj.stdin.close() # pylint: disable=E1101
            _returncode = obj.returncode # pylint: disable=E1101
            LOG.log(loglevel, _('Result was %s') % _returncode)
            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=stdout,
                                            stderr=stderr,
                                            cmd=' '.join(cmd))
            return result
        except ProcessExecutionError:
            if not attempts:
                # Out of retries: propagate the failure to the caller.
                raise
            else:
                LOG.log(loglevel, _('%r failed. Retrying.'), cmd)
                if delay_on_retry:
                    # Random back-off between 0.2s and 2s before retrying.
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
def trycmd(*args, **kwargs):
    """A wrapper around execute() to more easily handle warnings and errors.

    Returns an (out, err) tuple of strings containing the output of
    the command's stdout and stderr. If 'err' is not empty then the
    command can be considered to have failed.

    :discard_warnings   True | False. Defaults to False. If set to True,
                        then for succeeding commands, stderr is cleared
    """
    discard_warnings = kwargs.pop('discard_warnings', False)
    try:
        out, err = execute(*args, **kwargs)
    except ProcessExecutionError as exn:
        # On failure the exception text becomes the err channel.
        return '', str(exn)
    if discard_warnings and err:
        # The command succeeded; treat stderr output as warnings and drop it.
        err = ''
    return out, err
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    """Run *cmd* over an existing SSH connection and return (stdout, stderr).

    :param ssh:             connected SSH client exposing exec_command()
    :param cmd:             command string to run remotely
    :param process_input:   unsupported; raises InvalidArgumentError if set
    :param addl_env:        unsupported; raises InvalidArgumentError if set
    :param check_exit_code: raise ProcessExecutionError on non-zero exit
    :raises: :class:`InvalidArgumentError`, :class:`ProcessExecutionError`
    """
    LOG.debug(_('Running cmd (SSH): %s'), cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))
    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))
    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel
    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()
    exit_status = channel.recv_exit_status()
    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug(_('Result was %s') % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)
    return (stdout, stderr)
| apache-2.0 |
shakamunyi/ansible | lib/ansible/runner/lookup_plugins/redis_kv.py | 176 | 2469 | # (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible import utils, errors
import os
# Optional dependency probe: record whether redis-py is importable so
# LookupModule.__init__ can raise a clear error instead of an ImportError
# at plugin load time.
HAVE_REDIS=False
try:
    import redis # https://github.com/andymccurdy/redis-py/
    HAVE_REDIS=True
except ImportError:
    pass
import re
# ==============================================================
# REDISGET: Obtain value from a GET on a Redis key. Terms
# expected: 0 = URL, 1 = Key
# URL may be empty, in which case redis://localhost:6379 assumed
# --------------------------------------------------------------
class LookupModule(object):
    """Lookup plugin returning the value of a GET on a Redis key.

    Each term has the form "<url>,<key>"; an empty URL defaults to
    redis://localhost:6379.  Connection failures and missing keys yield
    an empty string rather than an error.
    """

    def __init__(self, basedir=None, **kwargs):
        self.basedir = basedir
        if not HAVE_REDIS:
            raise errors.AnsibleError("Can't LOOKUP(redis_kv): module redis is not installed")

    def run(self, terms, inject=None, **kwargs):
        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
        ret = []
        for term in terms:
            (url, key) = term.split(',')
            if url == "":
                url = 'redis://localhost:6379'
            # urlsplit on Python 2.6.1 is broken. Hmm. Probably also the reason
            # Redis' from_url() doesn't work here.
            p = '(?P<scheme>[^:]+)://?(?P<host>[^:/ ]+).?(?P<port>[0-9]*).*'
            try:
                m = re.search(p, url)
                host = m.group('host')
                # Fall back to the default Redis port when the URL omits it;
                # int('') would otherwise raise an unhandled ValueError.
                port = int(m.group('port') or 6379)
            except AttributeError:
                raise errors.AnsibleError("Bad URI in redis lookup")
            try:
                conn = redis.Redis(host=host, port=port)
                res = conn.get(key)
                if res is None:
                    res = ""
                ret.append(res)
            except:
                ret.append("")  # connection failed or key not found
        return ret
| gpl-3.0 |
capoe/espressopp.soap | src/integrator/Settle.py | 2 | 2534 | # Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
**********************************
**espressopp.integrator.Settle**
**********************************
.. function:: espressopp.integrator.Settle(system, fixedtuplelist, mO, mH, distHH, distOH)
:param system:
:param fixedtuplelist:
:param mO: (default: 16.0)
:param mH: (default: 1.0)
:param distHH: (default: 1.58)
:param distOH: (default: 1.0)
:type system:
:type fixedtuplelist:
:type mO: real
:type mH: real
:type distHH: real
:type distOH: real
.. function:: espressopp.integrator.Settle.addMolecules(moleculelist)
:param moleculelist:
:type moleculelist:
:rtype:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_Settle
class SettleLocal(ExtensionLocal, integrator_Settle):
    """Local (per-worker) wrapper around the C++ SETTLE integrator extension."""
    def __init__(self, system, fixedtuplelist, mO=16.0, mH=1.0, distHH=1.58, distOH=1.0):
        # Only construct the underlying C++ object on ranks that belong to
        # the active PMI CPU group (or when no PMI communicator is active).
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, integrator_Settle, system, fixedtuplelist, mO, mH, distHH, distOH)
    def addMolecules(self, moleculelist):
        """
        Each processor takes the broadcasted list.
        """
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # Register each particle id with the C++ extension.
            for pid in moleculelist:
                self.cxxclass.add(self, pid)
if pmi.isController:
    # On the controller rank, expose a PMI proxy class that forwards calls
    # (addMolecules) to the SettleLocal instances living on the workers.
    class Settle(Extension):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.integrator.SettleLocal',
            pmicall = [ "addMolecules" ]
            )
| gpl-3.0 |
EUDAT-B2SHARE/invenio-old | modules/bibformat/lib/elements/bfe_pagination.py | 39 | 1082 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints pagination
"""
__revision__ = "$Id$"
def format_element(bfo):
    """
    Prints the record pagination
    @see: date.py, publisher.py, reprints.py, imprint.py, place.py
    """
    # The pagination is stored in MARC field 300__a.
    return bfo.field('300__a')
| gpl-2.0 |
kou/zulip | zerver/lib/url_preview/oembed.py | 7 | 1586 | import json
from typing import Any, Dict, Optional
from pyoembed import PyOembedException, oEmbed
def get_oembed_data(url: str,
                    maxwidth: int=640,
                    maxheight: int=480) -> Optional[Dict[str, Any]]:
    """Fetch and normalize oEmbed metadata for a URL.

    Returns None when the provider request fails or yields invalid JSON;
    otherwise a dict suitable for building a link preview.
    """
    try:
        data = oEmbed(url, maxwidth=maxwidth, maxheight=maxheight)
    except (PyOembedException, json.decoder.JSONDecodeError):
        # Provider unreachable, unsupported URL, or malformed payload.
        return None

    resource_type = data.get('type', '')
    photo_url = data.get('url', data.get('image'))
    thumbnail_url = data.get('thumbnail_url')
    embed_html = data.pop('html', '')
    title = data.get('title')
    description = data.get('description')

    if resource_type == 'photo' and photo_url:
        return dict(
            oembed=True,
            image=photo_url,
            type=resource_type,
            title=title,
            description=description,
        )

    if resource_type == 'video' and embed_html and thumbnail_url:
        return dict(
            oembed=True,
            image=thumbnail_url,
            type=resource_type,
            html=strip_cdata(embed_html),
            title=title,
            description=description,
        )

    # Otherwise, start with just the embed type.
    return dict(
        type=resource_type,
        title=title,
        description=description,
    )
def strip_cdata(html: str) -> str:
    """Unwrap a CDATA-wrapped embed snippet, if present.

    Works around a bug in SoundCloud's XML generation:
    <html><![CDATA[<iframe ...></iframe>]]></html>
    """
    prefix, suffix = '<![CDATA[', ']]>'
    if not (html.startswith(prefix) and html.endswith(suffix)):
        return html
    return html[len(prefix):-len(suffix)]
| apache-2.0 |
heytcass/homeassistant-config | deps/cherrypy/_cpreqbody.py | 1 | 37427 | """Request body processing for CherryPy.
.. versionadded:: 3.2
Application authors have complete control over the parsing of HTTP request
entities. In short,
:attr:`cherrypy.request.body<cherrypy._cprequest.Request.body>`
is now always set to an instance of
:class:`RequestBody<cherrypy._cpreqbody.RequestBody>`,
and *that* class is a subclass of :class:`Entity<cherrypy._cpreqbody.Entity>`.
When an HTTP request includes an entity body, it is often desirable to
provide that information to applications in a form other than the raw bytes.
Different content types demand different approaches. Examples:
* For a GIF file, we want the raw bytes in a stream.
* An HTML form is better parsed into its component fields, and each text field
decoded from bytes to unicode.
* A JSON body should be deserialized into a Python dict or list.
When the request contains a Content-Type header, the media type is used as a
key to look up a value in the
:attr:`request.body.processors<cherrypy._cpreqbody.Entity.processors>` dict.
If the full media
type is not found, then the major type is tried; for example, if no processor
is found for the 'image/jpeg' type, then we look for a processor for the
'image' types altogether. If neither the full type nor the major type has a
matching processor, then a default processor is used
(:func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>`). For most
types, this means no processing is done, and the body is left unread as a
raw byte stream. Processors are configurable in an 'on_start_resource' hook.
Some processors, especially those for the 'text' types, attempt to decode bytes
to unicode. If the Content-Type request header includes a 'charset' parameter,
this is used to decode the entity. Otherwise, one or more default charsets may
be attempted, although this decision is up to each processor. If a processor
successfully decodes an Entity or Part, it should set the
:attr:`charset<cherrypy._cpreqbody.Entity.charset>` attribute
on the Entity or Part to the name of the successful charset, so that
applications can easily re-encode or transcode the value if they wish.
If the Content-Type of the request entity is of major type 'multipart', then
the above parsing process, and possibly a decoding process, is performed for
each part.
For both the full entity and multipart parts, a Content-Disposition header may
be used to fill :attr:`name<cherrypy._cpreqbody.Entity.name>` and
:attr:`filename<cherrypy._cpreqbody.Entity.filename>` attributes on the
request.body or the Part.
.. _custombodyprocessors:
Custom Processors
=================
You can add your own processors for any specific or major MIME type. Simply add
it to the :attr:`processors<cherrypy._cprequest.Entity.processors>` dict in a
hook/tool that runs at ``on_start_resource`` or ``before_request_body``.
Here's the built-in JSON tool for an example::
def json_in(force=True, debug=False):
request = cherrypy.serving.request
def json_processor(entity):
\"""Read application/json data into request.json.\"""
if not entity.headers.get("Content-Length", ""):
raise cherrypy.HTTPError(411)
body = entity.fp.read()
try:
request.json = json_decode(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
if force:
request.body.processors.clear()
request.body.default_proc = cherrypy.HTTPError(
415, 'Expected an application/json content type')
request.body.processors['application/json'] = json_processor
We begin by defining a new ``json_processor`` function to stick in the
``processors`` dictionary. All processor functions take a single argument,
the ``Entity`` instance they are to process. It will be called whenever a
request is received (for those URI's where the tool is turned on) which
has a ``Content-Type`` of "application/json".
First, it checks for a valid ``Content-Length`` (raising 411 if not valid),
then reads the remaining bytes on the socket. The ``fp`` object knows its
own length, so it won't hang waiting for data that never arrives. It will
return when all data has been read. Then, we decode those bytes using
Python's built-in ``json`` module, and stick the decoded result onto
``request.json`` . If it cannot be decoded, we raise 400.
If the "force" argument is True (the default), the ``Tool`` clears the
``processors`` dict so that request entities of other ``Content-Types``
aren't parsed at all. Since there's no entry for those invalid MIME
types, the ``default_proc`` method of ``cherrypy.request.body`` is
called. But this does nothing by default (usually to provide the page
handler an opportunity to handle it.)
But in our case, we want to raise 415, so we replace
``request.body.default_proc``
with the error (``HTTPError`` instances, when called, raise themselves).
If we were defining a custom processor, we can do so without making a ``Tool``.
Just add the config entry::
request.body.processors = {'application/json': json_processor}
Note that you can only replace the ``processors`` dict wholesale this way,
not update the existing one.
"""
try:
from io import DEFAULT_BUFFER_SIZE
except ImportError:
DEFAULT_BUFFER_SIZE = 8192
import re
import sys
import tempfile
# Python 2 provides a bytes-capable urllib.unquote_plus; on Python 3 fall
# back to a local bytes-oriented implementation.
try:
    from urllib import unquote_plus
except ImportError:
    def unquote_plus(bs):
        """Bytes version of urllib.parse.unquote_plus."""
        # '+' encodes a space in form-urlencoded data.
        bs = bs.replace(ntob('+'), ntob(' '))
        atoms = bs.split(ntob('%'))
        for i in range(1, len(atoms)):
            item = atoms[i]
            try:
                # Each atom after a '%' starts with two hex digits.
                pct = int(item[:2], 16)
                atoms[i] = bytes([pct]) + item[2:]
            except ValueError:
                # Not a valid percent-escape; leave the atom untouched.
                pass
        return ntob('').join(atoms)
import cherrypy
from cherrypy._cpcompat import text_or_bytes, ntob, ntou
from cherrypy.lib import httputil
# ------------------------------- Processors -------------------------------- #
def process_urlencoded(entity):
    """Read application/x-www-form-urlencoded data into entity.params."""
    qs = entity.fp.read()
    # Try each candidate charset in order; the first one that decodes every
    # key and value wins and is recorded on the entity.
    for charset in entity.attempt_charsets:
        try:
            params = {}
            for aparam in qs.split(ntob('&')):
                for pair in aparam.split(ntob(';')):
                    if not pair:
                        continue
                    atoms = pair.split(ntob('='), 1)
                    if len(atoms) == 1:
                        # A bare key (no '=') gets an empty value.
                        atoms.append(ntob(''))
                    key = unquote_plus(atoms[0]).decode(charset)
                    value = unquote_plus(atoms[1]).decode(charset)
                    if key in params:
                        # Repeated keys accumulate into a list.
                        if not isinstance(params[key], list):
                            params[key] = [params[key]]
                        params[key].append(value)
                    else:
                        params[key] = value
        except UnicodeDecodeError:
            pass
        else:
            entity.charset = charset
            break
    else:
        # for/else: no charset succeeded in decoding the body.
        raise cherrypy.HTTPError(
            400, "The request entity could not be decoded. The following "
            "charsets were attempted: %s" % repr(entity.attempt_charsets))
    # Now that all values have been successfully parsed and decoded,
    # apply them to the entity.params dict.
    for key, value in params.items():
        if key in entity.params:
            if not isinstance(entity.params[key], list):
                entity.params[key] = [entity.params[key]]
            entity.params[key].append(value)
        else:
            entity.params[key] = value
def process_multipart(entity):
    """Read all multipart parts into entity.parts."""
    ib = ""
    if 'boundary' in entity.content_type.params:
        # http://tools.ietf.org/html/rfc2046#section-5.1.1
        # "The grammar for parameters on the Content-type field is such that it
        # is often necessary to enclose the boundary parameter values in quotes
        # on the Content-type line"
        ib = entity.content_type.params['boundary'].strip('"')
    # RFC 2046: a boundary is 1-70 printable ASCII chars, not ending in space.
    if not re.match("^[ -~]{0,200}[!-~]$", ib):
        raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
    ib = ('--' + ib).encode('ascii')
    # Find the first marker
    while True:
        b = entity.readline()
        if not b:
            return
        b = b.strip()
        if b == ib:
            break
    # Read all parts
    while True:
        part = entity.part_class.from_fp(entity.fp, ib)
        entity.parts.append(part)
        part.process()
        # The part's fp reports 'done' once the closing boundary is seen.
        if part.fp.done:
            break
def process_multipart_form_data(entity):
    """Read all multipart/form-data parts into entity.parts or entity.params.
    """
    process_multipart(entity)
    kept_parts = []
    for part in entity.parts:
        if part.name is None:
            # Unnamed parts stay in entity.parts.
            kept_parts.append(part)
        else:
            if part.filename is None:
                # It's a regular field
                value = part.fullvalue()
            else:
                # It's a file upload. Retain the whole part so consumer code
                # has access to its .file and .filename attributes.
                value = part
            # Repeated field names accumulate into a list.
            if part.name in entity.params:
                if not isinstance(entity.params[part.name], list):
                    entity.params[part.name] = [entity.params[part.name]]
                entity.params[part.name].append(value)
            else:
                entity.params[part.name] = value
    entity.parts = kept_parts
def _old_process_multipart(entity):
    """The behavior of 3.2 and lower. Deprecated and will be changed in 3.3."""
    process_multipart(entity)
    params = entity.params
    for part in entity.parts:
        if part.name is None:
            # Unnamed parts are collected under a generic 'parts' key.
            key = ntou('parts')
        else:
            key = part.name
        if part.filename is None:
            # It's a regular field
            value = part.fullvalue()
        else:
            # It's a file upload. Retain the whole part so consumer code
            # has access to its .file and .filename attributes.
            value = part
        # Repeated keys accumulate into a list.
        if key in params:
            if not isinstance(params[key], list):
                params[key] = [params[key]]
            params[key].append(value)
        else:
            params[key] = value
# -------------------------------- Entities --------------------------------- #
class Entity(object):
"""An HTTP request body, or MIME multipart body.
This class collects information about the HTTP request entity. When a
given entity is of MIME type "multipart", each part is parsed into its own
Entity instance, and the set of parts stored in
:attr:`entity.parts<cherrypy._cpreqbody.Entity.parts>`.
Between the ``before_request_body`` and ``before_handler`` tools, CherryPy
tries to process the request body (if any) by calling
:func:`request.body.process<cherrypy._cpreqbody.RequestBody.process>`.
This uses the ``content_type`` of the Entity to look up a suitable
processor in
:attr:`Entity.processors<cherrypy._cpreqbody.Entity.processors>`,
a dict.
If a matching processor cannot be found for the complete Content-Type,
it tries again using the major type. For example, if a request with an
entity of type "image/jpeg" arrives, but no processor can be found for
that complete type, then one is sought for the major type "image". If a
processor is still not found, then the
:func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>` method
of the Entity is called (which does nothing by default; you can
override this too).
CherryPy includes processors for the "application/x-www-form-urlencoded"
type, the "multipart/form-data" type, and the "multipart" major type.
CherryPy 3.2 processes these types almost exactly as older versions.
Parts are passed as arguments to the page handler using their
``Content-Disposition.name`` if given, otherwise in a generic "parts"
argument. Each such part is either a string, or the
:class:`Part<cherrypy._cpreqbody.Part>` itself if it's a file. (In this
case it will have ``file`` and ``filename`` attributes, or possibly a
``value`` attribute). Each Part is itself a subclass of
Entity, and has its own ``process`` method and ``processors`` dict.
There is a separate processor for the "multipart" major type which is more
flexible, and simply stores all multipart parts in
:attr:`request.body.parts<cherrypy._cpreqbody.Entity.parts>`. You can
enable it with::
cherrypy.request.body.processors['multipart'] = _cpreqbody.process_multipart
in an ``on_start_resource`` tool.
"""
# http://tools.ietf.org/html/rfc2046#section-4.1.2:
# "The default character set, which must be assumed in the
# absence of a charset parameter, is US-ASCII."
# However, many browsers send data in utf-8 with no charset.
attempt_charsets = ['utf-8']
"""A list of strings, each of which should be a known encoding.
When the Content-Type of the request body warrants it, each of the given
encodings will be tried in order. The first one to successfully decode the
entity without raising an error is stored as
:attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults
to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by
`HTTP/1.1 <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_),
but ``['us-ascii', 'utf-8']`` for multipart parts.
"""
charset = None
"""The successful decoding; see "attempt_charsets" above."""
content_type = None
"""The value of the Content-Type request header.
If the Entity is part of a multipart payload, this will be the Content-Type
given in the MIME headers for this part.
"""
default_content_type = 'application/x-www-form-urlencoded'
"""This defines a default ``Content-Type`` to use if no Content-Type header
is given. The empty string is used for RequestBody, which results in the
request body not being read or parsed at all. This is by design; a missing
``Content-Type`` header in the HTTP request entity is an error at best,
and a security hole at worst. For multipart parts, however, the MIME spec
declares that a part with no Content-Type defaults to "text/plain"
(see :class:`Part<cherrypy._cpreqbody.Part>`).
"""
filename = None
"""The ``Content-Disposition.filename`` header, if available."""
fp = None
"""The readable socket file object."""
headers = None
"""A dict of request/multipart header names and values.
This is a copy of the ``request.headers`` for the ``request.body``;
for multipart parts, it is the set of headers for that part.
"""
length = None
"""The value of the ``Content-Length`` header, if provided."""
name = None
"""The "name" parameter of the ``Content-Disposition`` header, if any."""
params = None
"""
If the request Content-Type is 'application/x-www-form-urlencoded' or
multipart, this will be a dict of the params pulled from the entity
body; that is, it will be the portion of request.params that come
from the message body (sometimes called "POST params", although they
can be sent with various HTTP method verbs). This value is set between
the 'before_request_body' and 'before_handler' hooks (assuming that
process_request_body is True)."""
processors = {'application/x-www-form-urlencoded': process_urlencoded,
'multipart/form-data': process_multipart_form_data,
'multipart': process_multipart,
}
"""A dict of Content-Type names to processor methods."""
parts = None
"""A list of Part instances if ``Content-Type`` is of major type
"multipart"."""
part_class = None
"""The class used for multipart parts.
You can replace this with custom subclasses to alter the processing of
multipart parts.
"""
def __init__(self, fp, headers, params=None, parts=None):
# Make an instance-specific copy of the class processors
# so Tools, etc. can replace them per-request.
self.processors = self.processors.copy()
self.fp = fp
self.headers = headers
if params is None:
params = {}
self.params = params
if parts is None:
parts = []
self.parts = parts
# Content-Type
self.content_type = headers.elements('Content-Type')
if self.content_type:
self.content_type = self.content_type[0]
else:
self.content_type = httputil.HeaderElement.from_str(
self.default_content_type)
# Copy the class 'attempt_charsets', prepending any Content-Type
# charset
dec = self.content_type.params.get("charset", None)
if dec:
self.attempt_charsets = [dec] + [c for c in self.attempt_charsets
if c != dec]
else:
self.attempt_charsets = self.attempt_charsets[:]
# Length
self.length = None
clen = headers.get('Content-Length', None)
# If Transfer-Encoding is 'chunked', ignore any Content-Length.
if (
clen is not None and
'chunked' not in headers.get('Transfer-Encoding', '')
):
try:
self.length = int(clen)
except ValueError:
pass
# Content-Disposition
self.name = None
self.filename = None
disp = headers.elements('Content-Disposition')
if disp:
disp = disp[0]
if 'name' in disp.params:
self.name = disp.params['name']
if self.name.startswith('"') and self.name.endswith('"'):
self.name = self.name[1:-1]
if 'filename' in disp.params:
self.filename = disp.params['filename']
if (
self.filename.startswith('"') and
self.filename.endswith('"')
):
self.filename = self.filename[1:-1]
# The 'type' attribute is deprecated in 3.2; remove it in 3.3.
type = property(
lambda self: self.content_type,
doc="A deprecated alias for "
":attr:`content_type<cherrypy._cpreqbody.Entity.content_type>`."
)
def read(self, size=None, fp_out=None):
return self.fp.read(size, fp_out)
def readline(self, size=None):
return self.fp.readline(size)
def readlines(self, sizehint=None):
return self.fp.readlines(sizehint)
def __iter__(self):
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def next(self):
return self.__next__()
def read_into_file(self, fp_out=None):
"""Read the request body into fp_out (or make_file() if None).
Return fp_out.
"""
if fp_out is None:
fp_out = self.make_file()
self.read(fp_out=fp_out)
return fp_out
def make_file(self):
"""Return a file-like object into which the request body will be read.
By default, this will return a TemporaryFile. Override as needed.
See also :attr:`cherrypy._cpreqbody.Part.maxrambytes`."""
return tempfile.TemporaryFile()
def fullvalue(self):
"""Return this entity as a string, whether stored in a file or not."""
if self.file:
# It was stored in a tempfile. Read it.
self.file.seek(0)
value = self.file.read()
self.file.seek(0)
else:
value = self.value
value = self.decode_entity(value)
return value
def decode_entity(self , value):
"""Return a given byte encoded value as a string"""
for charset in self.attempt_charsets:
try:
value = value.decode(charset)
except UnicodeDecodeError:
pass
else:
self.charset = charset
return value
else:
raise cherrypy.HTTPError(
400,
"The request entity could not be decoded. The following "
"charsets were attempted: %s" % repr(self.attempt_charsets)
)
def process(self):
"""Execute the best-match processor for the given media type."""
proc = None
ct = self.content_type.value
try:
proc = self.processors[ct]
except KeyError:
toptype = ct.split('/', 1)[0]
try:
proc = self.processors[toptype]
except KeyError:
pass
if proc is None:
self.default_proc()
else:
proc(self)
    def default_proc(self):
        """Called if a more-specific processor is not found for the
        ``Content-Type``.
        """
        # Leave the fp alone for someone else to read. This works fine
        # for request.body, but the Part subclasses need to override this
        # so they can move on to the next part.
        pass
class Part(Entity):
    """A MIME part entity, part of a multipart entity."""

    # "The default character set, which must be assumed in the absence of a
    # charset parameter, is US-ASCII."
    attempt_charsets = ['us-ascii', 'utf-8']
    """A list of strings, each of which should be a known encoding.

    When the Content-Type of the request body warrants it, each of the given
    encodings will be tried in order. The first one to successfully decode the
    entity without raising an error is stored as
    :attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults
    to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by
    `HTTP/1.1 <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_),
    but ``['us-ascii', 'utf-8']`` for multipart parts.
    """

    boundary = None
    """The MIME multipart boundary."""

    default_content_type = 'text/plain'
    """This defines a default ``Content-Type`` to use if no Content-Type header
    is given. The empty string is used for RequestBody, which results in the
    request body not being read or parsed at all. This is by design; a missing
    ``Content-Type`` header in the HTTP request entity is an error at best,
    and a security hole at worst. For multipart parts, however (this class),
    the MIME spec declares that a part with no Content-Type defaults to
    "text/plain".
    """

    # This is the default in stdlib cgi. We may want to increase it.
    maxrambytes = 1000
    """The threshold of bytes after which point the ``Part`` will store
    its data in a file (generated by
    :func:`make_file<cherrypy._cprequest.Entity.make_file>`)
    instead of a string. Defaults to 1000, just like the :mod:`cgi`
    module in Python's standard library.
    """

    def __init__(self, fp, headers, boundary):
        # A Part shares the Entity machinery but additionally knows the
        # multipart boundary delimiting it from sibling parts.
        Entity.__init__(self, fp, headers)
        self.boundary = boundary
        # Exactly one of .file / .value is populated once the part is read.
        self.file = None
        self.value = None

    @classmethod
    def from_fp(cls, fp, boundary):
        # Alternate constructor: parse the part's own MIME headers from the
        # stream, then build the Part positioned at the start of its body.
        headers = cls.read_headers(fp)
        return cls(fp, headers, boundary)

    @classmethod
    def read_headers(cls, fp):
        # Parse RFC 2045-style part headers up to the blank CRLF line.
        headers = httputil.HeaderMap()
        while True:
            line = fp.readline()
            if not line:
                # No more data--illegal end of headers
                raise EOFError("Illegal end of headers.")
            if line == ntob('\r\n'):
                # Normal end of headers
                break
            if not line.endswith(ntob('\r\n')):
                raise ValueError("MIME requires CRLF terminators: %r" % line)
            if line[0] in ntob(' \t'):
                # It's a continuation line.
                v = line.strip().decode('ISO-8859-1')
            else:
                k, v = line.split(ntob(":"), 1)
                k = k.strip().decode('ISO-8859-1')
                v = v.strip().decode('ISO-8859-1')
            # Repeated headers are folded into one comma-separated value.
            existing = headers.get(k)
            if existing:
                v = ", ".join((existing, v))
            headers[k] = v
        return headers

    def read_lines_to_boundary(self, fp_out=None):
        """Read bytes from self.fp and return or write them to a file.

        If the 'fp_out' argument is None (the default), all bytes read are
        returned in a single byte string.

        If the 'fp_out' argument is not None, it must be a file-like
        object that supports the 'write' method; all bytes read will be
        written to the fp, and that fp is returned.
        """
        endmarker = self.boundary + ntob("--")
        delim = ntob("")
        prev_lf = True
        lines = []
        seen = 0
        while True:
            line = self.fp.readline(1 << 16)
            if not line:
                raise EOFError("Illegal end of multipart body.")
            # A boundary only counts when it starts at the beginning of a
            # line, i.e. the previous chunk ended with a line feed.
            if line.startswith(ntob("--")) and prev_lf:
                strippedline = line.strip()
                if strippedline == self.boundary:
                    break
                if strippedline == endmarker:
                    self.fp.finish()
                    break
            # The CRLF/LF immediately before a boundary belongs to the
            # boundary, not to the part's data.  So each line's trailing
            # newline is held back in 'delim' and only re-attached to the
            # front of the next line if more data follows.
            line = delim + line
            if line.endswith(ntob("\r\n")):
                delim = ntob("\r\n")
                line = line[:-2]
                prev_lf = True
            elif line.endswith(ntob("\n")):
                delim = ntob("\n")
                line = line[:-1]
                prev_lf = True
            else:
                delim = ntob("")
                prev_lf = False
            if fp_out is None:
                lines.append(line)
                seen += len(line)
                # Spill the accumulated data to a temp file once it grows
                # past the in-memory threshold.
                if seen > self.maxrambytes:
                    fp_out = self.make_file()
                    for line in lines:
                        fp_out.write(line)
            else:
                fp_out.write(line)
        if fp_out is None:
            result = ntob('').join(lines)
            return result
        else:
            fp_out.seek(0)
            return fp_out

    def default_proc(self):
        """Called if a more-specific processor is not found for the
        ``Content-Type``.
        """
        # Unlike Entity.default_proc, a Part must consume its bytes so the
        # reader can advance to the next part.
        if self.filename:
            # Always read into a file if a .filename was given.
            self.file = self.read_into_file()
        else:
            result = self.read_lines_to_boundary()
            if isinstance(result, text_or_bytes):
                self.value = result
            else:
                self.file = result

    def read_into_file(self, fp_out=None):
        """Read the request body into fp_out (or make_file() if None).
        Return fp_out.
        """
        if fp_out is None:
            fp_out = self.make_file()
        self.read_lines_to_boundary(fp_out=fp_out)
        return fp_out
# Wire the concrete multipart part class onto Entity after Part is defined
# (avoids a forward reference inside the Entity class body).
Entity.part_class = Part
try:
    inf = float('inf')
except ValueError:
    # Python 2.4 and lower: float('inf') can raise ValueError on some
    # platforms, so fall back to an object that compares greater than
    # everything and absorbs subtraction (the two operations used on 'inf'
    # in this module).
    class Infinity(object):
        def __cmp__(self, other):
            return 1
        def __sub__(self, other):
            return self
    inf = Infinity()
# HTTP headers whose repeated field values may legally be merged into a
# single comma-separated value (cf. RFC 2616 section 4.2).  Used when
# folding trailer headers in SizedReader.finish.
comma_separated_headers = [
    'Accept', 'Accept-Charset', 'Accept-Encoding',
    'Accept-Language', 'Accept-Ranges', 'Allow',
    'Cache-Control', 'Connection', 'Content-Encoding',
    'Content-Language', 'Expect', 'If-Match',
    'If-None-Match', 'Pragma', 'Proxy-Authenticate',
    'Te', 'Trailer', 'Transfer-Encoding', 'Upgrade',
    'Vary', 'Via', 'Warning', 'Www-Authenticate'
]
class SizedReader:
    """A file-like reader that enforces Content-Length and max-size limits.

    Wraps a socket file object; tracks bytes consumed in ``bytes_read``,
    raises a 413 error when ``maxbytes`` is exceeded, and (optionally)
    collects trailer headers when the stream supports them.
    """

    def __init__(self, fp, length, maxbytes, bufsize=DEFAULT_BUFFER_SIZE,
                 has_trailers=False):
        # Wrap our fp in a buffer so peek() works
        self.fp = fp
        self.length = length          # Content-Length, or None if unknown
        self.maxbytes = maxbytes      # hard cap; 413 raised when exceeded
        self.buffer = ntob('')        # bytes pushed back by readline()
        self.bufsize = bufsize
        self.bytes_read = 0
        self.done = False
        self.has_trailers = has_trailers

    def read(self, size=None, fp_out=None):
        """Read bytes from the request body and return or write them to a file.

        A number of bytes less than or equal to the 'size' argument are read
        off the socket. The actual number of bytes read are tracked in
        self.bytes_read. The number may be smaller than 'size' when 1) the
        client sends fewer bytes, 2) the 'Content-Length' request header
        specifies fewer bytes than requested, or 3) the number of bytes read
        exceeds self.maxbytes (in which case, 413 is raised).

        If the 'fp_out' argument is None (the default), all bytes read are
        returned in a single byte string.

        If the 'fp_out' argument is not None, it must be a file-like
        object that supports the 'write' method; all bytes read will be
        written to the fp, and None is returned.
        """
        if self.length is None:
            if size is None:
                remaining = inf
            else:
                remaining = size
        else:
            remaining = self.length - self.bytes_read
            if size and size < remaining:
                remaining = size
        if remaining == 0:
            self.finish()
            if fp_out is None:
                return ntob('')
            else:
                return None
        chunks = []
        # Read bytes from the buffer.
        if self.buffer:
            if remaining is inf:
                data = self.buffer
                self.buffer = ntob('')
            else:
                data = self.buffer[:remaining]
                self.buffer = self.buffer[remaining:]
            datalen = len(data)
            remaining -= datalen
            # Check lengths.
            self.bytes_read += datalen
            if self.maxbytes and self.bytes_read > self.maxbytes:
                raise cherrypy.HTTPError(413)
            # Store the data.
            if fp_out is None:
                chunks.append(data)
            else:
                fp_out.write(data)
        # Read bytes from the socket.
        while remaining > 0:
            chunksize = min(remaining, self.bufsize)
            try:
                data = self.fp.read(chunksize)
            except Exception:
                e = sys.exc_info()[1]
                if e.__class__.__name__ == 'MaxSizeExceeded':
                    # Post data is too big
                    raise cherrypy.HTTPError(
                        413, "Maximum request length: %r" % e.args[1])
                else:
                    raise
            if not data:
                self.finish()
                break
            datalen = len(data)
            remaining -= datalen
            # Check lengths.
            self.bytes_read += datalen
            if self.maxbytes and self.bytes_read > self.maxbytes:
                raise cherrypy.HTTPError(413)
            # Store the data.
            if fp_out is None:
                chunks.append(data)
            else:
                fp_out.write(data)
        if fp_out is None:
            return ntob('').join(chunks)

    def readline(self, size=None):
        """Read a line from the request body and return it."""
        chunks = []
        while size is None or size > 0:
            chunksize = self.bufsize
            if size is not None and size < self.bufsize:
                chunksize = size
            data = self.read(chunksize)
            if not data:
                break
            pos = data.find(ntob('\n')) + 1
            if pos:
                # Found a newline: keep up to (and including) it, and push
                # the remainder back onto the buffer, undoing its count.
                chunks.append(data[:pos])
                remainder = data[pos:]
                self.buffer += remainder
                self.bytes_read -= len(remainder)
                break
            else:
                chunks.append(data)
        return ntob('').join(chunks)

    def readlines(self, sizehint=None):
        """Read lines from the request body and return them."""
        if self.length is not None:
            if sizehint is None:
                sizehint = self.length - self.bytes_read
            else:
                sizehint = min(sizehint, self.length - self.bytes_read)
        lines = []
        seen = 0
        while True:
            line = self.readline()
            if not line:
                break
            lines.append(line)
            seen += len(line)
            # NOTE(review): when self.length and sizehint are both None this
            # compares an int with None (always True on Python 2, TypeError
            # on Python 3) -- confirm intended semantics before changing.
            if seen >= sizehint:
                break
        return lines

    def finish(self):
        """Mark the body as fully read and collect trailer headers, if any."""
        self.done = True
        if self.has_trailers and hasattr(self.fp, 'read_trailer_lines'):
            self.trailers = {}
            try:
                for line in self.fp.read_trailer_lines():
                    if line[0] in ntob(' \t'):
                        # It's a continuation line.
                        v = line.strip()
                    else:
                        try:
                            k, v = line.split(ntob(":"), 1)
                        except ValueError:
                            raise ValueError("Illegal header line.")
                        k = k.strip().title()
                        v = v.strip()
                    if k in comma_separated_headers:
                        # BUG FIX: this previously read the undefined name
                        # 'envname' (NameError whenever a comma-separated
                        # header appeared in the trailers); the intended
                        # lookup key is the header name 'k'.
                        existing = self.trailers.get(k)
                        if existing:
                            v = ntob(", ").join((existing, v))
                    self.trailers[k] = v
            except Exception:
                e = sys.exc_info()[1]
                if e.__class__.__name__ == 'MaxSizeExceeded':
                    # Post data is too big
                    raise cherrypy.HTTPError(
                        413, "Maximum request length: %r" % e.args[1])
                else:
                    raise
class RequestBody(Entity):
    """The entity of the HTTP request."""

    bufsize = 8 * 1024
    """The buffer size used when reading the socket."""

    # Don't parse the request body at all if the client didn't provide
    # a Content-Type header. See
    # https://github.com/cherrypy/cherrypy/issues/790
    default_content_type = ''
    """This defines a default ``Content-Type`` to use if no Content-Type header
    is given. The empty string is used for RequestBody, which results in the
    request body not being read or parsed at all. This is by design; a missing
    ``Content-Type`` header in the HTTP request entity is an error at best,
    and a security hole at worst. For multipart parts, however, the MIME spec
    declares that a part with no Content-Type defaults to "text/plain"
    (see :class:`Part<cherrypy._cpreqbody.Part>`).
    """

    maxbytes = None
    """Raise ``MaxSizeExceeded`` if more bytes than this are read from
    the socket.
    """

    def __init__(self, fp, headers, params=None, request_params=None):
        Entity.__init__(self, fp, headers, params)
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1
        # When no explicit charset parameter is provided by the
        # sender, media subtypes of the "text" type are defined
        # to have a default charset value of "ISO-8859-1" when
        # received via HTTP.
        if self.content_type.value.startswith('text/'):
            # Only append ISO-8859-1 if no spelling of Latin-1 is already
            # in the candidate list.
            for c in ('ISO-8859-1', 'iso-8859-1', 'Latin-1', 'latin-1'):
                if c in self.attempt_charsets:
                    break
            else:
                self.attempt_charsets.append('ISO-8859-1')
        # Temporary fix while deprecating passing .parts as .params.
        self.processors['multipart'] = _old_process_multipart
        if request_params is None:
            request_params = {}
        self.request_params = request_params

    def process(self):
        """Process the request entity based on its Content-Type."""
        # "The presence of a message-body in a request is signaled by the
        # inclusion of a Content-Length or Transfer-Encoding header field in
        # the request's message-headers."
        # It is possible to send a POST request with no body, for example;
        # however, app developers are responsible in that case to set
        # cherrypy.request.process_body to False so this method isn't called.
        h = cherrypy.serving.request.headers
        if 'Content-Length' not in h and 'Transfer-Encoding' not in h:
            raise cherrypy.HTTPError(411)
        # Replace the raw socket fp with a SizedReader that enforces the
        # declared length / configured maxbytes and collects trailers.
        self.fp = SizedReader(self.fp, self.length,
                              self.maxbytes, bufsize=self.bufsize,
                              has_trailers='Trailer' in h)
        super(RequestBody, self).process()
        # Body params should also be a part of the request_params
        # add them in here.
        request_params = self.request_params
        for key, value in self.params.items():
            # Python 2 only: keyword arguments must be byte strings (type
            # 'str').
            if sys.version_info < (3, 0):
                if isinstance(key, unicode):
                    key = key.encode('ISO-8859-1')
            # Repeated keys are collected into a list, mirroring the usual
            # query-string semantics.
            if key in request_params:
                if not isinstance(request_params[key], list):
                    request_params[key] = [request_params[key]]
                request_params[key].append(value)
            else:
                request_params[key] = value
| mit |
lykops/lykops | lykops/urls.py | 1 | 5817 | """lykops URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
# from django.contrib import admin
from library.connecter.database.mongo import Op_Mongo
from library.connecter.database.redis_api import Op_Redis
# from lykops import settings
from lykops.ansible.execute import Exec
from lykops.ansible.options import Options
from lykops.ansible.report import Report
from lykops.ansible.yaml import Yaml
from lykops.sysadmin.inventory import Inventory
# from lykops.sysadmin.privacy import Privacy
from lykops.sysadmin.user import User
from lykops.views import Login
# Shared database handles, created once at import time and reused by every
# view below.
mongoclient = Op_Mongo()
redisclient = Op_Redis()

# Instantiate each view class exactly once instead of once per URL pattern.
# The original code built a fresh -- and identical -- instance for every
# route (e.g. ten separate User(...) objects), wasting start-up time and
# connections.  Bound methods on a single shared instance behave the same
# way for Django's URL resolver.
_login = Login(mongoclient=mongoclient, redisclient=redisclient)
_user = User(mongoclient=mongoclient, redisclient=redisclient)
_inventory = Inventory(mongoclient=mongoclient, redisclient=redisclient)
_report = Report(mongoclient=mongoclient, redisclient=redisclient)
_yaml = Yaml(mongoclient=mongoclient, redisclient=redisclient)
_exec = Exec(mongoclient=mongoclient, redisclient=redisclient)
_options = Options(mongoclient=mongoclient, redisclient=redisclient)

urlpatterns = [
    # url(r'^admin/', admin.site.urls),
    url(r'^$', _user.summary, name='index'),
    url(r'^login.html', _login.login, name='login'),
    url(r'^logout.html', _login.logout, name='logout'),
    url(r'^user/create_admin', _login.create_admin, name='create_admin'),
    url(r'^user/detail', _user.detail),
    url(r'^user/list', _user.summary, name='user_list'),
    url(r'^user/add', _user.add, name='user_add'),
    url(r'^user/edit', _user.edit),
    url(r'^user/chgpwd', _user.change_pwd),
    url(r'^user/chgpvltwd', _user.change_vaultpwd),
    url(r'^user/del', _user.delete),
    url(r'^user/disable', _user.disable),
    url(r'^user/enable', _user.enable),
    url(r'^user/$', _user.summary),
    # url(r'^privacy/edit', Privacy(mongoclient=mongoclient, redisclient=redisclient).edit, name='privacy_edit'),
    # url(r'^privacy/detail', Privacy(mongoclient=mongoclient, redisclient=redisclient).detail, name='privacy_detail'),
    # url(r'^privacy/$', Privacy(mongoclient=mongoclient, redisclient=redisclient).detail),
    # This feature stores users' confidential data; it is not needed in this
    # release, so it is not exposed for now.
    url(r'^inventory/add$', _inventory.add, name='inventory_add'),
    url(r'^inventory/list$', _inventory.summary, name='inventory_list'),
    url(r'^inventory/$', _inventory.summary),
    url(r'^inventory/detail$', _inventory.detail, name='inventory_detail'),
    url(r'^inventory/edit$', _inventory.edit, name='inventory_edit'),
    url(r'^inventory/del$', _inventory.delete, name='inventory_del'),
    url(r'^ansible/$', _report.summary, name='ansible'),
    url(r'^ansible/report/$', _report.summary, name='ansible_report'),
    url(r'^ansible/report/list$', _report.summary, name='ansible_report_list'),
    url(r'^ansible/report/detail$', _report.detail),
    url(r'^ansible/yaml/add$', _yaml.add, name='ansible_yaml_add'),
    url(r'^ansible/yaml/import$', _yaml.import_file, name='ansible_yaml_import'),
    url(r'^ansible/yaml/list$', _yaml.summary, name='ansible_yaml_list'),
    url(r'^ansible/yaml/detail$', _yaml.detail, name='ansible_yaml_detail'),
    url(r'^ansible/yaml/edit$', _yaml.edit),
    url(r'^ansible/yaml/$', _yaml.summary, name='ansible_yaml'),
    url(r'^ansible/exec/adhoc$', _exec.adhoc, name='ansible_exec_adhoc'),
    url(r'^ansible/exec/playbook$', _exec.playbook, name='ansible_exec_playbook'),
    url(r'^ansible/option/$', _options.detail, name='ansible_option'),
    url(r'^ansible/option/edit$', _options.edit),
    url(r'^ansible/option/detail$', _options.detail),
    # url(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root':settings.STATICFILES_DIRS, 'show_indexes':False}),
    # url(r'^file/(?P<path>.*)$', 'django.views.static.serve', {'document_root':settings.MEDIA_ROOT, 'show_indexes':False}),
]
| apache-2.0 |
sgenoud/scikit-learn | sklearn/datasets/lfw.py | 6 | 16362 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
import logging
import numpy as np
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
logger = logging.getLogger(__name__)

# Official distribution point for the LFW archives and metadata files.
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"

# Pair-index files describing the train / test / 10-fold evaluation splits.
TARGET_FILENAMES = [
    'pairsDevTrain.txt',
    'pairsDevTest.txt',
    'pairs.txt',
]
def scale_face(face):
    """Map *face* back onto the [0, 1] range for plotting.

    Undoes any normalization by subtracting the array minimum and dividing
    by the resulting peak value.
    """
    shifted = face - face.min()
    return shifted / shifted.max()
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
    """Helper function to download any missing LFW data.

    Returns the tuple ``(lfw_home, data_folder_path)``.  Metadata files and
    the image archive are fetched from the official LFW site when absent and
    ``download_if_missing`` is True; otherwise an IOError is raised.
    """
    data_home = get_data_home(data_home=data_home)
    lfw_home = join(data_home, "lfw_home")

    if funneled:
        archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw_funneled")
        archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
    else:
        archive_path = join(lfw_home, ARCHIVE_NAME)
        data_folder_path = join(lfw_home, "lfw")
        archive_url = BASE_URL + ARCHIVE_NAME

    if not exists(lfw_home):
        makedirs(lfw_home)

    # Fetch the pair-index metadata files first.
    for target_filename in TARGET_FILENAMES:
        target_filepath = join(lfw_home, target_filename)
        if not exists(target_filepath):
            if download_if_missing:
                url = BASE_URL + target_filename
                logger.warn("Downloading LFW metadata: %s", url)
                urllib.urlretrieve(url, target_filepath)
            else:
                raise IOError("%s is missing" % target_filepath)

    if not exists(data_folder_path):
        if not exists(archive_path):
            if download_if_missing:
                logger.warn("Downloading LFW data (~200MB): %s", archive_url)
                urllib.urlretrieve(archive_url, archive_path)
            else:
                # BUG FIX: this error previously reported the stale loop
                # variable 'target_filepath' (the last metadata file) instead
                # of the archive that is actually missing.
                raise IOError("%s is missing" % archive_path)

        import tarfile
        logger.info("Decompressing the data archive to %s", data_folder_path)
        tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
        # Reclaim the disk space used by the compressed archive.
        remove(archive_path)

    return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
    """Internally used to load images"""

    # Try to import imread and imresize from PIL. We do this here to prevent
    # the whole sklearn.datasets module from depending on PIL.
    # NOTE(review): scipy.misc.imread/imresize were removed in SciPy 1.2;
    # this code targets older SciPy/Python 2 -- confirm the supported
    # versions before reuse.
    try:
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread
        from scipy.misc import imresize
    except ImportError:
        raise ImportError("The Python Imaging Library (PIL)"
                          "is required to load data from jpeg files")

    # compute the portion of the images to load to respect the slice_ parameter
    # given by the caller
    default_slice = (slice(0, 250), slice(0, 250))
    if slice_ is None:
        slice_ = default_slice
    else:
        slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))

    h_slice, w_slice = slice_
    # NOTE(review): '/' is integer division under Python 2 here; under
    # Python 3 these would become floats and break np.zeros below.
    h = (h_slice.stop - h_slice.start) / (h_slice.step or 1)
    w = (w_slice.stop - w_slice.start) / (w_slice.step or 1)

    if resize is not None:
        resize = float(resize)
        h = int(resize * h)
        w = int(resize * w)

    # allocate some contiguous memory to host the decoded image slices
    n_faces = len(file_paths)
    if not color:
        faces = np.zeros((n_faces, h, w), dtype=np.float32)
    else:
        faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)

    # iterate over the collected file path to load the jpeg files as numpy
    # arrays
    for i, file_path in enumerate(file_paths):
        if i % 1000 == 0:
            logger.info("Loading face #%05d / %05d", i + 1, n_faces)
        face = np.asarray(imread(file_path)[slice_], dtype=np.float32)
        face /= 255.0  # scale uint8 coded colors to the [0.0, 1.0] floats
        if resize is not None:
            face = imresize(face, resize)
        if not color:
            # average the color channels to compute a gray levels
            # representaion
            face = face.mean(axis=2)
        faces[i, ...] = face
    return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
                      min_faces_per_person=0):
    """Perform the actual data loading for the lfw people dataset

    This operation is meant to be cached by a joblib wrapper.

    Returns ``(faces, target, target_names)`` where ``target`` holds indices
    into the sorted unique ``target_names`` array.
    """
    # scan the data folder content to retain people with more that
    # `min_faces_per_person` face pictures
    person_names, file_paths = [], []
    for person_name in sorted(listdir(data_folder_path)):
        folder_path = join(data_folder_path, person_name)
        if not isdir(folder_path):
            continue
        paths = [join(folder_path, f) for f in listdir(folder_path)]
        n_pictures = len(paths)
        if n_pictures >= min_faces_per_person:
            # Folder names use underscores; display names use spaces.
            person_name = person_name.replace('_', ' ')
            person_names.extend([person_name] * n_pictures)
            file_paths.extend(paths)

    n_faces = len(file_paths)
    if n_faces == 0:
        raise ValueError("min_faces_per_person=%d is too restrictive" %
                         min_faces_per_person)

    target_names = np.unique(person_names)
    # Map each name to its index in the sorted unique-name array.
    target = np.searchsorted(target_names, person_names)

    faces = _load_imgs(file_paths, slice_, color, resize)

    # shuffle the faces with a deterministic RNG scheme to avoid having
    # all faces of the same person in a row, as it would break some
    # cross validation and learning algorithms such as SGD and online
    # k-means that make an IID assumption
    indices = np.arange(n_faces)
    np.random.RandomState(42).shuffle(indices)
    faces, target = faces[indices], target[indices]
    return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
                     min_faces_per_person=None, color=False,
                     slice_=(slice(70, 195), slice(78, 172)),
                     download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) people dataset

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Recognition (or Identification): given the
    picture of a face, find the name of the person given a training set
    (gallery).

    Parameters
    ----------
    data_home: optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    funneled: boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize: float, optional, default 0.5
        Ratio used to resize the each face picture.

    min_faces_per_person: int, optional, default None
        The extracted dataset will only retain pictures of people that have at
        least `min_faces_per_person` different pictures.

    color: boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than than the shape with color = False.

    slice_: optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background

    download_if_missing: optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.
    """
    # Make sure the archives and metadata are on disk (downloading if needed).
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading LFW people faces from %s', lfw_home)

    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_people)

    # load and memoize the pairs as np arrays
    faces, target, target_names = load_func(
        data_folder_path, resize=resize,
        min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)

    # pack the results as a Bunch instance; 'data' is the flattened view of
    # the same memory as 'images'.
    return Bunch(data=faces.reshape(len(faces), -1), images=faces,
                 target=target, target_names=target_names,
                 DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
                     color=False, resize=None):
    """Perform the actual data loading for the LFW pairs dataset

    This operation is meant to be cached by a joblib wrapper.

    Returns ``(pairs, target, target_names)`` where ``pairs`` has shape
    ``(n_pairs, 2, ...)`` and ``target`` is 1 for "same person" pairs.
    """
    # parse the index file to find the number of pairs to be able to allocate
    # the right amount of memory before starting to decode the jpeg files
    # NOTE(review): the file is opened in binary mode and split with a str
    # separator -- this is Python-2-era code; under Python 3 it would need
    # text mode or a bytes separator.
    with open(index_file_path, 'rb') as index_file:
        split_lines = [ln.strip().split('\t') for ln in index_file]
    pair_specs = [sl for sl in split_lines if len(sl) > 2]
    n_pairs = len(pair_specs)

    # iterating over the metadata lines for each pair to find the filename to
    # decode and load in memory
    # BUG FIX: 'np.int' was a deprecated alias of the builtin 'int' (removed
    # in NumPy 1.24); using 'int' directly is behaviorally identical.
    target = np.zeros(n_pairs, dtype=int)
    file_paths = list()
    for i, components in enumerate(pair_specs):
        if len(components) == 3:
            # Same-person pair: one name, two (1-based) picture indices.
            target[i] = 1
            pair = (
                (components[0], int(components[1]) - 1),
                (components[0], int(components[2]) - 1),
            )
        elif len(components) == 4:
            # Different-person pair: two (name, index) specs.
            target[i] = 0
            pair = (
                (components[0], int(components[1]) - 1),
                (components[2], int(components[3]) - 1),
            )
        else:
            raise ValueError("invalid line %d: %r" % (i + 1, components))
        for j, (name, idx) in enumerate(pair):
            person_folder = join(data_folder_path, name)
            filenames = list(sorted(listdir(person_folder)))
            file_path = join(person_folder, filenames[idx])
            file_paths.append(file_path)

    pairs = _load_imgs(file_paths, slice_, color, resize)
    # Regroup the flat face array into (n_pairs, 2, ...) without copying.
    shape = list(pairs.shape)
    n_faces = shape.pop(0)
    shape.insert(0, 2)
    shape.insert(0, n_faces // 2)
    pairs.shape = shape

    return pairs, target, np.array(['Different persons', 'Same person'])
def load_lfw_people(download_if_missing=False, **kwargs):
    """Alias for fetch_lfw_people(download_if_missing=False)

    Check fetch_lfw_people.__doc__ for the documentation and parameter list.
    """
    kwargs['download_if_missing'] = download_if_missing
    return fetch_lfw_people(**kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
                    color=False, slice_=(slice(70, 195), slice(78, 172)),
                    download_if_missing=True):
    """Loader for the Labeled Faces in the Wild (LFW) pairs dataset

    This dataset is a collection of JPEG pictures of famous people
    collected on the internet, all details are available on the
    official website:

        http://vis-www.cs.umass.edu/lfw/

    Each picture is centered on a single face. Each pixel of each channel
    (color in RGB) is encoded by a float in range 0.0 - 1.0.

    The task is called Face Verification: given a pair of two pictures,
    a binary classifier must predict whether the two images are from
    the same person.

    In the official `README.txt`_ this task is described as the
    "Restricted" task.  As I am not sure as to implement the
    "Unrestricted" variant correctly, I left it as unsupported for now.

    .. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt

    Parameters
    ----------
    subset: optional, default: 'train'
        Select the dataset to load: 'train' for the development training
        set, 'test' for the development test set, and '10_folds' for the
        official evaluation set that is meant to be used with a 10-folds
        cross validation.

    data_home: optional, default: None
        Specify another download and cache folder for the datasets. By
        default all scikit learn data is stored in '~/scikit_learn_data'
        subfolders.

    funneled: boolean, optional, default: True
        Download and use the funneled variant of the dataset.

    resize: float, optional, default 0.5
        Ratio used to resize the each face picture.

    color: boolean, optional, default False
        Keep the 3 RGB channels instead of averaging them to a single
        gray level channel. If color is True the shape of the data has
        one more dimension than than the shape with color = False.

    slice_: optional
        Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid use statistical
        correlation from the background

    download_if_missing: optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.
    """
    # Make sure the archives and metadata are on disk (downloading if needed).
    lfw_home, data_folder_path = check_fetch_lfw(
        data_home=data_home, funneled=funneled,
        download_if_missing=download_if_missing)
    logger.info('Loading %s LFW pairs from %s', subset, lfw_home)

    # wrap the loader in a memoizing function that will return memmaped data
    # arrays for optimal memory usage
    m = Memory(cachedir=lfw_home, compress=6, verbose=0)
    load_func = m.cache(_fetch_lfw_pairs)

    # select the right metadata file according to the requested subset
    label_filenames = {
        'train': 'pairsDevTrain.txt',
        'test': 'pairsDevTest.txt',
        '10_folds': 'pairs.txt',
    }
    if subset not in label_filenames:
        raise ValueError("subset='%s' is invalid: should be one of %r" % (
            subset, list(sorted(label_filenames.keys()))))
    index_file_path = join(lfw_home, label_filenames[subset])

    # load and memoize the pairs as np arrays
    pairs, target, target_names = load_func(
        index_file_path, data_folder_path, resize=resize, color=color,
        slice_=slice_)

    # pack the results as a Bunch instance; 'data' is the flattened view of
    # the same memory as 'pairs'.
    return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
                 target=target, target_names=target_names,
                 DESCR="'%s' segment of the LFW pairs dataset" % subset)
def load_lfw_pairs(download_if_missing=False, **kwargs):
    """Load the LFW pairs dataset without downloading it by default.

    Thin wrapper around fetch_lfw_pairs that flips the default of
    ``download_if_missing`` to False; see fetch_lfw_pairs.__doc__ for the
    full documentation and parameter list.
    """
    kwargs['download_if_missing'] = download_if_missing
    return fetch_lfw_pairs(**kwargs)
| bsd-3-clause |
dk379/asn-tryst | src/load_asns.py | 1 | 3927 |
import argparse
import logging
from multiprocessing import Pool
import MySQLdb
import netaddr
import requests
import sys
from read_config import asntryst_read_config
#
# Project specific settings
#
# Database connection settings come from the shared asn-tryst config file.
CONFIG = asntryst_read_config()
DB_HOST = CONFIG["database"]["hostname"]
DB_NAME = CONFIG["database"]["database"]
DB_USER = CONFIG["database"]["username"]
DB_PASS = CONFIG["database"]["password"]
# Sentinel stored for IPs whose ASN could not be resolved (max uint32).
INVALID_ASN = 0xffffffff
# IP versions supported
# 4 ... IP version 4
# 6 ... IP version 6
IP_VERSIONS = [4, 6]
# Data call URL for fetching ASN
RIPESTAT_DC_URL = "http://stat.ripe.net/data/network-info/data.json?resource={}"
log = logging.getLogger(__file__)
def is_private_ip(ip):
    """Return True if `ip` is a private (RFC1918 / non-routable) address.

    Malformed addresses are logged and treated as not private (False).

    :param ip: IP address or CIDR network in string form, e.g. "10.0.0.1"
    :return: bool
    """
    try:
        # netaddr accepts both plain addresses and CIDR networks here.
        return netaddr.IPNetwork(ip).is_private()
    except netaddr.AddrFormatError as e:
        # `except E, e` was Python-2-only syntax; `as` works on 2.6+ and 3.
        log.error(e)
        return False
def fetch_from_ripestat(url):
    """GET `url` and return the decoded JSON payload.

    Returns None (and logs the error) on any network-level failure.
    Note: a successful HTTP response with a non-JSON body will raise from
    response.json() and is not caught here.
    """
    try:
        # `except E, e` was Python-2-only syntax; `as` works on 2.6+ and 3.
        response = requests.get(url=url, headers={'Connection': 'close'})
        return response.json()
    except requests.exceptions.RequestException as e:
        log.error(e)
        return None
def asn_for_ip(ip):
    """Look up the origin ASN for `ip` via RIPEstat.

    Returns INVALID_ASN for private IPs, failed lookups, or responses
    that do not carry a usable ASN.
    """
    if is_private_ip(ip):
        return INVALID_ASN
    json_response = fetch_from_ripestat(RIPESTAT_DC_URL.format(ip))
    try:
        # fetch_from_ripestat may return None -> TypeError caught below.
        return int(json_response["data"]["asns"][0])
    except (KeyError, TypeError, IndexError, ValueError):
        # Python-2-only `except (...), e` replaced; the bound exception
        # was never used, so it is dropped entirely.
        return INVALID_ASN
# Worker pool created once at import time; RIPEstat lookups are
# network-bound, so ten concurrent workers overlap the round trips.
MP_POOL = Pool(10)


def asns_for_ips(ips):
    """Resolve ASNs for `ips` in parallel.

    Returns zip(ips, asns): (ip, asn) pairs in input order, since
    Pool.map preserves ordering. Under Python 2 zip() yields a list.
    """
    asns = MP_POOL.map(asn_for_ip, ips)
    return zip(ips, asns)
def load_asns_for_ips(ip_version=4, fetch_size=10):
    """Resolve and store ASNs for all unresolved IPs of one IP version.

    IPs whose AUTNUM is still 0 (unresolved) are fetched from MySQL in
    batches of `fetch_size`, looked up on RIPEstat, and written back.

    :param ip_version: 4 or 6 (selects table IPSV4 / IPSV6)
    :param fetch_size: number of IPs fetched and annotated per round trip
    :return: total number of IPs processed
    """
    conn = MySQLdb.connect(host=DB_HOST,
                           # Fix: the username is DB_USER; the original
                           # passed DB_NAME (database name) as the user.
                           user=DB_USER, passwd=DB_PASS, db=DB_NAME)
    cur = conn.cursor()
    # Count the same rows the batch loop below selects (AUTNUM = 0) so the
    # progress percentage and loop bound are consistent; the original
    # counted AUTNUM = 0xffff, which matches neither the SELECT below nor
    # the INVALID_ASN sentinel (0xffffffff).
    sql = "SELECT COUNT(*) FROM IPSV{} WHERE AUTNUM = 0".format(ip_version)
    cur.execute(sql)
    total = cur.fetchone()[0]
    to_ascii_func = "inet_ntoa" if ip_version == 4 else "inet6_ntoa"
    to_num_func = "inet_aton" if ip_version == 4 else "inet6_aton"
    count = 0
    while count < total:
        sql = "SELECT {}(IP) FROM IPSV{} WHERE AUTNUM = 0 limit {}".format(
            to_ascii_func, ip_version, fetch_size)
        cur.execute(sql)
        ips = [result[0] for result in cur.fetchall()]
        if not ips:
            break
        count += len(ips)
        sys.stdout.write(" Progress: {0:.0f}%\r".format((count * 1. / total) * 100))
        sys.stdout.flush()
        annotated_ips = asns_for_ips(ips)
        # REPLACE overwrites the placeholder AUTNUM=0 rows in place.
        insert_sql = "REPLACE INTO IPSV{} (IP,AUTNUM) VALUES ({}(%s),%s)".format(
            ip_version,
            to_num_func)
        cur.executemany(insert_sql, list(annotated_ips))
        conn.commit()
    # print() with a single argument is valid on both Python 2 and 3;
    # the original used the Python-2-only print statement.
    print("Finished: ASN loaded for {} IPs totally".format(count))
    cur.close()
    conn.close()
    return count
def get_parser():
    """Build the command line argument parser.

    Arguments:
        ip: select IP version to load ASNs for
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        "-ip",
        default=4,
        choices=IP_VERSIONS,
        type=int,
        help="load ASNs for IP version 4 or 6")
    return parser
if __name__ == "__main__":
    # CLI entry point: choose the IP version via -ip and backfill ASNs.
    parser = get_parser()
    args = parser.parse_args()
    load_asns_for_ips(args.ip)
| bsd-3-clause |
raphaelmerx/django | tests/model_fields/test_durationfield.py | 296 | 2724 | import datetime
import json
from django import forms
from django.core import exceptions, serializers
from django.db import models
from django.test import SimpleTestCase, TestCase
from .models import DurationModel, NullDurationModel
class TestSaveLoad(TestCase):
    def test_simple_roundtrip(self):
        # A duration value must survive a save/reload cycle unchanged.
        delta = datetime.timedelta(days=123, seconds=123, microseconds=123)
        DurationModel.objects.create(field=delta)
        fetched = DurationModel.objects.get()
        self.assertEqual(fetched.field, delta)

    def test_create_empty(self):
        # A nullable duration left unset loads back as None.
        NullDurationModel.objects.create()
        fetched = NullDurationModel.objects.get()
        self.assertEqual(fetched.field, None)
class TestQuerying(TestCase):
    @classmethod
    def setUpTestData(cls):
        # One positive-day, one positive-second, one negative duration.
        deltas = [
            datetime.timedelta(days=1),
            datetime.timedelta(seconds=1),
            datetime.timedelta(seconds=-1),
        ]
        cls.objs = [DurationModel.objects.create(field=d) for d in deltas]

    def test_exact(self):
        self.assertSequenceEqual(
            DurationModel.objects.filter(field=datetime.timedelta(days=1)),
            [self.objs[0]],
        )

    def test_gt(self):
        self.assertSequenceEqual(
            DurationModel.objects.filter(field__gt=datetime.timedelta(days=0)),
            [self.objs[0], self.objs[1]],
        )
class TestSerialization(SimpleTestCase):
    # Canonical JSON serialization of a 1 day, 1 hour duration.
    test_data = '[{"fields": {"field": "1 01:00:00"}, "model": "model_fields.durationmodel", "pk": null}]'

    def test_dumping(self):
        obj = DurationModel(field=datetime.timedelta(days=1, hours=1))
        dumped = serializers.serialize('json', [obj])
        self.assertEqual(json.loads(dumped), json.loads(self.test_data))

    def test_loading(self):
        deserialized = list(serializers.deserialize('json', self.test_data))
        obj = deserialized[0].object
        self.assertEqual(obj.field, datetime.timedelta(days=1, hours=1))
class TestValidation(SimpleTestCase):
    def test_invalid_string(self):
        field = models.DurationField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('not a datetime', None)
        self.assertEqual(cm.exception.code, 'invalid')
        expected = (
            "'not a datetime' value has an invalid format. "
            "It must be in [DD] [HH:[MM:]]ss[.uuuuuu] format."
        )
        self.assertEqual(cm.exception.message % cm.exception.params, expected)
class TestFormField(SimpleTestCase):
    # Tests for forms.DurationField are in the forms_tests app.
    def test_formfield(self):
        # The model field must hand out the matching form field class.
        field = models.DurationField()
        self.assertIsInstance(field.formfield(), forms.DurationField)
| bsd-3-clause |
lmprice/ansible | lib/ansible/modules/network/f5/bigip_timer_policy.py | 18 | 18820 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
# New-style classes on Python 2 (standard Ansible module boilerplate).
__metaclass__ = type

# Module maturity / support metadata consumed by Ansible tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_timer_policy
short_description: Manage timer policies on a BIG-IP
description:
- Manage timer policies on a BIG-IP.
version_added: 2.6
options:
name:
description:
- Specifies the name of the timer policy.
required: True
description:
description:
- Specifies descriptive text that identifies the timer policy.
rules:
description:
- Rules that you want assigned to the timer policy
suboptions:
name:
description:
- The name of the rule.
required: True
protocol:
description:
- Specifies the IP protocol entry for which the timer policy rule is being
configured. This could be a layer-4 protocol (such as C(tcp), C(udp) or
C(sctp).
- Only flows matching the configured protocol will make use of this rule.
- When C(all-other) is specified, if there are no specific ip-protocol rules
that match the flow, the flow matches all the other ip-protocol rules.
- When specifying rules, if this parameter is not specified, the default of
C(all-other) will be used.
choices:
- all-other
- ah
- bna
- esp
- etherip
- gre
- icmp
- ipencap
- ipv6
- ipv6-auth
- ipv6-crypt
- ipv6-icmp
- isp-ip
- mux
- ospf
- sctp
- tcp
- udp
- udplite
destination_ports:
description:
- The list of destination ports to match the rule on.
- Specify a port range by specifying start and end ports separated by a
dash (-).
- This field is only available if you have selected the C(sctp), C(tcp), or
C(udp) protocol.
idle_timeout:
description:
- Specifies an idle timeout, in seconds, for protocol and port pairs that
match the timer policy rule.
- When C(infinite), specifies that the protocol and port pairs that match
the timer policy rule have no idle timeout.
- When specifying rules, if this parameter is not specified, the default of
C(unspecified) will be used.
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the resource exists.
- When C(absent), ensures the resource is removed.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a timer policy
bigip_timer_policy:
name: timer1
description: My timer policy
rules:
- name: rule1
protocol: tcp
idle_timeout: indefinite
destination_ports:
- 443
- 80
- name: rule2
protocol: 200
- name: rule3
protocol: sctp
idle_timeout: 200
destination_ports:
- 21
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Remove a timer policy and all its associated rules
bigip_timer_policy:
name: timer1
description: My timer policy
password: secret
server: lb.mydomain.com
state: absent
user: admin
delegate_to: localhost
'''
RETURN = r'''
description:
description: The new description of the timer policy.
returned: changed
type: string
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback

# Prefer the in-repository ("library") copies of the F5 helpers when this
# module is run from the f5 development tree; fall back to the versions
# bundled with Ansible. HAS_F5SDK flags whether the f5-sdk package (and
# its iControlUnexpectedHTTPError) is importable.
try:
    from library.module_utils.network.f5.bigip import HAS_F5SDK
    from library.module_utils.network.f5.bigip import F5Client
    from library.module_utils.network.f5.common import F5ModuleError
    from library.module_utils.network.f5.common import AnsibleF5Parameters
    from library.module_utils.network.f5.common import cleanup_tokens
    from library.module_utils.network.f5.common import fq_name
    from library.module_utils.network.f5.common import f5_argument_spec
    from library.module_utils.network.f5.common import compare_dictionary
    try:
        from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
    except ImportError:
        HAS_F5SDK = False
except ImportError:
    from ansible.module_utils.network.f5.bigip import HAS_F5SDK
    from ansible.module_utils.network.f5.bigip import F5Client
    from ansible.module_utils.network.f5.common import F5ModuleError
    from ansible.module_utils.network.f5.common import AnsibleF5Parameters
    from ansible.module_utils.network.f5.common import cleanup_tokens
    from ansible.module_utils.network.f5.common import fq_name
    from ansible.module_utils.network.f5.common import f5_argument_spec
    from ansible.module_utils.network.f5.common import compare_dictionary
    try:
        from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
    except ImportError:
        HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
    # Maps REST API attribute names to module parameter names
    # (none required for this module).
    api_map = {
    }
    # Attributes sent to / read from the BIG-IP REST API.
    api_attributes = [
        'description',
        'rules'
    ]
    # Values reported back to the user in the module result.
    returnables = [
        'description',
        'rules'
    ]
    # Parameters compared between desired and current state on update.
    updatables = [
        'description',
        'rules'
    ]
class ApiParameters(Parameters):
    @property
    def rules(self):
        """Normalize the rules as returned by the BIG-IP REST API.

        Maps API key names to module key names, stringifies values,
        de-duplicates and sorts destination ports, and returns the rule
        dicts sorted by name.
        """
        raw = self._values['rules']
        if raw is None:
            return None
        normalized = []
        for rule in raw:
            entry = dict(name=rule['name'])
            if 'ipProtocol' in rule:
                entry['protocol'] = str(rule['ipProtocol'])
            if 'timers' in rule:
                entry['idle_timeout'] = str(rule['timers'][0]['value'])
            if 'destinationPorts' in rule:
                names = set(str(port['name']) for port in rule['destinationPorts'])
                entry['destination_ports'] = sorted(names)
            normalized.append(entry)
        return sorted(normalized, key=lambda item: item['name'])
class ModuleParameters(Parameters):
    @property
    def rules(self):
        """Normalize user-supplied rules.

        Returns None when unset, the empty string when the user passed a
        single empty item (interpreted downstream as "remove all rules"),
        otherwise a list of rule dicts, sorted by name, with protocol and
        idle_timeout defaults filled in.
        """
        if self._values['rules'] is None:
            return None
        if len(self._values['rules']) == 1 and self._values['rules'][0] == '':
            return ''
        results = []
        for rule in self._values['rules']:
            result = dict()
            result['name'] = rule['name']
            if 'protocol' in rule:
                result['protocol'] = str(rule['protocol'])
            else:
                result['protocol'] = 'all-other'
            if 'idle_timeout' in rule:
                result['idle_timeout'] = str(rule['idle_timeout'])
            else:
                result['idle_timeout'] = 'unspecified'
            if 'destination_ports' in rule:
                # De-duplicate and sort lexicographically (as strings)
                # before validating each entry individually.
                ports = list(set([str(x) for x in rule['destination_ports']]))
                ports.sort()
                ports = [str(self._validate_port_entries(x)) for x in ports]
                result['destination_ports'] = ports
            results.append(result)
        results = sorted(results, key=lambda k: k['name'])
        return results

    def _validate_port_entries(self, port):
        """Validate a single destination-port entry.

        Accepts 'all-other' (mapped to 0), a single port number, or a
        range 'X-Y'. A degenerate range X-X collapses to the single port
        X; a reversed range is rewritten in low-high order; a forward
        range is returned as given. Raises F5ModuleError otherwise.
        """
        if port == 'all-other':
            return 0
        if '-' in port:
            parts = port.split('-')
            if len(parts) != 2:
                raise F5ModuleError(
                    "The correct format for a port range is X-Y, where X is the start"
                    "port and Y is the end port."
                )
            try:
                start = int(parts[0])
                end = int(parts[1])
            except ValueError:
                raise F5ModuleError(
                    "The ports in a range must be numbers."
                    "You provided '{0}' and '{1}'.".format(parts[0], parts[1])
                )
            if start == end:
                # X-X is the same as the single port X.
                return start
            if start > end:
                # Normalize reversed ranges to low-high order.
                return '{0}-{1}'.format(end, start)
            else:
                return port
        else:
            try:
                return int(port)
            except ValueError:
                raise F5ModuleError(
                    "The specified destination port is not a number."
                )
class Changes(Parameters):
    def to_return(self):
        """Collect the returnable parameters as a filtered dict."""
        collected = {}
        try:
            for name in self.returnables:
                collected[name] = getattr(self, name)
            collected = self._filter_params(collected)
        except Exception:
            # Best effort, mirroring the other F5 modules: return whatever
            # was gathered before the failure.
            pass
        return collected
class UsableChanges(Changes):
    @property
    def rules(self):
        """Convert normalized module rules into the REST API payload shape.

        Raises F5ModuleError for invalid combinations (ports on a
        non-port-based protocol, malformed idle_timeout values).
        """
        if self._values['rules'] is None:
            return None
        results = []
        for rule in self._values['rules']:
            result = dict()
            result['name'] = rule['name']
            if 'protocol' in rule:
                result['ipProtocol'] = rule['protocol']
            if 'destination_ports' in rule:
                # Only port-based L4 protocols may carry destination ports.
                # NOTE(review): this assumes 'protocol' is always present
                # alongside 'destination_ports' (ModuleParameters fills it
                # in with a default) -- a bare dict here would KeyError.
                if rule['protocol'] not in ['tcp', 'udp', 'sctp']:
                    raise F5ModuleError(
                        "Only the 'tcp', 'udp', and 'sctp' protocols support 'destination_ports'."
                    )
                ports = [dict(name=str(x)) for x in rule['destination_ports']]
                result['destinationPorts'] = ports
            else:
                # An explicit empty list clears any ports set on the device.
                result['destinationPorts'] = []
            if 'idle_timeout' in rule:
                if rule['idle_timeout'] in ['indefinite', 'immediate', 'unspecified']:
                    timeout = rule['idle_timeout']
                else:
                    try:
                        # Validate numeric form only; the API receives the
                        # original string value.
                        int(rule['idle_timeout'])
                        timeout = rule['idle_timeout']
                    except ValueError:
                        raise F5ModuleError(
                            "idle_timeout must be a number, or, one of 'indefinite', 'immediate', or 'unspecified'."
                        )
                result['timers'] = [
                    dict(name='flow-idle-timeout', value=timeout)
                ]
            results.append(result)
        results = sorted(results, key=lambda k: k['name'])
        return results
class ReportableChanges(Changes):
    # No post-processing needed; changed values are reported back verbatim.
    pass
class Difference(object):
    """Computes the difference between desired (want) and current (have)
    parameters, one updatable at a time."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for `param`, or None when unchanged.

        A property named `param` defined on this class takes precedence
        over the generic attribute comparison in __default().
        """
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            # `have` does not carry this parameter: treat `want` as a change.
            return attr1

    @property
    def rules(self):
        if self.want.rules is None:
            return None
        if self.have.rules is None and self.want.rules == '':
            return None
        if self.have.rules is not None and self.want.rules == '':
            # Empty string means "remove all rules".
            return []
        if self.have.rules is None:
            return self.want.rules
        # Read the destination-port lists WITHOUT mutating the rule dicts.
        # The previous implementation used dict.pop(), which silently
        # relied on `want.rules`/`have.rules` being recomputed on every
        # property access; with a cached or plain-attribute rules list it
        # would strip the ports from the very dicts compared below.
        want = [tuple(x['destination_ports']) for x in self.want.rules
                if 'destination_ports' in x]
        have = [tuple(x['destination_ports']) for x in self.have.rules
                if 'destination_ports' in x]
        if set(want) != set(have):
            return self.want.rules
        if compare_dictionary(self.want.rules, self.have.rules):
            return self.want.rules
class ModuleManager(object):
    """Drives the desired-state workflow: diff, create, update, remove."""
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        # want: desired state from the task; have: current device state
        # (populated lazily by read_current_from_device()).
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # On create, every supplied returnable counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # Compare each updatable; collect only actual differences.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        # True if any updatable differs from the device state.
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Module entry point; returns the Ansible result dict."""
        changed = False
        result = dict()
        state = self.want.state

        try:
            if state == "present":
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            # Surface REST-level errors as module failures.
            raise F5ModuleError(str(e))

        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            # NOTE(review): other F5 modules call self.module.deprecate()
            # directly; confirm F5Client actually exposes a .module
            # attribute here.
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()

    def exists(self):
        # Existence check against /tm/net/timer-policy.
        result = self.client.api.tm.net.timer_policys.timer_policy.exists(
            name=self.want.name,
            partition=self.want.partition
        )
        return result

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            # Check mode: report the pending change without applying it.
            return True
        self.update_on_device()
        return True

    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def create_on_device(self):
        params = self.changes.api_params()
        self.client.api.tm.net.timer_policys.timer_policy.create(
            name=self.want.name,
            partition=self.want.partition,
            **params
        )

    def update_on_device(self):
        # Load the existing resource and modify only the changed fields.
        params = self.changes.api_params()
        resource = self.client.api.tm.net.timer_policys.timer_policy.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.modify(**params)

    def absent(self):
        if self.exists():
            return self.remove()
        return False

    def remove_from_device(self):
        resource = self.client.api.tm.net.timer_policys.timer_policy.load(
            name=self.want.name,
            partition=self.want.partition
        )
        if resource:
            resource.delete()

    def read_current_from_device(self):
        # Snapshot of the device-side configuration for diffing.
        resource = self.client.api.tm.net.timer_policys.timer_policy.load(
            name=self.want.name,
            partition=self.want.partition
        )
        result = resource.attrs
        return ApiParameters(params=result)
class ArgumentSpec(object):
    """Builds the Ansible argument specification for this module."""
    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            name=dict(required=True),
            description=dict(),
            rules=dict(
                type='list',
                suboptions=dict(
                    name=dict(required=True),
                    protocol=dict(
                        default='all-other',
                        choices=[
                            'all-other',
                            'ah',
                            'bna',
                            'esp',
                            'etherip',
                            'gre',
                            'icmp',
                            'ipencap',
                            'ipv6',
                            'ipv6-auth',
                            'ipv6-crypt',
                            'ipv6-icmp',
                            'isp-ip',
                            'mux',
                            'ospf',
                            'sctp',
                            'tcp',
                            'udp',
                            'udplite',
                        ]
                    ),
                    # NOTE(review): a per-rule 'description' is accepted
                    # here but is not mentioned in DOCUMENTATION and never
                    # forwarded to the API -- confirm whether intended.
                    description=dict(),
                    idle_timeout=dict(default='unspecified'),
                    destination_ports=dict(
                        type='list'
                    )
                )
            ),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        self.argument_spec = {}
        # Start from the shared F5 argument spec, then layer the
        # module-specific options on top.
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the spec, run the manager, exit as JSON."""
    spec = ArgumentSpec()

    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    if not HAS_F5SDK:
        module.fail_json(msg="The python f5-sdk module is required")

    # Pre-bind so the except-branch cleanup cannot hit an unbound name
    # when F5Client() itself raises before `client` is assigned.
    client = None
    try:
        client = F5Client(**module.params)
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        module.exit_json(**results)
    except F5ModuleError as ex:
        if client is not None:
            cleanup_tokens(client)
        module.fail_json(msg=str(ex))
if __name__ == '__main__':
    # Executed directly by the Ansible runner.
    main()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.