| repo_name (string, length 5–100) | path (string, length 4–375) | copies (991 classes) | size (string, length 4–7) | content (string, length 666–1M) | license (15 classes) |
|---|---|---|---|---|---|
gram526/VTK | Common/Transforms/Testing/Python/TestThinPlateWarp3D.py | 20 | 1768 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Image pipeline
reader = vtk.vtkImageReader()
reader.ReleaseDataFlagOff()
reader.SetDataByteOrderToLittleEndian()
reader.SetDataExtent(0,63,0,63,1,93)
reader.SetDataSpacing(3.2,3.2,1.5)
reader.SetDataOrigin(-100.8,-100.8,-69)
reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter")
reader.SetDataMask(0x7fff)
reader.Update()
p1 = vtk.vtkPoints()
p2 = vtk.vtkPoints()
p1.InsertNextPoint(0,0,0)
p2.InsertNextPoint(-60,10,20)
p1.InsertNextPoint(-100,-100,-50)
p2.InsertNextPoint(-100,-100,-50)
p1.InsertNextPoint(-100,-100,50)
p2.InsertNextPoint(-100,-100,50)
p1.InsertNextPoint(-100,100,-50)
p2.InsertNextPoint(-100,100,-50)
p1.InsertNextPoint(-100,100,50)
p2.InsertNextPoint(-100,100,50)
p1.InsertNextPoint(100,-100,-50)
p2.InsertNextPoint(100,-100,-50)
p1.InsertNextPoint(100,-100,50)
p2.InsertNextPoint(100,-100,50)
p1.InsertNextPoint(100,100,-50)
p2.InsertNextPoint(100,100,-50)
p1.InsertNextPoint(100,100,50)
p2.InsertNextPoint(100,100,50)
transform = vtk.vtkThinPlateSplineTransform()
transform.SetSourceLandmarks(p1)
transform.SetTargetLandmarks(p2)
transform.SetBasisToR()
reslice = vtk.vtkImageReslice()
reslice.SetInputConnection(reader.GetOutputPort())
reslice.SetResliceTransform(transform)
reslice.SetInterpolationModeToLinear()
reslice.SetOutputSpacing(1,1,1)
cac = vtk.vtkImageCacheFilter()
cac.SetInputConnection(reslice.GetOutputPort())
cac.SetCacheSize(1000)
cac.SetInputConnection(reslice.GetOutputPort())
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(cac.GetOutputPort())
viewer.SetZSlice(90)
viewer.SetColorWindow(2000)
viewer.SetColorLevel(1000)
viewer.Render()
# --- end of script --
| bsd-3-clause |
Suwmlee/XX-Net | Python3/lib/pydoc.py | 3 | 103200 | #!/usr/bin/env python3
"""Generate Python documentation in HTML or text for interactive use.
At the Python interactive prompt, calling help(thing) on a Python object
documents the object, and calling help() starts up an interactive
help session.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on the given port on the
local machine. Port number 0 can be used to get an arbitrary unused port.
Run "pydoc -b" to start an HTTP server on an arbitrary unused port and
open a Web browser to interactively browse documentation. The -p option
can be used with the -b option to explicitly specify the server port.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/X.Y/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__all__ = ['help']
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - synopsis() cannot be prevented from clobbering existing
# loaded modules.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import builtins
import importlib._bootstrap
import importlib._bootstrap_external
import importlib.machinery
import importlib.util
import inspect
import io
import os
import pkgutil
import platform
import re
import sys
import time
import tokenize
import urllib.parse
import warnings
from collections import deque
from reprlib import Repr
from traceback import format_exception_only
# --------------------------------------------------------- common routines
def pathdirs():
    """Convert sys.path into a list of absolute, existing, unique paths."""
    unique_dirs = []
    seen_norms = []
    for entry in sys.path:
        absolute = os.path.abspath(entry or '.')
        canonical = os.path.normcase(absolute)
        # Keep only directories that exist, deduplicated case-insensitively
        # (normcase) so e.g. Windows paths differing only in case collapse.
        if canonical not in seen_norms and os.path.isdir(absolute):
            unique_dirs.append(absolute)
            seen_norms.append(canonical)
    return unique_dirs
def getdoc(object):
    """Get the doc string or comments for an object."""
    text = inspect.getdoc(object) or inspect.getcomments(object)
    if not text:
        return ''
    # Drop a single leading blank line and any trailing whitespace.
    cleaned = re.sub('^ *\n', '', text.rstrip())
    return cleaned or ''
def splitdoc(doc):
    """Split a doc string into a synopsis line (if any) and the rest."""
    lines = doc.strip().split('\n')
    head, *tail = lines
    if not tail:
        # Single line: the whole thing is the synopsis.
        return head, ''
    if not tail[0].rstrip():
        # First line followed by a blank line: classic synopsis + body.
        return head, '\n'.join(tail[1:])
    # No blank separator: treat everything as body, no synopsis.
    return '', '\n'.join(lines)
def classname(object, modname):
    """Get a class name and qualify it with a module name if necessary."""
    qualified = object.__name__
    # Only prefix the module when the class lives outside 'modname'.
    if object.__module__ != modname:
        qualified = '%s.%s' % (object.__module__, qualified)
    return qualified
def isdata(object):
    """Check if an object is of a type that probably means it's data."""
    # Anything that is not a module/class/routine/frame/traceback/code
    # object is treated as plain data.
    non_data_checks = (inspect.ismodule, inspect.isclass, inspect.isroutine,
                       inspect.isframe, inspect.istraceback, inspect.iscode)
    return not any(check(object) for check in non_data_checks)
def replace(text, *pairs):
    """Do a series of global replacements on a string."""
    # pairs is a flat sequence: old1, new1, old2, new2, ...
    for i in range(0, len(pairs), 2):
        text = pairs[i + 1].join(text.split(pairs[i]))
    return text
def cram(text, maxlen):
    """Omit part of a string if needed to make it fit in a maximum length."""
    if len(text) <= maxlen:
        return text
    # Split the budget (minus the 3-char ellipsis) between head and tail.
    head = max(0, (maxlen - 3) // 2)
    tail = max(0, maxlen - 3 - head)
    return '%s...%s' % (text[:head], text[len(text) - tail:])
# Matches the trailing " at 0x..." id in default object reprs.
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)

def stripid(text):
    """Remove the hexadecimal id from a Python object representation."""
    # The case of %p output is implementation-dependent, hence the
    # IGNORECASE flag on the pattern above.
    return re.sub(_re_stripid, r'\1', text)
def _is_some_method(obj):
return (inspect.isfunction(obj) or
inspect.ismethod(obj) or
inspect.isbuiltin(obj) or
inspect.ismethoddescriptor(obj))
def _is_bound_method(fn):
"""
Returns True if fn is a bound method, regardless of whether
fn was implemented in Python or in C.
"""
if inspect.ismethod(fn):
return True
if inspect.isbuiltin(fn):
self = getattr(fn, '__self__', None)
return not (inspect.ismodule(self) or (self is None))
return False
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
    """Decide whether to show documentation on a variable."""
    # Certain special names are redundant or internal.
    # XXX Remove __initializing__?
    hidden_specials = {'__author__', '__builtins__', '__cached__',
                       '__credits__', '__date__', '__doc__', '__file__',
                       '__spec__', '__loader__', '__module__', '__name__',
                       '__package__', '__path__', '__qualname__',
                       '__slots__', '__version__'}
    if name in hidden_specials:
        return 0
    # Private names are hidden, but special names are displayed.
    if name.startswith('__') and name.endswith('__'):
        return 1
    # Namedtuples have public fields and methods with a single leading underscore
    if name.startswith('_') and hasattr(obj, '_fields'):
        return True
    if all is not None:
        # only document that which the programmer exported in __all__
        return name in all
    return not name.startswith('_')
def classify_class_attrs(object):
    """Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
    # Anything that is a data descriptor (property, slot, ...) is relabeled
    # so the formatters group it under "Data descriptors".
    return [(name,
             'data descriptor' if inspect.isdatadescriptor(value) else kind,
             cls, value)
            for name, kind, cls, value in inspect.classify_class_attrs(object)]
# ----------------------------------------------------- module manipulation
def ispackage(path):
    """Guess whether a path refers to a package directory."""
    if not os.path.isdir(path):
        return False
    # A directory is a package if it contains an __init__ source or
    # bytecode file.
    return any(os.path.isfile(os.path.join(path, '__init__' + ext))
               for ext in ('.py', '.pyc'))
def source_synopsis(file):
    """Return the first line of a module's docstring, or None.

    *file* is an open text file positioned at the start of the module
    source; only leading comments/blank lines and the first statement
    are read, so no import is needed.
    """
    line = file.readline()
    # Skip leading comment lines and blank lines.
    while line[:1] == '#' or not line.strip():
        line = file.readline()
        if not line: break
    line = line.strip()
    # Treat a raw docstring prefix (r""") like a plain one.
    if line[:4] == 'r"""': line = line[1:]
    if line[:3] == '"""':
        line = line[3:]
        # A trailing backslash continues the line; drop it.
        if line[-1:] == '\\': line = line[:-1]
        # Skip blank lines inside the docstring before the synopsis.
        while not line.strip():
            line = file.readline()
            if not line: break
        # The synopsis ends at the closing triple quote, if on this line.
        result = line.split('"""')[0].strip()
    else: result = None
    return result
def synopsis(filename, cache={}):
    """Get the one-line summary out of a module file."""
    # NOTE: 'cache' is a deliberate mutable default -- it memoizes results
    # across calls, keyed by filename and invalidated by file mtime.
    mtime = os.stat(filename).st_mtime
    lastupdate, result = cache.get(filename, (None, None))
    if lastupdate is None or lastupdate < mtime:
        # Look for binary suffixes first, falling back to source.
        if filename.endswith(tuple(importlib.machinery.BYTECODE_SUFFIXES)):
            loader_cls = importlib.machinery.SourcelessFileLoader
        elif filename.endswith(tuple(importlib.machinery.EXTENSION_SUFFIXES)):
            loader_cls = importlib.machinery.ExtensionFileLoader
        else:
            loader_cls = None
        # Now handle the choice.
        if loader_cls is None:
            # Must be a source file.
            try:
                file = tokenize.open(filename)
            except OSError:
                # module can't be opened, so skip it
                return None
            # text modules can be directly examined
            with file:
                result = source_synopsis(file)
        else:
            # Must be a binary module, which has to be imported.
            loader = loader_cls('__temp__', filename)
            # XXX We probably don't need to pass in the loader here.
            spec = importlib.util.spec_from_file_location('__temp__', filename,
                                                          loader=loader)
            try:
                module = importlib._bootstrap._load(spec)
            except:
                # Any import failure means no synopsis is available.
                return None
            del sys.modules['__temp__']
            result = module.__doc__.splitlines()[0] if module.__doc__ else None
        # Cache the result.
        cache[filename] = (mtime, result)
    return result
class ErrorDuringImport(Exception):
    """Errors that occurred while trying to import something to document it."""

    def __init__(self, filename, exc_info):
        self.filename = filename
        # Unpack the (type, value, traceback) triple from sys.exc_info().
        self.exc, self.value, self.tb = exc_info

    def __str__(self):
        return 'problem in %s - %s: %s' % (
            self.filename, self.exc.__name__, self.value)
def importfile(path):
    """Import a Python source file or compiled file given its path."""
    magic = importlib.util.MAGIC_NUMBER
    with open(path, 'rb') as file:
        # A file beginning with the current magic number is bytecode.
        is_bytecode = magic == file.read(len(magic))
    filename = os.path.basename(path)
    name, ext = os.path.splitext(filename)
    if is_bytecode:
        loader = importlib._bootstrap_external.SourcelessFileLoader(name, path)
    else:
        loader = importlib._bootstrap_external.SourceFileLoader(name, path)
    # XXX We probably don't need to pass in the loader here.
    spec = importlib.util.spec_from_file_location(name, path, loader=loader)
    try:
        return importlib._bootstrap._load(spec)
    except:
        # Wrap any failure so callers can report the offending file.
        raise ErrorDuringImport(path, sys.exc_info())
def safeimport(path, forceload=0, cache={}):
    """Import a module; handle errors; return None if the module isn't found.

    If the module *is* found but an exception occurs, it's wrapped in an
    ErrorDuringImport exception and reraised.  Unlike __import__, if a
    package path is specified, the module at the end of the path is returned,
    not the package at the beginning.  If the optional 'forceload' argument
    is 1, we reload the module from disk (unless it's a dynamic extension)."""
    try:
        # If forceload is 1 and the module has been previously loaded from
        # disk, we always have to reload the module.  Checking the file's
        # mtime isn't good enough (e.g. the module could contain a class
        # that inherits from another module that has changed).
        if forceload and path in sys.modules:
            if path not in sys.builtin_module_names:
                # Remove the module from sys.modules and re-import to try
                # and avoid problems with partially loaded modules.
                # Also remove any submodules because they won't appear
                # in the newly loaded module's namespace if they're already
                # in sys.modules.
                subs = [m for m in sys.modules if m.startswith(path + '.')]
                for key in [path] + subs:
                    # Prevent garbage collection.
                    cache[key] = sys.modules[key]
                    del sys.modules[key]
        module = __import__(path)
    except:
        # Did the error occur before or after the module was found?
        (exc, value, tb) = info = sys.exc_info()
        if path in sys.modules:
            # An error occurred while executing the imported module.
            raise ErrorDuringImport(sys.modules[path].__file__, info)
        elif exc is SyntaxError:
            # A SyntaxError occurred before we could execute the module.
            raise ErrorDuringImport(value.filename, info)
        elif exc is ImportError and value.name == path:
            # No such module in the path.
            return None
        else:
            # Some other error occurred during the importing process.
            raise ErrorDuringImport(path, sys.exc_info())
    # __import__ returns the top-level package; walk down to the leaf.
    for part in path.split('.')[1:]:
        try: module = getattr(module, part)
        except AttributeError: return None
    return module
# ---------------------------------------------------- formatter base class
class Doc:
    """Base class for pydoc documentation generators.

    document() dispatches on the kind of object; subclasses supply the
    doc* renderers (anything not overridden fails loudly via fail()).
    """

    # Base URL for the standard-library reference docs; may be overridden
    # at lookup time by the PYTHONDOCS environment variable.
    PYTHONDOCS = os.environ.get("PYTHONDOCS",
                                "http://docs.python.org/%d.%d/library"
                                % sys.version_info[:2])

    def document(self, object, name=None, *args):
        """Generate documentation for an object."""
        args = (object, name) + args
        # 'try' clause is to attempt to handle the possibility that inspect
        # identifies something in a way that pydoc itself has issues handling;
        # think 'super' and how it is a descriptor (which raises the exception
        # by lacking a __name__ attribute) and an instance.
        if inspect.isgetsetdescriptor(object): return self.docdata(*args)
        if inspect.ismemberdescriptor(object): return self.docdata(*args)
        try:
            if inspect.ismodule(object): return self.docmodule(*args)
            if inspect.isclass(object): return self.docclass(*args)
            if inspect.isroutine(object): return self.docroutine(*args)
        except AttributeError:
            pass
        if isinstance(object, property): return self.docproperty(*args)
        return self.docother(*args)

    def fail(self, object, name=None, *args):
        """Raise an exception for unimplemented types."""
        message = "don't know how to document object%s of type %s" % (
            name and ' ' + repr(name), type(object).__name__)
        raise TypeError(message)

    # Subclasses override these; any renderer left unimplemented fails.
    docmodule = docclass = docroutine = docother = docproperty = docdata = fail

    def getdocloc(self, object):
        """Return the location of module docs or None"""
        try:
            file = inspect.getabsfile(object)
        except TypeError:
            file = '(built-in)'
        docloc = os.environ.get("PYTHONDOCS", self.PYTHONDOCS)
        basedir = os.path.join(sys.base_exec_prefix, "lib",
                               "python%d.%d" % sys.version_info[:2])
        # Only standard-library modules (by name or by location, excluding
        # site-packages) get a reference-manual link.
        if (isinstance(object, type(os)) and
            (object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
                                 'marshal', 'posix', 'signal', 'sys',
                                 '_thread', 'zipimport') or
             (file.startswith(basedir) and
              not file.startswith(os.path.join(basedir, 'site-packages')))) and
            object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
            if docloc.startswith("http://"):
                docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
            else:
                docloc = os.path.join(docloc, object.__name__ + ".html")
        else:
            docloc = None
        return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
    """Class for safely making an HTML representation of a Python object."""

    def __init__(self):
        Repr.__init__(self)
        # Truncation limits used by the repr_* handlers.
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100

    def escape(self, text):
        """Escape HTML metacharacters ('&' must be replaced first)."""
        # NOTE(review): this copy of the file had the entity strings rendered
        # back to literal '&', '<', '>', which made escape() a no-op;
        # restored to the standard entity replacements.
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')

    def repr(self, object):
        return Repr.repr(self, object)

    def repr1(self, x, level):
        # Dispatch to a type-specific repr_<typename> handler when present.
        if hasattr(type(x), '__name__'):
            methodname = 'repr_' + '_'.join(type(x).__name__.split())
            if hasattr(self, methodname):
                return getattr(self, methodname)(x, level)
        return self.escape(cram(stripid(repr(x)), self.maxother))

    def repr_string(self, x, level):
        test = cram(x, self.maxstring)
        testrepr = repr(test)
        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
        return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
                      r'<font color="#c040c0">\1</font>',
                      self.escape(testrepr))

    repr_str = repr_string

    def repr_instance(self, x, level):
        try:
            return self.escape(cram(stripid(repr(x)), self.maxstring))
        except:
            # repr() of a broken object may raise; show a safe placeholder.
            return self.escape('<%s instance>' % x.__class__.__name__)

    repr_unicode = repr_string
class HTMLDoc(Doc):
    """Formatter class for HTML documentation."""

    # ------------------------------------------- HTML formatting utilities

    # Shared HTMLRepr instance; its bound methods are exposed as
    # class-level helpers so methods can call self.repr()/self.escape().
    _repr_instance = HTMLRepr()
    repr = _repr_instance.repr
    escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom> <br>
<font color="%s" face="helvetica, arial"> <br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or ' ')
def section(self, title, fgcol, bgcol, contents, width=6,
prelude='', marginalia=None, gap=' '):
"""Format a section with a heading."""
if marginalia is None:
marginalia = '<tt>' + ' ' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom> <br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(text.expandtabs())
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
' ', ' ', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, modpkginfo):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = modpkginfo
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def filelink(self, url, path):
"""Make a link to source file."""
return '<a href="file:%s">%s</a>' % (url, path)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '"')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif selfdot:
# Create a link for methods like 'self.method(...)'
# and use <strong> for attributes like 'self.attr'
if text[end:end+1] == '(':
results.append('self.' + self.namelink(name, methods))
else:
results.append('self.<strong>%s</strong>' % name)
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
    # ---------------------------------------------- type-specific routines

    def formattree(self, tree, modname, parent=None):
        """Produce HTML for a class tree as given by inspect.getclasstree()."""
        result = ''
        for entry in tree:
            if type(entry) is type(()):
                # (class, bases) tuple: emit the class and its base list.
                c, bases = entry
                result = result + '<dt><font face="helvetica, arial">'
                result = result + self.classlink(c, modname)
                if bases and bases != (parent,):
                    parents = []
                    for base in bases:
                        parents.append(self.classlink(base, modname))
                    result = result + '(' + ', '.join(parents) + ')'
                result = result + '\n</font></dt>'
            elif type(entry) is type([]):
                # Nested list: subclasses of the class 'c' from the
                # preceding tuple entry (deliberately reused here).
                result = result + '<dd>\n%s</dd>\n' % self.formattree(
                    entry, modname, c)
        return '<dl>\n%s</dl>\n' % result
    def docmodule(self, object, name=None, mod=None, *ignored):
        """Produce HTML documentation for a module object."""
        name = object.__name__ # ignore the passed-in name
        try:
            all = object.__all__
        except AttributeError:
            all = None
        # Build a breadcrumb of links for each package ancestor.
        parts = name.split('.')
        links = []
        for i in range(len(parts)-1):
            links.append(
                '<a href="%s.html"><font color="#ffffff">%s</font></a>' %
                ('.'.join(parts[:i+1]), parts[i]))
        linkedname = '.'.join(links + parts[-1:])
        head = '<big><big><strong>%s</strong></big></big>' % linkedname
        try:
            path = inspect.getabsfile(object)
            url = urllib.parse.quote(path)
            filelink = self.filelink(url, path)
        except TypeError:
            filelink = '(built-in)'
        info = []
        if hasattr(object, '__version__'):
            version = str(object.__version__)
            # Strip RCS '$Revision: ... $' keywords down to the number.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = version[11:-1].strip()
            info.append('version %s' % self.escape(version))
        if hasattr(object, '__date__'):
            info.append(self.escape(str(object.__date__)))
        if info:
            head = head + ' (%s)' % ', '.join(info)
        docloc = self.getdocloc(object)
        if docloc is not None:
            docloc = '<br><a href="%(docloc)s">Module Reference</a>' % locals()
        else:
            docloc = ''
        result = self.heading(
            head, '#ffffff', '#7799ee',
            '<a href=".">index</a><br>' + filelink + docloc)
        modules = inspect.getmembers(object, inspect.ismodule)
        # Collect the classes to document and their anchor URLs.
        classes, cdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None or
                (inspect.getmodule(value) or object) is object):
                if visiblename(key, all, object):
                    classes.append((key, value))
                    cdict[key] = cdict[value] = '#' + key
        # Also map base classes defined elsewhere to cross-module anchors.
        for key, value in classes:
            for base in value.__bases__:
                key, modname = base.__name__, base.__module__
                module = sys.modules.get(modname)
                if modname != name and module and hasattr(module, key):
                    if getattr(module, key) is base:
                        if not key in cdict:
                            cdict[key] = cdict[base] = modname + '.html#' + key
        funcs, fdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None or
                inspect.isbuiltin(value) or inspect.getmodule(value) is object):
                if visiblename(key, all, object):
                    funcs.append((key, value))
                    fdict[key] = '#-' + key
                    if inspect.isfunction(value): fdict[value] = fdict[key]
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all, object):
                data.append((key, value))
        doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc
        # A package gets a "Package Contents" index; a plain module gets
        # a "Modules" section for its module-valued attributes.
        if hasattr(object, '__path__'):
            modpkgs = []
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs.append((modname, name, ispkg, 0))
            modpkgs.sort()
            contents = self.multicolumn(modpkgs, self.modpkglink)
            result = result + self.bigsection(
                'Package Contents', '#ffffff', '#aa55cc', contents)
        elif modules:
            contents = self.multicolumn(
                modules, lambda t: self.modulelink(t[1]))
            result = result + self.bigsection(
                'Modules', '#ffffff', '#aa55cc', contents)
        if classes:
            classlist = [value for (key, value) in classes]
            contents = [
                self.formattree(inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Classes', '#ffffff', '#ee77aa', ' '.join(contents))
        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Functions', '#ffffff', '#eeaa77', ' '.join(contents))
        if data:
            contents = []
            for key, value in data:
                contents.append(self.document(value, key))
            result = result + self.bigsection(
                'Data', '#ffffff', '#55aa55', '<br>\n'.join(contents))
        if hasattr(object, '__author__'):
            contents = self.markup(str(object.__author__), self.preformat)
            result = result + self.bigsection(
                'Author', '#ffffff', '#7799ee', contents)
        if hasattr(object, '__credits__'):
            contents = self.markup(str(object.__credits__), self.preformat)
            result = result + self.bigsection(
                'Credits', '#ffffff', '#7799ee', contents)
        return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if callable(value) or inspect.isdatadescriptor(value):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = [(name, kind, cls, value)
for name, kind, cls, value in classify_class_attrs(object)
if visiblename(name, obj=object)]
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is builtins.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
attrs.sort(key=lambda t: t[0])
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % ', '.join(parents)
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
doc = doc and '<tt>%s<br> </tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
    def docroutine(self, object, name=None, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""
        realname = object.__name__
        name = name or realname
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''
        skipdocs = 0
        if _is_bound_method(object):
            imclass = object.__self__.__class__
            if cl:
                # Note when the method is inherited from another class.
                if imclass is not cl:
                    note = ' from ' + self.classlink(imclass, mod)
            else:
                if object.__self__ is not None:
                    note = ' method of %s instance' % self.classlink(
                        object.__self__.__class__, mod)
                else:
                    note = ' unbound %s method' % self.classlink(imclass,mod)
        if name == realname:
            title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
        else:
            # The object is documented under a different name (an alias);
            # link back to the canonical definition when it is in 'cl'.
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                reallink = '<a href="#%s">%s</a>' % (
                    cl.__name__ + '-' + realname, realname)
                skipdocs = 1
            else:
                reallink = realname
            title = '<a name="%s"><strong>%s</strong></a> = %s' % (
                anchor, name, reallink)
        argspec = None
        if inspect.isroutine(object):
            try:
                signature = inspect.signature(object)
            except (ValueError, TypeError):
                signature = None
            if signature:
                argspec = str(signature)
                if realname == '<lambda>':
                    title = '<strong>%s</strong> <em>lambda</em> ' % name
                    # XXX lambda's won't usually have func_annotations['return']
                    # since the syntax doesn't support but it is possible.
                    # So removing parentheses isn't truly safe.
                    argspec = argspec[1:-1] # remove parentheses
        if not argspec:
            argspec = '(...)'
        decl = title + self.escape(argspec) + (note and self.grey(
            '<font face="helvetica, arial">%s</font>' % note))
        if skipdocs:
            return '<dl><dt>%s</dt></dl>\n' % decl
        else:
            doc = self.markup(
                getdoc(object), self.preformat, funcs, classes, methods)
            doc = doc and '<dd><tt>%s</tt></dd>' % doc
            return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
    # Render a descriptor as a <dl> entry: bold name plus its docstring.
    parts = []
    if name:
        parts.append('<dl><dt><strong>%s</strong></dt>\n' % name)
    if value.__doc__ is not None:
        doc = self.markup(getdoc(value), self.preformat)
        parts.append('<dd><tt>%s</tt></dd>\n' % doc)
    parts.append('</dl>\n')
    return ''.join(parts)
def docproperty(self, object, name=None, mod=None, cl=None):
    """Render HTML documentation for a property object."""
    # Properties are documented exactly like any other data descriptor.
    return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
    """Produce HTML documentation for a data object."""
    # When a name is given, show it in bold followed by the value's repr.
    if name:
        return '<strong>%s</strong> = %s' % (name, self.repr(object))
    return self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
    """Render HTML documentation for a data descriptor."""
    # Data descriptors share the generic descriptor rendering.
    return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
    """Generate an HTML index for a directory of modules."""
    if shadowed is None:
        shadowed = {}
    entries = []
    for importer, name, ispkg in pkgutil.iter_modules([dir]):
        # Skip names containing surrogate characters (e.g. from
        # undecodable filenames) — they cannot be rendered safely.
        if any(0xD800 <= ord(ch) <= 0xDFFF for ch in name):
            continue
        entries.append((name, '', ispkg, name in shadowed))
        shadowed[name] = 1
    entries.sort()
    contents = self.multicolumn(entries, self.modpkglink)
    return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
    """Class for safely making a text representation of a Python object."""

    def __init__(self):
        Repr.__init__(self)
        # Allow somewhat more output than reprlib's defaults.
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100

    def repr1(self, x, level):
        """Dispatch to a type-specific repr_<typename> method when one exists."""
        if hasattr(type(x), '__name__'):
            methodname = 'repr_' + '_'.join(type(x).__name__.split())
            if hasattr(self, methodname):
                return getattr(self, methodname)(x, level)
        return cram(stripid(repr(x)), self.maxother)

    def repr_string(self, x, level):
        """Repr a string, preferring a raw-string form when it is cleaner."""
        test = cram(x, self.maxstring)
        testrepr = repr(test)
        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + testrepr[0] + test + testrepr[0]
        return testrepr

    repr_str = repr_string

    def repr_instance(self, x, level):
        try:
            return cram(stripid(repr(x)), self.maxstring)
        except Exception:
            # A broken __repr__ may raise anything; fall back to a generic
            # placeholder.  (Was a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
    """Formatter class for text documentation."""

    # ------------------------------------------- text formatting utilities

    # Shared Repr instance used for safe value display.
    _repr_instance = TextRepr()
    repr = _repr_instance.repr

    def bold(self, text):
        """Format a string in bold by overstriking."""
        # Classic terminal bold: each character followed by backspace + itself.
        return ''.join(ch + '\b' + ch for ch in text)

    def indent(self, text, prefix='    '):
        """Indent text by prepending a given prefix to each line."""
        if not text: return ''
        lines = [prefix + line for line in text.split('\n')]
        # Avoid trailing whitespace on the final line.
        if lines: lines[-1] = lines[-1].rstrip()
        return '\n'.join(lines)

    def section(self, title, contents):
        """Format a section with a given heading."""
        clean_contents = self.indent(contents).rstrip()
        return self.bold(title) + '\n' + clean_contents + '\n\n'

    # ---------------------------------------------- type-specific routines

    def formattree(self, tree, modname, parent=None, prefix=''):
        """Render in text a class tree as returned by inspect.getclasstree()."""
        result = ''
        for entry in tree:
            if type(entry) is type(()):
                # (class, bases) tuple: print the class, with bases unless
                # the only base is the parent already shown above it.
                c, bases = entry
                result = result + prefix + classname(c, modname)
                if bases and bases != (parent,):
                    parents = (classname(c, modname) for c in bases)
                    result = result + '(%s)' % ', '.join(parents)
                result = result + '\n'
            elif type(entry) is type([]):
                # Nested list: subclasses of the preceding class, indented.
                result = result + self.formattree(
                    entry, modname, c, prefix + '    ')
        return result

    def docmodule(self, object, name=None, mod=None):
        """Produce text documentation for a given module object."""
        name = object.__name__ # ignore the passed-in name
        synop, desc = splitdoc(getdoc(object))
        result = self.section('NAME', name + (synop and ' - ' + synop))
        all = getattr(object, '__all__', None)
        docloc = self.getdocloc(object)
        if docloc is not None:
            result = result + self.section('MODULE REFERENCE', docloc + """

The following documentation is automatically generated from the Python
source files.  It may be incomplete, incorrect or include features that
are considered implementation detail and may vary between Python
implementations.  When in doubt, consult the module reference at the
location listed above.
""")

        if desc:
            result = result + self.section('DESCRIPTION', desc)

        # Collect the classes, routines and data to document.
        classes = []
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None
                or (inspect.getmodule(value) or object) is object):
                if visiblename(key, all, object):
                    classes.append((key, value))
        funcs = []
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None or
                inspect.isbuiltin(value) or inspect.getmodule(value) is object):
                if visiblename(key, all, object):
                    funcs.append((key, value))
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all, object):
                data.append((key, value))

        modpkgs = []
        modpkgs_names = set()
        if hasattr(object, '__path__'):
            # A package: list its contained modules/subpackages.
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs_names.add(modname)
                if ispkg:
                    modpkgs.append(modname + ' (package)')
                else:
                    modpkgs.append(modname)

            modpkgs.sort()
            result = result + self.section(
                'PACKAGE CONTENTS', '\n'.join(modpkgs))

        # Detect submodules as sometimes created by C extensions
        submodules = []
        for key, value in inspect.getmembers(object, inspect.ismodule):
            if value.__name__.startswith(name + '.') and key not in modpkgs_names:
                submodules.append(key)
        if submodules:
            submodules.sort()
            result = result + self.section(
                'SUBMODULES', '\n'.join(submodules))

        if classes:
            # Show the inheritance tree first, then each class in turn.
            classlist = [value for key, value in classes]
            contents = [self.formattree(
                inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name))
            result = result + self.section('CLASSES', '\n'.join(contents))

        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name))
            result = result + self.section('FUNCTIONS', '\n'.join(contents))

        if data:
            contents = []
            for key, value in data:
                contents.append(self.docother(value, key, name, maxlen=70))
            result = result + self.section('DATA', '\n'.join(contents))

        if hasattr(object, '__version__'):
            version = str(object.__version__)
            # Strip RCS/CVS "$Revision: ... $" keyword wrappers.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = version[11:-1].strip()
            result = result + self.section('VERSION', version)
        if hasattr(object, '__date__'):
            result = result + self.section('DATE', str(object.__date__))
        if hasattr(object, '__author__'):
            result = result + self.section('AUTHOR', str(object.__author__))
        if hasattr(object, '__credits__'):
            result = result + self.section('CREDITS', str(object.__credits__))
        try:
            file = inspect.getabsfile(object)
        except TypeError:
            file = '(built-in)'
        result = result + self.section('FILE', file)
        return result

    def docclass(self, object, name=None, mod=None, *ignored):
        """Produce text documentation for a given class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__

        def makename(c, m=object.__module__):
            return classname(c, m)

        if name == realname:
            title = 'class ' + self.bold(realname)
        else:
            title = self.bold(name) + ' = class ' + realname
        if bases:
            parents = map(makename, bases)
            title = title + '(%s)' % ', '.join(parents)

        doc = getdoc(object)
        contents = doc and [doc + '\n'] or []
        push = contents.append

        # List the mro, if non-trivial.
        mro = deque(inspect.getmro(object))
        if len(mro) > 2:
            push("Method resolution order:")
            for base in mro:
                push('    ' + makename(base))
            push('')

        # Cute little class to pump out a horizontal rule between sections.
        class HorizontalRule:
            def __init__(self):
                self.needone = 0
            def maybe(self):
                if self.needone:
                    push('-' * 70)
                self.needone = 1
        hr = HorizontalRule()

        def spill(msg, attrs, predicate):
            # Document the attrs matching predicate; return the rest.
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    try:
                        value = getattr(object, name)
                    except Exception:
                        # Some descriptors may meet a failure in their __get__.
                        # (bug #1785)
                        push(self._docdescriptor(name, value, mod))
                    else:
                        push(self.document(value,
                                        name, mod, object))
            return attrs

        def spilldescriptors(msg, attrs, predicate):
            # Like spill(), but always via the descriptor renderer.
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self._docdescriptor(name, value, mod))
            return attrs

        def spilldata(msg, attrs, predicate):
            # Render plain data attributes, with docstrings when available.
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    if callable(value) or inspect.isdatadescriptor(value):
                        doc = getdoc(value)
                    else:
                        doc = None
                    try:
                        obj = getattr(object, name)
                    except AttributeError:
                        obj = homecls.__dict__[name]
                    push(self.docother(obj, name, mod, maxlen=70, doc=doc) +
                         '\n')
            return attrs

        attrs = [(name, kind, cls, value)
                 for name, kind, cls, value in classify_class_attrs(object)
                 if visiblename(name, obj=object)]

        # Walk the MRO, grouping attributes by the class that defines them.
        while attrs:
            if mro:
                thisclass = mro.popleft()
            else:
                thisclass = attrs[0][2]
            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)

            if thisclass is builtins.object:
                # Everything inherits from object; not worth listing.
                attrs = inherited
                continue
            elif thisclass is object:
                tag = "defined here"
            else:
                tag = "inherited from %s" % classname(thisclass,
                                                      object.__module__)

            # Sort attrs by name.
            attrs.sort()

            # Pump out the attrs, segregated by kind.
            attrs = spill("Methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'method')
            attrs = spill("Class methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'class method')
            attrs = spill("Static methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'static method')
            attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
                                     lambda t: t[1] == 'data descriptor')
            attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
                              lambda t: t[1] == 'data')

            assert attrs == []
            attrs = inherited

        contents = '\n'.join(contents)
        if not contents:
            return title + '\n'
        # Prefix the body with " |  " to draw the class 'sidebar'.
        return title + '\n' + self.indent(contents.rstrip(), ' |  ') + '\n'

    def formatvalue(self, object):
        """Format an argument default value as text."""
        return '=' + self.repr(object)

    def docroutine(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a function or method object."""
        realname = object.__name__
        name = name or realname
        note = ''
        skipdocs = 0
        if _is_bound_method(object):
            imclass = object.__self__.__class__
            if cl:
                if imclass is not cl:
                    note = ' from ' + classname(imclass, mod)
            else:
                if object.__self__ is not None:
                    note = ' method of %s instance' % classname(
                        object.__self__.__class__, mod)
                else:
                    note = ' unbound %s method' % classname(imclass,mod)

        if name == realname:
            title = self.bold(realname)
        else:
            # Shown under an alias; skip docs if the real definition is on
            # the same class (it will be documented under its real name).
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                skipdocs = 1
            title = self.bold(name) + ' = ' + realname
        argspec = None

        if inspect.isroutine(object):
            try:
                signature = inspect.signature(object)
            except (ValueError, TypeError):
                # Some builtins/extension routines expose no signature.
                signature = None
            if signature:
                argspec = str(signature)
                if realname == '<lambda>':
                    title = self.bold(name) + ' lambda '
                    # XXX lambda's won't usually have func_annotations['return']
                    # since the syntax doesn't support but it is possible.
                    # So removing parentheses isn't truly safe.
                    argspec = argspec[1:-1] # remove parentheses
        if not argspec:
            argspec = '(...)'
        decl = title + argspec + note

        if skipdocs:
            return decl + '\n'
        else:
            doc = getdoc(object) or ''
            return decl + '\n' + (doc and self.indent(doc).rstrip() + '\n')

    def _docdescriptor(self, name, value, mod):
        # Render a descriptor: bold name, then its indented docstring.
        results = []
        push = results.append

        if name:
            push(self.bold(name))
            push('\n')
        doc = getdoc(value) or ''
        if doc:
            push(self.indent(doc))
            push('\n')
        return ''.join(results)

    def docproperty(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a property."""
        return self._docdescriptor(name, object, mod)

    def docdata(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a data descriptor."""
        return self._docdescriptor(name, object, mod)

    def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
        """Produce text documentation for a data object."""
        repr = self.repr(object)
        if maxlen:
            # Truncate the repr so "name = repr" fits within maxlen columns.
            line = (name and name + ' = ' or '') + repr
            chop = maxlen - len(line)
            if chop < 0: repr = repr[:chop] + '...'
        line = (name and self.bold(name) + ' = ' or '') + repr
        if doc is not None:
            line += '\n' + self.indent(str(doc))
        return line
class _PlainTextDoc(TextDoc):
    """TextDoc variant that emits no bold (overstrike) styling."""

    def bold(self, text):
        # Plain output: return the text unchanged instead of overstriking.
        return text
# --------------------------------------------------------- user interfaces
def pager(text):
    """The first time this is called, determine what kind of pager to use."""
    # Self-replacing stub: on first use, pick the concrete pager with
    # getpager() and rebind the module-level name 'pager' to it, then
    # delegate this first call.  Subsequent calls hit the real pager directly.
    global pager
    pager = getpager()
    pager(text)
def getpager():
    """Decide what method to use for paging through text."""
    # No real TTYs on both ends -> no interactive paging is possible.
    if not hasattr(sys.stdin, "isatty"):
        return plainpager
    if not hasattr(sys.stdout, "isatty"):
        return plainpager
    if not sys.stdin.isatty() or not sys.stdout.isatty():
        return plainpager
    # Honor an explicit $PAGER, adapting for platform/terminal quirks.
    if 'PAGER' in os.environ:
        if sys.platform == 'win32': # pipes completely broken in Windows
            return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
        elif os.environ.get('TERM') in ('dumb', 'emacs'):
            # Dumb terminals can't render overstrike bold; strip it first.
            return lambda text: pipepager(plain(text), os.environ['PAGER'])
        else:
            return lambda text: pipepager(text, os.environ['PAGER'])
    if os.environ.get('TERM') in ('dumb', 'emacs'):
        return plainpager
    if sys.platform == 'win32':
        return lambda text: tempfilepager(plain(text), 'more <')
    # Probe for 'less' by running it with no input; exit status 0 means found.
    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
        return lambda text: pipepager(text, 'less')

    import tempfile
    (fd, filename) = tempfile.mkstemp()
    os.close(fd)
    try:
        # Probe for 'more' by paging an empty temporary file.
        if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
            return lambda text: pipepager(text, 'more')
        else:
            # Last resort: the built-in pure-Python terminal pager.
            return ttypager
    finally:
        os.unlink(filename)
def plain(text):
    """Remove boldface formatting from text."""
    # Overstruck bold renders each character as "c<BS>c"; delete every
    # character-plus-backspace pair to recover the plain text.
    return re.sub('.\x08', '', text)
def pipepager(text, cmd):
    """Page through text by feeding it to another program."""
    import subprocess
    # The pager inherits the terminal; we only own its stdin.
    proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
    try:
        # Wrap the binary pipe so arbitrary text can be written; characters
        # the encoding can't represent become backslash escapes.
        with io.TextIOWrapper(proc.stdin, errors='backslashreplace') as pipe:
            try:
                pipe.write(text)
            except KeyboardInterrupt:
                # We've hereby abandoned whatever text hasn't been written,
                # but the pager is still in control of the terminal.
                pass
    except OSError:
        pass # Ignore broken pipes caused by quitting the pager program.
    # Wait for the pager to exit, retrying through Ctrl-C.
    while True:
        try:
            proc.wait()
            break
        except KeyboardInterrupt:
            # Ignore ctl-c like the pager itself does.  Otherwise the pager is
            # left running and the terminal is in raw mode and unusable.
            pass
def tempfilepager(text, cmd):
    """Page through text by invoking a program on a temporary file.

    The file is always removed afterwards, even if writing or the pager
    command fails.
    """
    import tempfile
    # mkstemp() creates the file atomically; the old mktemp() only returned
    # a name, leaving a window in which an attacker could create the file.
    fd, filename = tempfile.mkstemp()
    os.close(fd)
    try:
        with open(filename, 'w', errors='backslashreplace') as file:
            file.write(text)
        os.system(cmd + ' "' + filename + '"')
    finally:
        os.unlink(filename)
def _escape_stdout(text):
# Escape non-encodable characters to avoid encoding errors later
encoding = getattr(sys.stdout, 'encoding', None) or 'utf-8'
return text.encode(encoding, 'backslashreplace').decode(encoding)
def ttypager(text):
    """Page through text on a text terminal."""
    lines = plain(_escape_stdout(text)).split('\n')
    try:
        # Prefer single-keystroke input via cbreak mode when a tty is
        # available; remember the old settings so they can be restored.
        import tty
        fd = sys.stdin.fileno()
        old = tty.tcgetattr(fd)
        tty.setcbreak(fd)
        getchar = lambda: sys.stdin.read(1)
    except (ImportError, AttributeError, io.UnsupportedOperation):
        # No tty support: fall back to line-buffered input and use the
        # first character of each entered line as the command key.
        tty = None
        getchar = lambda: sys.stdin.readline()[:-1][:1]

    try:
        try:
            h = int(os.environ.get('LINES', 0))
        except ValueError:
            h = 0
        if h <= 1:
            h = 25
        # r = index of next line to show; inc = page size (screen minus
        # the prompt row).
        r = inc = h - 1
        sys.stdout.write('\n'.join(lines[:inc]) + '\n')
        while lines[r:]:
            sys.stdout.write('-- more --')
            sys.stdout.flush()
            c = getchar()

            if c in ('q', 'Q'):
                # Overwrite the '-- more --' prompt with spaces and quit.
                sys.stdout.write('\r          \r')
                break
            elif c in ('\r', '\n'):
                # Return/newline: advance by a single line.
                sys.stdout.write('\r          \r' + lines[r] + '\n')
                r = r + 1
                continue
            if c in ('b', 'B', '\x1b'):
                # Back up one page (the page about to be reshown plus the
                # previous one); any other key pages forward.
                r = r - inc - inc
                if r < 0: r = 0
            sys.stdout.write('\n' + '\n'.join(lines[r:r+inc]) + '\n')
            r = r + inc

    finally:
        if tty:
            # Restore the terminal settings saved before cbreak mode.
            tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
    """Simply print unformatted text.  This is the ultimate fallback."""
    # Strip overstrike bold and escape unencodable characters, then dump.
    sys.stdout.write(plain(_escape_stdout(text)))
def describe(thing):
    """Produce a short description of the given thing."""
    if inspect.ismodule(thing):
        modname = thing.__name__
        if modname in sys.builtin_module_names:
            return 'built-in module ' + modname
        # A __path__ attribute marks a package rather than a plain module.
        if hasattr(thing, '__path__'):
            return 'package ' + modname
        return 'module ' + modname
    if inspect.isbuiltin(thing):
        return 'built-in function ' + thing.__name__
    if inspect.isgetsetdescriptor(thing):
        owner = thing.__objclass__
        return 'getset descriptor %s.%s.%s' % (
            owner.__module__, owner.__name__, thing.__name__)
    if inspect.ismemberdescriptor(thing):
        owner = thing.__objclass__
        return 'member descriptor %s.%s.%s' % (
            owner.__module__, owner.__name__, thing.__name__)
    if inspect.isclass(thing):
        return 'class ' + thing.__name__
    if inspect.isfunction(thing):
        return 'function ' + thing.__name__
    if inspect.ismethod(thing):
        return 'method ' + thing.__name__
    # Anything else is described by its type's name.
    return type(thing).__name__
def locate(path, forceload=0):
    """Locate an object by name or dotted path, importing as necessary."""
    parts = [part for part in path.split('.') if part]
    # Import the longest importable prefix of the dotted path.
    module, n = None, 0
    while n < len(parts):
        candidate = safeimport('.'.join(parts[:n+1]), forceload)
        if not candidate:
            break
        module, n = candidate, n + 1
    # Resolve the remaining parts as attributes, starting from the module
    # found (or from builtins when nothing was importable).
    object = module if module else builtins
    for part in parts[n:]:
        try:
            object = getattr(object, part)
        except AttributeError:
            return None
    return object
# --------------------------------------- interactive interpreter interface

# Shared formatter instances used by the top-level helper functions below.
text = TextDoc()
plaintext = _PlainTextDoc()
html = HTMLDoc()
def resolve(thing, forceload=0):
    """Given an object or a path to an object, get the object and its name."""
    if not isinstance(thing, str):
        # Already an object: pair it with its __name__ when that is a str.
        name = getattr(thing, '__name__', None)
        return thing, name if isinstance(name, str) else None
    # A dotted-path string: import/resolve it to an object.
    object = locate(thing, forceload)
    if object is None:
        raise ImportError('''\
No Python documentation found for %r.
Use help() to get the interactive help utility.
Use help(str) for help on the str class.''' % thing)
    return object, thing
def render_doc(thing, title='Python Library Documentation: %s', forceload=0,
        renderer=None):
    """Render text documentation, given an object or a path to an object."""
    # Default to the module-level TextDoc formatter.
    if renderer is None:
        renderer = text
    object, name = resolve(thing, forceload)
    desc = describe(object)
    module = inspect.getmodule(object)
    # Qualify the description with its container: the dotted-path prefix
    # when one was given, otherwise the defining module.
    if name and '.' in name:
        desc += ' in ' + name[:name.rfind('.')]
    elif module and module is not object:
        desc += ' in module ' + module.__name__

    if not (inspect.ismodule(object) or
              inspect.isclass(object) or
              inspect.isroutine(object) or
              inspect.isgetsetdescriptor(object) or
              inspect.ismemberdescriptor(object) or
              isinstance(object, property)):
        # If the passed object is a piece of data or an instance,
        # document its available methods instead of its value.
        object = type(object)
        desc += ' object'
    return title % desc + '\n\n' + renderer.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0,
        output=None):
    """Display text documentation, given an object or a path to an object."""
    try:
        if output is None:
            # Interactive use: page the styled (overstrike-bold) rendering.
            pager(render_doc(thing, title, forceload))
        else:
            # Writing to a stream: use the plain renderer, no paging.
            output.write(render_doc(thing, title, forceload, plaintext))
    except (ImportError, ErrorDuringImport) as value:
        print(value)
def writedoc(thing, forceload=0):
    """Write HTML documentation to a file in the current directory."""
    try:
        object, name = resolve(thing, forceload)
        # Render a full HTML page titled with the object's description.
        page = html.page(describe(object), html.document(object, name))
        with open(name + '.html', 'w', encoding='utf-8') as file:
            file.write(page)
        print('wrote', name + '.html')
    except (ImportError, ErrorDuringImport) as value:
        print(value)
def writedocs(dir, pkgpath='', done=None):
    """Write out HTML documentation for all modules in a directory tree."""
    # NOTE(review): 'done' is initialized but never consulted below —
    # presumably a leftover from an older duplicate-tracking scheme; kept
    # for interface compatibility.
    if done is None: done = {}
    for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
        writedoc(modname)
    return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'False': '',
'None': '',
'True': '',
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'nonlocal NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'nonlocal': ('nonlocal', 'global NAMESPACES'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "b'", '"""', '"', 'r"', 'b"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.items():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING range LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS MAPPINGMETHODS '
'NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global nonlocal ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'CONVERSIONS': ('conversions', ''),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS NUMBERS TUPLELITERALS '
'LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<%s.%s instance>' % (self.__class__.__module__,
self.__class__.__qualname__)
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = replace(request, '"', '', "'", '').strip()
if request.lower() in ('q', 'quit'): break
if request == 'help':
self.intro()
else:
self.help(request)
def getline(self, prompt):
"""Read one line, using input() when appropriate."""
if self.input is sys.stdin:
return input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(request.split()[1])
elif request in self.symbols: self.showsymbol(request)
elif request in ['True', 'False', 'None']:
# special case these keywords since they are objects too
doc(eval(request), 'Help on %s:')
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:', output=self._output)
else: doc(str, 'Help on %s:', output=self._output)
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:', output=self._output)
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s's help utility!
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, symbols, or topics, type
"modules", "keywords", "symbols", or "topics". Each module also comes
with a one-line summary of what it does; to list the modules whose name
or summary contain a given string such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))
def list(self, items, columns=4, width=80):
items = list(sorted(items))
colw = width // columns
rows = (len(items) + columns - 1) // columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw - 1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(doc.strip() + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import textwrap
text = 'Related help topics: ' + ', '.join(xrefs.split()) + '\n'
wrapped_text = textwrap.wrap(text, 72)
self.output.write('\n%s\n' % ''.join(wrapped_text))
def _gettopic(self, topic, more_xrefs=''):
"""Return unbuffered tuple of (topic, xrefs).
If an error occurs here, the exception is caught and displayed by
the url handler.
This function duplicates the showtopic method but returns its
result directly so it can be formatted for display in an html page.
"""
try:
import pydoc_data.topics
except ImportError:
return('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''' , '')
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
raise ValueError('could not find topic')
if isinstance(target, str):
return self._gettopic(target, more_xrefs)
label, xrefs = target
doc = pydoc_data.topics.topics[label]
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
return doc, xrefs
def showsymbol(self, symbol):
    """Show help for a punctuation symbol.

    Each entry in self.symbols is a string whose first word is the
    topic to display; the remaining words become extra cross-references.
    """
    target = self.symbols[symbol]
    topic, _, xrefs = target.partition(' ')
    self.showtopic(topic, xrefs)
def listmodules(self, key=''):
    """List available modules, or those whose name/synopsis contains *key*."""
    if key:
        self.output.write('''
Here is a list of modules whose name or summary contains '{}'.
If there are any, enter a module name to get more help.
'''.format(key))
        apropos(key)
    else:
        self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
        # Collect only top-level module names (no dotted submodules).
        modules = {}
        def callback(path, modname, desc, modules=modules):
            if modname and modname[-9:] == '.__init__':
                modname = modname[:-9] + ' (package)'
            if modname.find('.') < 0:
                modules[modname] = 1
        def onerror(modname):
            # Still list modules that failed to import.
            callback(None, modname, None)
        ModuleScanner().run(callback, onerror=onerror)
        self.list(modules.keys())
        self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose name or summary contain the string "spam".
''')
help = Helper()  # module-level Helper instance; cli() below dispatches to help.help()
class ModuleScanner:
    """An interruptible scanner that searches module synopses."""

    def run(self, callback, key=None, completer=None, onerror=None):
        """Scan importable modules, calling callback(path, modname, desc).

        With key=None every module is reported; otherwise only modules
        whose name or one-line synopsis contains *key*
        (case-insensitively).  Setting self.quit to True stops the walk;
        *completer*, if given, is invoked once at the end; *onerror* is
        forwarded to pkgutil and called for modules that fail.
        """
        if key: key = key.lower()
        self.quit = False
        seen = {}
        # Built-in modules first; their synopsis comes from the live
        # module's docstring.
        for modname in sys.builtin_module_names:
            if modname != '__main__':
                seen[modname] = 1
                if key is None:
                    callback(None, modname, '')
                else:
                    name = __import__(modname).__doc__ or ''
                    desc = name.split('\n')[0]
                    name = modname + ' - ' + desc
                    if name.lower().find(key) >= 0:
                        callback(None, modname, desc)
        # Then everything importable on sys.path.
        for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
            if self.quit:
                break
            if key is None:
                callback(None, modname, '')
            else:
                try:
                    spec = pkgutil._get_spec(importer, modname)
                except SyntaxError:
                    # raised by tests for bad coding cookies or BOM
                    continue
                loader = spec.loader
                if hasattr(loader, 'get_source'):
                    # Prefer reading the synopsis from source: avoids
                    # importing (and thus executing) the module.
                    try:
                        source = loader.get_source(modname)
                    except Exception:
                        if onerror:
                            onerror(modname)
                        continue
                    desc = source_synopsis(io.StringIO(source)) or ''
                    if hasattr(loader, 'get_filename'):
                        path = loader.get_filename(modname)
                    else:
                        path = None
                else:
                    # No source available; fall back to importing.
                    try:
                        module = importlib._bootstrap._load(spec)
                    except ImportError:
                        if onerror:
                            onerror(modname)
                        continue
                    desc = module.__doc__.splitlines()[0] if module.__doc__ else ''
                    path = getattr(module,'__file__',None)
                name = modname + ' - ' + desc
                if name.lower().find(key) >= 0:
                    callback(path, modname, desc)

        if completer:
            completer()
def apropos(key):
    """Print all the one-line module summaries that contain a substring."""
    def callback(path, modname, desc):
        if modname[-9:] == '.__init__':
            modname = modname[:-9] + ' (package)'
        print(modname, desc and '- ' + desc)
    def onerror(modname):
        # Silently skip modules that cannot be scanned.
        pass
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore') # ignore problems during import
        ModuleScanner().run(callback, key, onerror=onerror)
# --------------------------------------- enhanced Web browser interface
def _start_server(urlhandler, port):
    """Start an HTTP server thread on a specific port.

    Start an HTML/text server thread, so HTML or text documents can be
    browsed dynamically and interactively with a Web browser.  Example use:

        >>> import time
        >>> import pydoc

    Define a URL handler.  To determine what the client is asking
    for, check the URL and content_type.  Then get or generate some
    text or HTML code and return it.

        >>> def my_url_handler(url, content_type):
        ...     text = 'the URL sent was: (%s, %s)' % (url, content_type)
        ...     return text

    Start server thread on port 0.  If you use port 0, the server will
    pick a random port number.  You can then use serverthread.port to
    get the port number.

        >>> port = 0
        >>> serverthread = pydoc._start_server(my_url_handler, port)

    Check that the server is really started.  If it is, open browser
    and get first page.  Use serverthread.url as the starting page.

        >>> if serverthread.serving:
        ...     import webbrowser

    The next two lines are commented out so a browser doesn't open if
    doctest is run on this module.

        #...    webbrowser.open(serverthread.url)
        #True

    Let the server do its thing.  We just need to monitor its status.
    Use time.sleep so the loop doesn't hog the CPU.

        >>> starttime = time.time()
        >>> timeout = 1                    #seconds

    This is a short timeout for testing purposes.

        >>> while serverthread.serving:
        ...     time.sleep(.01)
        ...     if serverthread.serving and time.time() - starttime > timeout:
        ...          serverthread.stop()
        ...          break

    Print any errors that may have occurred.

        >>> print(serverthread.error)
        None
    """
    import http.server
    import email.message
    import select
    import threading

    class DocHandler(http.server.BaseHTTPRequestHandler):

        def do_GET(self):
            """Process a request from an HTML browser.

            The URL received is in self.path.
            Get an HTML page from self.urlhandler and send it.
            """
            if self.path.endswith('.css'):
                content_type = 'text/css'
            else:
                content_type = 'text/html'
            self.send_response(200)
            self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
            self.end_headers()
            self.wfile.write(self.urlhandler(
                self.path, content_type).encode('utf-8'))

        def log_message(self, *args):
            # Don't log messages.
            pass

    class DocServer(http.server.HTTPServer):

        def __init__(self, port, callback):
            self.host = 'localhost'
            self.address = (self.host, port)
            self.callback = callback
            # base/handler are class attributes injected by
            # ServerThread.run() below, before instantiation.
            self.base.__init__(self, self.address, self.handler)
            self.quit = False

        def serve_until_quit(self):
            # Poll with a 1-second timeout so self.quit is noticed promptly.
            while not self.quit:
                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
                if rd:
                    self.handle_request()
            self.server_close()

        def server_activate(self):
            self.base.server_activate(self)
            if self.callback:
                # Report readiness (and the bound port) to the thread.
                self.callback(self)

    class ServerThread(threading.Thread):

        def __init__(self, urlhandler, port):
            self.urlhandler = urlhandler
            self.port = int(port)
            threading.Thread.__init__(self)
            self.serving = False
            self.error = None

        def run(self):
            """Start the server."""
            try:
                DocServer.base = http.server.HTTPServer
                DocServer.handler = DocHandler
                DocHandler.MessageClass = email.message.Message
                DocHandler.urlhandler = staticmethod(self.urlhandler)
                docsvr = DocServer(self.port, self.ready)
                self.docserver = docsvr
                docsvr.serve_until_quit()
            except Exception as e:
                self.error = e

        def ready(self, server):
            # Called from DocServer.server_activate once the socket is bound.
            self.serving = True
            self.host = server.host
            self.port = server.server_port
            self.url = 'http://%s:%d/' % (self.host, self.port)

        def stop(self):
            """Stop the server and this thread nicely"""
            self.docserver.quit = True
            self.serving = False
            self.url = None

    thread = ServerThread(urlhandler, port)
    thread.start()
    # Wait until thread.serving is True to make sure we are
    # really up before returning.
    while not thread.error and not thread.serving:
        time.sleep(.01)
    return thread
def _url_handler(url, content_type="text/html"):
    """The pydoc url handler for use with the pydoc server.

    If the content_type is 'text/css', the _pydoc.css style
    sheet is read and returned if it exits.

    If the content_type is 'text/html', then the result of
    get_html_page(url) is returned.
    """
    class _HTMLDoc(HTMLDoc):

        def page(self, title, contents):
            """Format an HTML page."""
            css_path = "pydoc_data/_pydoc.css"
            css_link = (
                '<link rel="stylesheet" type="text/css" href="%s">' %
                css_path)
            return '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Pydoc: %s</title>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
%s</head><body bgcolor="#f0f0f8">%s<div style="clear:both;padding-top:.5em;">%s</div>
</body></html>''' % (title, css_link, html_navbar(), contents)

        def filelink(self, url, path):
            # Source listings are routed through the getfile handler.
            return '<a href="getfile?key=%s">%s</a>' % (url, path)

    html = _HTMLDoc()

    def html_navbar():
        """Navigation bar shown at the top of every generated page."""
        version = html.escape("%s [%s, %s]" % (platform.python_version(),
                                               platform.python_build()[0],
                                               platform.python_compiler()))
        return """
<div style='float:left'>
Python %s<br>%s
</div>
<div style='float:right'>
<div style='text-align:center'>
<a href="index.html">Module Index</a>
: <a href="topics.html">Topics</a>
: <a href="keywords.html">Keywords</a>
</div>
<div>
<form action="get" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Get">
</form>
<form action="search" style='display:inline;'>
<input type=text name=key size=15>
<input type=submit value="Search">
</form>
</div>
</div>
""" % (version, html.escape(platform.platform(terse=True)))

    def html_index():
        """Module Index page."""
        def bltinlink(name):
            return '<a href="%s.html">%s</a>' % (name, name)

        heading = html.heading(
            '<big><big><strong>Index of Modules</strong></big></big>',
            '#ffffff', '#7799ee')
        names = [name for name in sys.builtin_module_names
                 if name != '__main__']
        contents = html.multicolumn(names, bltinlink)
        contents = [heading, '<p>' + html.bigsection(
            'Built-in Modules', '#ffffff', '#ee77aa', contents)]

        seen = {}
        for dir in sys.path:
            contents.append(html.index(dir, seen))

        contents.append(
            '<p align=right><font color="#909090" face="helvetica,'
            'arial"><strong>pydoc</strong> by Ka-Ping Yee'
            '<ping@lfw.org></font>')
        return 'Index of Modules', ''.join(contents)

    def html_search(key):
        """Search results page."""
        # scan for modules
        search_result = []

        def callback(path, modname, desc):
            if modname[-9:] == '.__init__':
                modname = modname[:-9] + ' (package)'
            search_result.append((modname, desc and '- ' + desc))

        with warnings.catch_warnings():
            warnings.filterwarnings('ignore') # ignore problems during import
            def onerror(modname):
                pass
            ModuleScanner().run(callback, key, onerror=onerror)

        # format page
        def bltinlink(name):
            return '<a href="%s.html">%s</a>' % (name, name)

        results = []
        heading = html.heading(
            '<big><big><strong>Search Results</strong></big></big>',
            '#ffffff', '#7799ee')
        for name, desc in search_result:
            results.append(bltinlink(name) + desc)
        contents = heading + html.bigsection(
            'key = %s' % key, '#ffffff', '#ee77aa', '<br>'.join(results))
        return 'Search Results', contents

    def html_getfile(path):
        """Get and display a source file listing safely."""
        path = urllib.parse.unquote(path)
        with tokenize.open(path) as fp:
            lines = html.escape(fp.read())
        body = '<pre>%s</pre>' % lines
        heading = html.heading(
            '<big><big><strong>File Listing</strong></big></big>',
            '#ffffff', '#7799ee')
        contents = heading + html.bigsection(
            'File: %s' % path, '#ffffff', '#ee77aa', body)
        return 'getfile %s' % path, contents

    def html_topics():
        """Index of topic texts available."""

        def bltinlink(name):
            return '<a href="topic?key=%s">%s</a>' % (name, name)

        heading = html.heading(
            '<big><big><strong>INDEX</strong></big></big>',
            '#ffffff', '#7799ee')
        names = sorted(Helper.topics.keys())

        contents = html.multicolumn(names, bltinlink)
        contents = heading + html.bigsection(
            'Topics', '#ffffff', '#ee77aa', contents)
        return 'Topics', contents

    def html_keywords():
        """Index of keywords."""
        heading = html.heading(
            '<big><big><strong>INDEX</strong></big></big>',
            '#ffffff', '#7799ee')
        names = sorted(Helper.keywords.keys())

        def bltinlink(name):
            return '<a href="topic?key=%s">%s</a>' % (name, name)

        contents = html.multicolumn(names, bltinlink)
        contents = heading + html.bigsection(
            'Keywords', '#ffffff', '#ee77aa', contents)
        return 'Keywords', contents

    def html_topicpage(topic):
        """Topic or keyword help page."""
        buf = io.StringIO()
        htmlhelp = Helper(buf, buf)
        contents, xrefs = htmlhelp._gettopic(topic)
        if topic in htmlhelp.keywords:
            title = 'KEYWORD'
        else:
            title = 'TOPIC'
        heading = html.heading(
            '<big><big><strong>%s</strong></big></big>' % title,
            '#ffffff', '#7799ee')
        contents = '<pre>%s</pre>' % html.markup(contents)
        contents = html.bigsection(topic , '#ffffff','#ee77aa', contents)
        if xrefs:
            xrefs = sorted(xrefs.split())

            def bltinlink(name):
                return '<a href="topic?key=%s">%s</a>' % (name, name)

            xrefs = html.multicolumn(xrefs, bltinlink)
            xrefs = html.section('Related help topics: ',
                                 '#ffffff', '#ee77aa', xrefs)
        return ('%s %s' % (title, topic),
                ''.join((heading, contents, xrefs)))

    def html_getobj(url):
        obj = locate(url, forceload=1)
        if obj is None and url != 'None':
            raise ValueError('could not find object')
        title = describe(obj)
        content = html.document(obj, url)
        return title, content

    def html_error(url, exc):
        heading = html.heading(
            '<big><big><strong>Error</strong></big></big>',
            '#ffffff', '#7799ee')
        contents = '<br>'.join(html.escape(line) for line in
                               format_exception_only(type(exc), exc))
        contents = heading + html.bigsection(url, '#ffffff', '#bb0000',
                                             contents)
        return "Error - %s" % url, contents

    def get_html_page(url):
        """Generate an HTML page for url."""
        complete_url = url
        if url.endswith('.html'):
            url = url[:-5]
        try:
            if url in ("", "index"):
                title, content = html_index()
            elif url == "topics":
                title, content = html_topics()
            elif url == "keywords":
                title, content = html_keywords()
            elif '=' in url:
                # Query-style URLs: "<op>?key=<value>".
                op, _, url = url.partition('=')
                if op == "search?key":
                    title, content = html_search(url)
                elif op == "getfile?key":
                    title, content = html_getfile(url)
                elif op == "topic?key":
                    # try topics first, then objects.
                    try:
                        title, content = html_topicpage(url)
                    except ValueError:
                        title, content = html_getobj(url)
                elif op == "get?key":
                    # try objects first, then topics.
                    if url in ("", "index"):
                        title, content = html_index()
                    else:
                        try:
                            title, content = html_getobj(url)
                        except ValueError:
                            title, content = html_topicpage(url)
                else:
                    raise ValueError('bad pydoc url')
            else:
                title, content = html_getobj(url)
        except Exception as exc:
            # Catch any errors and display them in an error page.
            title, content = html_error(complete_url, exc)
        return html.page(title, content)

    if url.startswith('/'):
        url = url[1:]
    if content_type == 'text/css':
        path_here = os.path.dirname(os.path.realpath(__file__))
        css_path = os.path.join(path_here, url)
        with open(css_path) as fp:
            return ''.join(fp.readlines())
    elif content_type == 'text/html':
        return get_html_page(url)
    # Errors outside the url handler are caught by the server.
    raise TypeError('unknown content type %r for url %s' % (content_type, url))
def browse(port=0, *, open_browser=True):
    """Start the enhanced pydoc Web server and open a Web browser.

    Use port '0' to start the server on an arbitrary port.
    Set open_browser to False to suppress opening a browser.
    """
    import webbrowser
    serverthread = _start_server(_url_handler, port)
    if serverthread.error:
        print(serverthread.error)
        return
    if serverthread.serving:
        server_help_msg = 'Server commands: [b]rowser, [q]uit'
        if open_browser:
            webbrowser.open(serverthread.url)
        try:
            print('Server ready at', serverthread.url)
            print(server_help_msg)
            # Simple interactive command loop until quit or interrupt.
            while serverthread.serving:
                cmd = input('server> ')
                cmd = cmd.lower()
                if cmd == 'q':
                    break
                elif cmd == 'b':
                    webbrowser.open(serverthread.url)
                else:
                    print(server_help_msg)
        except (KeyboardInterrupt, EOFError):
            print()
        finally:
            if serverthread.serving:
                serverthread.stop()
                print('Server stopped')
# -------------------------------------------------- command-line interface
def ispath(x):
    """Return True if *x* is a string that contains the OS path separator."""
    return isinstance(x, str) and os.sep in x
def cli():
    """Command-line interface (looks at sys.argv to decide what to do)."""
    import getopt
    class BadUsage(Exception): pass

    # Scripts don't get the current directory in their path by default
    # unless they are run with the '-m' switch
    if '' not in sys.path:
        scriptdir = os.path.dirname(sys.argv[0])
        if scriptdir in sys.path:
            sys.path.remove(scriptdir)
        sys.path.insert(0, '.')

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'bk:p:w')
        writing = False
        start_server = False
        open_browser = False
        port = None
        for opt, val in opts:
            if opt == '-b':
                start_server = True
                open_browser = True
            if opt == '-k':
                apropos(val)
                return
            if opt == '-p':
                start_server = True
                port = val
            if opt == '-w':
                writing = True

        if start_server:
            if port is None:
                # Port 0 lets the OS pick an arbitrary free port.
                port = 0
            browse(port, open_browser=open_browser)
            return

        if not args: raise BadUsage
        for arg in args:
            if ispath(arg) and not os.path.exists(arg):
                print('file %r does not exist' % arg)
                break
            try:
                # A path argument is imported as a module first.
                if ispath(arg) and os.path.isfile(arg):
                    arg = importfile(arg)
                if writing:
                    if ispath(arg) and os.path.isdir(arg):
                        writedocs(arg)
                    else:
                        writedoc(arg)
                else:
                    help.help(arg)
            except ErrorDuringImport as value:
                print(value)

    except (getopt.error, BadUsage):
        cmd = os.path.splitext(os.path.basename(sys.argv[0]))[0]
        print("""pydoc - the Python documentation tool

{cmd} <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '{sep}', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.

{cmd} -k <keyword>
Search for a keyword in the synopsis lines of all available modules.

{cmd} -p <port>
Start an HTTP server on the given port on the local machine. Port
number 0 can be used to get an arbitrary unused port.

{cmd} -b
Start an HTTP server on an arbitrary unused port and open a Web browser
to interactively browse documentation. The -p option can be used with
the -b option to explicitly specify the server port.

{cmd} -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '{sep}', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""".format(cmd=cmd, sep=os.sep))
if __name__ == '__main__':
cli()
| bsd-2-clause |
tanji/replication-manager | share/opensvc/compliance/com.replication-manager/keyval.py | 2 | 11079 | #!/usr/bin/env python
# Metadata consumed by the CompObject base class: environment-variable
# prefix for rules, usage examples, a human-readable description, and the
# OpenSVC form definition (YAML) rendered by the compliance UI.
data = {
    "default_prefix": "OSVC_COMP_GROUP_",
    "example_kwargs": {
        "path": "/etc/ssh/sshd_config",
    },
    "example_value": """
[
{
"key": "PermitRootLogin",
"op": "=",
"value": "yes"
}
]
or
{
"path": "/etc/ssh/sshd_config",
"keys": [
{
"key": "PermitRootLogin",
"op": "=",
"value": "yes"
}
]
}
""",
    "description": """* Setup and verify keys in "key value" formatted configuration file.
* Example files: sshd_config, ssh_config, ntp.conf, ...
""",
    "form_definition": """
Desc: |
A rule to set a list of parameters in simple keyword/value configuration file format. Current values can be checked as set or unset, strictly equal, or superior/inferior to their target value.
Outputs:
-
Dest: compliance variable
Type: json
Format: list of dict
Class: keyval
Inputs:
-
Id: key
Label: Key
DisplayModeTrim: 64
DisplayModeLabel: key
LabelCss: action16
Mandatory: Yes
Type: string
Help:
-
Id: op
Label: Comparison operator
DisplayModeLabel: op
LabelCss: action16
Mandatory: Yes
Type: string
Default: "="
Candidates:
- reset
- unset
- "="
- ">"
- ">="
- "<"
- "<="
Help: The comparison operator to use to check the parameter current value.
-
Id: value
Label: Value
DisplayModeLabel: value
LabelCss: action16
Mandatory: Yes
Type: string or integer
Help: The configuration file parameter target value.
""",
}
import os
import sys
import json
sys.path.append(os.path.dirname(__file__))
from comp import *
from keyval_parser import Parser, ParserError
class KeyVal(CompObject):
    """Check and enforce key/value pairs in keyword-value config files.

    Rules come from CompObject.get_rules(); each rule targets either the
    file given at construction time (self.cf) or carries its own "path".
    Files are parsed with keyval_parser.Parser and compared key by key.
    """

    def __init__(self, prefix=None, path=None):
        CompObject.__init__(self, prefix=prefix, data=data)
        self.cf = path

    def init(self):
        """Group rules per file and parse each target file.

        Builds self.file_keys mapping path -> {"target_n_key": expected
        occurrence count per key, "keys": rule list, "conf": Parser}.
        """
        self.nocf = False
        self.file_keys = {}
        if self.cf:
            self.file_keys[self.cf] = {
                "target_n_key": {},
                "keys": [],
            }

        for rule in self.get_rules():
            if self.cf and "key" in rule:
                # Bare key rules apply to the constructor-supplied file.
                self.file_keys[self.cf]["keys"] += [rule]
                continue
            if "path" not in rule:
                continue
            if "keys" not in rule or not isinstance(rule["keys"], list):
                continue
            path = rule["path"]
            if path not in self.file_keys:
                self.file_keys[path] = {
                    "target_n_key": {},
                    "keys": rule["keys"],
                }
            else:
                self.file_keys[path]["keys"] += rule["keys"]

        for path, data in self.file_keys.items():
            for i, key in enumerate(data["keys"]):
                if data["keys"][i]['op'] == 'IN':
                    # 'IN' targets are serialized as JSON lists.
                    data["keys"][i]['value'] = json.loads(data["keys"][i]['value'])
                if 'op' in key and 'key' in key and key['op'] not in ("unset", "reset"):
                    # Count how many occurrences of each key the rules expect.
                    if key['key'] not in data["target_n_key"]:
                        data["target_n_key"][key['key']] = 1
                    else:
                        data["target_n_key"][key['key']] += 1
            try:
                data["conf"] = Parser(path)
            except ParserError as e:
                perror(e)
                raise ComplianceError()

    def fixable(self):
        return RET_OK

    def _check_key(self, path, data, keyname, target, op, value, instance=0, verbose=True):
        """Compare one current *value* against *target* under *op*.

        Returns RET_OK or RET_ERR.  *value* is None when the key is
        absent; for "reset" it is the list of all current occurrences.
        """
        r = RET_OK
        if op == "reset":
            # "reset": the key may appear at most as many times as the
            # other rules define it.
            if value is not None:
                current_n_key = len(value)
                target_n_key = data["target_n_key"][keyname] if keyname in data["target_n_key"] else 0
                if current_n_key > target_n_key:
                    if verbose:
                        perror("%s is set %d times, should be set %d times"%(keyname, current_n_key, target_n_key))
                    return RET_ERR
                else:
                    if verbose:
                        pinfo("%s is set %d times, on target"%(keyname, current_n_key))
                    return RET_OK
            else:
                return RET_OK
        elif op == "unset":
            if value is not None:
                # An empty target means "the key must not be set at all";
                # otherwise only that exact value must be absent.
                if target.strip() == "":
                    if verbose:
                        perror("%s is set, should not be"%keyname)
                    return RET_ERR
                target_found = False
                for i, val in enumerate(value):
                    if target == val:
                        target_found = True
                        break
                if target_found:
                    if verbose:
                        perror("%s[%d] is set to value %s, should not be"%(keyname, i, target))
                    return RET_ERR
                else:
                    if verbose:
                        pinfo("%s is not set to value %s, on target"%(keyname, target))
                    return RET_OK
            else:
                if target.strip() != "":
                    if verbose:
                        pinfo("%s=%s is not set, on target"%(keyname, target))
                else:
                    if verbose:
                        pinfo("%s is not set, on target"%keyname)
                return RET_OK

        if value is None:
            # Key absent: only acceptable if 'IN' lists "unset".
            if op == 'IN' and "unset" in map(str, target):
                if verbose:
                    pinfo("%s is not set, on target"%(keyname))
                return RET_OK
            else:
                if verbose:
                    perror("%s[%d] is not set, target: %s"%(keyname, instance, str(target)))
                return RET_ERR

        if type(value) == list:
            if str(target) in value:
                if verbose:
                    pinfo("%s[%d]=%s on target"%(keyname, instance, str(value)))
                return RET_OK
            else:
                if verbose:
                    perror("%s[%d]=%s is not set"%(keyname, instance, str(target)))
                return RET_ERR

        if op == '=':
            if str(value) != str(target):
                if verbose:
                    perror("%s[%d]=%s, target: %s"%(keyname, instance, str(value), str(target)))
                r |= RET_ERR
            elif verbose:
                pinfo("%s=%s on target"%(keyname, str(value)))
        elif op == 'IN':
            if str(value) not in map(str, target):
                if verbose:
                    perror("%s[%d]=%s, target: %s"%(keyname, instance, str(value), str(target)))
                r |= RET_ERR
            elif verbose:
                pinfo("%s=%s on target"%(keyname, str(value)))
        else:
            # Ordering operators require an integer current value.
            if type(value) != int:
                if verbose:
                    perror("%s[%d]=%s value must be integer"%(keyname, instance, str(value)))
                r |= RET_ERR
            elif op == '<=' and value > target:
                if verbose:
                    perror("%s[%d]=%s target: <= %s"%(keyname, instance, str(value), str(target)))
                r |= RET_ERR
            elif op == '>=' and value < target:
                if verbose:
                    perror("%s[%d]=%s target: >= %s"%(keyname, instance, str(value), str(target)))
                r |= RET_ERR
            elif verbose:
                pinfo("%s[%d]=%s on target"%(keyname, instance, str(value)))
        return r

    def check_key(self, path, data, key, instance=0, verbose=True):
        """Validate one rule dict, then delegate to _check_key."""
        if 'key' not in key:
            if verbose:
                perror("'key' not set in rule %s"%str(key))
            return RET_NA
        if 'value' not in key:
            if verbose:
                perror("'value' not set in rule %s"%str(key))
            return RET_NA
        if 'op' not in key:
            op = "="
        else:
            op = key['op']
        target = key['value']

        allowed_ops = ('>=', '<=', '=', 'unset', 'reset', 'IN')
        if op not in allowed_ops:
            if verbose:
                perror(key['key'], "'op' value must be one of", ", ".join(allowed_ops))
            return RET_NA

        keyname = key['key']
        value = data["conf"].get(keyname, instance=instance)

        r = self._check_key(path, data, keyname, target, op, value, instance=instance, verbose=verbose)

        return r

    def fix_key(self, path, data, key, instance=0):
        """Apply one rule to the parsed file (set, unset or truncate)."""
        if key['op'] == "unset" or (key['op'] == "IN" and key['value'][0] == "unset"):
            pinfo("%s unset"%key['key'])
            if key['op'] == "IN":
                target = None
            else:
                target = key['value']
            data["conf"].unset(key['key'], target)
        elif key['op'] == "reset":
            target_n_key = data["target_n_key"][key['key']] if key['key'] in data["target_n_key"] else 0
            pinfo("%s truncated to %d definitions"%(key['key'], target_n_key))
            data["conf"].truncate(key['key'], target_n_key)
        else:
            if key['op'] == "IN":
                # The first candidate is the canonical value to enforce.
                target = key['value'][0]
            else:
                target = key['value']
            pinfo("%s=%s set"%(key['key'], target))
            data["conf"].set(key['key'], target, instance=instance)

    def check(self):
        """Check every target file; OR the per-file results together."""
        r = RET_OK
        for path, data in self.file_keys.items():
            r |= self.check_keys(path, data)
        return r

    def check_keys(self, path, data):
        r = RET_OK
        # Track, per key, which occurrence (instance) each rule addresses.
        key_instance = {}
        for key in data["keys"]:
            if 'key' not in key or 'op' not in key:
                continue
            if key['op'] in ('reset', 'unset'):
                instance = None
            else:
                if key['key'] not in key_instance:
                    key_instance[key['key']] = 0
                else:
                    key_instance[key['key']] += 1
                instance = key_instance[key['key']]
            r |= self.check_key(path, data, key, instance=instance, verbose=True)
        return r

    def fix(self):
        """Fix every target file; OR the per-file results together."""
        r = RET_OK
        for path, data in self.file_keys.items():
            r |= self.fix_keys(path, data)
        return r

    def fix_keys(self, path, data):
        # Same instance-tracking walk as check_keys, but only failing
        # keys are fixed, and the file is written once at the end.
        key_instance = {}
        for key in data["keys"]:
            if 'key' not in key or 'op' not in key:
                continue
            if key['op'] in ('reset', 'unset'):
                instance = None
            else:
                if key['key'] not in key_instance:
                    key_instance[key['key']] = 0
                else:
                    key_instance[key['key']] += 1
                instance = key_instance[key['key']]
            if self.check_key(path, data, key, instance=instance, verbose=False) == RET_ERR:
                self.fix_key(path, data, key, instance=instance)

        if not data["conf"].changed:
            return RET_OK
        try:
            data["conf"].write()
        except ParserError as e:
            perror(e)
            return RET_ERR
        return RET_OK
if __name__ == "__main__":
main(KeyVal)
| gpl-3.0 |
contactless/mqtt-rpc | python/test_client.py | 1 | 1794 | import json, time
import pprint
import argparse
try:
import mosquitto
except ImportError:
import paho.mqtt.client as mosquitto
from mqttrpc.client import TMQTTRPCClient
from jsonrpc.exceptions import JSONRPCError
def main():
    """Parse CLI options, connect to the MQTT broker and poll an RPC endpoint.

    Note: this script is Python 2 (xrange, print statement).
    """
    # argparse's built-in -h/--help is disabled because -h is repurposed
    # as the MQTT host option.
    parser = argparse.ArgumentParser(description='Sample RPC client', add_help=False)
    parser.add_argument('-h', '--host', dest='host', type=str,
                        help='MQTT host', default='localhost')
    parser.add_argument('-u', '--username', dest='username', type=str,
                        help='MQTT username', default='')
    parser.add_argument('-P', '--password', dest='password', type=str,
                        help='MQTT password', default='')
    parser.add_argument('-p', '--port', dest='port', type=int,
                        help='MQTT port', default='1883')
    args = parser.parse_args()

    client = mosquitto.Mosquitto()
    if args.username:
        client.username_pw_set(args.username, args.password)
    client.connect(args.host, args.port)
    client.loop_start()

    rpc_client = TMQTTRPCClient(client)
    client.on_message = rpc_client.on_mqtt_message

    #~ resp = rpc_client.call('Driver', 'main', 'foobar', {'foo':'foo', 'bar':'bar'})

    # Poll the db_logger history service ten times, 5 seconds apart
    # (last positional argument is the 10-second RPC timeout).
    for i in xrange(10):
        resp = rpc_client.call('db_logger', 'history', 'get_values', {
            'channels': [
                [ 'wb-w1', '00-1234566789' ],
                [ 'wb-w1', '00' ],
                [ 'wb-adc', 'Vin'],
            ],
            'timestamp' : {
                'gt': 1434728034
            },
            'limit' : 60
        }, 10)
        print "got result!"
        pprint.pprint(resp)
        time.sleep(5)
if __name__ == "__main__":
main()
| mit |
romkk/p2pool | p2pool/util/math.py | 44 | 6400 | from __future__ import absolute_import, division
import __builtin__
import math
import random
import time
def median(x, use_float=True):
    """Return the median of iterable *x*.

    For an even item count the two middle values are averaged; with
    use_float=False the average uses floor division instead.  Raises
    ValueError on an empty sequence.

    Note: a full sort is O(n log n); a selection algorithm would be
    O(n), but this is simple enough for the sizes used here.
    """
    ordered = sorted(x)
    if not ordered:
        raise ValueError('empty sequence!')
    n = len(ordered)
    middle_sum = ordered[(n - 1) // 2] + ordered[n // 2]
    return middle_sum / 2 if use_float else middle_sum // 2
def mean(x):
    """Return the arithmetic mean of iterable *x* (true division).

    Raises ZeroDivisionError when *x* yields no items.
    """
    total, n = 0, 0
    for value in x:
        total += value
        n += 1
    return total / n
def shuffled(x):
    """Return a new list containing the items of *x* in random order."""
    items = list(x)
    random.shuffle(items)
    return items
def shift_left(n, m):
    """Shift *n* left by *m* bits; a negative *m* shifts right instead."""
    # Python's << raises on negative shift counts, hence the branch.
    return n << m if m >= 0 else n >> -m
def clip(x, bounds):
    """Clamp *x* into the inclusive interval bounds=(low, high).

    The bounds are still passed as a single (low, high) pair, exactly
    like the original tuple-parameter signature, so call sites are
    unchanged.  (Tuple parameter unpacking is Python-2-only; it was
    removed by PEP 3113, so this form also runs on Python 3.)
    """
    low, high = bounds
    if x < low:
        return low
    elif x > high:
        return high
    else:
        return x
def add_to_range(x, bounds):
    """Return bounds=(low, high) widened just enough to include *x*.

    A def replaces the original lambda, which used Python-2-only tuple
    parameter unpacking (removed by PEP 3113); the call signature --
    a value plus a (low, high) pair -- is unchanged.
    """
    low, high = bounds
    return (min(low, x), max(high, x))
def nth(i, n=0):
    """Return the item at index *n* of iterable *i* (default: the first).

    Consumes the iterator up to that point; raises StopIteration when
    the iterable has fewer than n+1 items.  Uses next()/range() instead
    of the Python-2-only i.next()/xrange(), so it runs on Python 2.6+
    and Python 3 alike; behavior is otherwise identical.
    """
    it = iter(i)
    for _ in range(n):
        next(it)
    return next(it)
def geometric(p):
    """Sample a geometric distribution with success probability *p*.

    Returns the 1-based count of Bernoulli(p) trials up to and including
    the first success, drawn by inverting the CDF.  Raises ValueError
    unless 0 < p <= 1.
    """
    if p <= 0 or p > 1:
        raise ValueError('p must be in the interval (0.0, 1.0]')
    if p == 1:
        return 1
    # log1p keeps precision for small p; +1 makes the support {1, 2, ...}.
    u = random.random()
    return int(math.log1p(-u) / math.log1p(-p)) + 1
def add_dicts_ext(add_func=lambda a, b: a+b, zero=0):
    """Build a dict-merging function.

    The returned add_dicts(*dicts) combines values sharing a key with
    *add_func* (starting from *zero*) and drops entries whose combined
    value equals *zero*.  Uses .items() instead of the Python-2-only
    .iteritems() so it works on both Python 2 and 3; behavior is
    otherwise unchanged.
    """
    def add_dicts(*dicts):
        res = {}
        for d in dicts:
            for k, v in d.items():
                res[k] = add_func(res.get(k, zero), v)
        return dict((k, v) for k, v in res.items() if v != zero)
    return add_dicts
add_dicts = add_dicts_ext()  # plain elementwise-sum merge with 0 as the identity

def mult_dict(c, x):
    """Return a copy of dict *x* with every value multiplied by *c*.

    A def replaces the original lambda, and .items() replaces the
    Python-2-only .iteritems() (same behavior on both versions).
    """
    return dict((k, c*v) for k, v in x.items())
def format(x):
    """Format a number with a metric suffix (k, M, G, ...).

    Values below 100000 are printed as-is; larger values are repeatedly
    integer-divided by 1000 and the matching SI prefix is appended.
    """
    prefixes = 'kMGTPEZY'
    count = 0
    while x >= 100000 and count < len(prefixes) - 2:
        x //= 1000
        count += 1
    suffix = prefixes[count - 1] if count else ''
    return '%i' % (x,) + suffix
def format_dt(dt):
    """Render a duration in seconds using its largest fitting unit."""
    # The loop variables deliberately leak: if no unit is strictly
    # exceeded, the last pair (1, 'seconds') is used.
    for unit_seconds, unit_name in [(60*60*24, 'days'), (60*60, 'hours'),
                                    (60, 'minutes'), (1, 'seconds')]:
        if dt > unit_seconds:
            break
    return '%.01f %s' % (dt/unit_seconds, unit_name)
# Stochastic rounding: for nonnegative x, round up with probability equal to
# the fractional part of x, so the result's expected value equals x.
perfect_round = lambda x: int(x + random.random())
def erf(x):
    """Error function, via Abramowitz & Stegun formula 7.1.26.

    Maximum absolute error about 1.5e-7.  Negative arguments use the
    identity erf(-x) = -erf(x).
    """
    # work on |x|, re-apply the sign at the end
    sign = -1 if x < 0 else 1
    x = abs(x)

    # fitted constants from A&S 7.1.26
    a1 = 0.254829592
    a2 = -0.284496736
    a3 = 1.421413741
    a4 = -1.453152027
    a5 = 1.061405429
    p = 0.3275911

    t = 1.0/(1.0 + p*x)
    poly = (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t
    return sign * (1.0 - poly*math.exp(-x*x))
def find_root(y_over_dy, start, steps=10, bounds=(None, None)):
    """Newton-Raphson root finding.

    y_over_dy(x) must return y(x)/y'(x).  Starting from *start*, at most
    *steps* update steps are taken; each iterate is clamped into
    *bounds* (either end may be None for unbounded) and the loop stops
    early once the estimate no longer changes.  Uses range() instead of
    the Python-2-only xrange() -- behavior is otherwise identical.
    """
    guess = start
    for i in range(steps):
        prev, guess = guess, guess - y_over_dy(guess)
        if bounds[0] is not None and guess < bounds[0]: guess = bounds[0]
        if bounds[1] is not None and guess > bounds[1]: guess = bounds[1]
        if guess == prev:
            break
    return guess
def ierf(z):
    """Inverse error function, via Newton's method on erf above.

    The derivative of erf(x) is 2*exp(-x**2)/sqrt(pi), hence the ratio
    handed to find_root.  Accuracy is limited by the ~1.5e-7 error of
    the erf approximation.
    """
    return find_root(lambda x: (erf(x) - z)/(2*math.e**(-x**2)/math.sqrt(math.pi)), 0)
def binomial_conf_interval(x, n, conf=0.95):
    """Wilson score confidence interval for a binomial proportion.

    Given x successes out of n trials, return (low, high) bounds on the
    underlying success probability at confidence level *conf*.  For
    n == 0 a randomly-placed interval of width *conf* is returned.
    """
    assert 0 <= x <= n and 0 <= conf < 1
    if n == 0:
        left = random.random()*(1 - conf)
        return left, left + conf
    # approximate - Wilson score interval
    z = math.sqrt(2)*ierf(conf)  # two-sided normal quantile for this conf level
    p = x/n
    topa = p + z**2/2/n
    topb = z * math.sqrt(p*(1-p)/n + z**2/4/n**2)
    bottom = 1 + z**2/n
    # Widen to include the point estimate, then clamp into [0, 1].
    return [clip(x, (0, 1)) for x in add_to_range(x/n, [(topa - topb)/bottom, (topa + topb)/bottom])]
# (smallest, largest) of an iterable; iterates x twice, so x must not be a
# one-shot iterator.
minmax = lambda x: (min(x), max(x))
def format_binomial_conf(x, n, conf=0.95, f=lambda x: x):
    """Format x-out-of-n as '~point% (low-high%)' using the Wilson interval.

    *f* maps proportions before display (e.g. a unit conversion); the
    bounds are re-ordered via minmax afterwards in case *f* is
    decreasing.  Returns '???' when n == 0.
    """
    if n == 0:
        return '???'
    left, right = minmax(map(f, binomial_conf_interval(x, n, conf)))
    return '~%.1f%% (%.f-%.f%%)' % (100*f(x/n), math.floor(100*left), math.ceil(100*right))
def reversed(x):
    """Like the builtin reversed(), but accepting arbitrary iterables.

    Intentionally shadows the builtin; the real one is reached through
    the Python-2 __builtin__ module imported at the top of the file.
    Non-sequence iterables (which make the builtin raise TypeError) are
    materialized into a list first.
    """
    try:
        return __builtin__.reversed(x)
    except TypeError:
        return reversed(list(x))
class Object(object):
    """A trivial attribute bag: Object(a=1, b=2) has .a == 1 and .b == 2.

    Uses .items() instead of the Python-2-only .iteritems() so the class
    works under both Python 2 and 3; behavior is otherwise unchanged.
    """
    def __init__(self, **kwargs):
        for k, v in kwargs.items():
            setattr(self, k, v)
def add_tuples(res, *tuples):
    """Elementwise-add each of *tuples* onto *res*, returning a new tuple.

    Every tuple must have the same length as *res*; otherwise a
    ValueError is raised.
    """
    for extra in tuples:
        if len(extra) != len(res):
            raise ValueError('tuples must all be the same length')
        res = tuple(a + b for a, b in zip(res, extra))
    return res
def flatten_linked_list(x):
    """Yield the values of a nested-pair linked list, outermost first.

    Each node is a 2-tuple (next_node, value), with None terminating
    the chain.
    """
    node = x
    while node is not None:
        node, value = node
        yield value
def weighted_choice(choices):
    """Pick an item from (item, weight) pairs with probability proportional to its weight."""
    pairs = list((item, weight) for item, weight in choices)
    # Draw a point in [0, total_weight) and walk the spans to find the
    # item whose weight range covers it.
    target = random.randrange(sum(weight for item, weight in pairs))
    for item, weight in pairs:
        if weight > target:
            return item
        target -= weight
    raise AssertionError()
def natural_to_string(n, alphabet=None):
    """Encode the natural number *n* as a string.

    With alphabet=None the number is encoded as big-endian bytes (the
    minimal even-length hex form); otherwise digits are drawn from
    *alphabet* (most significant first), whose symbols must be unique,
    and 0 encodes as the empty string.  binascii.unhexlify replaces the
    Python-2-only str.decode('hex') so the function runs on Python 2
    and Python 3 alike; the produced bytes are identical.
    """
    import binascii
    if n < 0:
        raise TypeError('n must be a natural')
    if alphabet is None:
        s = ('%x' % (n,)).lstrip('0')
        if len(s) % 2:
            s = '0' + s
        return binascii.unhexlify(s)
    else:
        assert len(set(alphabet)) == len(alphabet)
        res = []
        while n:
            n, x = divmod(n, len(alphabet))
            res.append(alphabet[x])
        res.reverse()
        return ''.join(res)
def string_to_natural(s, alphabet=None):
    """Decode a string produced by natural_to_string back into an int.

    With alphabet=None, *s* is interpreted as big-endian bytes;
    otherwise each character is a digit in *alphabet*.  Leading zero
    digits are rejected so the encoding stays canonical (exactly one
    string per number).  binascii.hexlify replaces the Python-2-only
    str.encode('hex'), and the leading-zero check picks the matching
    str/bytes prefix so it works on both Python 2 and 3.
    """
    import binascii
    if alphabet is None:
        assert not s.startswith(b'\x00' if isinstance(s, bytes) else '\x00')
        return int(binascii.hexlify(s), 16) if s else 0
    else:
        assert len(set(alphabet)) == len(alphabet)
        assert not s.startswith(alphabet[0])
        return sum(alphabet.index(char) * len(alphabet)**i for i, char in enumerate(reversed(s)))
class RateMonitor(object):
    """Keep a sliding time window of (timestamp, datum) pairs for rate estimates."""

    def __init__(self, max_lookback_time):
        # Window length, in seconds.
        self.max_lookback_time = max_lookback_time
        # Chronologically-ordered list of (timestamp, datum) pairs.
        self.datums = []
        # Time of the first add_datum call; None until then.
        self.first_timestamp = None

    def _prune(self):
        # Drop datums older than the window; returns at the first
        # young-enough entry since self.datums is chronological.
        # NOTE(review): if every entry is stale the list is left
        # untouched; harmless for get_datums_in_last (which re-filters),
        # but it retains memory -- confirm before relying on pruning.
        start_time = time.time() - self.max_lookback_time
        for i, (ts, datum) in enumerate(self.datums):
            if ts > start_time:
                self.datums[:] = self.datums[i:]
                return

    def get_datums_in_last(self, dt=None):
        """Return (datums within the last dt seconds, effective interval).

        The interval is capped at the time elapsed since the first
        add_datum call, and is 0 before any datum was added.
        """
        if dt is None:
            dt = self.max_lookback_time
        assert dt <= self.max_lookback_time
        self._prune()
        now = time.time()
        return [datum for ts, datum in self.datums if ts > now - dt], min(dt, now - self.first_timestamp) if self.first_timestamp is not None else 0

    def add_datum(self, datum):
        """Record *datum* with the current timestamp."""
        self._prune()
        t = time.time()
        if self.first_timestamp is None:
            # NOTE(review): the very first datum only establishes the
            # time baseline and is NOT stored -- presumably intentional
            # so rates are measured over a known interval; confirm
            # before changing.
            self.first_timestamp = t
        else:
            self.datums.append((t, datum))
| gpl-3.0 |
praekelt/gem-survey-tool | gems/core/migrations/0017_auto__add_vumichannel.py | 1 | 10064 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration: adds the VumiChannel model."""

    def forwards(self, orm):
        """Apply the migration: create the core_vumichannel table."""
        # Adding model 'VumiChannel'
        db.create_table(u'core_vumichannel', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('key', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal(u'core', ['VumiChannel'])

    def backwards(self, orm):
        """Revert the migration: drop the core_vumichannel table."""
        # Deleting model 'VumiChannel'
        db.delete_table(u'core_vumichannel')

    # Frozen ORM snapshot used by South at migration time.  Generated code;
    # do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'core.contact': {
            'Meta': {'object_name': 'Contact'},
            'created_on': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2016, 3, 22, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
            'msisdn': ('django.db.models.fields.CharField', [], {'max_length': '15', 'primary_key': 'True'}),
            'vkey': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'})
        },
        u'core.contactgroup': {
            'Meta': {'object_name': 'ContactGroup'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'filters': ('django.db.models.fields.CharField', [], {'max_length': '8000'}),
            'group_key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'query_words': ('django.db.models.fields.CharField', [], {'max_length': '8000'})
        },
        u'core.contactgroupmember': {
            'Meta': {'object_name': 'ContactGroupMember'},
            'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Contact']"}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.ContactGroup']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'synced': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'core.exporttypemapping': {
            'Meta': {'object_name': 'ExportTypeMapping'},
            'cast': ('django.db.models.fields.IntegerField', [], {}),
            'field': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'core.incomingsurvey': {
            'Meta': {'object_name': 'IncomingSurvey'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'raw_message': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
        },
        u'core.rawsurveyresult': {
            'Meta': {'object_name': 'RawSurveyResult'},
            'answer': (u'django_hstore.fields.DictionaryField', [], {}),
            'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Contact']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Survey']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'core.sentmessage': {
            'Meta': {'object_name': 'SentMessage'},
            'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'total': ('django.db.models.fields.IntegerField', [], {})
        },
        u'core.setting': {
            'Meta': {'object_name': 'Setting'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
        },
        u'core.survey': {
            'Meta': {'object_name': 'Survey'},
            'created_on': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
            'series': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'survey_id': ('django.db.models.fields.CharField', [], {'max_length': '200', 'primary_key': 'True'})
        },
        u'core.surveyresult': {
            'Meta': {'object_name': 'SurveyResult'},
            'answer': (u'django_hstore.fields.DictionaryField', [], {}),
            'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Contact']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Survey']"}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'core.vumichannel': {
            'Meta': {'object_name': 'VumiChannel'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['core']
poiesisconsulting/openerp-restaurant | account_analytic_plans/wizard/account_crossovered_analytic.py | 3 | 2976 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_crossovered_analytic(osv.osv_memory):
    """Wizard that prints the crossovered analytic report for an analytic
    account (and its children) over a date range."""
    _name = "account.crossovered.analytic"
    _description = "Print Crossovered Analytic"
    _columns = {
        'date1': fields.date('Start Date', required=True),
        'date2': fields.date('End Date', required=True),
        'journal_ids': fields.many2many('account.analytic.journal', 'crossovered_journal_rel', 'crossover_id', 'journal_id', 'Analytic Journal'),
        'ref': fields.many2one('account.analytic.account', 'Analytic Account Reference', required=True),
        'empty_line': fields.boolean('Dont show empty lines'),
    }
    _defaults = {
        # Default range: start of the current year up to today.
        'date1': lambda *a: time.strftime('%Y-01-01'),
        'date2': lambda *a: time.strftime('%Y-%m-%d'),
    }

    def print_report(self, cr, uid, ids, context=None):
        """Validate that the selected account subtree has analytic lines,
        then launch the crossovered analytic plans report.

        :raises osv.except_osv: when no analytic line references the chosen
            account or any of its children.
        """
        cr.execute('SELECT account_id FROM account_analytic_line')
        res = cr.fetchall()
        acc_ids = [x[0] for x in res]
        data = self.read(cr, uid, ids, [], context=context)[0]
        data['ref'] = data['ref'][0]
        obj_acc = self.pool.get('account.analytic.account').browse(cr, uid, data['ref'], context=context)
        name = obj_acc.name
        account_ids = self.pool.get('account.analytic.account').search(cr, uid, [('parent_id', 'child_of', [data['ref']])], context=context)
        flag = True
        for acc in account_ids:
            if acc in acc_ids:
                flag = False
                break
        if flag:
            # FIX: interpolate *after* translating, so the msgid looked up
            # in the .po catalog is the template containing '%s' (the old
            # code formatted first, defeating translation).
            raise osv.except_osv(_('User Error!'), _('There are no analytic lines related to account %s.') % name)
        datas = {
            'ids': [],
            'model': 'account.analytic.account',
            'form': data
        }
        return self.pool['report'].get_action(cr, uid, ids, 'account_analytic_plans.report_crossoveredanalyticplans', data=datas, context=context)
| agpl-3.0 |
tessercat/ddj | controllers/poems.py | 1 | 2187 | import poems
def call():
    # Service entry point: drop the session (releases the session file
    # lock; services are stateless) before dispatching to the handler.
    session.forget()
    return _service()
def index():
    """Front page: first page of the poem index plus pagination nav."""
    response.title = 'Daoistic'
    if request.args or request.vars:
        # The front page takes no arguments; anything extra is a bad URL.
        logger.error('Bad index: %s, %s', request.args, request.vars)
        raise HTTP(404)
    try:
        idx = cache.ram('poems-1', lambda: poems.index(1, db))
        pager = cache.ram('poem_pager-1', lambda: poems.pager(db))
        pager = TAG.nav(pager, **{'_aria-label': 'Page navigation'})
        pager = DIV(pager, _class='ddj-nav')
    except Exception:
        # Was a bare 'except:', which would also swallow SystemExit and
        # KeyboardInterrupt; any real failure still logs and becomes a 404.
        logger.exception('Bad index: %s', request.args(0))
        raise HTTP(404)
    return {'index': idx, 'pager': pager}
def chapter():
    """Render a single poem chapter selected by its chapter number."""
    try:
        prow = db(db.poem.chapter == request.args(0)).select().first()
        if not prow:
            # Unknown chapter: the handler below logs and re-raises the 404.
            raise HTTP(404)
        data = {}
        data['poem'] = cache.ram(
            'poem-%s' % request.args[0],
            lambda: poems.chapter(prow, db, uhdb)
        )
        response.title = data['poem'][0][0][0]
        data['url'] = URL('poems', 'chapter', request.args[0])
        data['title'] = data['poem'][0][0][0]
        data['description'] = '%s - %s' % (prow.intro_hanzi, prow.intro_en)
        data['links'] = cache.ram(
            'links-%s' % request.args[0],
            lambda: poems.links(prow, db)
        )
    except Exception:
        # Was a bare 'except:'; Exception still covers web2py's HTTP (a
        # subclass), so the not-found path above behaves as before, but
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        logger.exception('Bad chapter: %s', request.args(0))
        raise HTTP(404)
    return data
def page():
    """Render one page of the poem index (page number in request.args[0])."""
    try:
        low, high = poems.chapter_range(int(request.args(0)))
        response.title = 'Daoistic %i-%i' % (low, high)
        idx = cache.ram(
            'poems-%s' % request.args(0),
            lambda: poems.index(int(request.args(0)), db))
        pager = cache.ram(
            'poem_pager-%s' % request.args(0),
            lambda: poems.pager(db))
        pager = TAG.nav(pager, **{'_aria-label': 'Page navigation'})
        pager = DIV(pager, _class='ddj-nav')
    except Exception:
        # Was a bare 'except:' (also caught SystemExit/KeyboardInterrupt);
        # bad page numbers and cache failures still log and 404.
        logger.exception('Bad page: %s', request.args(0))
        raise HTTP(404)
    return {'index': idx, 'pager': pager}
@auth.requires_login()
def manage():
    # Admin-only poem management grid; 'edit' in the URL args toggles the
    # grid's editable mode.
    response.title = 'Daoistic Manage'
    grid = poems.grid(db, 'edit' in request.args)
    return {'grid': grid}
| mit |
marqueedev/django | tests/gis_tests/relatedapp/models.py | 31 | 1809 | from django.contrib.gis.db import models
from django.utils.encoding import python_2_unicode_compatible
class SimpleModel(models.Model):
    # Abstract base: attaches GeoManager so every test model supports
    # spatial queryset lookups.
    objects = models.GeoManager()

    class Meta:
        abstract = True
@python_2_unicode_compatible
class Location(SimpleModel):
    # A bare point geometry; __str__ shows its WKT for readable test output.
    point = models.PointField()

    def __str__(self):
        return self.point.wkt
@python_2_unicode_compatible
class City(SimpleModel):
    # Non-geometric model pointing at a geometric one, for FK-traversal tests.
    name = models.CharField(max_length=50)
    state = models.CharField(max_length=2)
    location = models.ForeignKey(Location)

    def __str__(self):
        return self.name
class AugmentedLocation(Location):
    # Multi-table inheritance from Location, adding a non-geometric field.
    extra_text = models.TextField(blank=True)

    objects = models.GeoManager()
class DirectoryEntry(SimpleModel):
    # References the inherited-geometry model above.
    listing_text = models.CharField(max_length=50)
    location = models.ForeignKey(AugmentedLocation)
@python_2_unicode_compatible
class Parcel(SimpleModel):
    # Multiple geometry columns with differing SRIDs on one model.
    name = models.CharField(max_length=30)
    city = models.ForeignKey(City)
    center1 = models.PointField()
    # Throwing a curveball w/`db_column` here.
    center2 = models.PointField(srid=2276, db_column='mycenter')
    border1 = models.PolygonField()
    border2 = models.PolygonField(srid=2276)

    def __str__(self):
        return self.name
# These use the GeoManager but do not have any geographic fields.
class Author(SimpleModel):
    # No geographic fields; exercises GeoManager on a plain model.
    name = models.CharField(max_length=100)
    dob = models.DateField()
class Article(SimpleModel):
    title = models.CharField(max_length=100)
    # unique=True makes this effectively a one-to-one link to Author.
    author = models.ForeignKey(Author, unique=True)
class Book(SimpleModel):
    title = models.CharField(max_length=100)
    # Nullable reverse relation ('books') for related-lookup tests.
    author = models.ForeignKey(Author, related_name='books', null=True)
class Event(SimpleModel):
    # Date/time-bearing model without geometry.
    name = models.CharField(max_length=100)
    when = models.DateTimeField()
| bsd-3-clause |
vasiliykochergin/euca2ools | euca2ools/commands/cloudformation/updatestack.py | 3 | 2702 | # Copyright 2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from requestbuilder import Arg, MutuallyExclusiveArgList
from euca2ools.commands.cloudformation import CloudFormationRequest
from euca2ools.commands.cloudformation.argtypes import parameter_list
class UpdateStack(CloudFormationRequest):
    # CLI request class wiring euca2ools arguments to the CloudFormation
    # UpdateStack API call.  Exactly one of --template-file/--template-url
    # must be supplied (enforced by the MutuallyExclusiveArgList).
    DESCRIPTION = 'Update a stack with a new template'
    ARGS = [Arg('StackName', metavar='STACK',
                help='name of the stack to update (required)'),
            MutuallyExclusiveArgList(
                Arg('--template-file', dest='TemplateBody',
                    metavar='FILE', type=open,
                    help='file containing a new JSON template for the stack'),
                Arg('--template-url', dest='TemplateURL', metavar='URL',
                    help='URL pointing to a new JSON template for the stack'))
            .required(),
            Arg('-p', '--parameter', dest='param_sets', route_to=None,
                metavar='KEY=VALUE', type=parameter_list, action='append',
                help='''key and value of the parameters to use with the
                stack's template, separated by an "=" character''')]

    def configure(self):
        # After base parsing, flatten the per-flag parameter lists into the
        # single Parameters.member list the API expects.
        CloudFormationRequest.configure(self)
        stack_params = sum(self.args.get('param_sets') or [], [])
        self.params['Parameters.member'] = stack_params

    def print_result(self, result):
        # Python 2 print statement: show the updated stack's identifier.
        print result.get('StackId')
| bsd-2-clause |
juanblanco/BlocksCAD | blockly/appengine/storage.py | 186 | 2617 | """Blockly Demo: Storage
Copyright 2012 Google Inc.
https://developers.google.com/blockly/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Store and retrieve XML with App Engine.
"""
__author__ = "q.neutron@gmail.com (Quynh Neutron)"
import cgi
from random import randint
from google.appengine.ext import db
from google.appengine.api import memcache
import logging
print "Content-Type: text/plain\n"
def keyGen():
    """Return a random 6-character key.

    The alphabet deliberately omits the easily-confused characters
    l, 0 and 1.
    """
    CHARS = "abcdefghijkmnopqrstuvwxyz23456789"
    last = len(CHARS) - 1
    return "".join(CHARS[randint(0, last)] for _ in range(6))
class Xml(db.Model):
    # A row in the database.
    # The datastore key_name is the short shareable key; xml_hash lets us
    # de-duplicate identical uploads; xml_content holds the Blockly XML.
    xml_hash = db.IntegerProperty()
    xml_content = db.TextProperty()
forms = cgi.FieldStorage()

if "xml" in forms:
    # Store XML and return a generated key.
    xml_content = forms["xml"].value
    # NOTE(review): hash() is process/implementation dependent in general;
    # presumably acceptable here because it is only a de-duplication hint.
    xml_hash = hash(xml_content)
    lookup_query = db.Query(Xml)
    lookup_query.filter("xml_hash =", xml_hash)
    lookup_result = lookup_query.get()
    if lookup_result:
        # Identical content already stored: reuse its key.
        xml_key = lookup_result.key().name()
    else:
        # Generate a fresh key, retrying on (unlikely) collisions.
        trials = 0
        result = True
        while result:
            trials += 1
            if trials == 100:
                raise Exception("Sorry, the generator failed to get a key for you.")
            xml_key = keyGen()
            result = db.get(db.Key.from_path("Xml", xml_key))
        xml = db.Text(xml_content, encoding="utf_8")
        row = Xml(key_name = xml_key, xml_hash = xml_hash, xml_content = xml)
        row.put()
    print xml_key

if "key" in forms:
    # Retrieve stored XML based on the provided key.
    key_provided = forms["key"].value
    # Normalize the string.
    key_provided = key_provided.lower().strip()
    # Check memcache for a quick match.
    xml = memcache.get("XML_" + key_provided)
    if xml is None:
        # Check datastore for a definitive match.
        result = db.get(db.Key.from_path("Xml", key_provided))
        if not result:
            # Unknown key: respond with an empty body.
            xml = ""
        else:
            xml = result.xml_content
        # Save to memcache for next hit.
        if not memcache.add("XML_" + key_provided, xml, 3600):
            logging.error("Memcache set failed.")
    print xml.encode("utf-8")
| gpl-3.0 |
bjamil/MT-HW3 | decoder/models.py | 15 | 2489 | #!/usr/bin/env python
# Simple translation model and language model data structures
import sys
from collections import namedtuple
# A translation model is a dictionary where keys are tuples of French words
# and values are lists of (english, logprob) named tuples. For instance,
# the French phrase "que se est" has two translations, represented like so:
# tm[('que', 'se', 'est')] = [
# phrase(english='what has', logprob=-0.301030009985),
# phrase(english='what has been', logprob=-0.301030009985)]
# k is a pruning parameter: only the top k translations are kept for each f.
phrase = namedtuple("phrase", "english, logprob")

def TM(filename, k):
    """Load a phrase translation table from `filename`.

    Each line has the form "french ||| english ||| logprob".  Returns a
    dict mapping tuples of French words to lists of `phrase` namedtuples,
    keeping only the `k` most probable English options per French phrase.
    """
    sys.stderr.write("Reading translation model from %s...\n" % (filename,))
    tm = {}
    # 'with' guarantees the handle is closed (the original leaked it), and
    # iterating the file avoids materialising every line at once.
    with open(filename) as tm_file:
        for line in tm_file:
            (f, e, logprob) = line.strip().split(" ||| ")
            tm.setdefault(tuple(f.split()), []).append(phrase(e, float(logprob)))
    for f in tm:  # prune all but top k translations
        tm[f].sort(key=lambda x: -x.logprob)
        del tm[f][k:]
    return tm
# # A language model scores sequences of English words, and must account
# # for both beginning and end of each sequence. Example API usage:
# lm = models.LM(filename)
# sentence = "This is a test ."
# lm_state = lm.begin() # initial state is always <s>
# logprob = 0.0
# for word in sentence.split():
# (lm_state, word_logprob) = lm.score(lm_state, word)
# logprob += word_logprob
# logprob += lm.end(lm_state) # transition to </s>, can also use lm.score(lm_state, "</s>")[1]
ngram_stats = namedtuple("ngram_stats", "logprob, backoff")

class LM:
    """Backoff n-gram language model loaded from an ARPA-style file."""

    def __init__(self, filename):
        sys.stderr.write("Reading language model from %s...\n" % (filename,))
        self.table = {}
        # 'with' guarantees the file handle is closed (the original leaked
        # it by iterating a bare open()).
        with open(filename) as lm_file:
            for line in lm_file:
                entry = line.strip().split("\t")
                # Skip headers/counts ("ngram ...") and blank lines; a data
                # line is: logprob <TAB> ngram [<TAB> backoff].
                if len(entry) > 1 and entry[0] != "ngram":
                    (logprob, ngram, backoff) = (float(entry[0]), tuple(entry[1].split()), float(entry[2] if len(entry) == 3 else 0.0))
                    self.table[ngram] = ngram_stats(logprob, backoff)

    def begin(self):
        """Initial LM state: the start-of-sentence marker."""
        return ("<s>",)

    def score(self, state, word):
        """Return (new_state, logprob) for appending `word` to `state`,
        backing off to shorter histories when the full n-gram is unseen."""
        ngram = state + (word,)
        score = 0.0
        while len(ngram) > 0:
            if ngram in self.table:
                # Keep at most a bigram of context as the next state.
                return (ngram[-2:], score + self.table[ngram].logprob)
            else:  # backoff
                score += self.table[ngram[:-1]].backoff if len(ngram) > 1 else 0.0
                ngram = ngram[1:]
        return ((), score + self.table[("<unk>",)].logprob)

    def end(self, state):
        """Log-probability of terminating the sentence from `state`."""
        return self.score(state, "</s>")[1]
| mit |
fduraffourg/servo | tests/wpt/web-platform-tests/mixed-content/generic/expect.py | 95 | 4179 | import json, os, urllib, urlparse
def redirect(url, response):
    """Emit a bare 301 response pointing at `url`.

    wptserve's automatic headers are disabled so only the status line and
    the headers written here go on the wire.
    """
    response.add_required_headers = False
    writer = response.writer
    writer.write_status(301)
    for header, value in (("access-control-allow-origin", "*"),
                          ("location", url)):
        writer.write_header(header, value)
    writer.end_headers()
    writer.write("")
def create_redirect_url(request, swap_scheme = False):
    """Rebuild the current request URL as a redirect target.

    With swap_scheme, flip http<->https and substitute the port the test
    server exposes for the flipped scheme.  (Python 2: urlparse/urllib.)
    """
    parsed = urlparse.urlsplit(request.url)
    destination_netloc = parsed.netloc
    scheme = parsed.scheme
    if swap_scheme:
        scheme = "http" if parsed.scheme == "https" else "https"
        hostname = parsed.netloc.split(':')[0]
        # First configured port for the target scheme.
        port = request.server.config["ports"][scheme][0]
        destination_netloc = ":".join([hostname, str(port)])

    # Remove "redirection" from query to avoid redirect loops.
    # NOTE(review): asserting on request data means a missing parameter
    # crashes the handler rather than returning an error response.
    parsed_query = dict(urlparse.parse_qsl(parsed.query))
    assert "redirection" in parsed_query
    del parsed_query["redirection"]

    destination_url = urlparse.urlunsplit(urlparse.SplitResult(
        scheme = scheme,
        netloc = destination_netloc,
        path = parsed.path,
        query = urllib.urlencode(parsed_query),
        fragment = None))

    return destination_url
def main(request, response):
    """wptserve handler used by mixed-content tests.

    Supports optional redirection (no-redirect / keep-scheme-redirect /
    swap-scheme-redirect) and a stash protocol: action=put records a value,
    action=purge consumes it and serves a typed payload, action=take reports
    whether the resource request was allowed or blocked.
    """
    if "redirection" in request.GET:
        redirection = request.GET["redirection"]
        if redirection == "no-redirect":
            pass
        elif redirection == "keep-scheme-redirect":
            redirect(create_redirect_url(request, swap_scheme=False), response)
            return
        elif redirection == "swap-scheme-redirect":
            redirect(create_redirect_url(request, swap_scheme=True), response)
            return
        else:
            raise ValueError ("Invalid redirect type: %s" % redirection)

    content_type = "text/plain"
    response_data = ""

    if "action" in request.GET:
        action = request.GET["action"]

        if "content_type" in request.GET:
            content_type = request.GET["content_type"]

        key = request.GET["key"]
        stash = request.server.stash
        # Stash entries are scoped by URL path (query string stripped).
        path = request.GET.get("path", request.url.split('?'))[0]

        if action == "put":
            # Record the value under (key, path); take first to clear any
            # stale entry from a previous run.
            value = request.GET["value"]
            stash.take(key=key, path=path)
            stash.put(key=key, value=value, path=path)
            response_data = json.dumps({"status": "success", "result": key})
        elif action == "purge":
            # Consume the stash entry and serve a payload matching the
            # requested content type (image/audio/video/script/plain).
            value = stash.take(key=key, path=path)
            if content_type == "image/png":
                response_data = open(os.path.join(request.doc_root,
                                                  "images",
                                                  "smiley.png"), "rb").read()
            elif content_type == "audio/mpeg":
                response_data = open(os.path.join(request.doc_root,
                                                  "media",
                                                  "sound_5.oga"), "rb").read()
            elif content_type == "video/mp4":
                response_data = open(os.path.join(request.doc_root,
                                                  "media",
                                                  "movie_5.mp4"), "rb").read()
            elif content_type == "application/javascript":
                response_data = open(os.path.join(request.doc_root,
                                                  "mixed-content",
                                                  "generic",
                                                  "worker.js"), "rb").read()
            else:
                response_data = "/* purged */"
        elif action == "take":
            # If the entry is gone the purge request reached us => allowed;
            # if it is still present the resource load was blocked.
            value = stash.take(key=key, path=path)
            if value is None:
                status = "allowed"
            else:
                status = "blocked"
            response_data = json.dumps({"status": status, "result": value})

    response.add_required_headers = False
    response.writer.write_status(200)
    response.writer.write_header("content-type", content_type)
    response.writer.write_header("cache-control", "no-cache; must-revalidate")
    response.writer.end_headers()
    response.writer.write(response_data)
| mpl-2.0 |
chrxr/wagtail | wagtail/wagtailcore/whitelist.py | 5 | 5347 | """
A generic HTML whitelisting engine, designed to accommodate subclassing to override
specific rules.
"""
from __future__ import absolute_import, unicode_literals
import re
from bs4 import BeautifulSoup, Comment, NavigableString, Tag
ALLOWED_URL_SCHEMES = ['http', 'https', 'ftp', 'mailto', 'tel']

PROTOCOL_RE = re.compile("^[a-z0-9][-+.a-z0-9]*:")


def check_url(url_string):
    """Return `url_string` if its scheme is allowed (or absent), else None.

    Browsers sometimes ignore control characters and entity escapes, so
    that 'jav\\tascript:alert("XSS")' is treated as a valid javascript:
    link; we therefore normalise an inspection copy before sniffing the
    scheme, and always return the *original* string when it passes.
    """
    unescaped = url_string.lower()
    # FIX: decode the basic HTML entities.  The previous code replaced
    # each character with itself (the entity names had been entity-decoded
    # somewhere along the way), turning these three lines into no-ops.
    unescaped = unescaped.replace("&lt;", "<")
    unescaped = unescaped.replace("&gt;", ">")
    unescaped = unescaped.replace("&amp;", "&")
    # Strip backticks, control characters, whitespace and the Unicode
    # replacement character, all of which browsers may ignore.
    unescaped = re.sub("[`\000-\040\177-\240\s]+", '', unescaped)
    unescaped = unescaped.replace("\ufffd", "")
    if PROTOCOL_RE.match(unescaped):
        protocol = unescaped.split(':', 1)[0]
        if protocol not in ALLOWED_URL_SCHEMES:
            return None
    return url_string
def attribute_rule(allowed_attrs):
    """Build a tag-cleaning function from a dict of permitted attributes,
    suitable as an entry in Whitelister.element_rules.

    For each attribute of the tag, `allowed_attrs` is consulted:
    * missing or falsy entry: the attribute is dropped;
    * callable entry: the attribute is replaced by the call's result,
      or dropped when that result is None (e.g. {'title': uppercase});
    * any other truthy entry: the attribute is kept unchanged.
    """
    def clean_tag(tag):
        # Snapshot the items: we mutate the attribute dict while looping.
        for attr, val in list(tag.attrs.items()):
            rule = allowed_attrs.get(attr)
            if callable(rule):
                replacement = rule(val)
                if replacement is None:
                    del tag[attr]
                else:
                    tag[attr] = replacement
            elif not rule:
                # falsy or absent rule - remove the attribute
                del tag[attr]
            # truthy non-callable rule: keep the attribute as-is
    return clean_tag


allow_without_attributes = attribute_rule({})
class Whitelister(object):
    """Whitelisting engine: reduces arbitrary HTML to the tags and
    attributes listed in `element_rules`.  Subclass and override
    `element_rules` to customise the policy."""

    element_rules = {
        '[document]': allow_without_attributes,
        'a': attribute_rule({'href': check_url}),
        'b': allow_without_attributes,
        'br': allow_without_attributes,
        'div': allow_without_attributes,
        'em': allow_without_attributes,
        'h1': allow_without_attributes,
        'h2': allow_without_attributes,
        'h3': allow_without_attributes,
        'h4': allow_without_attributes,
        'h5': allow_without_attributes,
        'h6': allow_without_attributes,
        'hr': allow_without_attributes,
        'i': allow_without_attributes,
        'img': attribute_rule({'src': check_url, 'width': True, 'height': True,
                               'alt': True}),
        'li': allow_without_attributes,
        'ol': allow_without_attributes,
        'p': allow_without_attributes,
        'strong': allow_without_attributes,
        'sub': allow_without_attributes,
        'sup': allow_without_attributes,
        'ul': allow_without_attributes,
    }

    @classmethod
    def clean(cls, html):
        """Clean up an HTML string to contain just the allowed elements /
        attributes"""
        doc = BeautifulSoup(html, 'html5lib')
        cls.clean_node(doc, doc)
        return doc.decode()

    @classmethod
    def clean_node(cls, doc, node):
        """Clean a BeautifulSoup document in-place"""
        if isinstance(node, NavigableString):
            cls.clean_string_node(doc, node)
        elif isinstance(node, Tag):
            cls.clean_tag_node(doc, node)
        # This branch is here in case node is a BeautifulSoup object that does
        # not inherit from NavigableString or Tag. I can't find any examples
        # of such a thing at the moment, so this branch is untested.
        else:  # pragma: no cover
            cls.clean_unknown_node(doc, node)

    @classmethod
    def clean_string_node(cls, doc, node):
        # Remove comments
        if isinstance(node, Comment):
            node.extract()
            return

        # by default, nothing needs to be done to whitelist string nodes
        pass

    @classmethod
    def clean_tag_node(cls, doc, tag):
        # first, whitelist the contents of this tag

        # NB tag.contents will change while this iteration is running, so we need
        # to capture the initial state into a static list() and iterate over that
        # to avoid losing our place in the sequence.
        for child in list(tag.contents):
            cls.clean_node(doc, child)

        # see if there is a rule in element_rules for this tag type
        try:
            rule = cls.element_rules[tag.name]
        except KeyError:
            # don't recognise this tag name, so KILL IT WITH FIRE
            # (unwrap keeps the children, dropping only the tag itself)
            tag.unwrap()
            return

        # apply the rule
        rule(tag)

    @classmethod
    def clean_unknown_node(cls, doc, node):
        # don't know what type of object this is, so KILL IT WITH FIRE
        node.decompose()
| bsd-3-clause |
40223202/2015cdb_g2 | gear.py | 204 | 19237 | #@+leo-ver=5-thin
#@+node:office.20150407074720.1: * @file gear.py
#@@language python
#@@tabwidth -4
#@+<<declarations>>
#@+node:office.20150407074720.2: ** <<declarations>> (application)
#@@language python
import cherrypy
import os
import sys
# 這個程式要計算正齒輪的齒面寬, 資料庫連結希望使用 pybean 與 SQLite
# 導入 pybean 模組與所要使用的 Store 及 SQLiteWriter 方法
from pybean import Store, SQLiteWriter
import math
# Determine the directory containing this program file (on Windows the
# value ends with a trailing backslash).
_curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
# Add that directory to the module search path.
sys.path.append(_curdir)

if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
    # while program is executed in OpenShift
    download_root_dir = os.environ['OPENSHIFT_DATA_DIR']
    data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
    # while program is executed in localhost
    download_root_dir = _curdir + "/local_data/"
    data_dir = _curdir + "/local_data/"
# 這是 Gear 設計資料表的定義
'''
lewis.db 中有兩個資料表, steel 與 lewis
CREATE TABLE steel (
serialno INTEGER,
unsno TEXT,
aisino TEXT,
treatment TEXT,
yield_str INTEGER,
tensile_str INTEGER,
stretch_ratio INTEGER,
sectional_shr INTEGER,
brinell INTEGER
);
CREATE TABLE lewis (
serialno INTEGER PRIMARY KEY
NOT NULL,
gearno INTEGER,
type1 NUMERIC,
type4 NUMERIC,
type3 NUMERIC,
type2 NUMERIC
);
'''
#@-<<declarations>>
#@+others
#@+node:office.20150407074720.3: ** class Gear
class Gear(object):
#@+others
#@+node:office.20150407074720.4: *3* __init__
def __init__(self):
# hope to create downloads and images directories
if not os.path.isdir(download_root_dir+"downloads"):
try:
os.makedirs(download_root_dir+"downloads")
except:
print("mkdir error")
if not os.path.isdir(download_root_dir+"images"):
try:
os.makedirs(download_root_dir+"images")
except:
print("mkdir error")
if not os.path.isdir(download_root_dir+"tmp"):
try:
os.makedirs(download_root_dir+"tmp")
except:
print("mkdir error")
#@+node:office.20150407074720.5: *3* default
    @cherrypy.expose
    def default(self, attr='default', *args, **kwargs):
        # Catch-all handler: any unknown URL redirects to the site root.
        raise cherrypy.HTTPRedirect("/")
#@+node:office.20150407074720.6: *3* index
    # Each group uses index to drive the subsequent program flow.
    @cherrypy.expose
    def index(self, *args, **kwargs):
        """Render the spur-gear design input form, populating the material
        drop-down from the steel table of the bundled lewis.db database."""
        # Connect to the database file and fetch all rows.
        try:
            # Build the database mapping object with Store; frozen=True
            # disables dynamic creation of new tables.
            # The program runs from the application directory, so lewis.db
            # is referenced relative to it.
            SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True))
            #material = SQLite連結.find_one("steel","serialno = ?",[序號])
            # str(SQLite連結.count("steel")) would return 70, i.e. the
            # database holds 70 rows.
            material = SQLite連結.find("steel")
            # The returned material is an iterator.
            '''
            outstring = ""
            for material_item in material:
                outstring += str(material_item.serialno) + ":" + material_item.unsno + "_" + material_item.treatment + "<br />"
            return outstring
            '''
        except:
            return "抱歉! 資料庫無法連線<br />"
        outstring = '''
<form id=entry method=post action="gear_width">
請填妥下列參數,以完成適當的齒尺寸大小設計。<br />
馬達馬力:<input type=text name=horsepower id=horsepower value=100 size=10>horse power<br />
馬達轉速:<input type=text name=rpm id=rpm value=1120 size=10>rpm<br />
齒輪減速比: <input type=text name=ratio id=ratio value=4 size=10><br />
齒形:<select name=toothtype id=toothtype>
<option value=type1>壓力角20度,a=0.8,b=1.0
<option value=type2>壓力角20度,a=1.0,b=1.25
<option value=type3>壓力角25度,a=1.0,b=1.25
<option value=type4>壓力角25度,a=1.0,b=1.35
</select><br />
安全係數:<input type=text name=safetyfactor id=safetyfactor value=3 size=10><br />
齒輪材質:<select name=material_serialno id=material_serialno>
'''
        # One <option> per steel row, keyed by its serial number.
        for material_item in material:
            outstring += "<option value=" + str(material_item.serialno) + ">UNS - " + \
                material_item.unsno + " - " + material_item.treatment
        outstring += "</select><br />"
        outstring += "小齒輪齒數:<input type=text name=npinion id=npinion value=18 size=10><br />"
        outstring += "<input type=submit id=submit value=進行運算>"
        outstring += "</form>"
        return outstring
#@+node:office.20150407074720.7: *3* interpolation
@cherrypy.expose
def interpolation(self, small_gear_no=18, gear_type=1):
SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True))
# 使用內插法求值
# 找出比目標齒數大的其中的最小的,就是最鄰近的大值
lewis_factor = SQLite連結.find_one("lewis","gearno > ?",[small_gear_no])
if(gear_type == 1):
larger_formfactor = lewis_factor.type1
elif(gear_type == 2):
larger_formfactor = lewis_factor.type2
elif(gear_type == 3):
larger_formfactor = lewis_factor.type3
else:
larger_formfactor = lewis_factor.type4
larger_toothnumber = lewis_factor.gearno
# 找出比目標齒數小的其中的最大的,就是最鄰近的小值
lewis_factor = SQLite連結.find_one("lewis","gearno < ? order by gearno DESC",[small_gear_no])
if(gear_type == 1):
smaller_formfactor = lewis_factor.type1
elif(gear_type == 2):
smaller_formfactor = lewis_factor.type2
elif(gear_type == 3):
smaller_formfactor = lewis_factor.type3
else:
smaller_formfactor = lewis_factor.type4
smaller_toothnumber = lewis_factor.gearno
calculated_factor = larger_formfactor + (small_gear_no - larger_toothnumber) * (larger_formfactor - smaller_formfactor) / (larger_toothnumber - smaller_toothnumber)
# 只傳回小數點後五位數
return str(round(calculated_factor, 5))
#@+node:office.20150407074720.8: *3* gear_width
# 改寫為齒面寬的設計函式
@cherrypy.expose
def gear_width(self, horsepower=100, rpm=1000, ratio=4, toothtype=1, safetyfactor=2, material_serialno=1, npinion=18):
SQLite連結 = Store(SQLiteWriter(_curdir+"/lewis.db", frozen=True))
outstring = ""
# 根據所選用的齒形決定壓力角
if(toothtype == 1 or toothtype == 2):
壓力角 = 20
else:
壓力角 = 25
# 根據壓力角決定最小齒數
if(壓力角== 20):
最小齒數 = 18
else:
最小齒數 = 12
# 直接設最小齒數
if int(npinion) <= 最小齒數:
npinion = 最小齒數
# 大於400的齒數則視為齒條(Rack)
if int(npinion) >= 400:
npinion = 400
# 根據所選用的材料查詢強度值
# 由 material之序號查 steel 表以得材料之降伏強度S單位為 kpsi 因此查得的值要成乘上1000
# 利用 Store 建立資料庫檔案對應物件, 並且設定 frozen=True 表示不要開放動態資料表的建立
#SQLite連結 = Store(SQLiteWriter("lewis.db", frozen=True))
# 指定 steel 資料表
steel = SQLite連結.new("steel")
# 資料查詢
#material = SQLite連結.find_one("steel","unsno=? and treatment=?",[unsno, treatment])
material = SQLite連結.find_one("steel","serialno=?",[material_serialno])
# 列出 steel 資料表中的資料筆數
#print(SQLite連結.count("steel"))
#print (material.yield_str)
strengthstress = material.yield_str*1000
# 由小齒輪的齒數與齒形類別,查詢lewis form factor
# 先查驗是否有直接對應值
on_table = SQLite連結.count("lewis","gearno=?",[npinion])
if on_table == 1:
# 直接進入設計運算
#print("直接運算")
#print(on_table)
lewis_factor = SQLite連結.find_one("lewis","gearno=?",[npinion])
#print(lewis_factor.type1)
# 根據齒形查出 formfactor 值
if(toothtype == 1):
formfactor = lewis_factor.type1
elif(toothtype == 2):
formfactor = lewis_factor.type2
elif(toothtype == 3):
formfactor = lewis_factor.type3
else:
formfactor = lewis_factor.type4
else:
# 沒有直接對應值, 必須進行查表內插運算後, 再執行設計運算
#print("必須內插")
#print(interpolation(npinion, gear_type))
formfactor = self.interpolation(npinion, toothtype)
# 開始進行設計運算
ngear = int(npinion) * int(ratio)
# 重要的最佳化設計---儘量用整數的diametralpitch
# 先嘗試用整數算若 diametralpitch 找到100 仍無所獲則改用 0.25 作為增量再不行則宣告 fail
counter = 0
i = 0.1
facewidth = 0
circularpitch = 0
while (facewidth <= 3 * circularpitch or facewidth >= 5 * circularpitch):
diametralpitch = i
#circularpitch = 3.14159/diametralpitch
circularpitch = math.pi/diametralpitch
pitchdiameter = int(npinion)/diametralpitch
#pitchlinevelocity = 3.14159*pitchdiameter*rpm/12
pitchlinevelocity = math.pi*pitchdiameter * float(rpm)/12
transmittedload = 33000*float(horsepower)/pitchlinevelocity
velocityfactor = 1200/(1200 + pitchlinevelocity)
# formfactor is Lewis form factor
# formfactor need to get from table 13-3 and determined ty teeth number and type of tooth
# formfactor = 0.293
# 90 is the value get from table corresponding to material type
facewidth = transmittedload*diametralpitch*float(safetyfactor)/velocityfactor/formfactor/strengthstress
if(counter>5000):
outstring += "超過5000次的設計運算,仍無法找到答案!<br />"
outstring += "可能所選用的傳遞功率過大,或無足夠強度的材料可以使用!<br />"
# 離開while迴圈
break
i += 0.1
counter += 1
facewidth = round(facewidth, 4)
if(counter<5000):
# 先載入 cube 程式測試
#outstring = self.cube_weblink()
# 再載入 gear 程式測試
outstring = self.gear_weblink()
outstring += "進行"+str(counter)+"次重複運算後,得到合用的facewidth值為:"+str(facewidth)
return outstring
#@+node:office.20150407074720.9: *3* cube_weblink
@cherrypy.expose
def cube_weblink(self):
outstring = '''<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js">
document.writeln ("Error loading Pro/Web.Link header!");
</script>
<script type="text/javascript" language="JavaScript">
// 若第三輸入為 false, 表示僅載入 session, 但是不顯示
// ret 為 model open return
var ret = document.pwl.pwlMdlOpen("cube.prt", "v:/tmp", false);
if (!ret.Status) {
alert("pwlMdlOpen failed (" + ret.ErrorCode + ")");
}
//將 ProE 執行階段設為變數 session
var session = pfcGetProESession();
// 在視窗中打開零件檔案, 並且顯示出來
var window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("cube.prt"));
var solid = session.GetModel("cube.prt",pfcCreate("pfcModelType").MDL_PART);
var length,width,myf,myn,i,j,volume,count,d1Value,d2Value;
// 將模型檔中的 length 變數設為 javascript 中的 length 變數
length = solid.GetParam("a1");
// 將模型檔中的 width 變數設為 javascript 中的 width 變數
width = solid.GetParam("a2");
//改變零件尺寸
//myf=20;
//myn=20;
volume=0;
count=0;
try
{
// 以下採用 URL 輸入對應變數
//createParametersFromArguments ();
// 以下則直接利用 javascript 程式改變零件參數
for(i=0;i<=5;i++)
{
//for(j=0;j<=2;j++)
//{
myf=20.0;
myn=10.0+i*0.5;
// 設定變數值, 利用 ModelItem 中的 CreateDoubleParamValue 轉換成 Pro/Web.Link 所需要的浮點數值
d1Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myf);
d2Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
// 將處理好的變數值, 指定給對應的零件變數
length.Value = d1Value;
width.Value = d2Value;
//零件尺寸重新設定後, 呼叫 Regenerate 更新模型
solid.Regenerate(void null);
//利用 GetMassProperty 取得模型的質量相關物件
properties = solid.GetMassProperty(void null);
//volume = volume + properties.Volume;
volume = properties.Volume;
count = count + 1;
alert("執行第"+count+"次,零件總體積:"+volume);
// 將零件存為新檔案
var newfile = document.pwl.pwlMdlSaveAs("cube.prt", "v:/tmp", "cube"+count+".prt");
if (!newfile.Status) {
alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")");
}
//} // 內圈 for 迴圈
} //外圈 for 迴圈
//alert("共執行:"+count+"次,零件總體積:"+volume);
//alert("零件體積:"+properties.Volume);
//alert("零件體積取整數:"+Math.round(properties.Volume));
}
catch(err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
</script>
'''
return outstring
#@+node:office.20150407074720.10: *3* gear_weblink
@cherrypy.expose
def gear_weblink(self, facewidth=5, n=18):
outstring = '''<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js">// <![CDATA[
document.writeln ("Error loading Pro/Web.Link header!");
// ]]></script>
<script type="text/javascript" language="JavaScript">// <![CDATA[
if (!pfcIsWindows()) netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
// 若第三輸入為 false, 表示僅載入 session, 但是不顯示
// ret 為 model open return
var ret = document.pwl.pwlMdlOpen("gear.prt", "v:/", false);
if (!ret.Status) {
alert("pwlMdlOpen failed (" + ret.ErrorCode + ")");
}
//將 ProE 執行階段設為變數 session
var session = pfcGetProESession();
// 在視窗中打開零件檔案, 並且顯示出來
var window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("gear.prt"));
var solid = session.GetModel("gear.prt",pfcCreate("pfcModelType").MDL_PART);
var length,width,myf,myn,i,j,volume,count,d1Value,d2Value;
// 將模型檔中的 length 變數設為 javascript 中的 length 變數
length = solid.GetParam("n");
// 將模型檔中的 width 變數設為 javascript 中的 width 變數
width = solid.GetParam("face_width");
//改變零件尺寸
//myf=20;
//myn=20;
volume=0;
count=0;
try
{
// 以下採用 URL 輸入對應變數
//createParametersFromArguments ();
// 以下則直接利用 javascript 程式改變零件參數
for(i=0;i<=5;i++)
{
//for(j=0;j<=2;j++)
//{
myf=25+i*2;
myn=10.0+i*0.5;
// 設定變數值, 利用 ModelItem 中的 CreateDoubleParamValue 轉換成 Pro/Web.Link 所需要的浮點數值
//d1Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myf);
d1Value = pfcCreate ("MpfcModelItem").CreateIntParamValue(myf);
d2Value = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
// 將處理好的變數值, 指定給對應的零件變數
length.Value = d1Value;
width.Value = d2Value;
//零件尺寸重新設定後, 呼叫 Regenerate 更新模型
solid.Regenerate(void null);
//利用 GetMassProperty 取得模型的質量相關物件
properties = solid.GetMassProperty(void null);
//volume = volume + properties.Volume;
volume = properties.Volume;
count = count + 1;
alert("執行第"+count+"次,零件總體積:"+volume);
// 將零件存為新檔案
var newfile = document.pwl.pwlMdlSaveAs("gear.prt", "v:/", "mygear_"+count+".prt");
if (!newfile.Status) {
alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")");
}
//} // 內圈 for 迴圈
} //外圈 for 迴圈
//alert("共執行:"+count+"次,零件總體積:"+volume);
//alert("零件體積:"+properties.Volume);
//alert("零件體積取整數:"+Math.round(properties.Volume));
}
catch(err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
// ]]></script>
'''
return outstring
#@-others
#@-others
# Instantiate the single application root object served by CherryPy.
root = Gear()
# setup static, images and downloads directories
application_conf = {
    '/static':{
        'tools.staticdir.on': True,
        'tools.staticdir.dir': _curdir+"/static"},
    '/images':{
        'tools.staticdir.on': True,
        'tools.staticdir.dir': data_dir+"/images"},
    '/downloads':{
        'tools.staticdir.on': True,
        'tools.staticdir.dir': data_dir+"/downloads"}
}
# if inOpenshift ('OPENSHIFT_REPO_DIR' exists in environment variables) or not inOpenshift
if __name__ == '__main__':
    if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
        # operate in OpenShift
        # NOTE(review): this branch only builds the WSGI application object
        # inside the __main__ guard; nothing in view serves it -- confirm how
        # the OpenShift host picks it up.
        application = cherrypy.Application(root, config = application_conf)
    else:
        # operate in localhost
        cherrypy.quickstart(root, config = application_conf)
#@-leo
| gpl-3.0 |
tswsl1989/Minecraft-Overviewer | contrib/playerInspect.py | 4 | 2936 | #!/usr/bin/env python3
"""
Very basic player.dat inspection script
"""
import os
import sys
import argparse
from pathlib import Path
# incantation to be able to import overviewer_core
if not hasattr(sys, "frozen"):
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], '..')))
from overviewer_core.nbt import load
from overviewer_core import items
def print_player(data, sub_entry=False):
    """Pretty-print one player's decoded player.dat compound to stdout.

    With sub_entry=True every line is indented one tab and the per-item
    inventory listing is suppressed (only the item count is shown).
    """
    indent = "\t" if sub_entry else ""
    pos = data['Pos']
    print("%sPosition:\t%i, %i, %i\t(dim: %i)"
          % (indent, pos[0], pos[1], pos[2], data['Dimension']))
    # Spawn coordinates may be absent (player never set a spawn point).
    try:
        print("%sSpawn:\t\t%i, %i, %i"
              % (indent, data['SpawnX'], data['SpawnY'], data['SpawnZ']))
    except KeyError:
        pass
    print("%sHealth:\t%i\tLevel:\t\t%i\t\tGameType:\t%i"
          % (indent, data['Health'], data['XpLevel'], data['playerGameType']))
    print("%sFood:\t%i\tTotal XP:\t%i"
          % (indent, data['foodLevel'], data['XpTotal']))
    print("%sInventory: %d items" % (indent, len(data['Inventory'])))
    if sub_entry:
        return
    for slot in data['Inventory']:
        print(" %-3d %s" % (slot['Count'], items.id2item(slot['id'])))
def find_all_player_files(dir_path):
    """Yield (path, player_name) for every entry in *dir_path*.

    The player name is the entry's filename with its suffix stripped.
    """
    yield from ((entry, entry.stem) for entry in dir_path.iterdir())
def find_player_file(dir_path, selected_player):
    """Locate the entry in *dir_path* whose stem matches *selected_player*.

    Returns a (path, player_name) tuple on success.

    Raises FileNotFoundError when no entry matches.  Fix over the
    original: the exception now carries a descriptive message instead of
    being raised bare, so callers that let it propagate get a useful
    error; callers catching FileNotFoundError are unaffected.
    """
    for player_file in dir_path.iterdir():
        if player_file.stem == selected_player:
            return player_file, selected_player
    raise FileNotFoundError(
        "no player file for %r in %s" % (selected_player, dir_path))
def load_and_output_player(player_file_path, player, sub_entry=False):
    """Parse one player .dat file and print it via print_player().

    player_file_path: pathlib.Path of the NBT file to read.
    player: display name printed as a heading above the record.
    sub_entry: forwarded to print_player() to indent/condense the output.
    """
    with player_file_path.open('rb') as f:
        # load() is overviewer_core.nbt.load; element [1] is the data
        # payload -- presumably a (name, data) pair; confirm against nbt.load.
        player_data = load(f)[1]
    # Blank line separates consecutive players in the output.
    print("")
    print(player)
    print_player(player_data, sub_entry=sub_entry)
def dir_or_file(path):
    """argparse type callable: accept *path* only if it exists as a
    regular file or a directory, returning it wrapped in a Path."""
    candidate = Path(path)
    if candidate.is_file() or candidate.is_dir():
        return candidate
    raise argparse.ArgumentTypeError("Not a valid file or directory path")
def main(path, selected_player=None):
    """Inspect player data under *path*.

    path: a single player .dat file, or a directory of them.
    selected_player: when given, show only that player's file; exits with
    status 1 if it cannot be found.

    Fixes over the original: the body previously read the module-global
    ``args`` instead of its own parameters (so calling main() directly
    failed), and the single-file branch called load_and_output_player()
    without the required ``player`` argument (TypeError).
    """
    print("Inspecting %s" % path)
    if not path.is_dir():
        # A single .dat file was given directly; derive the player name
        # from the filename, consistent with find_all_player_files().
        load_and_output_player(path, path.stem)
        return
    if selected_player is None:
        # No filter: dump every player file in the directory.
        for player_file, player in find_all_player_files(path):
            load_and_output_player(player_file, player)
        return
    try:
        player_file, player = find_player_file(path, selected_player)
        load_and_output_player(player_file, player, sub_entry=True)
    except FileNotFoundError:
        print("No %s.dat in %s" % (selected_player, path))
        sys.exit(1)
if __name__ == '__main__':
    # Command line: playerInspect.py <Player.dat or directory> [player]
    parser = argparse.ArgumentParser(description=__doc__)
    # dir_or_file validates existence and converts to a pathlib.Path.
    parser.add_argument('path', metavar='<Player.dat or directory>', type=dir_or_file)
    # Optional: restrict output to a single player name.
    parser.add_argument('selected_player', nargs='?', default=None)
    args = parser.parse_args()
    main(args.path, selected_player=args.selected_player)
| gpl-3.0 |
oceanobservatories/mi-instrument | mi/instrument/sunburst/sami2_pco2/pco2a/test/test_driver.py | 5 | 57485 | """
@package mi.instrument.sunburst.sami2_pco2.pco2a.test.test_driver
@file marine-integrations/mi/instrument/sunburst/sami2_pco2/pco2a/driver.py
@author Christopher Wingard
@brief Test cases for pco2a driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
__author__ = 'Kevin Stiemke'
__license__ = 'Apache 2.0'
import unittest
import time
import copy
from nose.plugins.attrib import attr
from mock import Mock
from mi.core.log import get_logger
log = get_logger()
# MI imports.
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import DriverStartupConfigKey
from mi.idk.unit_test import AgentCapabilityType
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_driver import ResourceAgentEvent
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import InstrumentDriver
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import InstrumentCommand
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import ProtocolState
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import ProtocolEvent
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Capability
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Parameter
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Protocol
from mi.instrument.sunburst.driver import Prompt
from mi.instrument.sunburst.driver import SAMI_NEWLINE
from mi.instrument.sunburst.sami2_pco2.driver import Pco2wSamiSampleDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import Pco2waConfigurationDataParticleKey
from mi.instrument.sunburst.sami2_pco2.pco2a.driver import DataParticleType
# Added Imports (Note, these pick up some of the base classes not directly imported above)
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverTestMixinSub
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverUnitTest
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverIntegrationTest
from mi.instrument.sunburst.sami2_pco2.test.test_driver import Pco2DriverQualificationTest
###
# Driver parameters for the tests
###
# Register this pco2a driver's configuration with the IDK test framework;
# these values drive the unit/integration/qualification test harness setup.
InstrumentDriverTestCase.initialize(
    driver_module='mi.instrument.sunburst.sami2_pco2.pco2a.driver',
    driver_class="InstrumentDriver",
    instrument_agent_resource_id='V7HE4T',
    instrument_agent_name='sunburst_sami2_pco2_pco2a',
    instrument_agent_packet_config=DataParticleType(),
    driver_startup_config={
        DriverStartupConfigKey.PARAMETERS: {
            # Startup parameter override applied when the driver initializes.
            Parameter.PUMP_SETTINGS: 0x01,
        },
    }
)
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###
# Driver constant definitions
###
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
class DriverTestMixinSub(Pco2DriverTestMixinSub):
"""
Mixin class used for storing data particle constants and common data
assertion methods.
"""
# Create some short names for the parameter test config
TYPE = ParameterTestConfigKey.TYPE
READONLY = ParameterTestConfigKey.READONLY
STARTUP = ParameterTestConfigKey.STARTUP
DA = ParameterTestConfigKey.DIRECT_ACCESS
VALUE = ParameterTestConfigKey.VALUE
REQUIRED = ParameterTestConfigKey.REQUIRED
DEFAULT = ParameterTestConfigKey.DEFAULT
STATES = ParameterTestConfigKey.STATES
_driver_capabilities = {
# capabilities defined in the IOS
Capability.DISCOVER: {STATES: [ProtocolState.UNKNOWN]},
Capability.ACQUIRE_STATUS: {STATES: [ProtocolState.COMMAND,
ProtocolState.AUTOSAMPLE]},
Capability.ACQUIRE_SAMPLE: {STATES: [ProtocolState.COMMAND]},
Capability.ACQUIRE_BLANK_SAMPLE: {STATES: [ProtocolState.COMMAND]},
Capability.START_AUTOSAMPLE: {STATES: [ProtocolState.COMMAND,
ProtocolState.AUTOSAMPLE]},
Capability.STOP_AUTOSAMPLE: {STATES: [ProtocolState.AUTOSAMPLE,
ProtocolState.COMMAND]},
Capability.DEIONIZED_WATER_FLUSH: {STATES: [ProtocolState.COMMAND]},
Capability.REAGENT_FLUSH: {STATES: [ProtocolState.COMMAND]},
Capability.DEIONIZED_WATER_FLUSH_100ML: {STATES: [ProtocolState.COMMAND]},
Capability.REAGENT_FLUSH_100ML: {STATES: [ProtocolState.COMMAND]}
}
###
# Instrument output (driver input) Definitions
###
# Configuration string received from the instrument via the L command
# (clock set to 2014-01-01 00:00:00) with sampling set to start 540 days
# (~18 months) later and stop 365 days after that. SAMI and Device1
# (external SBE pump) are set to run every 60 minutes, but will be polled
# on a regular schedule rather than autosampled. Device1 is not configured
# to run after the SAMI and will run for 10 seconds. To configure the
# instrument using this string, add a null byte (00) to the end of the
# string.
VALID_CONFIG_STRING = 'CEE90B0002C7EA0001E133800A000E100402000E10010B' + \
'000000000D000000000D000000000D07' + \
'1020FF54181C010038' + \
'000000000000000000000000000000000000000000000000000' + \
'000000000000000000000000000000000000000000000000000' + \
'000000000000000000000000000000' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + SAMI_NEWLINE
# Data records -- SAMI and Device1 (external pump) (responses to R0 and R1
# commands, respectively)
VALID_R0_BLANK_SAMPLE = '*542705CEE91CC800400019096206800730074C2CE042' + \
'74003B0018096106800732074E0D82066124' + SAMI_NEWLINE
VALID_R0_DATA_SAMPLE = '*542704CEE91CC8003B001909620155073003E908A1232' + \
'D0043001A09620154072F03EA0D92065F3B' + SAMI_NEWLINE
###
# Parameter and Type Definitions
###
_driver_parameters = {
# Parameters defined in the IOS
Parameter.LAUNCH_TIME: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00000000, VALUE: 0xCEE90B00, REQUIRED: True},
Parameter.START_TIME_FROM_LAUNCH: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x02C7EA00, VALUE: 0x02C7EA00, REQUIRED: True},
Parameter.STOP_TIME_FROM_START: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x01E13380, VALUE: 0x01E13380, REQUIRED: True},
Parameter.MODE_BITS: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0A, VALUE: 0x0A, REQUIRED: True},
Parameter.SAMI_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000E10, VALUE: 0x000E10, REQUIRED: True},
Parameter.SAMI_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x04, VALUE: 0x04, REQUIRED: True},
Parameter.SAMI_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x02, VALUE: 0x02, REQUIRED: True},
Parameter.DEVICE1_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000E10, VALUE: 0x000E10, REQUIRED: True},
Parameter.DEVICE1_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
Parameter.DEVICE1_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0B, VALUE: 0x0B, REQUIRED: True},
Parameter.DEVICE2_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000, REQUIRED: True},
Parameter.DEVICE2_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
Parameter.DEVICE2_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0D, VALUE: 0x0D, REQUIRED: True},
Parameter.DEVICE3_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000, REQUIRED: True},
Parameter.DEVICE3_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
Parameter.DEVICE3_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0D, VALUE: 0x0D, REQUIRED: True},
Parameter.PRESTART_SAMPLE_INTERVAL: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x000000, VALUE: 0x000000, REQUIRED: True},
Parameter.PRESTART_DRIVER_VERSION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
Parameter.PRESTART_PARAMS_POINTER: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x0D, VALUE: 0x00, REQUIRED: True},
Parameter.GLOBAL_CONFIGURATION: {TYPE: int, READONLY: True, DA: True, STARTUP: True,
DEFAULT: 0x07, VALUE: 0x07, REQUIRED: True},
Parameter.PUMP_PULSE: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x10, VALUE: 0x10, REQUIRED: True},
Parameter.PUMP_DURATION: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x20, VALUE: 0x20, REQUIRED: True},
Parameter.SAMPLES_PER_MEASUREMENT: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0xFF, VALUE: 0xFF, REQUIRED: True},
Parameter.CYCLES_BETWEEN_BLANKS: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x54, VALUE: 0x54, REQUIRED: True},
Parameter.NUMBER_REAGENT_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x18, VALUE: 0x18, REQUIRED: True},
Parameter.NUMBER_BLANK_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x1C, VALUE: 0x1C, REQUIRED: True},
Parameter.FLUSH_PUMP_INTERVAL: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
Parameter.PUMP_SETTINGS: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x00, VALUE: 0x00, REQUIRED: True},
Parameter.NUMBER_EXTRA_PUMP_CYCLES: {TYPE: int, READONLY: False, DA: True, STARTUP: True,
DEFAULT: 0x38, VALUE: 0x38, REQUIRED: True},
Parameter.AUTO_SAMPLE_INTERVAL: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x38, VALUE: 3600, REQUIRED: True},
Parameter.REAGENT_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x08, VALUE: 0x08, REQUIRED: True},
Parameter.DEIONIZED_WATER_FLUSH_DURATION: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x08, VALUE: 0x08, REQUIRED: True},
Parameter.PUMP_100ML_CYCLES: {TYPE: int, READONLY: False, DA: False, STARTUP: False,
DEFAULT: 0x01, VALUE: 0x01, REQUIRED: True},
}
_sami_data_sample_parameters = {
# SAMI Type 4/5 sample (in this case it is a Type 4)
Pco2wSamiSampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x27, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x04, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91CC8, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.LIGHT_MEASUREMENTS: {TYPE: list, VALUE: [0x003B, 0x0019, 0x0962, 0x0155,
0x0730, 0x03E9, 0x08A1, 0x232D,
0x0043, 0x001A, 0x0962, 0x0154,
0x072F, 0x03EA], REQUIRED: True},
Pco2wSamiSampleDataParticleKey.VOLTAGE_BATTERY: {TYPE: int, VALUE: 0x0D92, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.THERMISTER_RAW: {TYPE: int, VALUE: 0x065F, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0x3B, REQUIRED: True}
}
_sami_blank_sample_parameters = {
# SAMI Type 4/5 sample (in this case it is a Type 5)
Pco2wSamiSampleDataParticleKey.UNIQUE_ID: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_LENGTH: {TYPE: int, VALUE: 0x27, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TYPE: {TYPE: int, VALUE: 0x05, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.RECORD_TIME: {TYPE: int, VALUE: 0xCEE91CC8, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.LIGHT_MEASUREMENTS: {TYPE: list, VALUE: [0x0040, 0x0019, 0x0962, 0x0680, 0x0730,
0x074C, 0x2CE0, 0x4274, 0x003B, 0x0018,
0x0961, 0x0680, 0x0732, 0x074E],
REQUIRED: True},
Pco2wSamiSampleDataParticleKey.VOLTAGE_BATTERY: {TYPE: int, VALUE: 0x0D82, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.THERMISTER_RAW: {TYPE: int, VALUE: 0x0661, REQUIRED: True},
Pco2wSamiSampleDataParticleKey.CHECKSUM: {TYPE: int, VALUE: 0x24, REQUIRED: True}
}
_configuration_parameters = {
# Configuration settings
Pco2waConfigurationDataParticleKey.LAUNCH_TIME: {TYPE: int, VALUE: 0xCEE90B00, REQUIRED: True},
Pco2waConfigurationDataParticleKey.START_TIME_OFFSET: {TYPE: int, VALUE: 0x02C7EA00, REQUIRED: True},
Pco2waConfigurationDataParticleKey.RECORDING_TIME: {TYPE: int, VALUE: 0x01E13380, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PMI_SAMPLE_SCHEDULE: {TYPE: int, VALUE: 0, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SAMI_SAMPLE_SCHEDULE: {TYPE: int, VALUE: 1, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT1_FOLLOWS_SAMI_SCHEDULE: {TYPE: int, VALUE: 0, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT1_INDEPENDENT_SCHEDULE: {TYPE: int, VALUE: 1, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT2_FOLLOWS_SAMI_SCHEDULE: {TYPE: int, VALUE: 0, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT2_INDEPENDENT_SCHEDULE: {TYPE: int, VALUE: 0, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT3_FOLLOWS_SAMI_SCHEDULE: {TYPE: int, VALUE: 0, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SLOT3_INDEPENDENT_SCHEDULE: {TYPE: int, VALUE: 0, REQUIRED: True},
Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_SAMI: {TYPE: int, VALUE: 0x000E10, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DRIVER_ID_SAMI: {TYPE: int, VALUE: 0x04, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_SAMI: {TYPE: int, VALUE: 0x02, REQUIRED: True},
Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE1: {TYPE: int, VALUE: 0x000E10, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DRIVER_ID_DEVICE1: {TYPE: int, VALUE: 0x01, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE1: {TYPE: int, VALUE: 0x0B, REQUIRED: True},
Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE2: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DRIVER_ID_DEVICE2: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE2: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_DEVICE3: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DRIVER_ID_DEVICE3: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_DEVICE3: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2waConfigurationDataParticleKey.TIMER_INTERVAL_PRESTART: {TYPE: int, VALUE: 0x000000, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DRIVER_ID_PRESTART: {TYPE: int, VALUE: 0x00, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PARAMETER_POINTER_PRESTART: {TYPE: int, VALUE: 0x0D, REQUIRED: True},
Pco2waConfigurationDataParticleKey.USE_BAUD_RATE_57600: {TYPE: int, VALUE: 1, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SEND_RECORD_TYPE: {TYPE: int, VALUE: 1, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SEND_LIVE_RECORDS: {TYPE: int, VALUE: 1, REQUIRED: True},
Pco2waConfigurationDataParticleKey.EXTEND_GLOBAL_CONFIG: {TYPE: int, VALUE: 0, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PUMP_PULSE: {TYPE: int, VALUE: 0x10, REQUIRED: True},
Pco2waConfigurationDataParticleKey.PUMP_DURATION: {TYPE: int, VALUE: 0x20, REQUIRED: True},
Pco2waConfigurationDataParticleKey.SAMPLES_PER_MEASUREMENT: {TYPE: int, VALUE: 0xFF, REQUIRED: True},
Pco2waConfigurationDataParticleKey.CYCLES_BETWEEN_BLANKS: {TYPE: int, VALUE: 0x54, REQUIRED: True},
Pco2waConfigurationDataParticleKey.NUMBER_REAGENT_CYCLES: {TYPE: int, VALUE: 0x18, REQUIRED: True},
Pco2waConfigurationDataParticleKey.NUMBER_BLANK_CYCLES: {TYPE: int, VALUE: 0x1C, REQUIRED: True},
Pco2waConfigurationDataParticleKey.FLUSH_PUMP_INTERVAL: {TYPE: int, VALUE: 0x01, REQUIRED: True},
Pco2waConfigurationDataParticleKey.DISABLE_START_BLANK_FLUSH: {TYPE: int, VALUE: 0, REQUIRED: True},
Pco2waConfigurationDataParticleKey.MEASURE_AFTER_PUMP_PULSE: {TYPE: int, VALUE: 0, REQUIRED: True},
Pco2waConfigurationDataParticleKey.NUMBER_EXTRA_PUMP_CYCLES: {TYPE: int, VALUE: 0x38, REQUIRED: True},
}
###
# Driver Parameter Methods
###
def assert_driver_parameters(self, current_parameters, verify_values=False):
"""
Verify that all driver parameters are correct and potentially verify
values.
@param current_parameters: driver parameters read from the driver
instance
@param verify_values: should we verify values against definition?
"""
self.assert_parameters(current_parameters, self._driver_parameters,
verify_values)
def assert_particle_sami_sample(self, data_particle, verify_values=False):
"""
Verify sami_data_sample particles (Type 4 and 5). Used in INT test where type doesn't matter.
@param data_particle: Pco2wSamiSampleDataParticle data particle
@param verify_values: bool, should we verify parameter values
"""
self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
self._sami_data_sample_parameters)
self.assert_data_particle_header(data_particle,
DataParticleType.PCO2W_A_SAMI_SAMPLE)
self.assert_data_particle_parameters(data_particle,
self._sami_data_sample_parameters,
verify_values)
def assert_particle_sami_data_sample(self, data_particle, verify_values=False):
    """
    Verify a sami_data_sample particle (Type 4).
    @param data_particle: Pco2wSamiSampleDataParticle data particle
    @param verify_values: bool, should we verify parameter values
    """
    # Check the record type first so a blank sample (Type 5) fails fast
    # with a clear message.  The original re-fetched the dict and
    # re-asserted the same condition at the end with the deprecated
    # assertEquals alias; that duplicate check is removed here.
    sample_dict = self.get_data_particle_values_as_dict(data_particle)
    record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
    self.assertEqual(record_type, 4,
                     msg="Not a regular sample, record_type = %d" % record_type)
    self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
                                   self._sami_data_sample_parameters)
    self.assert_data_particle_header(data_particle,
                                     DataParticleType.PCO2W_A_SAMI_SAMPLE)
    self.assert_data_particle_parameters(data_particle,
                                         self._sami_data_sample_parameters,
                                         verify_values)
def assert_particle_sami_blank_sample(self, data_particle, verify_values=False):
    """
    Verify a sami_blank_sample particle (Type 5).
    @param data_particle: Pco2wSamiSampleDataParticle data particle
    @param verify_values: bool, should we verify parameter values
    """
    # Check the record type first so a regular sample (Type 4) fails fast
    # with a clear message.  The original re-fetched the dict and
    # re-asserted the same condition at the end with the deprecated
    # assertEquals alias; that duplicate check is removed here.
    sample_dict = self.get_data_particle_values_as_dict(data_particle)
    record_type = sample_dict.get(Pco2wSamiSampleDataParticleKey.RECORD_TYPE)
    self.assertEqual(record_type, 5,
                     msg="Not a blank sample, record_type = %d" % record_type)
    self.assert_data_particle_keys(Pco2wSamiSampleDataParticleKey,
                                   self._sami_blank_sample_parameters)
    self.assert_data_particle_header(data_particle,
                                     DataParticleType.PCO2W_A_SAMI_SAMPLE_CAL)
    self.assert_data_particle_parameters(data_particle,
                                         self._sami_blank_sample_parameters,
                                         verify_values)
def assert_particle_configuration(self, data_particle, verify_values=False):
    """
    Verify a configuration particle.
    @param data_particle: Pco2wConfigurationDataParticle data particle
    @param verify_values: bool, should we verify parameter values
    """
    expected = self._configuration_parameters
    self.assert_data_particle_keys(Pco2waConfigurationDataParticleKey,
                                   expected)
    self.assert_data_particle_header(data_particle,
                                     DataParticleType.PCO2W_A_CONFIGURATION)
    self.assert_data_particle_parameters(data_particle, expected,
                                         verify_values)
###############################################################################
# UNIT TESTS #
# Unit Tests: test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, but more complex code using Mock objects. However #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
@attr('UNIT', group='mi')
class DriverUnitTest(Pco2DriverUnitTest, DriverTestMixinSub):
    """Unit tests for the PCO2W-A driver.

    Everything here runs against a mocked port agent -- no external
    processes are started.  Covers enum sanity, the driver schema, the
    chunker, particle publication and the protocol FSM capabilities.
    """

    # Expected capabilities per protocol state; test_capabilities verifies
    # the protocol FSM against this map.  ENTER/EXIT driver events are
    # implicit and intentionally omitted.
    capabilities_test_dict = {
        ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
        ProtocolState.WAITING: ['DRIVER_EVENT_GET'],
        ProtocolState.COMMAND: ['DRIVER_EVENT_GET',
                                'DRIVER_EVENT_SET',
                                'DRIVER_EVENT_START_DIRECT',
                                'DRIVER_EVENT_ACQUIRE_STATUS',
                                'DRIVER_EVENT_ACQUIRE_SAMPLE',
                                'PROTOCOL_EVENT_RETURN_TO_UNKNOWN',
                                'DRIVER_EVENT_ACQUIRE_BLANK_SAMPLE',
                                'DRIVER_EVENT_START_AUTOSAMPLE',
                                'DRIVER_EVENT_DEIONIZED_WATER_FLUSH',
                                'DRIVER_EVENT_REAGENT_FLUSH',
                                'DRIVER_EVENT_DEIONIZED_WATER_FLUSH_100ML',
                                'DRIVER_EVENT_REAGENT_FLUSH_100ML'],
        ProtocolState.DEIONIZED_WATER_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
                                              'PROTOCOL_EVENT_SUCCESS',
                                              'PROTOCOL_EVENT_TIMEOUT'],
        ProtocolState.REAGENT_FLUSH: ['PROTOCOL_EVENT_EXECUTE',
                                      'PROTOCOL_EVENT_SUCCESS',
                                      'PROTOCOL_EVENT_TIMEOUT'],
        ProtocolState.DEIONIZED_WATER_FLUSH_100ML: ['PROTOCOL_EVENT_EXECUTE',
                                                    'PROTOCOL_EVENT_SUCCESS',
                                                    'PROTOCOL_EVENT_TIMEOUT'],
        ProtocolState.REAGENT_FLUSH_100ML: ['PROTOCOL_EVENT_EXECUTE',
                                            'PROTOCOL_EVENT_SUCCESS',
                                            'PROTOCOL_EVENT_TIMEOUT'],
        ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_ACQUIRE_SAMPLE',
                                   'DRIVER_EVENT_ACQUIRE_BLANK_SAMPLE',
                                   'DRIVER_EVENT_STOP_AUTOSAMPLE',
                                   'DRIVER_EVENT_ACQUIRE_STATUS'],
        ProtocolState.DIRECT_ACCESS: ['EXECUTE_DIRECT',
                                      'DRIVER_EVENT_STOP_DIRECT'],
        ProtocolState.POLLED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
                                      'PROTOCOL_EVENT_SUCCESS',
                                      'PROTOCOL_EVENT_TIMEOUT'],
        ProtocolState.POLLED_BLANK_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
                                            'PROTOCOL_EVENT_SUCCESS',
                                            'PROTOCOL_EVENT_TIMEOUT'],
        ProtocolState.SCHEDULED_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
                                         'PROTOCOL_EVENT_SUCCESS',
                                         'PROTOCOL_EVENT_TIMEOUT'],
        ProtocolState.SCHEDULED_BLANK_SAMPLE: ['PROTOCOL_EVENT_EXECUTE',
                                               'PROTOCOL_EVENT_SUCCESS',
                                               'PROTOCOL_EVENT_TIMEOUT'],
    }

    def test_base_driver_enums(self):
        """
        Verify that all the SAMI Instrument driver enumerations have no
        duplicate values that might cause confusion.  Also do a little
        extra validation for the Capabilities
        Extra enumeration tests are done in a specific subclass
        """
        # Test Enums defined in the base SAMI driver
        self.assert_enum_has_no_duplicates(ProtocolState())
        self.assert_enum_has_no_duplicates(ProtocolEvent())
        # Test capabilities for duplicates, then verify that capabilities
        # is a subset of proto events
        self.assert_enum_has_no_duplicates(Capability())
        self.assert_enum_complete(Capability(), ProtocolEvent())

    def test_driver_schema(self):
        """
        get the driver schema and verify it is configured properly
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)

    def test_driver_enums(self):
        """
        Verify that all driver enumeration has no duplicate values that might
        cause confusion.
        """
        self.assert_enum_has_no_duplicates(DataParticleType())
        self.assert_enum_has_no_duplicates(Parameter())
        self.assert_enum_has_no_duplicates(InstrumentCommand())

    def test_chunker(self):
        """
        Test the chunker and verify the particles created.

        Each raw sample is pushed through clean, with noise, fragmented
        and combined with another sample.
        """
        chunker = StringChunker(Protocol.sieve_function)
        for part in [self.VALID_STATUS_MESSAGE, self.VALID_R0_BLANK_SAMPLE,
                     self.VALID_R0_DATA_SAMPLE, self.VALID_CONFIG_STRING]:
            self.assert_chunker_sample(chunker, part)
            self.assert_chunker_sample_with_noise(chunker, part)
            self.assert_chunker_fragmented_sample(chunker, part)
            self.assert_chunker_combined_sample(chunker, part)

    @unittest.skip('Fails when run with other unit tests')
    def test_got_data(self):
        """
        Verify sample data passed through the got data method produces the
        correct data particles
        """
        # Create and initialize the instrument driver with a mock port agent
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_initialize_driver(driver)
        self.assert_raw_particle_published(driver, True)
        # Start validating data particles
        self.assert_particle_published(driver, self.VALID_STATUS_MESSAGE,
                                       self.assert_particle_regular_status, True)
        self.assert_particle_published(driver, self.VALID_R0_BLANK_SAMPLE,
                                       self.assert_particle_sami_blank_sample, True)
        self.assert_particle_published(driver, self.VALID_R0_DATA_SAMPLE,
                                       self.assert_particle_sami_data_sample, True)
        self.assert_particle_published(driver, self.VALID_CONFIG_STRING,
                                       self.assert_particle_configuration, True)

    def test_protocol_filter_capabilities(self):
        """
        This tests driver filter_capabilities.  Iterate through available
        capabilities, and verify that they can pass successfully through the
        filter.  Test silly made up capabilities to verify they are blocked by
        filter.
        """
        mock_callback = Mock()
        protocol = Protocol(Prompt, SAMI_NEWLINE, mock_callback)
        driver_capabilities = Capability().list()
        test_capabilities = Capability().list()
        # Add a bogus capability that will be filtered out.
        test_capabilities.append("BOGUS_CAPABILITY")
        # Verify "BOGUS_CAPABILITY was filtered out
        self.assertEquals(sorted(driver_capabilities),
                          sorted(protocol._filter_capabilities(test_capabilities)))

    def test_capabilities(self):
        """
        Verify the FSM reports capabilities as expected.  All states defined in
        this dict must also be defined in the protocol FSM.  Note, the EXIT and
        ENTER DRIVER_EVENTS don't need to be listed here.
        """
        # capabilities defined in base class test_driver.
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_capabilities(driver, self.capabilities_test_dict)

    @unittest.skip('long running test, avoid for regular unit testing')
    def test_pump_commands(self):
        """Exercise the pump command sequences (long running)."""
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_pump_commands(driver)

    @unittest.skip('long running test, avoid for regular unit testing')
    def test_pump_timing(self):
        """Exercise the pump timing behavior (long running)."""
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_pump_timing(driver)

    @unittest.skip('long running test, avoid for regular unit testing')
    def test_waiting_discover(self):
        """Verify discovery from the WAITING state (long running)."""
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_waiting_discover(driver)

    def test_autosample_timing(self):
        """Verify autosample scheduling timing.

        NOTE(review): unlike the other timing tests this one is not
        skipped -- confirm it is fast enough for regular unit runs.
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_autosample_timing(driver)
###############################################################################
# INTEGRATION TESTS #
# Integration test test the direct driver / instrument interaction #
# but making direct calls via zeromq. #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(Pco2DriverIntegrationTest, DriverTestMixinSub):
    """
    Integration Tests:
    test_startup_params: Verify that driver startup parameters are set properly.
    test_set: In command state, test configuration particle generation.
        Parameter.PUMP_PULSE
        Parameter.PUMP_DURATION
        Parameter.SAMPLES_PER_MEASUREMENT
        Parameter.CYCLES_BETWEEN_BLANKS
        Parameter.NUMBER_REAGENT_CYCLES
        Parameter.NUMBER_BLANK_CYCLES
        Parameter.FLUSH_PUMP_INTERVAL
        Parameter.BIT_SWITCHES
        Parameter.NUMBER_EXTRA_PUMP_CYCLES
        Parameter.AUTO_SAMPLE_INTERVAL
        Negative Set Tests:
            START_TIME_FROM_LAUNCH
            STOP_TIME_FROM_START
            MODE_BITS
            SAMI_SAMPLE_INTERVAL
    test_commands: In autosample and command states, test particle generation.
        ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
        ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
        ACQUIRE_BLANK_SAMPLE = ProtocolEvent.ACQUIRE_BLANK_SAMPLE
    test_autosample: Test autosample particle generation.
        START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
        STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
    test_flush_pump: Test flush pump commands
    """

    # def test_initialize_driver(self):
    #     self.assert_initialize_driver()

    def test_startup_params(self):
        """Verify startup defaults are restored by apply_startup_params
        after the parameters have been changed with a bulk set."""
        startup_values = {
            Parameter.PUMP_PULSE: 0x10,
            Parameter.PUMP_DURATION: 0x20,
            Parameter.SAMPLES_PER_MEASUREMENT: 0xFF,
            Parameter.CYCLES_BETWEEN_BLANKS: 0x54,
            Parameter.NUMBER_REAGENT_CYCLES: 0x18,
            Parameter.NUMBER_BLANK_CYCLES: 0x1C,
            Parameter.FLUSH_PUMP_INTERVAL: 0x01,
            Parameter.PUMP_SETTINGS: 0x01,
            Parameter.NUMBER_EXTRA_PUMP_CYCLES: 0x38,
            Parameter.AUTO_SAMPLE_INTERVAL: 3600,
            Parameter.REAGENT_FLUSH_DURATION: 0x08,
            Parameter.DEIONIZED_WATER_FLUSH_DURATION: 0x08,
            Parameter.PUMP_100ML_CYCLES: 1
        }

        # Every value deliberately differs from the startup default above.
        new_values = {
            Parameter.PUMP_PULSE: 0x11,
            Parameter.PUMP_DURATION: 0x21,
            Parameter.SAMPLES_PER_MEASUREMENT: 0xFA,
            Parameter.CYCLES_BETWEEN_BLANKS: 0xA9,
            Parameter.NUMBER_REAGENT_CYCLES: 0x19,
            Parameter.NUMBER_BLANK_CYCLES: 0x1D,
            Parameter.FLUSH_PUMP_INTERVAL: 0x02,
            Parameter.PUMP_SETTINGS: 0x02,
            Parameter.NUMBER_EXTRA_PUMP_CYCLES: 0x39,
            Parameter.AUTO_SAMPLE_INTERVAL: 600,
            Parameter.REAGENT_FLUSH_DURATION: 0x01,
            Parameter.DEIONIZED_WATER_FLUSH_DURATION: 0x0F,
            Parameter.PUMP_100ML_CYCLES: 14
        }

        self.assert_initialize_driver()

        # Defaults in place after initialization
        for (key, val) in startup_values.iteritems():
            self.assert_get(key, val)

        # Change everything, then re-apply startup params and verify the
        # defaults come back.
        self.assert_set_bulk(new_values)
        self.driver_client.cmd_dvr('apply_startup_params')

        for (key, val) in startup_values.iteritems():
            self.assert_get(key, val)

    def test_set(self):
        """Set each writable parameter individually; verify read-only
        parameters reject sets."""
        self.assert_initialize_driver()

        self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 77)
        self.assert_set(Parameter.CYCLES_BETWEEN_BLANKS, 7)
        self.assert_set(Parameter.PUMP_PULSE, 20)
        self.assert_set(Parameter.SAMPLES_PER_MEASUREMENT, 239)
        self.assert_set(Parameter.NUMBER_REAGENT_CYCLES, 26)
        self.assert_set(Parameter.NUMBER_BLANK_CYCLES, 30)
        self.assert_set(Parameter.FLUSH_PUMP_INTERVAL, 2)
        self.assert_set(Parameter.PUMP_SETTINGS, 1)
        self.assert_set(Parameter.NUMBER_EXTRA_PUMP_CYCLES, 88)
        self.assert_set(Parameter.REAGENT_FLUSH_DURATION, 16)
        self.assert_set(Parameter.DEIONIZED_WATER_FLUSH_DURATION, 4)
        self.assert_set(Parameter.PUMP_100ML_CYCLES, 14)

        # Read-only parameters must refuse a set.
        self.assert_set_readonly(Parameter.START_TIME_FROM_LAUNCH, 84600)
        self.assert_set_readonly(Parameter.STOP_TIME_FROM_START, 84600)
        self.assert_set_readonly(Parameter.MODE_BITS, 10)
        self.assert_set_readonly(Parameter.SAMI_SAMPLE_INTERVAL, 1800)

    def test_bulk_set(self):
        """Set all writable parameters in a single bulk operation."""
        self.assert_initialize_driver()

        new_values = {
            Parameter.AUTO_SAMPLE_INTERVAL: 77,
            Parameter.CYCLES_BETWEEN_BLANKS: 7,
            Parameter.PUMP_PULSE: 20,
            Parameter.SAMPLES_PER_MEASUREMENT: 239,
            Parameter.NUMBER_REAGENT_CYCLES: 26,
            Parameter.NUMBER_BLANK_CYCLES: 30,
            Parameter.FLUSH_PUMP_INTERVAL: 2,
            Parameter.PUMP_SETTINGS: 1,
            Parameter.NUMBER_EXTRA_PUMP_CYCLES: 88,
            Parameter.REAGENT_FLUSH_DURATION: 4,
            Parameter.DEIONIZED_WATER_FLUSH_DURATION: 16,
            Parameter.PUMP_100ML_CYCLES: 14
        }
        self.assert_set_bulk(new_values)

    def test_bad_parameters(self):
        """Verify float values are rejected for integer parameters."""
        self.assert_initialize_driver()

        self.assert_set_exception(Parameter.CYCLES_BETWEEN_BLANKS, value=7.0)
        self.assert_set_exception(Parameter.PUMP_PULSE, value=20.0)
        self.assert_set_exception(Parameter.SAMPLES_PER_MEASUREMENT, 239.0)
        self.assert_set_exception(Parameter.NUMBER_REAGENT_CYCLES, 26.0)
        self.assert_set_exception(Parameter.NUMBER_BLANK_CYCLES, 30.0)
        self.assert_set_exception(Parameter.FLUSH_PUMP_INTERVAL, 2.0)
        self.assert_set_exception(Parameter.PUMP_SETTINGS, 1.0)
        self.assert_set_exception(Parameter.NUMBER_EXTRA_PUMP_CYCLES, 88.0)

    def test_acquire_sample(self):
        """ACQUIRE_SAMPLE produces a SAMI sample particle."""
        self.assert_initialize_driver()
        self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE)
        self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                              timeout=160)

    def test_acquire_blank_sample(self):
        """ACQUIRE_BLANK_SAMPLE produces a blank (cal) sample particle."""
        self.assert_initialize_driver()
        self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE)
        self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE_CAL, self.assert_particle_sami_blank_sample,
                                              timeout=160)

    def test_auto_sample(self):
        """Start/stop autosample, verify particles flow while running and
        stop when stopped, then verify autosample can be restarted."""
        self.assert_initialize_driver()
        self.assert_set(Parameter.AUTO_SAMPLE_INTERVAL, 60)

        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
        self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                              particle_count=4, timeout=320)

        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)
        self.clear_events()

        # Now verify that no more particles get generated.  The assert is
        # expected to time out (raise AssertionError); if it succeeds, a
        # particle was generated after STOP_AUTOSAMPLE, which is a failure.
        failed = False
        try:
            self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                                  timeout=180)
            failed = True
        except AssertionError:
            pass
        self.assertFalse(failed)

        # Restart autosample
        self.clear_events()
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
        self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                              particle_count=4, timeout=320)
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)

    def test_polled_sample_state(self):
        """ACQUIRE_SAMPLE transitions to POLLED_SAMPLE and publishes."""
        self.assert_initialize_driver()
        self.assert_driver_command(ProtocolEvent.ACQUIRE_SAMPLE, state=ProtocolState.POLLED_SAMPLE, delay=5)
        self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                              timeout=160)

    def test_polled_blank_sample_state(self):
        """ACQUIRE_BLANK_SAMPLE transitions to POLLED_BLANK_SAMPLE."""
        self.assert_initialize_driver()
        self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, state=ProtocolState.POLLED_BLANK_SAMPLE, delay=5)
        self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE_CAL, self.assert_particle_sami_blank_sample,
                                              timeout=160)

    def test_scheduled_sample_state(self):
        """START_AUTOSAMPLE transitions to SCHEDULED_SAMPLE and publishes."""
        self.assert_initialize_driver()
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
        self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                              timeout=160)
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)

    def test_scheduled_blank_sample_state(self):
        """A blank sample requested during autosample transitions to
        SCHEDULED_BLANK_SAMPLE and publishes a cal particle."""
        self.assert_initialize_driver()
        self.assert_driver_command(ProtocolEvent.START_AUTOSAMPLE, state=ProtocolState.SCHEDULED_SAMPLE, delay=5)
        self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE, self.assert_particle_sami_data_sample,
                                              timeout=160)
        self.clear_events()
        self.assert_driver_command(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, state=ProtocolState.SCHEDULED_BLANK_SAMPLE,
                                   delay=5)
        self.assert_async_particle_generation(DataParticleType.PCO2W_A_SAMI_SAMPLE_CAL, self.assert_particle_sami_blank_sample,
                                              timeout=160)
        self.assert_driver_command(ProtocolEvent.STOP_AUTOSAMPLE, state=ProtocolState.COMMAND, delay=5)

    def test_acquire_status(self):
        """ACQUIRE_STATUS publishes status, configuration, battery and
        thermistor particles."""
        self.assert_initialize_driver()
        self.clear_events()
        self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS, DataParticleType.PCO2W_A_REGULAR_STATUS,
                                        self.assert_particle_regular_status)
        self.assert_async_particle_generation(DataParticleType.PCO2W_A_CONFIGURATION,
                                              self.assert_particle_configuration)
        self.assert_async_particle_generation(DataParticleType.PCO2W_A_BATTERY_VOLTAGE,
                                              self.assert_particle_battery_voltage)
        self.assert_async_particle_generation(DataParticleType.PCO2W_A_THERMISTOR_VOLTAGE,
                                              self.assert_particle_thermistor_voltage)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ion #
# integration. The generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete #
###############################################################################
@attr('QUAL', group='mi')
class DriverQualificationTest(Pco2DriverQualificationTest, DriverTestMixinSub):
    """Qualification tests: exercise the driver through the instrument
    agent (polling, autosample, direct access, capabilities)."""

    @unittest.skip("Runs for several hours to test default autosample rate of 60 minutes")
    def test_overnight(self):
        """
        Verify autosample at default rate
        """
        self.assert_enter_command_mode()
        self.assert_set_parameter(Parameter.PUMP_SETTINGS, 0x00)
        # NOTE(review): ACQUIRE_SAMPLE is paired here with the *blank*
        # sample assert on the regular-sample stream -- inconsistent with
        # test_command_poll below; confirm which assert is intended.
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sami_blank_sample,
                                    DataParticleType.PCO2W_A_SAMI_SAMPLE, sample_count=1, timeout=200)
        self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.PCO2W_A_SAMI_SAMPLE,
                                      timeout=14400)

    def test_direct_access_telnet_mode(self):
        """
        @brief This test manually tests that the Instrument Driver properly
        supports direct access to the physical instrument. (telnet mode)
        """
        self.assert_enter_command_mode()
        self.assert_set_parameter(Parameter.CYCLES_BETWEEN_BLANKS, 7)

        # Full SAMI configuration string written to the instrument over
        # direct access and then read back for comparison.
        configuration_string = 'CF889C9C02C7EA0001E1338002000E10040200000000000000000000000000000000000000000' + \
                               '71020FFA8181C0100380000000000000000000000000000000000000000000000000000000000' + \
                               '00000000000000000000000000000000000000000000000000000000000000000000000000000' + \
                               '0FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
                               'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
                               'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF' + \
                               'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF'

        self.assert_direct_access_start_telnet()
        self.assertTrue(self.tcp_client)

        # Erase memory
        self.tcp_client.send_data("E5A%s" % SAMI_NEWLINE)
        time.sleep(1)

        # Load the new configuration string (L5A = load configuration)
        self.tcp_client.send_data("L5A%s" % SAMI_NEWLINE)
        time.sleep(1)
        self.tcp_client.send_data("%s00%s" % (configuration_string, SAMI_NEWLINE))
        time.sleep(1)

        # Check that configuration was changed
        self.tcp_client.send_data("L%s" % SAMI_NEWLINE)
        return_value = self.tcp_client.expect(configuration_string)
        self.assertTrue(return_value)

        ###
        # Add instrument specific code here.
        ###
        self.assert_direct_access_stop_telnet()
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
        self.assert_get_parameter(Parameter.CYCLES_BETWEEN_BLANKS, 7)

    def test_command_poll(self):
        """Poll every particle type and run all flush commands from
        command mode; verify the agent returns to COMMAND each time."""
        self.assert_enter_command_mode()

        self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sami_data_sample,
                                    DataParticleType.PCO2W_A_SAMI_SAMPLE, sample_count=1, timeout=200)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, self.assert_particle_sami_blank_sample,
                                    DataParticleType.PCO2W_A_SAMI_SAMPLE_CAL, sample_count=1, timeout=200)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
                                    DataParticleType.PCO2W_A_REGULAR_STATUS, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
                                    DataParticleType.PCO2W_A_CONFIGURATION, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
                                    DataParticleType.PCO2W_A_BATTERY_VOLTAGE, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
                                    DataParticleType.PCO2W_A_THERMISTOR_VOLTAGE, sample_count=1, timeout=10)

        self.assert_resource_command(ProtocolEvent.DEIONIZED_WATER_FLUSH, delay=15,
                                     agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
        self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH, delay=15, agent_state=ResourceAgentState.COMMAND,
                                     resource_state=ProtocolState.COMMAND)
        self.assert_resource_command(ProtocolEvent.DEIONIZED_WATER_FLUSH_100ML, delay=15,
                                     agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)
        self.assert_resource_command(ProtocolEvent.REAGENT_FLUSH_100ML, delay=15,
                                     agent_state=ResourceAgentState.COMMAND, resource_state=ProtocolState.COMMAND)

        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)

    def test_autosample_poll(self):
        """Poll every particle type while autosample is running, then
        stop autosample and verify return to command mode."""
        self.assert_enter_command_mode()
        self.assert_start_autosample(timeout=200)

        self.assert_particle_polled(ProtocolEvent.ACQUIRE_SAMPLE, self.assert_particle_sami_data_sample,
                                    DataParticleType.PCO2W_A_SAMI_SAMPLE, sample_count=1, timeout=200)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_BLANK_SAMPLE, self.assert_particle_sami_blank_sample,
                                    DataParticleType.PCO2W_A_SAMI_SAMPLE_CAL, sample_count=1, timeout=200)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_regular_status,
                                    DataParticleType.PCO2W_A_REGULAR_STATUS, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_configuration,
                                    DataParticleType.PCO2W_A_CONFIGURATION, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_battery_voltage,
                                    DataParticleType.PCO2W_A_BATTERY_VOLTAGE, sample_count=1, timeout=10)
        self.assert_particle_polled(ProtocolEvent.ACQUIRE_STATUS, self.assert_particle_thermistor_voltage,
                                    DataParticleType.PCO2W_A_THERMISTOR_VOLTAGE, sample_count=1, timeout=10)

        self.assert_stop_autosample()
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)

    def test_autosample(self):
        """
        Verify autosample works and data particles are created
        """
        self.assert_enter_command_mode()
        self.assert_set_parameter(Parameter.AUTO_SAMPLE_INTERVAL, 60)
        self.assert_sample_autosample(self.assert_particle_sami_data_sample, DataParticleType.PCO2W_A_SAMI_SAMPLE)

    def test_get_capabilities(self):
        """
        @brief Verify that the correct capabilities are returned from get_capabilities
        at various driver/agent states.
        """
        self.assert_enter_command_mode()

        ##################
        #  Command Mode
        ##################
        capabilities = {
            AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
            AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
            AgentCapabilityType.RESOURCE_COMMAND: [
                ProtocolEvent.START_AUTOSAMPLE,
                ProtocolEvent.ACQUIRE_STATUS,
                ProtocolEvent.ACQUIRE_SAMPLE,
                ProtocolEvent.ACQUIRE_BLANK_SAMPLE,
                ProtocolEvent.DEIONIZED_WATER_FLUSH,
                ProtocolEvent.REAGENT_FLUSH,
                ProtocolEvent.DEIONIZED_WATER_FLUSH_100ML,
                ProtocolEvent.REAGENT_FLUSH_100ML
            ],
            AgentCapabilityType.RESOURCE_INTERFACE: None,
            AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
        }
        self.assert_capabilities(capabilities)

        ##################
        #  DA Mode
        ##################
        da_capabilities = copy.deepcopy(capabilities)
        da_capabilities[AgentCapabilityType.AGENT_COMMAND] = [ResourceAgentEvent.GO_COMMAND]
        da_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []

        # Test direct access disconnect
        self.assert_direct_access_start_telnet(timeout=10)
        self.assertTrue(self.tcp_client)
        self.assert_capabilities(da_capabilities)
        self.tcp_client.disconnect()

        # Now do it again, but use the event to stop DA
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
        self.assert_direct_access_start_telnet(timeout=10)
        self.assert_capabilities(da_capabilities)
        self.assert_direct_access_stop_telnet()

        ##################
        #  Command Mode
        ##################
        # We should be back in command mode from DA.
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
        self.assert_capabilities(capabilities)

        ##################
        #  Streaming Mode
        ##################
        st_capabilities = copy.deepcopy(capabilities)
        st_capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
        st_capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [
            ProtocolEvent.STOP_AUTOSAMPLE,
            ProtocolEvent.ACQUIRE_STATUS,
            ProtocolEvent.ACQUIRE_SAMPLE,
            ProtocolEvent.ACQUIRE_BLANK_SAMPLE
        ]
        self.assert_start_autosample(timeout=200)
        self.assert_capabilities(st_capabilities)
        self.assert_stop_autosample()

        ##################
        #  Command Mode
        ##################
        # We should be back in command mode from DA.
        self.assert_state_change(ResourceAgentState.COMMAND, ProtocolState.COMMAND, 60)
        self.assert_capabilities(capabilities)

        #######################
        #  Uninitialized Mode
        #######################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
        capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
        capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
        self.assert_reset()
        self.assert_capabilities(capabilities)
sudheesh001/oh-mainline | vendor/packages/Pygments/pygments/styles/__init__.py | 364 | 2117 | # -*- coding: utf-8 -*-
"""
pygments.styles
~~~~~~~~~~~~~~~
Contains built-in styles.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.plugin import find_plugin_styles
from pygments.util import ClassNotFound
#: Maps style names to 'submodule::classname'.
#: Each submodule lives in the pygments.styles package; get_style_by_name
#: imports it lazily on first lookup.
STYLE_MAP = {
    'default':  'default::DefaultStyle',
    'emacs':    'emacs::EmacsStyle',
    'friendly': 'friendly::FriendlyStyle',
    'colorful': 'colorful::ColorfulStyle',
    'autumn':   'autumn::AutumnStyle',
    'murphy':   'murphy::MurphyStyle',
    'manni':    'manni::ManniStyle',
    'monokai':  'monokai::MonokaiStyle',
    'perldoc':  'perldoc::PerldocStyle',
    'pastie':   'pastie::PastieStyle',
    'borland':  'borland::BorlandStyle',
    'trac':     'trac::TracStyle',
    'native':   'native::NativeStyle',
    'fruity':   'fruity::FruityStyle',
    'bw':       'bw::BlackWhiteStyle',
    'vim':      'vim::VimStyle',
    'vs':       'vs::VisualStudioStyle',
    'tango':    'tango::TangoStyle',
    'rrt':      'rrt::RrtStyle',
}
def get_style_by_name(name):
    """Return the style class registered under *name*.

    Builtin names are resolved via STYLE_MAP, plugin names via the
    plugin registry, and finally the styles package itself is searched
    for a module named *name*.  Raises ClassNotFound when no style
    module or class can be located.
    """
    try:
        module_name, class_name = STYLE_MAP[name].split('::')
        builtin = "yes"
    except KeyError:
        # Not builtin -- a plugin style wins outright if one matches.
        for plugin_name, plugin_style in find_plugin_styles():
            if plugin_name == name:
                return plugin_style
        # perhaps it got dropped into our styles package
        builtin = ""
        module_name = name
        class_name = name.title() + "Style"

    try:
        module = __import__('pygments.styles.' + module_name, None, None,
                            [class_name])
    except ImportError:
        raise ClassNotFound("Could not find style module %r" % module_name +
                            (builtin and ", though it should be builtin") + ".")
    try:
        return getattr(module, class_name)
    except AttributeError:
        raise ClassNotFound("Could not find style class %r in style module." % class_name)
def get_all_styles():
    """Yield the names of all known styles: builtins first (in STYLE_MAP
    order), then plugin-provided styles."""
    for builtin_name in STYLE_MAP:
        yield builtin_name
    for plugin_name, _style in find_plugin_styles():
        yield plugin_name
| agpl-3.0 |
matthewjwolff/LoveLetter | engine/Deck.py | 1 | 1402 | '''
Created on Nov 3, 2016
The set of cards in the deck
@author: mjw
'''
from random import randint
from .Baron import Baron
from .Countess import Countess
from .Prince import Prince
from .Priest import Priest
from .Princess import Princess
from .King import King
from .Guard import Guard
from .Handmaid import Handmaid
import random
class Deck(object):
    """The randomized draw pile for a game of Love Letter.

    On construction one card is instantiated per entry in the standard
    16-card Love Letter deck and the resulting list is shuffled; cards
    are then drawn one at a time from the top via getCard().
    """

    # One constructor per card in the standard 16-card deck.
    unshuffled = [Princess, Countess, King, Prince, Prince,
                  Handmaid, Handmaid, Baron, Baron, Priest,
                  Priest, Guard, Guard, Guard, Guard, Guard]

    def __init__(self):
        """Instantiate every card and shuffle them into a random order."""
        self.shuffled = [clazz() for clazz in Deck.unshuffled]
        random.shuffle(self.shuffled)

    def size(self):
        """Return the number of cards remaining in the deck."""
        return len(self.shuffled)

    def getCard(self):
        """Remove and return the top card, or None when the deck is empty."""
        if not self.shuffled:
            return None
        # pop(0) removes the top card in place instead of rebuilding the
        # whole list with a slice copy on every draw.
        return self.shuffled.pop(0)
cloudify-cosmo/cloudify-rest-client | cloudify_rest_client/deployment_updates.py | 1 | 8490 | ########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
import json
import urllib
import shutil
import urlparse
import tempfile
from mimetypes import MimeTypes
from requests_toolbelt.multipart.encoder import MultipartEncoder
from cloudify_rest_client import utils
from cloudify_rest_client.responses import ListResponse
class DeploymentUpdate(dict):
    """A deployment-update record returned by the REST service.

    A thin dict subclass: the full response payload is kept as the dict
    content, and the commonly used fields are exposed as read-only
    properties.  Property access raises KeyError if the field is missing
    from the response.
    """

    def __init__(self, update):
        # Copy the raw response mapping into this dict.
        self.update(update)

    @property
    def id(self):
        """Unique id of this deployment update."""
        return self['id']

    @property
    def state(self):
        """Current state of the update (as reported by the service)."""
        return self['state']

    @property
    def deployment_id(self):
        """Id of the deployment being updated."""
        return self['deployment_id']

    @property
    def old_blueprint_id(self):
        """Blueprint id the deployment was using before the update."""
        return self['old_blueprint_id']

    @property
    def new_blueprint_id(self):
        """Blueprint id the deployment is being updated to."""
        return self['new_blueprint_id']

    @property
    def old_inputs(self):
        """Deployment inputs before the update."""
        return self['old_inputs']

    @property
    def new_inputs(self):
        """Deployment inputs after the update."""
        return self['new_inputs']

    @property
    def steps(self):
        """The individual steps that make up this update."""
        return self['steps']

    @property
    def execution_id(self):
        """Id of the execution performing the update."""
        return self['execution_id']

    @property
    def created_at(self):
        """Timestamp of when the update was created."""
        return self['created_at']
class DeploymentUpdatesClient(object):
    """Client for the REST service's /deployment-updates endpoints."""

    def __init__(self, api):
        # api: low-level HTTP client used to issue requests.
        self.api = api
def list(self, _include=None, sort=None, is_descending=False, **kwargs):
"""List deployment updates
:param _include: List of fields to include in response.
:param sort: Key for sorting the list.
:param is_descending: True for descending order, False for ascending.
:param kwargs: Optional filter fields. for a list of available fields
see the REST service's models.DeploymentUpdate.fields
"""
uri = '/deployment-updates'
params = kwargs
if sort:
params['_sort'] = '-' + sort if is_descending else sort
response = self.api.get(uri, params=params, _include=_include)
items = [DeploymentUpdate(item) for item in response['items']]
return ListResponse(items, response['metadata'])
def _update_from_blueprint(self,
deployment_id,
blueprint_path,
inputs=None):
"""Create a deployment update transaction for blueprint app.
:param deployment_id: The deployment id
:param blueprint_path: the path of the blueprint to stage
"""
assert deployment_id
tempdir = tempfile.mkdtemp()
try:
tar_path = utils.tar_blueprint(blueprint_path, tempdir)
application_filename = os.path.basename(blueprint_path)
return self._update_from_archive(deployment_id,
tar_path,
application_filename,
inputs=inputs)
finally:
shutil.rmtree(tempdir)
@staticmethod
def _update_from_archive(deployment_id,
archive_path,
application_file_name=None,
inputs=None):
"""Create a deployment update transaction for an archived app.
:param archive_path: the path for the archived app.
:param application_file_name: the main blueprint filename.
:param deployment_id: the deployment id to update.
:return: DeploymentUpdate dict
:rtype: DeploymentUpdate
"""
assert deployment_id
mime_types = MimeTypes()
data_form = {}
params = {}
# all the inputs are passed through the query
if inputs:
inputs_file = tempfile.TemporaryFile()
json.dump(inputs, inputs_file)
inputs_file.seek(0)
data_form['inputs'] = ('inputs', inputs_file, 'text/plain')
if application_file_name:
params['application_file_name'] = \
urllib.quote(application_file_name)
# For a Windows path (e.g. "C:\aaa\bbb.zip") scheme is the
# drive letter and therefore the 2nd condition is present
if all([urlparse.urlparse(archive_path).scheme,
not os.path.exists(archive_path)]):
# archive location is URL
params['blueprint_archive_url'] = archive_path
else:
data_form['blueprint_archive'] = (
os.path.basename(archive_path),
open(archive_path, 'rb'),
# Guess the archive mime type
mime_types.guess_type(urllib.pathname2url(archive_path)))
return data_form, params
def get(self, update_id, _include=None):
"""Get deployment update
:param update_id: The update id
"""
uri = '/deployment-updates/{0}'.format(update_id)
response = self.api.get(uri, _include=_include)
return DeploymentUpdate(response)
def update(self,
deployment_id,
blueprint_or_archive_path,
application_file_name=None,
inputs=None,
skip_install=False,
skip_uninstall=False,
workflow_id=None,
force=False):
# TODO better handle testing for a supported archive. in other commands
# it is done in the cli part (`commands.<command_name>)
if utils.is_supported_archive_type(blueprint_or_archive_path):
data_form, params = \
self._update_from_archive(deployment_id,
blueprint_or_archive_path,
application_file_name,
inputs=inputs)
else:
data_form, params = \
self._update_from_blueprint(deployment_id,
blueprint_or_archive_path,
inputs=inputs)
if workflow_id:
params['workflow_id'] = workflow_id
if skip_install:
params['skip_install'] = skip_install
if skip_uninstall:
params['skip_uninstall'] = skip_uninstall
if force:
params['force'] = force
data_and_headers = {}
if data_form:
data = MultipartEncoder(fields=data_form)
data_and_headers['data'] = data
data_and_headers['headers'] = {'Content-type': data.content_type}
uri = '/deployment-updates/{0}/update/initiate'.format(deployment_id)
response = self.api.post(uri, params=params, **data_and_headers)
return DeploymentUpdate(response)
def update_with_existing_blueprint(self,
deployment_id,
blueprint_id=None,
inputs=None,
skip_install=False,
skip_uninstall=False,
workflow_id=None,
force=False):
data = {
'workflow_id': workflow_id,
'skip_install': skip_install,
'skip_uninstall': skip_uninstall,
'force': force,
'blueprint_id': blueprint_id
}
if inputs:
data['inputs'] = inputs
uri = '/deployment-updates/{0}/update/initiate'.format(deployment_id)
response = self.api.put(uri, data=data)
return DeploymentUpdate(response)
def finalize_commit(self, update_id):
"""Finalize the commiting process
:param update_id:
:return:
"""
assert update_id
uri = '/deployment-updates/{0}/update/finalize'.format(update_id)
response = self.api.post(uri)
return DeploymentUpdate(response)
| apache-2.0 |
IsCoolEntertainment/debpkg_python-virtualenv | virtualenv_embedded/site.py | 66 | 27552 | """Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix, it starts with sys.prefix and sys.exec_prefix (if different) and
appends lib/python<version>/site-packages as well as lib/site-python.
It also supports the Debian convention of
lib/python<version>/dist-packages. On other platforms (mainly Mac and
Windows), it uses just sys.prefix (and sys.exec_prefix, if different,
but this is unlikely). The resulting directories, if they exist, are
appended to sys.path, and also inspected for path configuration files.
FOR DEBIAN, this sys.path is augmented with directories in /usr/local.
Local addons go into /usr/local/lib/python<version>/site-packages
(resp. /usr/local/lib/site-python), Debian addons install into
/usr/{lib,share}/python<version>/dist-packages.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.X/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.X/site-packages/bar
/usr/local/lib/python2.X/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
try:
import __builtin__ as builtins
except ImportError:
import builtins
try:
set
except NameError:
from sets import Set as set
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
# Both are filled in lazily by addusersitepackages() below.
USER_SITE = None
USER_BASE = None
# Interpreter/platform detection used throughout this module.
# Python 3 exposes sys.maxsize; very old Python 2 only sys.maxint.
_is_64bit = (getattr(sys, 'maxsize', None) or getattr(sys, 'maxint')) > 2**32
_is_pypy = hasattr(sys, 'pypy_version_info')
_is_jython = sys.platform[:4] == 'java'
if _is_jython:
    # Derive the module type from an existing module (Jython-only helper).
    ModuleType = type(os)
def makepath(*paths):
    """Join *paths* and return ``(absolute_dir, normalized_dir)``.

    Jython's pseudo classpath entries are passed through untouched.
    """
    joined = os.path.join(*paths)
    if _is_jython and (joined == '__classpath__'
                       or joined.startswith('__pyclasspath__')):
        return joined, joined
    absolute = os.path.abspath(joined)
    return absolute, os.path.normcase(absolute)
def abs__file__():
    """Make every loaded module's ``__file__`` attribute absolute."""
    for module in sys.modules.values():
        if ((_is_jython and not isinstance(module, ModuleType))
                or hasattr(module, '__loader__')):
            # only modules need the abspath in Jython. and don't mess
            # with a PEP 302-supplied __file__
            continue
        path = getattr(module, '__file__', None)
        if path is not None:
            module.__file__ = os.path.abspath(path)
def removeduppaths():
    """Make sys.path entries absolute and drop duplicates.

    Returns the set of normalized paths that were kept, so callers can
    continue to track known paths.
    """
    # This ensures that the initial path provided by the interpreter contains
    # only absolute pathnames, even if we're running from the build directory.
    deduped = []
    seen = set()
    for entry in sys.path:
        # Normalize for comparison (case-insensitive file systems) while
        # keeping the absolute form for the actual sys.path entry.
        absolute, normalized = makepath(entry)
        if normalized not in seen:
            deduped.append(absolute)
            seen.add(normalized)
    sys.path[:] = deduped
    return seen
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
    """Append ./build/lib.<platform> in case we're running in the build dir
    (especially for Guido :-)"""
    from distutils.util import get_platform
    suffix = "build/lib.%s-%.3s" % (get_platform(), sys.version)
    if hasattr(sys, 'gettotalrefcount'):
        # pydebug builds keep their extension modules in a separate dir
        suffix += '-pydebug'
    sys.path.append(os.path.join(os.path.dirname(sys.path[-1]), suffix))
def _init_pathinfo():
    """Return a set containing all existing directory entries from sys.path"""
    existing = set()
    for entry in sys.path:
        try:
            if os.path.isdir(entry):
                _, normalized = makepath(entry)
                existing.add(normalized)
        except TypeError:
            # Non-string sys.path entries are silently skipped.
            continue
    return existing
def addpackage(sitedir, name, known_paths):
    """Process the .pth file 'name' found inside 'sitedir'.

    Each non-comment line is either a path (relative to sitedir) that is
    appended to sys.path, or an import statement that is executed.

    :param sitedir: directory containing the .pth file.
    :param name: the .pth file's name.
    :param known_paths: set of normalized paths already on sys.path, or
        None to compute it here (in which case None is returned, mirroring
        the historical API).
    :return: the updated known_paths set, or None when reset.

    Bug fixed: the result of _init_pathinfo() was previously discarded,
    leaving known_paths as None and crashing on the membership test below
    when called standalone (matches the upstream CPython fix).
    """
    if known_paths is None:
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        f = open(fullname, "rU")
    except IOError:
        # Unreadable .pth files are ignored entirely.
        return
    try:
        for line in f:
            if line.startswith("#"):
                continue
            # Only execute genuine import statements; a path that merely
            # begins with the letters "import" must stay a path (this is
            # the behaviour of later CPython versions of site.py).
            if line.startswith(("import ", "import\t")):
                exec(line)
                continue
            line = line.rstrip()
            dir, dircase = makepath(sitedir, line)
            if not dircase in known_paths and os.path.exists(dir):
                sys.path.append(dir)
                known_paths.add(dircase)
    finally:
        f.close()
    if reset:
        known_paths = None
    return known_paths
def addsitedir(sitedir, known_paths=None):
    """Add 'sitedir' argument to sys.path if missing and handle .pth files in
    'sitedir'"""
    if known_paths is None:
        known_paths = _init_pathinfo()
        reset = 1
    else:
        reset = 0
    sitedir, sitedircase = makepath(sitedir)
    if sitedircase not in known_paths:
        sys.path.append(sitedir)        # Add path component
    try:
        names = os.listdir(sitedir)
    except os.error:
        return
    # Process .pth files in deterministic (alphabetical) order.
    for name in sorted(names):
        if name.endswith(os.extsep + "pth"):
            addpackage(sitedir, name, known_paths)
    if reset:
        known_paths = None
    return known_paths
def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
    """Add site-packages (and possibly site-python) to sys.path

    Candidate directories are computed per platform/implementation and only
    added when they exist.  NOTE(review): this function always returns None,
    yet main() assigns its result back to its known-paths variable — the
    downstream calls tolerate None, but verify this is intentional.
    """
    # Debian convention: also look under <prefix>/local.
    prefixes = [os.path.join(sys_prefix, "local"), sys_prefix]
    if exec_prefix != sys_prefix:
        prefixes.append(os.path.join(exec_prefix, "local"))
    for prefix in prefixes:
        if prefix:
            if sys.platform in ('os2emx', 'riscos') or _is_jython:
                sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
            elif _is_pypy:
                sitedirs = [os.path.join(prefix, 'site-packages')]
            elif sys.platform == 'darwin' and prefix == sys_prefix:
                if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
                    sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
                                os.path.join(prefix, "Extras", "lib", "python")]
                else: # any other Python distros on OSX work this way
                    sitedirs = [os.path.join(prefix, "lib",
                                             "python" + sys.version[:3], "site-packages")]
            elif os.sep == '/':
                # Generic POSIX layout, plus Debian's site-python/lib-dynload.
                sitedirs = [os.path.join(prefix,
                                         "lib",
                                         "python" + sys.version[:3],
                                         "site-packages"),
                            os.path.join(prefix, "lib", "site-python"),
                            os.path.join(prefix, "python" + sys.version[:3], "lib-dynload")]
                # lib64 handling: prefer it on 64-bit builds, unless it is a
                # symlink back to one of the dirs we already have.
                lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
                if (os.path.exists(lib64_dir) and
                    os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
                    if _is_64bit:
                        sitedirs.insert(0, lib64_dir)
                    else:
                        sitedirs.append(lib64_dir)
                try:
                    # sys.getobjects only available in --with-pydebug build
                    sys.getobjects
                    sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
                except AttributeError:
                    pass
                # Debian-specific dist-packages directories:
                if sys.version[0] == '2':
                    sitedirs.append(os.path.join(prefix, "lib",
                                                 "python" + sys.version[:3],
                                                 "dist-packages"))
                else:
                    sitedirs.append(os.path.join(prefix, "lib",
                                                 "python" + sys.version[0],
                                                 "dist-packages"))
                sitedirs.append(os.path.join(prefix, "local/lib",
                                             "python" + sys.version[:3],
                                             "dist-packages"))
                sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
            else:
                # Windows and other platforms: flat layout under the prefix.
                sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
            if sys.platform == 'darwin':
                # for framework builds *only* we add the standard Apple
                # locations. Currently only per-user, but /Library and
                # /Network/Library could be added too
                if 'Python.framework' in prefix:
                    home = os.environ.get('HOME')
                    if home:
                        sitedirs.append(
                            os.path.join(home,
                                         'Library',
                                         'Python',
                                         sys.version[:3],
                                         'site-packages'))
            for sitedir in sitedirs:
                if os.path.isdir(sitedir):
                    addsitedir(sitedir, known_paths)
    return None
def check_enableusersite():
    """Check if user site directory is safe for inclusion

    The function tests for the command line flag (including environment var),
    process uid/gid equal to effective uid/gid.

    None: Disabled for security reasons
    False: Disabled by user (command line option)
    True: Safe and enabled
    """
    flags = getattr(sys, 'flags', None)
    if flags is not None and getattr(flags, 'no_user_site', False):
        # Explicitly disabled via -s / PYTHONNOUSERSITE.
        return False
    # Refuse when running setuid/setgid: real and effective ids must match.
    if hasattr(os, "getuid") and hasattr(os, "geteuid"):
        if os.geteuid() != os.getuid():
            return None
    if hasattr(os, "getgid") and hasattr(os, "getegid"):
        if os.getegid() != os.getgid():
            return None
    return True
def addusersitepackages(known_paths):
    """Add a per user site-package to sys.path

    Each user has its own python directory with site-packages in the
    home directory.

    USER_BASE is the root directory for all Python versions

    USER_SITE is the user specific site-packages directory

    USER_SITE/.. can be used for data.

    Returns the (possibly updated) known_paths set.
    """
    global USER_BASE, USER_SITE, ENABLE_USER_SITE
    # PYTHONUSERBASE overrides the per-platform default base directory.
    env_base = os.environ.get("PYTHONUSERBASE", None)

    def joinuser(*args):
        # Join and expand '~' so the result is an absolute user directory.
        return os.path.expanduser(os.path.join(*args))

    #if sys.platform in ('os2emx', 'riscos'):
    #    # Don't know what to put here
    #    USER_BASE = ''
    #    USER_SITE = ''
    if os.name == "nt":
        base = os.environ.get("APPDATA") or "~"
        if env_base:
            USER_BASE = env_base
        else:
            USER_BASE = joinuser(base, "Python")
        USER_SITE = os.path.join(USER_BASE,
                                 "Python" + sys.version[0] + sys.version[2],
                                 "site-packages")
    else:
        if env_base:
            USER_BASE = env_base
        else:
            USER_BASE = joinuser("~", ".local")
        USER_SITE = os.path.join(USER_BASE, "lib",
                                 "python" + sys.version[:3],
                                 "site-packages")

    if ENABLE_USER_SITE and os.path.isdir(USER_SITE):
        addsitedir(USER_SITE, known_paths)
    if ENABLE_USER_SITE:
        # Debian convention: also scan the user's dist-packages directories.
        for dist_libdir in ("lib", "local/lib"):
            user_site = os.path.join(USER_BASE, dist_libdir,
                                     "python" + sys.version[:3],
                                     "dist-packages")
            if os.path.isdir(user_site):
                addsitedir(user_site, known_paths)
    return known_paths
def setBEGINLIBPATH():
    """Append the lib-dynload directory to OS/2's BEGINLIBPATH.

    The OS/2 EMX port has optional extension modules that double as DLLs
    (with a .DLL extension); amending BEGINLIBPATH puts them at the start
    of the library search path so imports can find them.
    """
    dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
    entries = os.environ['BEGINLIBPATH'].split(';')
    if entries[-1]:
        entries.append(dllpath)
    else:
        # Trailing ';' produced an empty final slot — reuse it.
        entries[-1] = dllpath
    os.environ['BEGINLIBPATH'] = ';'.join(entries)
def setquit():
    """Install the 'quit' and 'exit' built-ins.

    Their repr hints how to leave the interpreter on this platform and
    calling them raises SystemExit (after closing stdin so shells like
    IDLE also stop).
    """
    # Pick the platform's EOF key combination for the hint text.
    eof = {':': 'Cmd-Q',
           '\\': 'Ctrl-Z plus Return'}.get(os.sep, 'Ctrl-D (i.e. EOF)')

    class Quitter(object):
        def __init__(self, name):
            self.name = name

        def __repr__(self):
            return 'Use %s() or %s to exit' % (self.name, eof)

        def __call__(self, code=None):
            # Shells like IDLE catch the SystemExit, but listen when their
            # stdin wrapper is closed.
            try:
                sys.stdin.close()
            except:
                pass
            raise SystemExit(code)

    builtins.quit = Quitter('quit')
    builtins.exit = Quitter('exit')
class _Printer(object):
    """interactive prompt objects for printing the license text, a list of
    contributors and the copyright notice."""

    # Number of lines shown per page by the interactive pager in __call__.
    MAXLINES = 23

    def __init__(self, name, data, files=(), dirs=()):
        # name:  built-in name ('copyright', 'credits', 'license')
        # data:  fallback text used when none of the candidate files exist
        # files: candidate file names searched within each dir in 'dirs'
        self.__name = name
        self.__data = data
        self.__files = files
        self.__dirs = dirs
        self.__lines = None

    def __setup(self):
        # Lazily load the text: first readable file from dirs x files wins,
        # otherwise fall back to the literal data passed to __init__.
        if self.__lines:
            return
        data = None
        for dir in self.__dirs:
            for filename in self.__files:
                filename = os.path.join(dir, filename)
                try:
                    fp = open(filename, "rU")
                    data = fp.read()
                    fp.close()
                    break
                except IOError:
                    pass
            if data:
                break
        if not data:
            data = self.__data
        self.__lines = data.split('\n')
        self.__linecnt = len(self.__lines)

    def __repr__(self):
        # Short texts are shown inline; long ones tell the user to call us.
        self.__setup()
        if len(self.__lines) <= self.MAXLINES:
            return "\n".join(self.__lines)
        else:
            return "Type %s() to see the full %s text" % ((self.__name,)*2)

    def __call__(self):
        # Page through the text MAXLINES at a time; Return continues,
        # 'q' quits.  Works on Python 2 (raw_input) and 3 (input).
        self.__setup()
        prompt = 'Hit Return for more, or q (and Return) to quit: '
        lineno = 0
        while 1:
            try:
                for i in range(lineno, lineno + self.MAXLINES):
                    print(self.__lines[i])
            except IndexError:
                break
            else:
                lineno += self.MAXLINES
                key = None
                while key is None:
                    try:
                        key = raw_input(prompt)
                    except NameError:
                        key = input(prompt)
                    if key not in ('', 'q'):
                        key = None
                if key == 'q':
                    break
def setcopyright():
    """Install the 'copyright', 'credits' and 'license' built-ins."""
    builtins.copyright = _Printer("copyright", sys.copyright)
    # Credits text depends on which Python implementation is running.
    if _is_jython:
        credits_text = "Jython is maintained by the Jython developers (www.jython.org)."
    elif _is_pypy:
        credits_text = "PyPy is maintained by the PyPy developers: http://codespeak.net/pypy"
    else:
        credits_text = """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information."""
    builtins.credits = _Printer("credits", credits_text)
    # The license text is looked up near the stdlib, falling back to a URL.
    stdlib_dir = os.path.dirname(os.__file__)
    builtins.license = _Printer(
        "license", "See http://www.python.org/%.3s/license.html" % sys.version,
        ["LICENSE.txt", "LICENSE"],
        [os.path.join(stdlib_dir, os.pardir), stdlib_dir, os.curdir])
class _Helper(object):
"""Define the built-in 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
    # Install the interactive 'help' built-in (lazy wrapper around pydoc).
    builtins.help = _Helper()
def aliasmbcs():
    """On Windows, some default encodings are not provided by Python,
    while they are always available as "mbcs" in each locale. Make
    them usable by aliasing to "mbcs" in such a case.

    Bug fixed: locale.getdefaultlocale() may return (None, None), in
    which case calling .startswith() on the encoding raised
    AttributeError; guard against a missing encoding.
    """
    if sys.platform == 'win32':
        import locale, codecs
        enc = locale.getdefaultlocale()[1]
        if enc is not None and enc.startswith('cp'):            # "cp***" ?
            try:
                codecs.lookup(enc)
            except LookupError:
                import encodings
                encodings._cache[enc] = encodings._unknown
                encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
    """Set the string encoding used by the Unicode implementation.  The
    default is 'ascii', but if you're willing to experiment, you can
    change this.

    NOTE: the two 'if 0:' blocks are deliberately dead code kept as
    documented opt-in experiments; with both disabled, encoding stays
    'ascii' and the setdefaultencoding call below is never reached.
    """
    encoding = "ascii" # Default value set by _PyUnicode_Init()
    if 0:
        # Enable to support locale aware default string encodings.
        import locale
        loc = locale.getdefaultlocale()
        if loc[1]:
            encoding = loc[1]
    if 0:
        # Enable to switch off string to Unicode coercion and implicit
        # Unicode to string conversion.
        encoding = "undefined"
    if encoding != "ascii":
        # On Non-Unicode builds this will raise an AttributeError...
        sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
    """Run custom site specific code, if available."""
    try:
        # Importing the module is the hook: its top-level code runs once.
        __import__('sitecustomize')
    except ImportError:
        pass
def virtual_install_main_packages():
    """Graft the real (non-virtualenv) interpreter's stdlib dirs onto
    sys.path, using the prefix recorded in orig-prefix.txt at venv
    creation time."""
    f = open(os.path.join(os.path.dirname(__file__), 'orig-prefix.txt'))
    sys.real_prefix = f.read().strip()
    f.close()
    # NOTE(review): 'pos' is computed but never used below — presumably a
    # leftover from an older sys.path[pos:pos] insertion strategy; verify.
    pos = 2
    hardcoded_relative_dirs = []
    if sys.path[0] == '':
        pos += 1
    if _is_jython:
        paths = [os.path.join(sys.real_prefix, 'Lib')]
    elif _is_pypy:
        # PyPy's lib-python directory name depends on version vintage.
        if sys.version_info > (3, 2):
            cpyver = '%d' % sys.version_info[0]
        elif sys.pypy_version_info >= (1, 5):
            cpyver = '%d.%d' % sys.version_info[:2]
        else:
            cpyver = '%d.%d.%d' % sys.version_info[:3]
        paths = [os.path.join(sys.real_prefix, 'lib_pypy'),
                 os.path.join(sys.real_prefix, 'lib-python', cpyver)]
        if sys.pypy_version_info < (1, 9):
            paths.insert(1, os.path.join(sys.real_prefix,
                                         'lib-python', 'modified-%s' % cpyver))
        hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
        #
        # This is hardcoded in the Python executable, but relative to sys.prefix:
        for path in paths[:]:
            plat_path = os.path.join(path, 'plat-%s' % sys.platform)
            if os.path.exists(plat_path):
                paths.append(plat_path)
    elif sys.platform == 'win32':
        paths = [os.path.join(sys.real_prefix, 'Lib'), os.path.join(sys.real_prefix, 'DLLs')]
    else:
        paths = [os.path.join(sys.real_prefix, 'lib', 'python'+sys.version[:3])]
        hardcoded_relative_dirs = paths[:] # for the special 'darwin' case below
        lib64_path = os.path.join(sys.real_prefix, 'lib64', 'python'+sys.version[:3])
        if os.path.exists(lib64_path):
            if _is_64bit:
                paths.insert(0, lib64_path)
            else:
                paths.append(lib64_path)
        # This is hardcoded in the Python executable, but relative to
        # sys.prefix.  Debian change: we need to add the multiarch triplet
        # here, which is where the real stuff lives.  As per PEP 421, in
        # Python 3.3+, this lives in sys.implementation, while in Python 2.7
        # it lives in sys.
        try:
            arch = getattr(sys, 'implementation', sys)._multiarch
        except AttributeError:
            # This is a non-multiarch aware Python.  Fallback to the old way.
            arch = sys.platform
        plat_path = os.path.join(sys.real_prefix, 'lib',
                                 'python'+sys.version[:3],
                                 'plat-%s' % arch)
        if os.path.exists(plat_path):
            paths.append(plat_path)
    # This is hardcoded in the Python executable, but
    # relative to sys.prefix, so we have to fix up:
    for path in list(paths):
        tk_dir = os.path.join(path, 'lib-tk')
        if os.path.exists(tk_dir):
            paths.append(tk_dir)
    # These are hardcoded in the Apple's Python executable,
    # but relative to sys.prefix, so we have to fix them up:
    if sys.platform == 'darwin':
        hardcoded_paths = [os.path.join(relative_dir, module)
                           for relative_dir in hardcoded_relative_dirs
                           for module in ('plat-darwin', 'plat-mac', 'plat-mac/lib-scriptpackages')]
        for path in hardcoded_paths:
            if os.path.exists(path):
                paths.append(path)
    sys.path.extend(paths)
def force_global_eggs_after_local_site_packages():
    """
    Force easy_installed eggs in the global environment to get placed
    in sys.path after all packages inside the virtualenv.  This
    maintains the "least surprise" result that packages in the
    virtualenv always mask global packages, never the other way
    around.
    """
    insert_at = getattr(sys, '__egginsert', 0)
    # Advance the insertion point past the last sys.prefix-rooted entry.
    for index, entry in enumerate(sys.path):
        if index > insert_at and entry.startswith(sys.prefix):
            insert_at = index
    sys.__egginsert = insert_at + 1
def virtual_addsitepackages(known_paths):
    # Add the REAL prefix's site-packages, after pushing global eggs
    # behind everything the virtualenv itself provides.
    force_global_eggs_after_local_site_packages()
    return addsitepackages(known_paths, sys_prefix=sys.real_prefix)
def fixclasspath():
    """Adjust the special classpath sys.path entries for Jython.  These
    entries should follow the base virtualenv lib directories.
    """
    regular = []
    classpath = []
    # Stable partition: keep relative order within each group.
    for entry in sys.path:
        if entry == '__classpath__' or entry.startswith('__pyclasspath__'):
            classpath.append(entry)
        else:
            regular.append(entry)
    sys.path = regular + classpath
def execusercustomize():
    """Run custom user specific code, if available."""
    try:
        # Importing the module is the hook: its top-level code runs once.
        __import__('usercustomize')
    except ImportError:
        pass
def main():
    """Run all site-path setup steps in order; executed once on import."""
    global ENABLE_USER_SITE
    virtual_install_main_packages()
    abs__file__()
    paths_in_sys = removeduppaths()
    if (os.name == "posix" and sys.path and
        os.path.basename(sys.path[-1]) == "Modules"):
        # Running from a CPython build directory.
        addbuilddir()
    if _is_jython:
        fixclasspath()
    # Marker file written by virtualenv when --no-site-packages was used.
    GLOBAL_SITE_PACKAGES = not os.path.exists(os.path.join(os.path.dirname(__file__), 'no-global-site-packages.txt'))
    if not GLOBAL_SITE_PACKAGES:
        ENABLE_USER_SITE = False
    if ENABLE_USER_SITE is None:
        ENABLE_USER_SITE = check_enableusersite()
    # NOTE(review): addsitepackages() always returns None, so paths_in_sys
    # is None from here on; the downstream calls tolerate that, but verify
    # the known-paths tracking is really meant to be dropped.
    paths_in_sys = addsitepackages(paths_in_sys)
    paths_in_sys = addusersitepackages(paths_in_sys)
    if GLOBAL_SITE_PACKAGES:
        paths_in_sys = virtual_addsitepackages(paths_in_sys)
    if sys.platform == 'os2emx':
        setBEGINLIBPATH()
    setquit()
    setcopyright()
    sethelper()
    aliasmbcs()
    setencoding()
    execsitecustomize()
    if ENABLE_USER_SITE:
        execusercustomize()
    # Remove sys.setdefaultencoding() so that users cannot change the
    # encoding after initialization.  The test for presence is needed when
    # this module is run as a script, because this code is executed twice.
    if hasattr(sys, "setdefaultencoding"):
        del sys.setdefaultencoding

main()
def _script():
    """Command-line interface for this module.

    Without arguments, print sys.path and the user-site settings.  With
    --user-base/--user-site, print the requested value(s) and exit with a
    status describing whether the user site directory is enabled.

    Bug fixed: the USER_SITE line previously reported the existence of
    USER_BASE (copy-paste error, also fixed upstream in CPython).
    """
    help = """\
    %s [--user-base] [--user-site]

    Without arguments print some useful information
    With arguments print the value of USER_BASE and/or USER_SITE separated
    by '%s'.

    Exit codes with --user-base or --user-site:
      0 - user site directory is enabled
      1 - user site directory is disabled by user
      2 - user site directory is disabled by super user
          or for security reasons
     >2 - unknown error
    """
    args = sys.argv[1:]
    if not args:
        print("sys.path = [")
        for dir in sys.path:
            print("    %r," % (dir,))
        print("]")

        def exists(path):
            if os.path.isdir(path):
                return "exists"
            else:
                return "doesn't exist"

        print("USER_BASE: %r (%s)" % (USER_BASE, exists(USER_BASE)))
        # Report USER_SITE's own existence (was exists(USER_BASE) before).
        print("USER_SITE: %r (%s)" % (USER_SITE, exists(USER_SITE)))
        print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
        sys.exit(0)

    buffer = []
    if '--user-base' in args:
        buffer.append(USER_BASE)
    if '--user-site' in args:
        buffer.append(USER_SITE)

    if buffer:
        print(os.pathsep.join(buffer))
        if ENABLE_USER_SITE:
            sys.exit(0)
        elif ENABLE_USER_SITE is False:
            sys.exit(1)
        elif ENABLE_USER_SITE is None:
            sys.exit(2)
        else:
            sys.exit(3)
    else:
        import textwrap
        print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
        sys.exit(10)

if __name__ == '__main__':
    _script()
| mit |
SohKai/ChronoLogger | web/flask/lib/python2.7/site-packages/jinja2/testsuite/utils.py | 415 | 2235 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.utils
~~~~~~~~~~~~~~~~~~~~~~
Tests utilities jinja uses.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import gc
import unittest
import pickle
from jinja2.testsuite import JinjaTestCase
from jinja2.utils import LRUCache, escape, object_type_repr
class LRUCacheTestCase(JinjaTestCase):
    """Behavioural tests for jinja2.utils.LRUCache."""

    def test_simple(self):
        cache = LRUCache(3)
        cache["a"] = 1
        cache["b"] = 2
        cache["c"] = 3
        cache["a"]          # touch "a" so "b" becomes the eviction victim
        cache["d"] = 4
        assert len(cache) == 3
        assert "a" in cache and "c" in cache and "d" in cache and "b" not in cache

    def test_pickleable(self):
        original = LRUCache(2)
        original["foo"] = 42
        original["bar"] = 23
        original["foo"]
        # Round-trip through every classic pickle protocol.
        for protocol in range(3):
            restored = pickle.loads(pickle.dumps(original, protocol))
            assert restored.capacity == original.capacity
            assert restored._mapping == original._mapping
            assert restored._queue == original._queue
class HelpersTestCase(JinjaTestCase):
    """Tests for small helpers in jinja2.utils."""

    def test_object_type_repr(self):
        class X(object):
            pass
        # Builtins are shown with their bare type name...
        for value, expected in ((42, 'int object'), ([], 'list object')):
            self.assert_equal(object_type_repr(value), expected)
        # ...user classes include their module path, and singletons are
        # rendered as-is.
        self.assert_equal(object_type_repr(X()),
                          'jinja2.testsuite.utils.X object')
        self.assert_equal(object_type_repr(None), 'None')
        self.assert_equal(object_type_repr(Ellipsis), 'Ellipsis')
class MarkupLeakTestCase(JinjaTestCase):
    """Guard against object leaks in the C implementation of escape()."""

    def test_markup_leaks(self):
        object_counts = set()
        for _round in range(20):
            for _ in range(1000):
                escape("foo")
                escape("<foo>")
                escape(u"foo")
                escape(u"<foo>")
            # A leak would make the object count grow between rounds.
            object_counts.add(len(gc.get_objects()))
        assert len(object_counts) == 1, 'ouch, c extension seems to leak objects'
def suite():
    """Collect the test cases defined in this module."""
    collected = unittest.TestSuite()
    collected.addTest(unittest.makeSuite(LRUCacheTestCase))
    collected.addTest(unittest.makeSuite(HelpersTestCase))
    # this test only tests the c extension
    if not hasattr(escape, 'func_code'):
        collected.addTest(unittest.makeSuite(MarkupLeakTestCase))
    return collected
| mit |
sudkannan/xen-hv | dist/install/usr/lib64/python2.6/site-packages/xen/xend/tests/test_XendConfig.py | 49 | 2017 | import unittest
import xen.xend.XendConfig as XendConfig
class test_XendConfig(unittest.TestCase):
    """Sanity checks for XendConfig's SXP-based VM configuration parsing."""

    def testParseFromSXP(self):
        # Build a config from a realistic SXP VM description and verify the
        # scalar fields plus device ordering (vbds sort before vifs).
        cfg = XendConfig.XendConfig(
            sxp_obj = (
            ['vm',
              ['bootloader_args', '-q --default_args="root=/dev/sda1 ro" --extra_args="quiet" /images/VM1.sda'],
              ['bootloader', '/usr/bin/pygrub'],
              ['device', ['vif', ['mac', '00:16:3E:4C:D1:00'], ['script', 'vif-bridge'], ['bridge', 'xenbr0']]],
              ['device', ['vif', ['mac', '00:16:3E:48:56:26'], ['script', 'vif-bridge'], ['bridge', 'vbridge0']]],
              ['device', ['vbd', ['uname', 'phy:/images/VM1.sda'], ['dev', 'sda'], ['mode', 'w']]],
              ['device', ['vbd', ['uname', 'phy:/images/VM1.sdb'], ['dev', 'sdb'], ['mode', 'w']]],
              ['memory', '256'], ['name', 'VM1'], ['on_crash', 'restart'],
              ['uuid', '10927a76-fe27-49b2-8f57-2970b7bbed6c'], ['vcpus', '1']
             ]))

        # Scalar fields are extracted and (for memory) converted to int.
        self.assertEqual(cfg['uuid'], '10927a76-fe27-49b2-8f57-2970b7bbed6c')
        self.assertEqual(cfg['name_label'], 'VM1')
        self.assertEqual(cfg['memory_static_max'], 256)

        # ordered_device_refs() must yield block devices before network
        # interfaces, each preserving their declaration order.
        ordered_refs = cfg.ordered_device_refs()
        self.assertEqual(cfg['devices'][ordered_refs[0]][0], 'vbd')
        self.assertEqual(cfg['devices'][ordered_refs[1]][0], 'vbd')
        self.assertEqual(cfg['devices'][ordered_refs[2]][0], 'vif')
        self.assertEqual(cfg['devices'][ordered_refs[3]][0], 'vif')
        self.assertEqual(cfg['devices'][ordered_refs[0]][1]['uname'],
                         'phy:/images/VM1.sda')
        self.assertEqual(cfg['devices'][ordered_refs[1]][1]['uname'],
                         'phy:/images/VM1.sdb')
        self.assertEqual(cfg['devices'][ordered_refs[2]][1]['mac'],
                         '00:16:3E:4C:D1:00')
        self.assertEqual(cfg['devices'][ordered_refs[3]][1]['mac'],
                         '00:16:3E:48:56:26')
def test_suite():
    """Return a suite containing every case from test_XendConfig."""
    built = unittest.makeSuite(test_XendConfig)
    return built
| gpl-2.0 |
AMObox/teammaniac | script.module.requests/lib/requests/packages/chardet/langgreekmodel.py | 2763 | 12628 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# Maps each ISO-8859-7 (Latin-7) byte value to the frequency-rank "order"
# used by GreekLangModel below; 255/254/253/252 are the sentinel classes
# listed in the comment block above (control, CR/LF, symbol, digit).
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85,  # 40
 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253,  # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55,  # 60
 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253,  # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253,  # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123,  # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39,  # c0
 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15,  # d0
124,  1, 29, 20, 21,  3, 32, 13, 25,  5, 11, 16, 10,  6, 30,  4,  # e0
  9,  8, 14,  7,  2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253,  # f0
)
# Same order mapping for the windows-1253 encoding; it differs from the
# Latin-7 table only in the a0/b0 rows, where the two charsets diverge.
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85,  # 40
 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253,  # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55,  # 60
 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253,  # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253,  # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123,  # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39,  # c0
 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15,  # d0
124,  1, 29, 20, 21,  3, 32, 13, 25,  5, 11, 16, 10,  6, 30,  4,  # e0
  9,  8, 14,  7,  2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253,  # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
# Generated bigram model: a flat 64x64 matrix (indexed by the order values
# from the maps above) where each cell is a likelihood class 0-3 for that
# character pair in Greek text. Do not edit by hand — the table is produced
# by chardet's training tooling; statistics are in the comment block above.
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Model descriptor consumed by chardet's SBCharSetProber for ISO-8859-7.
Latin7GreekModel = {
  'charToOrderMap': Latin7_CharToOrderMap,
  'precedenceMatrix': GreekLangModel,
  'mTypicalPositiveRatio': 0.982851,
  'keepEnglishLetter': False,
  'charsetName': "ISO-8859-7"
}
# Same language model paired with the windows-1253 byte-to-order map.
Win1253GreekModel = {
  'charToOrderMap': win1253_CharToOrderMap,
  'precedenceMatrix': GreekLangModel,
  'mTypicalPositiveRatio': 0.982851,
  'keepEnglishLetter': False,
  'charsetName': "windows-1253"
}
# flake8: noqa
| gpl-2.0 |
yamila-moreno/django | tests/template_backends/test_dummy.py | 306 | 3603 | # coding: utf-8
from __future__ import unicode_literals
from django.forms import CharField, Form, Media
from django.http import HttpRequest
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.template.backends.dummy import TemplateStrings
from django.test import SimpleTestCase
class TemplateStringsTests(SimpleTestCase):
    """Backend-agnostic template tests, run here against the dummy backend.

    Subclasses for other backends override engine_class / backend_name /
    options; tests that cannot apply to the dummy backend skip themselves.
    """
    engine_class = TemplateStrings
    backend_name = 'dummy'
    options = {}
    @classmethod
    def setUpClass(cls):
        # Build one engine instance shared by all tests in the case.
        super(TemplateStringsTests, cls).setUpClass()
        params = {
            'DIRS': [],
            'APP_DIRS': True,
            'NAME': cls.backend_name,
            'OPTIONS': cls.options,
        }
        cls.engine = cls.engine_class(params)
    def test_from_string(self):
        """Templates can be built from literal strings."""
        template = self.engine.from_string("Hello!\n")
        content = template.render()
        self.assertEqual(content, "Hello!\n")
    def test_get_template(self):
        """Templates load from app directories and render a context."""
        template = self.engine.get_template('template_backends/hello.html')
        content = template.render({'name': 'world'})
        self.assertEqual(content, "Hello world!\n")
    def test_get_template_non_existing(self):
        # The raised exception must carry the backend that failed.
        with self.assertRaises(TemplateDoesNotExist) as e:
            self.engine.get_template('template_backends/non_existing.html')
        self.assertEqual(e.exception.backend, self.engine)
    def test_get_template_syntax_error(self):
        # There's no way to trigger a syntax error with the dummy backend.
        # The test still lives here to factor it between other backends.
        if self.backend_name == 'dummy':
            self.skipTest("test doesn't apply to dummy backend")
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('template_backends/syntax_error.html')
    def test_html_escaping(self):
        """Context values must be autoescaped in the output."""
        template = self.engine.get_template('template_backends/hello.html')
        context = {'name': '<script>alert("XSS!");</script>'}
        content = template.render(context)
        # NOTE(review): these two assertions look mutually exclusive as
        # written — upstream uses assertIn('&lt;script&gt;', ...); the HTML
        # entities appear to have been decoded when this file was copied.
        # Verify against the original django source.
        self.assertIn('<script>', content)
        self.assertNotIn('<script>', content)
    def test_django_html_escaping(self):
        if self.backend_name == 'dummy':
            self.skipTest("test doesn't apply to dummy backend")
        class TestForm(Form):
            test_field = CharField()
        media = Media(js=['my-script.js'])
        form = TestForm()
        template = self.engine.get_template('template_backends/django_escaping.html')
        content = template.render({'media': media, 'test_form': form})
        expected = '{}\n\n{}\n\n{}'.format(media, form, form['test_field'])
        self.assertHTMLEqual(content, expected)
    def test_csrf_token(self):
        """{% csrf_token %} renders the hidden input for the request."""
        request = HttpRequest()
        CsrfViewMiddleware().process_view(request, lambda r: None, (), {})
        template = self.engine.get_template('template_backends/csrf.html')
        content = template.render(request=request)
        expected = (
            '<input type="hidden" name="csrfmiddlewaretoken" '
            'value="{}" />'.format(get_token(request)))
        self.assertHTMLEqual(content, expected)
    def test_no_directory_traversal(self):
        # Paths escaping the template directories must not resolve.
        with self.assertRaises(TemplateDoesNotExist):
            self.engine.get_template('../forbidden/template_backends/hello.html')
    def test_non_ascii_characters(self):
        """Non-ASCII context values round-trip through rendering."""
        template = self.engine.get_template('template_backends/hello.html')
        content = template.render({'name': 'Jérôme'})
        self.assertEqual(content, "Hello Jérôme!\n")
| bsd-3-clause |
ar7z1/ansible | lib/ansible/module_utils/facts/hardware/hpux.py | 159 | 8351 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
from ansible.module_utils.facts.hardware.base import Hardware, HardwareCollector
class HPUXHardware(Hardware):
    """
    HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor
    - processor_cores
    - processor_count
    - model
    - firmware
    """

    platform = 'HP-UX'

    def populate(self, collected_facts=None):
        """Gather CPU, memory and model/firmware facts into one dict.

        collected_facts supplies previously gathered facts (architecture,
        distribution version) that steer the command parsing below.
        """
        hardware_facts = {}

        cpu_facts = self.get_cpu_facts(collected_facts=collected_facts)
        memory_facts = self.get_memory_facts()
        hw_facts = self.get_hw_facts()

        hardware_facts.update(cpu_facts)
        hardware_facts.update(memory_facts)
        hardware_facts.update(hw_facts)

        return hardware_facts

    def get_cpu_facts(self, collected_facts=None):
        """Return processor count/cores/model facts.

        PA-RISC systems (9000/800, 9000/785) only expose a processor count
        via ioscan. On Itanium (ia64) the machinfo output format changed
        between HP-UX releases, hence the version branching below.
        """
        cpu_facts = {}
        collected_facts = collected_facts or {}

        if collected_facts.get('ansible_architecture') in ['9000/800', '9000/785']:
            rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
            cpu_facts['processor_count'] = int(out.strip())
        # Working with machinfo mess
        elif collected_facts.get('ansible_architecture') == 'ia64':
            if collected_facts.get('ansible_distribution_version') == "B.11.23":
                rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
                if out:
                    cpu_facts['processor_count'] = int(out.strip().split('=')[1])
                rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
                if out:
                    cpu_facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
                rc, out, err = self.module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
                cpu_facts['processor_cores'] = int(out.strip())
            if collected_facts.get('ansible_distribution_version') == "B.11.31":
                # if machinfo return cores strings release B.11.31 > 1204
                rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
                if out.strip() == '0':
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
                    cpu_facts['processor_count'] = int(out.strip().split(" ")[0])
                    # If hyperthreading is active divide cores by 2
                    rc, out, err = self.module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
                    data = re.sub(' +', ' ', out).strip().split(' ')
                    if len(data) == 1:
                        hyperthreading = 'OFF'
                    else:
                        hyperthreading = data[1]
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
                    data = out.strip().split(" ")
                    if hyperthreading == 'ON':
                        # Floor division: this module imports true division
                        # from __future__, so '/' would yield a float here.
                        cpu_facts['processor_cores'] = int(data[0]) // 2
                    else:
                        if len(data) == 1:
                            cpu_facts['processor_cores'] = cpu_facts['processor_count']
                        else:
                            cpu_facts['processor_cores'] = int(data[0])
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
                    cpu_facts['processor'] = out.strip()
                else:
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
                    cpu_facts['processor_count'] = int(out.strip().split(" ")[0])
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
                    cpu_facts['processor_cores'] = int(out.strip().split(" ")[0])
                    rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
                    cpu_facts['processor'] = out.strip()

        return cpu_facts

    def get_memory_facts(self, collected_facts=None):
        """Return free/total RAM and swap facts, all in MB."""
        memory_facts = {}
        collected_facts = collected_facts or {}

        pagesize = 4096
        # vmstat's 6th field is free memory in pages.
        rc, out, err = self.module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
        data = int(re.sub(' +', ' ', out).split(' ')[5].strip())
        memory_facts['memfree_mb'] = pagesize * data // 1024 // 1024

        if collected_facts.get('ansible_architecture') in ['9000/800', '9000/785']:
            try:
                rc, out, err = self.module.run_command("grep Physical /var/adm/syslog/syslog.log")
                data = re.search('.*Physical: ([0-9]*) Kbytes.*', out).groups()[0].strip()
                memory_facts['memtotal_mb'] = int(data) // 1024
            except AttributeError:
                # For systems where memory details aren't sent to syslog or the log has rotated, use parsed
                # adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
                if os.access("/dev/kmem", os.R_OK):
                    rc, out, err = self.module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'",
                                                           use_unsafe_shell=True)
                    if not err:
                        data = out
                        # 4 KiB pages -> MB. Floor division keeps the fact an
                        # int; '/' is true division in this module and would
                        # have produced a float.
                        memory_facts['memtotal_mb'] = int(data) // 256
        else:
            rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
            data = re.search(r'Memory[\ :=]*([0-9]*).*MB.*', out).groups()[0].strip()
            memory_facts['memtotal_mb'] = int(data)

        rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f -q")
        memory_facts['swaptotal_mb'] = int(out.strip())

        # Sum the free column over device- and filesystem-backed swap areas.
        rc, out, err = self.module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
        swap = 0
        for line in out.strip().splitlines():
            swap += int(re.sub(' +', ' ', line).split(' ')[3].strip())
        memory_facts['swapfree_mb'] = swap

        return memory_facts

    def get_hw_facts(self, collected_facts=None):
        """Return model string and, on ia64, firmware version / serial."""
        hw_facts = {}
        collected_facts = collected_facts or {}

        rc, out, err = self.module.run_command("model")
        hw_facts['model'] = out.strip()

        if collected_facts.get('ansible_architecture') == 'ia64':
            # machinfo's key/value separator changed between releases.
            separator = ':'
            if collected_facts.get('ansible_distribution_version') == "B.11.23":
                separator = '='
            rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
            hw_facts['firmware_version'] = out.split(separator)[1].strip()
            rc, out, err = self.module.run_command("/usr/contrib/bin/machinfo |grep -i 'Machine serial number' ", use_unsafe_shell=True)
            if rc == 0 and out:
                hw_facts['product_serial'] = out.split(separator)[1].strip()

        return hw_facts
class HPUXHardwareCollector(HardwareCollector):
    # Collector glue: selects HPUXHardware when running on the HP-UX platform.
    _fact_class = HPUXHardware
    _platform = 'HP-UX'
    # Facts that must be gathered first; HPUXHardware branches on
    # architecture and distribution version.
    required_facts = set(['platform', 'distribution'])
| gpl-3.0 |
rversteegen/commandergenius | project/jni/python/src/Lib/encodings/zlib_codec.py | 533 | 3015 | """ Python 'zlib_codec' Codec - zlib compression encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com).
"""
import codecs
import zlib # this codec needs the optional zlib module !
### Codec APIs
def zlib_encode(input, errors='strict'):
    """Compress *input* with zlib.

    Returns the ``(output object, length consumed)`` pair required by the
    codec API. Only 'strict' error handling is supported, since compression
    cannot partially fail.
    """
    assert errors == 'strict'
    consumed = len(input)
    return (zlib.compress(input), consumed)
def zlib_decode(input, errors='strict'):
    """Decompress zlib-compressed *input*.

    Returns the ``(output object, length consumed)`` pair required by the
    codec API; *input* may be any object exposing the read buffer interface
    (strings, buffers, mmapped files). Only 'strict' error handling is
    supported.
    """
    assert errors == 'strict'
    decompressed = zlib.decompress(input)
    return (decompressed, len(input))
class Codec(codecs.Codec):
    # Stateless codec facade: both directions simply delegate to the
    # module-level helper functions above.
    def encode(self, input, errors='strict'):
        return zlib_encode(input, errors)
    def decode(self, input, errors='strict'):
        return zlib_decode(input, errors)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Compress data fed in arbitrary chunks via a zlib compressobj."""
    def __init__(self, errors='strict'):
        assert errors == 'strict'
        self.errors = errors
        self.compressobj = zlib.compressobj()
    def encode(self, input, final=False):
        """Compress one chunk; flush the stream tail when *final* is true."""
        chunk = self.compressobj.compress(input)
        if not final:
            return chunk
        return chunk + self.compressobj.flush()
    def reset(self):
        """Drop all state and begin a fresh compression stream."""
        self.compressobj = zlib.compressobj()
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Decompress data fed in arbitrary chunks via a zlib decompressobj."""
    def __init__(self, errors='strict'):
        assert errors == 'strict'
        self.errors = errors
        self.decompressobj = zlib.decompressobj()
    def decode(self, input, final=False):
        """Decompress one chunk; flush buffered output when *final* is true."""
        chunk = self.decompressobj.decompress(input)
        if not final:
            return chunk
        return chunk + self.decompressobj.flush()
    def reset(self):
        """Drop all state and begin a fresh decompression stream."""
        self.decompressobj = zlib.decompressobj()
class StreamWriter(Codec,codecs.StreamWriter):
    # Mix the stateless Codec into the stream-writer machinery; no extra
    # behavior is needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Mix the stateless Codec into the stream-reader machinery; no extra
    # behavior is needed.
    pass
### encodings module API
def getregentry():
    # Registry hook: the encodings package calls this to obtain the codec
    # entry when codecs.lookup('zlib') is first resolved.
    return codecs.CodecInfo(
        name='zlib',
        encode=zlib_encode,
        decode=zlib_decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| lgpl-2.1 |
hdinsight/hue | desktop/core/ext-py/Django-1.6.10/django/db/models/fields/related.py | 47 | 72699 | from operator import attrgetter
from django.db import connection, connections, router
from django.db.backends import util
from django.db.models import signals, get_model
from django.db.models.fields import (AutoField, Field, IntegerField,
PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist)
from django.db.models.related import RelatedObject, PathInfo
from django.db.models.query import QuerySet
from django.db.models.deletion import CASCADE
from django.utils.encoding import smart_text
from django.utils import six
from django.utils.deprecation import RenameMethodsBase
from django.utils.translation import ugettext_lazy as _
from django.utils.functional import curry, cached_property
from django.core import exceptions
from django import forms
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
pending_lookups = {}
def add_lazy_relation(cls, field, relation, operation):
    """
    Schedule ``operation`` to run once ``relation`` resolves to a model
    class, e.g. for::

        class MyModel(Model):
            fk = ForeignKey("AnotherModel")

    ``relation`` may be:

    * RECURSIVE_RELATIONSHIP_CONSTANT ("self"), meaning ``cls`` itself;
    * a bare model name ("AnotherModel"), looked up in ``cls``'s own app;
    * a dotted "app_label.ModelName" reference to another app;
    * an already-imported model class.

    If the target model is already loaded, ``operation(field, model, cls)``
    runs immediately; otherwise it is queued in ``pending_lookups`` and is
    executed when the class_prepared signal fires for the target model.
    """
    if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
        # Recursive relation: the target is the model being defined.
        app_label = cls._meta.app_label
        model_name = cls.__name__
    elif not isinstance(relation, six.string_types):
        # An actual model class was passed in.
        app_label = relation._meta.app_label
        model_name = relation._meta.object_name
    else:
        parts = relation.split(".")
        if len(parts) == 2:
            app_label, model_name = parts
        else:
            # Not a single "app.Model" pair: assume a model in the current app.
            app_label = cls._meta.app_label
            model_name = relation
    # get_model returns None while the related model isn't loaded yet, in
    # which case the lookup is pended until class_prepared fires for it.
    model = get_model(app_label, model_name,
                      seed_cache=False, only_installed=False)
    if model:
        operation(field, model, cls)
    else:
        pending_lookups.setdefault((app_label, model_name), []).append(
            (cls, field, operation))
def do_pending_lookups(sender, **kwargs):
    """
    Resolve every relation queued against the sending model.
    Connected to the class_prepared signal.
    """
    queued = pending_lookups.pop((sender._meta.app_label, sender.__name__), [])
    for cls, field, operation in queued:
        operation(field, sender, cls)
# Resolve queued string-based relations as each model class is prepared.
signals.class_prepared.connect(do_pending_lookups)
#HACK
class RelatedField(Field):
    # Common base for fields that reference another model. It has no column
    # of its own by default (see db_type) and wires string-based ("lazy")
    # relations once the target model class exists.
    def db_type(self, connection):
        '''By default related field will not have a column
        as it relates columns to another table'''
        return None
    def contribute_to_class(self, cls, name, virtual_only=False):
        # Called by the model metaclass when the field is added to a class.
        sup = super(RelatedField, self)
        # Store the opts for related_query_name()
        self.opts = cls._meta
        if hasattr(sup, 'contribute_to_class'):
            sup.contribute_to_class(cls, name, virtual_only=virtual_only)
        if not cls._meta.abstract and self.rel.related_name:
            # Interpolate %(class)s / %(app_label)s placeholders now that
            # the concrete model class is known.
            related_name = self.rel.related_name % {
                'class': cls.__name__.lower(),
                'app_label': cls._meta.app_label.lower()
            }
            self.rel.related_name = related_name
        other = self.rel.to
        if isinstance(other, six.string_types) or other._meta.pk is None:
            # Target named by string (or not fully prepared yet): defer the
            # wiring until the target class is ready.
            def resolve_related_class(field, model, cls):
                field.rel.to = model
                field.do_related_class(model, cls)
            add_lazy_relation(cls, self, other, resolve_related_class)
        else:
            self.do_related_class(other, cls)
    def set_attributes_from_rel(self):
        # Derive name/verbose_name from the target model when unset.
        self.name = self.name or (self.rel.to._meta.model_name + '_' + self.rel.to._meta.pk.name)
        if self.verbose_name is None:
            self.verbose_name = self.rel.to._meta.verbose_name
        self.rel.set_field_name()
    def do_related_class(self, other, cls):
        # Finish wiring once the target model class is available.
        self.set_attributes_from_rel()
        self.related = RelatedObject(other, cls, self)
        if not cls._meta.abstract:
            self.contribute_to_related_class(other, self.related)
    def related_query_name(self):
        # This method defines the name that can be used to identify this
        # related object in a table-spanning query. It uses the lower-cased
        # object_name by default, but this can be overridden with the
        # "related_name" option.
        return self.rel.related_query_name or self.rel.related_name or self.opts.model_name
class RenameRelatedObjectDescriptorMethods(RenameMethodsBase):
    # Metaclass mapping the legacy get_query_set-style method names to the
    # get_queryset spelling; using the old names raises the listed warning.
    renamed_methods = (
        ('get_query_set', 'get_queryset', PendingDeprecationWarning),
        ('get_prefetch_query_set', 'get_prefetch_queryset', PendingDeprecationWarning),
    )
class SingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class pointed to by a related field.
    # In the example "place.restaurant", the restaurant attribute is a
    # SingleRelatedObjectDescriptor instance.
    def __init__(self, related):
        self.related = related
        self.cache_name = related.get_cache_name()

    def is_cached(self, instance):
        """Return True if the related object is already cached on `instance`."""
        return hasattr(instance, self.cache_name)

    def get_queryset(self, **db_hints):
        """Return a base queryset for the related model, routed for reading."""
        db = router.db_for_read(self.related.model, **db_hints)
        return self.related.model._base_manager.using(db)

    def get_prefetch_queryset(self, instances):
        """
        Return the queryset plus bookkeeping callables used by
        prefetch_related() to attach related objects to `instances`.
        """
        rel_obj_attr = attrgetter(self.related.field.attname)
        instance_attr = lambda obj: obj._get_pk_val()
        instances_dict = dict((instance_attr(inst), inst) for inst in instances)
        query = {'%s__in' % self.related.field.name: instances}
        qs = self.get_queryset(instance=instances[0]).filter(**query)
        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        rel_obj_cache_name = self.related.field.get_cache_name()
        for rel_obj in qs:
            instance = instances_dict[rel_obj_attr(rel_obj)]
            setattr(rel_obj, rel_obj_cache_name, instance)
        return qs, rel_obj_attr, instance_attr, True, self.cache_name

    def __get__(self, instance, instance_type=None):
        """
        Fetch (and cache) the related object; raise the related model's
        DoesNotExist when there is none.
        """
        if instance is None:
            return self
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            related_pk = instance._get_pk_val()
            if related_pk is None:
                rel_obj = None
            else:
                params = {}
                for lh_field, rh_field in self.related.field.related_fields:
                    params['%s__%s' % (self.related.field.name, rh_field.name)] = getattr(instance, rh_field.attname)
                try:
                    rel_obj = self.get_queryset(instance=instance).get(**params)
                except self.related.model.DoesNotExist:
                    rel_obj = None
                else:
                    # Seed the reverse cache on the freshly fetched object.
                    setattr(rel_obj, self.related.field.get_cache_name(), instance)
            setattr(instance, self.cache_name, rel_obj)
        if rel_obj is None:
            raise self.related.model.DoesNotExist("%s has no %s." % (
                instance.__class__.__name__,
                self.related.get_accessor_name()))
        else:
            return rel_obj

    def __set__(self, instance, value):
        """
        Assign the related object, validating nullability, type and
        database routing, and keeping both sides' caches in sync.
        """
        # The similarity of the code below to the code in
        # ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
        # of small differences that would make a common base class convoluted.

        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        # (Fixed: was `self.related.field.null == False`, a fragile E712-style
        # comparison; `not` handles any falsy value correctly.)
        if value is None and not self.related.field.null:
            raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
                                (instance._meta.object_name, self.related.get_accessor_name()))
        elif value is not None and not isinstance(value, self.related.model):
            raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
                                (value, instance._meta.object_name,
                                 self.related.get_accessor_name(), self.related.opts.object_name))
        elif value is not None:
            # Route the write and make sure the two objects are allowed to
            # live in a relation across their respective databases.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)

        related_pk = tuple([getattr(instance, field.attname) for field in self.related.field.foreign_related_fields])
        if None in related_pk:
            raise ValueError('Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
                            (value, instance._meta.object_name))

        # Set the value of the related field to the value of the related object's related field
        for index, field in enumerate(self.related.field.local_related_fields):
            setattr(value, field.attname, related_pk[index])

        # Since we already know what the related object is, seed the related
        # object caches now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.cache_name, value)
        setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(six.with_metaclass(RenameRelatedObjectDescriptorMethods)):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # a single "remote" value, on the class that defines the related field.
    # In the example "choice.poll", the poll attribute is a
    # ReverseSingleRelatedObjectDescriptor instance.
    def __init__(self, field_with_rel):
        self.field = field_with_rel
        self.cache_name = self.field.get_cache_name()

    def is_cached(self, instance):
        """Return True if the related object is already cached on `instance`."""
        return hasattr(instance, self.cache_name)

    def get_queryset(self, **db_hints):
        """Return a queryset for the target model, routed for reading."""
        db = router.db_for_read(self.field.rel.to, **db_hints)
        rel_mgr = self.field.rel.to._default_manager
        # If the related manager indicates that it should be used for
        # related fields, respect that.
        if getattr(rel_mgr, 'use_for_related_fields', False):
            return rel_mgr.using(db)
        else:
            return QuerySet(self.field.rel.to).using(db)

    def get_prefetch_queryset(self, instances):
        """
        Return the queryset plus bookkeeping callables used by
        prefetch_related() to attach related objects to `instances`.
        """
        rel_obj_attr = self.field.get_foreign_related_value
        instance_attr = self.field.get_local_related_value
        instances_dict = dict((instance_attr(inst), inst) for inst in instances)
        related_field = self.field.foreign_related_fields[0]
        # FIXME: This will need to be revisited when we introduce support for
        # composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager in hidden
        # (related_name ends with a '+'). Refs #21410.
        # The check for len(...) == 1 is a special case that allows the query
        # to be join-less and smaller. Refs #21760.
        if self.field.rel.is_hidden() or len(self.field.foreign_related_fields) == 1:
            query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
        else:
            query = {'%s__in' % self.field.related_query_name(): instances}
        qs = self.get_queryset(instance=instances[0]).filter(**query)
        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        if not self.field.rel.multiple:
            rel_obj_cache_name = self.field.related.get_cache_name()
            for rel_obj in qs:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                setattr(rel_obj, rel_obj_cache_name, instance)
        return qs, rel_obj_attr, instance_attr, True, self.cache_name

    def __get__(self, instance, instance_type=None):
        """
        Fetch (and cache) the related object; raise the target model's
        DoesNotExist when the field is non-null and no object is found.
        """
        if instance is None:
            return self
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            val = self.field.get_local_related_value(instance)
            if None in val:
                rel_obj = None
            else:
                params = dict(
                    (rh_field.attname, getattr(instance, lh_field.attname))
                    for lh_field, rh_field in self.field.related_fields)
                qs = self.get_queryset(instance=instance)
                extra_filter = self.field.get_extra_descriptor_filter(instance)
                if isinstance(extra_filter, dict):
                    params.update(extra_filter)
                    qs = qs.filter(**params)
                else:
                    qs = qs.filter(extra_filter, **params)
                # Assuming the database enforces foreign keys, this won't fail.
                rel_obj = qs.get()
                if not self.field.rel.multiple:
                    setattr(rel_obj, self.field.related.get_cache_name(), instance)
            setattr(instance, self.cache_name, rel_obj)
        if rel_obj is None and not self.field.null:
            raise self.field.rel.to.DoesNotExist(
                "%s has no %s." % (self.field.model.__name__, self.field.name))
        else:
            return rel_obj

    def __set__(self, instance, value):
        """
        Assign the related object, validating nullability, type and
        database routing, and keeping both sides' caches in sync.
        """
        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        # (Fixed: was `self.field.null == False`, a fragile E712-style
        # comparison; `not` handles any falsy value correctly.)
        if value is None and not self.field.null:
            raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
                             (instance._meta.object_name, self.field.name))
        elif value is not None and not isinstance(value, self.field.rel.to):
            raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
                             (value, instance._meta.object_name,
                              self.field.name, self.field.rel.to._meta.object_name))
        elif value is not None:
            # Route the write and make sure the two objects are allowed to
            # live in a relation across their respective databases.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)

        # If we're setting the value of a OneToOneField to None, we need to clear
        # out the cache on any old related object. Otherwise, deleting the
        # previously-related object will also cause this object to be deleted,
        # which is wrong.
        if value is None:
            # Look up the previously-related object, which may still be available
            # since we've not yet cleared out the related field.
            # Use the cache directly, instead of the accessor; if we haven't
            # populated the cache, then we don't care - we're only accessing
            # the object to invalidate the accessor cache, so there's no
            # need to populate the cache just to expire it again.
            related = getattr(instance, self.cache_name, None)

            # If we've got an old related object, we need to clear out its
            # cache. This cache also might not exist if the related object
            # hasn't been accessed yet.
            if related is not None:
                setattr(related, self.field.related.get_cache_name(), None)

        # Set the value of the related field
        for lh_field, rh_field in self.field.related_fields:
            try:
                setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
            except AttributeError:
                setattr(instance, lh_field.attname, None)

        # Since we already know what the related object is, seed the related
        # object caches now, too. This avoids another db hit if you get the
        # object you just set.
        setattr(instance, self.cache_name, value)
        if value is not None and not self.field.rel.multiple:
            setattr(value, self.field.related.get_cache_name(), instance)
class ForeignRelatedObjectsDescriptor(object):
    # This class provides the functionality that makes the related-object
    # managers available as attributes on a model class, for fields that have
    # multiple "remote" values and have a ForeignKey pointed at them by
    # some other model. In the example "poll.choice_set", the choice_set
    # attribute is a ForeignRelatedObjectsDescriptor instance.
    def __init__(self, related):
        self.related = related # RelatedObject instance
    def __get__(self, instance, instance_type=None):
        # Accessed on the class: return the descriptor itself.
        # Accessed on an instance: return a manager bound to that instance.
        if instance is None:
            return self
        return self.related_manager_cls(instance)
    def __set__(self, instance, value):
        # Replace the related set with `value` (an iterable of model instances).
        manager = self.__get__(instance)
        # If the foreign key can support nulls, then completely clear the related set.
        # Otherwise, just move the named objects into the set.
        if self.related.field.null:
            manager.clear()
        manager.add(*value)
    @cached_property
    def related_manager_cls(self):
        # Dynamically create a class that subclasses the related model's default
        # manager.
        superclass = self.related.model._default_manager.__class__
        # These two are captured by closure in the RelatedManager below.
        rel_field = self.related.field
        rel_model = self.related.model
        class RelatedManager(superclass):
            # Manager bound to one instance; filters the related model by
            # the ForeignKey pointing back at that instance.
            def __init__(self, instance):
                super(RelatedManager, self).__init__()
                self.instance = instance
                self.core_filters= {'%s__exact' % rel_field.name: instance}
                self.model = rel_model
            def get_queryset(self):
                # Honour a prefetch_related() cache when present; otherwise
                # build a routed, filtered queryset.
                try:
                    return self.instance._prefetched_objects_cache[rel_field.related_query_name()]
                except (AttributeError, KeyError):
                    db = self._db or router.db_for_read(self.model, instance=self.instance)
                    qs = super(RelatedManager, self).get_queryset().using(db).filter(**self.core_filters)
                    empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
                    # If any joining value is NULL (or '' on backends that
                    # treat '' as NULL) the set is necessarily empty.
                    for field in rel_field.foreign_related_fields:
                        val = getattr(self.instance, field.attname)
                        if val is None or (val == '' and empty_strings_as_null):
                            return qs.none()
                    qs._known_related_objects = {rel_field: {self.instance.pk: self.instance}}
                    return qs
            def get_prefetch_queryset(self, instances):
                # Queryset plus bookkeeping callables used by prefetch_related().
                rel_obj_attr = rel_field.get_local_related_value
                instance_attr = rel_field.get_foreign_related_value
                instances_dict = dict((instance_attr(inst), inst) for inst in instances)
                db = self._db or router.db_for_read(self.model, instance=instances[0])
                query = {'%s__in' % rel_field.name: instances}
                qs = super(RelatedManager, self).get_queryset().using(db).filter(**query)
                # Since we just bypassed this class' get_queryset(), we must manage
                # the reverse relation manually.
                for rel_obj in qs:
                    instance = instances_dict[rel_obj_attr(rel_obj)]
                    setattr(rel_obj, rel_field.name, instance)
                cache_name = rel_field.related_query_name()
                return qs, rel_obj_attr, instance_attr, False, cache_name
            def add(self, *objs):
                # Point each object's FK at self.instance and save it.
                for obj in objs:
                    if not isinstance(obj, self.model):
                        raise TypeError("'%s' instance expected, got %r" % (self.model._meta.object_name, obj))
                    setattr(obj, rel_field.name, self.instance)
                    obj.save()
            add.alters_data = True
            def create(self, **kwargs):
                # Create a related object with its FK preset to self.instance.
                kwargs[rel_field.name] = self.instance
                db = router.db_for_write(self.model, instance=self.instance)
                return super(RelatedManager, self.db_manager(db)).create(**kwargs)
            create.alters_data = True
            def get_or_create(self, **kwargs):
                # Update kwargs with the related object that this
                # ForeignRelatedObjectsDescriptor knows about.
                kwargs[rel_field.name] = self.instance
                db = router.db_for_write(self.model, instance=self.instance)
                return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
            get_or_create.alters_data = True
            # remove() and clear() are only provided if the ForeignKey can have a value of null.
            if rel_field.null:
                def remove(self, *objs):
                    val = rel_field.get_foreign_related_value(self.instance)
                    for obj in objs:
                        # Is obj actually part of this descriptor set?
                        if rel_field.get_local_related_value(obj) == val:
                            setattr(obj, rel_field.name, None)
                            obj.save()
                        else:
                            raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, self.instance))
                remove.alters_data = True
                def clear(self):
                    # Null out the FK on every member in a single UPDATE.
                    self.update(**{rel_field.name: None})
                clear.alters_data = True
        return RelatedManager
def create_many_related_manager(superclass, rel):
    """Creates a manager that subclasses 'superclass' (which is a Manager)
    and adds behavior for many-to-many related objects."""
    class ManyRelatedManager(superclass):
        def __init__(self, model=None, query_field_name=None, instance=None, symmetrical=None,
                     source_field_name=None, target_field_name=None, reverse=False,
                     through=None, prefetch_cache_name=None):
            super(ManyRelatedManager, self).__init__()
            self.model = model
            self.query_field_name = query_field_name

            source_field = through._meta.get_field(source_field_name)
            source_related_fields = source_field.related_fields

            # Filters restricting the target queryset to rows related to
            # `instance` through the join table.
            self.core_filters = {}
            for lh_field, rh_field in source_related_fields:
                self.core_filters['%s__%s' % (query_field_name, rh_field.name)] = getattr(instance, rh_field.attname)

            self.instance = instance
            self.symmetrical = symmetrical
            self.source_field = source_field
            self.source_field_name = source_field_name
            self.target_field_name = target_field_name
            self.reverse = reverse
            self.through = through
            self.prefetch_cache_name = prefetch_cache_name
            self.related_val = source_field.get_foreign_related_value(instance)
            # Used for single column related auto created models
            self._fk_val = self.related_val[0]
            if None in self.related_val:
                raise ValueError('"%r" needs to have a value for field "%s" before '
                                 'this many-to-many relationship can be used.' %
                                 (instance, source_field_name))
            # Even if this relation is not to pk, we still require a pk value.
            # The wish is that the instance has been already saved to DB,
            # although having a pk value isn't a guarantee of that.
            if instance.pk is None:
                raise ValueError("%r instance needs to have a primary key value before "
                                 "a many-to-many relationship can be used." %
                                 instance.__class__.__name__)

        def _get_fk_val(self, obj, field_name):
            """
            Returns the correct value for this relationship's foreign key. This
            might be something else than pk value when to_field is used.
            """
            fk = self.through._meta.get_field(field_name)
            if fk.rel.field_name and fk.rel.field_name != fk.rel.to._meta.pk.attname:
                attname = fk.rel.get_related_field().get_attname()
                return fk.get_prep_lookup('exact', getattr(obj, attname))
            else:
                return obj.pk

        def get_queryset(self):
            """Related objects, honouring any prefetch cache on the instance."""
            try:
                return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
            except (AttributeError, KeyError):
                db = self._db or router.db_for_read(self.instance.__class__, instance=self.instance)
                return super(ManyRelatedManager, self).get_queryset().using(db)._next_is_sticky().filter(**self.core_filters)

        def get_prefetch_queryset(self, instances):
            """Queryset plus bookkeeping callables for prefetch_related()."""
            instance = instances[0]
            db = self._db or router.db_for_read(instance.__class__, instance=instance)
            query = {'%s__in' % self.query_field_name: instances}
            qs = super(ManyRelatedManager, self).get_queryset().using(db)._next_is_sticky().filter(**query)

            # M2M: need to annotate the query in order to get the primary model
            # that the secondary model was actually related to. We know that
            # there will already be a join on the join table, so we can just add
            # the select.

            # For non-autocreated 'through' models, can't assume we are
            # dealing with PK values.
            fk = self.through._meta.get_field(self.source_field_name)
            join_table = self.through._meta.db_table
            connection = connections[db]
            qn = connection.ops.quote_name
            qs = qs.extra(select=dict(
                ('_prefetch_related_val_%s' % f.attname,
                '%s.%s' % (qn(join_table), qn(f.column))) for f in fk.local_related_fields))
            return (qs,
                    lambda result: tuple([getattr(result, '_prefetch_related_val_%s' % f.attname) for f in fk.local_related_fields]),
                    lambda inst: tuple([getattr(inst, f.attname) for f in fk.foreign_related_fields]),
                    False,
                    self.prefetch_cache_name)

        # If the ManyToMany relation has an intermediary model,
        # the add and remove methods do not exist.
        if rel.through._meta.auto_created:
            def add(self, *objs):
                self._add_items(self.source_field_name, self.target_field_name, *objs)

                # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
                if self.symmetrical:
                    self._add_items(self.target_field_name, self.source_field_name, *objs)
            add.alters_data = True

            def remove(self, *objs):
                self._remove_items(self.source_field_name, self.target_field_name, *objs)

                # If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
                if self.symmetrical:
                    self._remove_items(self.target_field_name, self.source_field_name, *objs)
            remove.alters_data = True

            def clear(self):
                self._clear_items(self.source_field_name)

                # If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
                if self.symmetrical:
                    self._clear_items(self.target_field_name)
            clear.alters_data = True

        def create(self, **kwargs):
            """Create a target object and link it through the join table."""
            # This check needs to be done here, since we can't later remove this
            # from the method lookup table, as we do with add and remove.
            if not self.through._meta.auto_created:
                opts = self.through._meta
                raise AttributeError("Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
            self.add(new_obj)
            return new_obj
        create.alters_data = True

        def get_or_create(self, **kwargs):
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            obj, created = \
                super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
            # We only need to add() if created because if we got an object back
            # from get() then the relationship already exists.
            if created:
                self.add(obj)
            return obj, created
        get_or_create.alters_data = True

        def _add_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK fieldname in join table for the source object
            # target_field_name: the PK fieldname in join table for the target object
            # *objs - objects to add. Either object instances, or primary keys of object instances.

            # If there aren't any objects, there is nothing to do.
            from django.db.models import Model
            if objs:
                new_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        if not router.allow_relation(obj, self.instance):
                            raise ValueError('Cannot add "%r": instance is on database "%s", value is on database "%s"' %
                                               (obj, self.instance._state.db, obj._state.db))
                        fk_val = self._get_fk_val(obj, target_field_name)
                        if fk_val is None:
                            raise ValueError('Cannot add "%r": the value for field "%s" is None' %
                                             (obj, target_field_name))
                        # Reuse the value computed above instead of calling
                        # _get_fk_val() a second time (was duplicated).
                        new_ids.add(fk_val)
                    elif isinstance(obj, Model):
                        raise TypeError("'%s' instance expected, got %r" % (self.model._meta.object_name, obj))
                    else:
                        new_ids.add(obj)
                db = router.db_for_write(self.through, instance=self.instance)
                vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True)
                vals = vals.filter(**{
                    source_field_name: self._fk_val,
                    '%s__in' % target_field_name: new_ids,
                })
                new_ids = new_ids - set(vals)

                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are inserting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=self.through, action='pre_add',
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=new_ids, using=db)
                # Add the ones that aren't there already
                self.through._default_manager.using(db).bulk_create([
                    self.through(**{
                        '%s_id' % source_field_name: self._fk_val,
                        '%s_id' % target_field_name: obj_id,
                    })
                    for obj_id in new_ids
                ])

                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are inserting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=self.through, action='post_add',
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=new_ids, using=db)

        def _remove_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK colname in join table for the source object
            # target_field_name: the PK colname in join table for the target object
            # *objs - objects to remove

            # If there aren't any objects, there is nothing to do.
            if objs:
                # Check that all the objects are of the right type
                old_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        old_ids.add(self._get_fk_val(obj, target_field_name))
                    else:
                        old_ids.add(obj)
                # Work out what DB we're operating on
                db = router.db_for_write(self.through, instance=self.instance)
                # Send a signal to the other end if need be.
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are deleting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=self.through, action="pre_remove",
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=old_ids, using=db)
                # Remove the specified objects from the join table
                self.through._default_manager.using(db).filter(**{
                    source_field_name: self._fk_val,
                    '%s__in' % target_field_name: old_ids
                }).delete()
                if self.reverse or source_field_name == self.source_field_name:
                    # Don't send the signal when we are deleting the
                    # duplicate data row for symmetrical reverse entries.
                    signals.m2m_changed.send(sender=self.through, action="post_remove",
                        instance=self.instance, reverse=self.reverse,
                        model=self.model, pk_set=old_ids, using=db)

        def _clear_items(self, source_field_name):
            db = router.db_for_write(self.through, instance=self.instance)
            # source_field_name: the PK colname in join table for the source object
            if self.reverse or source_field_name == self.source_field_name:
                # Don't send the signal when we are clearing the
                # duplicate data rows for symmetrical reverse entries.
                signals.m2m_changed.send(sender=self.through, action="pre_clear",
                    instance=self.instance, reverse=self.reverse,
                    model=self.model, pk_set=None, using=db)
            self.through._default_manager.using(db).filter(**{
                source_field_name: self.related_val
            }).delete()
            if self.reverse or source_field_name == self.source_field_name:
                # Don't send the signal when we are clearing the
                # duplicate data rows for symmetrical reverse entries.
                signals.m2m_changed.send(sender=self.through, action="post_clear",
                    instance=self.instance, reverse=self.reverse,
                    model=self.model, pk_set=None, using=db)

    return ManyRelatedManager
class ManyRelatedObjectsDescriptor(object):
    """
    Accessor for the reverse side of a ManyToManyField: the attribute on
    the model that is pointed *at* by a ManyToManyField defined elsewhere.
    In the example "publication.article_set", the article_set attribute is
    a ManyRelatedObjectsDescriptor instance.
    """
    def __init__(self, related):
        # `related` is a RelatedObject instance.
        self.related = related

    @cached_property
    def related_manager_cls(self):
        # Build (once) a manager class that subclasses the related model's
        # default manager.
        return create_many_related_manager(
            self.related.model._default_manager.__class__,
            self.related.field.rel,
        )

    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        field = self.related.field
        # Reverse side: source/target field names are swapped relative to
        # the forward descriptor, and symmetry never applies.
        return self.related_manager_cls(
            model=self.related.model,
            query_field_name=field.name,
            prefetch_cache_name=field.related_query_name(),
            instance=instance,
            symmetrical=False,
            source_field_name=field.m2m_reverse_field_name(),
            target_field_name=field.m2m_field_name(),
            reverse=True,
            through=field.rel.through,
        )

    def __set__(self, instance, value):
        through = self.related.field.rel.through
        if not through._meta.auto_created:
            opts = through._meta
            raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
        manager = self.__get__(instance)
        manager.clear()
        manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
    """
    Accessor for the forward side of a ManyToManyField: the attribute on
    the model that actually defines the field. In the example
    "article.publications", the publications attribute is a
    ReverseManyRelatedObjectsDescriptor instance.
    """
    def __init__(self, m2m_field):
        self.field = m2m_field

    @property
    def through(self):
        # Exposed so the intermediary model is easy to reach
        # (Book.authors.through) for inlines etc.; a property so the fully
        # resolved value is always returned.
        return self.field.rel.through

    @cached_property
    def related_manager_cls(self):
        # Build (once) a manager class that subclasses the target model's
        # default manager.
        return create_many_related_manager(
            self.field.rel.to._default_manager.__class__,
            self.field.rel,
        )

    def __get__(self, instance, instance_type=None):
        if instance is None:
            return self
        field = self.field
        return self.related_manager_cls(
            model=field.rel.to,
            query_field_name=field.related_query_name(),
            prefetch_cache_name=field.name,
            instance=instance,
            symmetrical=field.rel.symmetrical,
            source_field_name=field.m2m_field_name(),
            target_field_name=field.m2m_reverse_field_name(),
            reverse=False,
            through=field.rel.through,
        )

    def __set__(self, instance, value):
        if not self.field.rel.through._meta.auto_created:
            opts = self.field.rel.through._meta
            raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
        manager = self.__get__(instance)
        # clear() can change expected output of 'value' queryset, we force evaluation
        # of queryset before clear; ticket #19816
        value = tuple(value)
        manager.clear()
        manager.add(*value)
class ForeignObjectRel(object):
    """
    Holds the relation half of a ForeignObject field: the target model and
    the options describing how the field relates to it.
    """
    def __init__(self, field, to, related_name=None, limit_choices_to=None,
                 parent_link=False, on_delete=None, related_query_name=None):
        try:
            to._meta
        except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, six.string_types), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
        self.field = field
        self.to = to
        self.related_name = related_name
        self.related_query_name = related_query_name
        if limit_choices_to is None:
            limit_choices_to = {}
        self.limit_choices_to = limit_choices_to
        self.multiple = True
        self.parent_link = parent_link
        self.on_delete = on_delete

    def is_hidden(self):
        """Should the related object be hidden?"""
        # A trailing '+' in related_name marks the reverse accessor hidden.
        return self.related_name and self.related_name.endswith('+')

    def get_joining_columns(self):
        # Seen from the relation side, the join columns are the field's
        # joining columns reversed.
        return self.field.get_reverse_joining_columns()

    def get_extra_restriction(self, where_class, alias, related_alias):
        # Delegate with the aliases swapped, since we are the reverse side.
        return self.field.get_extra_restriction(where_class, related_alias, alias)

    def set_field_name(self):
        """
        Sets the related field's name, this is not available until later stages
        of app loading, so set_field_name is called from
        set_attributes_from_rel()
        """
        # By default foreign object doesn't relate to any remote field (for
        # example custom multicolumn joins currently have no remote field).
        self.field_name = None
class ManyToOneRel(ForeignObjectRel):
    """Relation options for ForeignKey: a ForeignObjectRel with a concrete
    remote field name."""
    def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
                 parent_link=False, on_delete=None, related_query_name=None):
        super(ManyToOneRel, self).__init__(
            field, to, related_name=related_name, limit_choices_to=limit_choices_to,
            parent_link=parent_link, on_delete=on_delete,
            related_query_name=related_query_name)
        self.field_name = field_name

    def get_related_field(self):
        """
        Returns the Field in the 'to' object to which this relationship is
        tied.
        """
        field, model, direct, m2m = self.to._meta.get_field_by_name(self.field_name)
        if not direct:
            raise FieldDoesNotExist("No related field named '%s'" %
                    self.field_name)
        return field

    def set_field_name(self):
        # Late-bound: default to the target model's pk name once the app
        # cache can resolve it.
        if not self.field_name:
            self.field_name = self.to._meta.pk.name
class OneToOneRel(ManyToOneRel):
    """Relation options for OneToOneField: a ManyToOneRel that can only
    ever yield a single related object."""
    def __init__(self, field, to, field_name, related_name=None, limit_choices_to=None,
                 parent_link=False, on_delete=None, related_query_name=None):
        super(OneToOneRel, self).__init__(
            field, to, field_name,
            related_name=related_name,
            limit_choices_to=limit_choices_to,
            parent_link=parent_link,
            on_delete=on_delete,
            related_query_name=related_query_name,
        )
        # Unlike a plain ForeignKey, at most one related object exists.
        self.multiple = False
class ManyToManyRel(object):
    """Relation options for ManyToManyField."""
    def __init__(self, to, related_name=None, limit_choices_to=None,
                 symmetrical=True, through=None, db_constraint=True, related_query_name=None):
        # An explicit through model already defines its own constraints.
        if through and not db_constraint:
            raise ValueError("Can't supply a through model and db_constraint=False")
        self.to = to
        self.related_name = related_name
        self.related_query_name = related_query_name
        self.limit_choices_to = {} if limit_choices_to is None else limit_choices_to
        self.symmetrical = symmetrical
        self.multiple = True
        self.through = through
        self.db_constraint = db_constraint

    def is_hidden(self):
        """Should the related object be hidden?"""
        # A trailing '+' in related_name marks the reverse accessor hidden.
        return self.related_name and self.related_name.endswith('+')

    def get_related_field(self):
        """
        Returns the field in the 'to' object to which this relationship is tied
        (this is always the primary key on the target model). Provided for
        symmetry with ManyToOneRel.
        """
        return self.to._meta.pk
class ForeignObject(RelatedField):
requires_unique_target = True
generate_reverse_relation = True
def __init__(self, to, from_fields, to_fields, **kwargs):
self.from_fields = from_fields
self.to_fields = to_fields
if 'rel' not in kwargs:
kwargs['rel'] = ForeignObjectRel(
self, to,
related_name=kwargs.pop('related_name', None),
related_query_name=kwargs.pop('related_query_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
super(ForeignObject, self).__init__(**kwargs)
def resolve_related_fields(self):
if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields):
raise ValueError('Foreign Object from and to fields must be the same non-zero length')
related_fields = []
for index in range(len(self.from_fields)):
from_field_name = self.from_fields[index]
to_field_name = self.to_fields[index]
from_field = (self if from_field_name == 'self'
else self.opts.get_field_by_name(from_field_name)[0])
to_field = (self.rel.to._meta.pk if to_field_name is None
else self.rel.to._meta.get_field_by_name(to_field_name)[0])
related_fields.append((from_field, to_field))
return related_fields
@property
def related_fields(self):
if not hasattr(self, '_related_fields'):
self._related_fields = self.resolve_related_fields()
return self._related_fields
@property
def reverse_related_fields(self):
return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
@property
def local_related_fields(self):
return tuple([lhs_field for lhs_field, rhs_field in self.related_fields])
@property
def foreign_related_fields(self):
return tuple([rhs_field for lhs_field, rhs_field in self.related_fields])
    def get_local_related_value(self, instance):
        # Values of this relation's local-side fields read from `instance`.
        return self.get_instance_value_for_fields(instance, self.local_related_fields)
    def get_foreign_related_value(self, instance):
        # Values of this relation's foreign-side fields read from `instance`.
        return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
@staticmethod
def get_instance_value_for_fields(instance, fields):
ret = []
for field in fields:
# Gotcha: in some cases (like fixture loading) a model can have
# different values in parent_ptr_id and parent's id. So, use
# instance.pk (that is, parent_ptr_id) when asked for instance.id.
if field.primary_key:
ret.append(instance.pk)
else:
ret.append(getattr(instance, field.attname))
return tuple(ret)
def get_attname_column(self):
    # A ForeignObject keeps its attname but reports no concrete column
    # of its own (the join columns live on the underlying fields).
    attname, column = super(ForeignObject, self).get_attname_column()
    return attname, None
def get_joining_columns(self, reverse_join=False):
    """Column-name pairs joining the two sides of the relation."""
    pairs = self.reverse_related_fields if reverse_join else self.related_fields
    return tuple((lhs.column, rhs.column) for lhs, rhs in pairs)
def get_reverse_joining_columns(self):
    # Same column pairs, seen from the related model's side.
    return self.get_joining_columns(reverse_join=True)
def get_extra_descriptor_filter(self, instance):
    """
    Returns an extra filter condition for related object fetching when
    user does 'instance.fieldname', that is the extra filter is used in
    the descriptor of the field.

    The filter should be either a dict usable in .filter(**kwargs) call or
    a Q-object. The condition will be ANDed together with the relation's
    joining columns.

    A parallel method is get_extra_restriction() which is used in
    JOIN and subquery conditions.
    """
    # Base implementation: no extra filtering.
    return {}
def get_extra_restriction(self, where_class, alias, related_alias):
    """
    Returns a pair condition used for joining and subquery pushdown. The
    condition is something that responds to as_sql(qn, connection) method.

    Note that currently referring both the 'alias' and 'related_alias'
    will not work in some conditions, like subquery pushdown.

    A parallel method is get_extra_descriptor_filter() which is used in
    instance.fieldname related object fetching.
    """
    # Base implementation: no extra JOIN restriction.
    return None
def get_path_info(self):
    """
    Get path from this field to the related model.
    """
    opts = self.rel.to._meta
    from_opts = self.model._meta
    # Single direct join step; trailing booleans are presumably
    # (m2m=False, direct=True) -- confirm against PathInfo's definition.
    return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)]
def get_reverse_path_info(self):
    """
    Get path from the related model to this field's model.
    """
    opts = self.model._meta
    from_opts = self.rel.to._meta
    # Reverse join targets this model's pk; the step is multi-valued
    # unless the relation is unique.
    pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
    return pathinfos
def get_lookup_constraint(self, constraint_class, alias, targets, sources, lookup_type,
                          raw_value):
    """Build a WHERE-tree node for a lookup against this relation.

    ``targets``/``sources`` are the parallel remote/local field lists,
    ``raw_value`` the user-supplied value (possibly a queryset).  Raises
    TypeError for lookups the relation cannot express.
    """
    from django.db.models.sql.where import SubqueryConstraint, Constraint, AND, OR
    root_constraint = constraint_class()
    assert len(targets) == len(sources)

    def get_normalized_value(value):
        # Normalize the value into a tuple of raw column values,
        # one per source field.
        from django.db.models import Model
        if isinstance(value, Model):
            value_list = []
            for source in sources:
                # Account for one-to-one relations when sent a different model
                while not isinstance(value, source.model) and source.rel:
                    source = source.rel.to._meta.get_field(source.rel.field_name)
                value_list.append(getattr(value, source.attname))
            return tuple(value_list)
        elif not isinstance(value, tuple):
            return (value,)
        return value

    is_multicolumn = len(self.related_fields) > 1
    if (hasattr(raw_value, '_as_sql') or
            hasattr(raw_value, 'get_compiler')):
        # The value is itself a query: push it down as a subquery.
        root_constraint.add(SubqueryConstraint(alias, [target.column for target in targets],
                                               [source.name for source in sources], raw_value),
                            AND)
    elif lookup_type == 'isnull':
        # NULL test only needs the first target column.
        root_constraint.add(
            (Constraint(alias, targets[0].column, targets[0]), lookup_type, raw_value), AND)
    elif (lookup_type == 'exact' or (lookup_type in ['gt', 'lt', 'gte', 'lte']
                                     and not is_multicolumn)):
        # One comparison per column pair, ANDed together.
        value = get_normalized_value(raw_value)
        for index, source in enumerate(sources):
            root_constraint.add(
                (Constraint(alias, targets[index].column, sources[index]), lookup_type,
                 value[index]), AND)
    elif lookup_type in ['range', 'in'] and not is_multicolumn:
        # Single-column range/in: flatten each normalized value.
        values = [get_normalized_value(value) for value in raw_value]
        value = [val[0] for val in values]
        root_constraint.add(
            (Constraint(alias, targets[0].column, sources[0]), lookup_type, value), AND)
    elif lookup_type == 'in':
        # Multi-column 'in': OR together one exact-match group per value.
        values = [get_normalized_value(value) for value in raw_value]
        for value in values:
            value_constraint = constraint_class()
            for index, target in enumerate(targets):
                value_constraint.add(
                    (Constraint(alias, target.column, sources[index]), 'exact', value[index]),
                    AND)
            root_constraint.add(value_constraint, OR)
    else:
        raise TypeError('Related Field got invalid lookup: %s' % lookup_type)
    return root_constraint
@property
def attnames(self):
    """Attribute names of the local side of the relation."""
    return tuple(field.attname for field in self.local_related_fields)
def get_defaults(self):
    """Default value of each local field, as a tuple."""
    return tuple(field.get_default() for field in self.local_related_fields)
def contribute_to_class(self, cls, name, virtual_only=False):
    # Standard field setup, plus the forward descriptor that resolves
    # ``instance.<name>`` to the related object.
    super(ForeignObject, self).contribute_to_class(cls, name, virtual_only=virtual_only)
    setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
def contribute_to_related_class(self, cls, related):
    # Internal FK's - i.e., those with a related name ending with '+' -
    # and swapped models don't get a related descriptor.
    if not self.rel.is_hidden() and not related.model._meta.swapped:
        setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
        # Register limit_choices_to so admin/forms can validate against it.
        if self.rel.limit_choices_to:
            cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to)
class ForeignKey(ForeignObject):
    """Single-column many-to-one relation: a ForeignObject joining on
    exactly one (local, remote) field pair, where the local side is the
    FK field itself and the remote side defaults to the target's pk.
    """
    empty_strings_allowed = False
    default_error_messages = {
        'invalid': _('%(model)s instance with pk %(pk)r does not exist.')
    }
    description = _("Foreign Key (type determined by related field)")

    def __init__(self, to, to_field=None, rel_class=ManyToOneRel,
                 db_constraint=True, **kwargs):
        """``to`` may be a model class, a model name, or the recursive
        relationship constant; ``to_field`` defaults to the target pk.
        """
        try:
            # Attribute probe: raises AttributeError for lazy (string)
            # references, which are validated in the except branch.
            to_name = to._meta.object_name.lower()
        except AttributeError:  # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
        else:
            assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
            # For backwards compatibility purposes, we need to *try* and set
            # the to_field during FK construction. It won't be guaranteed to
            # be correct until contribute_to_class is called. Refs #12190.
            to_field = to_field or (to._meta.pk and to._meta.pk.name)
        if 'db_index' not in kwargs:
            kwargs['db_index'] = True
        self.db_constraint = db_constraint
        kwargs['rel'] = rel_class(
            self, to, to_field,
            related_name=kwargs.pop('related_name', None),
            related_query_name=kwargs.pop('related_query_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            parent_link=kwargs.pop('parent_link', False),
            on_delete=kwargs.pop('on_delete', CASCADE),
        )
        # 'self' = this field is the local join column.
        super(ForeignKey, self).__init__(to, ['self'], [to_field], **kwargs)

    @property
    def related_field(self):
        # The single remote field this FK points at.
        return self.foreign_related_fields[0]

    def get_reverse_path_info(self):
        """
        Get path from the related model to this field's model.
        """
        opts = self.model._meta
        from_opts = self.rel.to._meta
        pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.rel, not self.unique, False)]
        return pathinfos

    def validate(self, value, model_instance):
        """Check that ``value`` refers to an existing row of the target
        model (respecting limit_choices_to); parent links are exempt.
        """
        if self.rel.parent_link:
            return
        super(ForeignKey, self).validate(value, model_instance)
        if value is None:
            return
        using = router.db_for_read(model_instance.__class__, instance=model_instance)
        qs = self.rel.to._default_manager.using(using).filter(
            **{self.rel.field_name: value}
        )
        qs = qs.complex_filter(self.rel.limit_choices_to)
        if not qs.exists():
            raise exceptions.ValidationError(
                self.error_messages['invalid'],
                code='invalid',
                params={'model': self.rel.to._meta.verbose_name, 'pk': value},
            )

    def get_attname(self):
        # The raw column attribute is exposed as "<name>_id".
        return '%s_id' % self.name

    def get_attname_column(self):
        attname = self.get_attname()
        column = self.db_column or attname
        return attname, column

    def get_validator_unique_lookup_type(self):
        # Lookup string used by unique validation, e.g. "author__pk__exact".
        return '%s__%s__exact' % (self.name, self.related_field.name)

    def get_default(self):
        "Here we check if the default value is an object and return the to_field if so."
        field_default = super(ForeignKey, self).get_default()
        if isinstance(field_default, self.rel.to):
            return getattr(field_default, self.related_field.attname)
        return field_default

    def get_db_prep_save(self, value, connection):
        # Empty values are stored as NULL.
        # BUGFIX: was ``value == None`` (PEP 8 E711); ``is None`` avoids
        # surprises from objects with permissive __eq__.
        if value == '' or value is None:
            return None
        else:
            # Delegate to the remote field so the value is prepared with
            # the correct column type.
            return self.related_field.get_db_prep_save(value,
                                                       connection=connection)

    def value_to_string(self, obj):
        if not obj:
            # In required many-to-one fields with only one available choice,
            # select that one available choice. Note: For SelectFields
            # we have to check that the length of choices is *2*, not 1,
            # because SelectFields always have an initial "blank" value.
            if not self.blank and self.choices:
                choice_list = self.get_choices_default()
                if len(choice_list) == 2:
                    return smart_text(choice_list[1][0])
        return super(ForeignKey, self).value_to_string(obj)

    def contribute_to_related_class(self, cls, related):
        super(ForeignKey, self).contribute_to_related_class(cls, related)
        # Late-bind the target field name for lazy references (the pk
        # is only known once the related class is fully prepared).
        if self.rel.field_name is None:
            self.rel.field_name = cls._meta.pk.name

    def formfield(self, **kwargs):
        """ModelChoiceField over the target queryset; fails loudly if the
        lazily referenced model has not been loaded yet.
        """
        db = kwargs.pop('using', None)
        if isinstance(self.rel.to, six.string_types):
            raise ValueError("Cannot create form field for %r yet, because "
                             "its related model %r has not been loaded yet" %
                             (self.name, self.rel.to))
        defaults = {
            'form_class': forms.ModelChoiceField,
            'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to),
            'to_field_name': self.rel.field_name,
        }
        defaults.update(kwargs)
        return super(ForeignKey, self).formfield(**defaults)

    def db_type(self, connection):
        # The database column type of a ForeignKey is the column type
        # of the field to which it points. An exception is if the ForeignKey
        # points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
        # in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields however, the only
        # thing we can do is making AutoField an IntegerField.
        rel_field = self.related_field
        if (isinstance(rel_field, AutoField) or
                (not connection.features.related_fields_match_type and
                 isinstance(rel_field, (PositiveIntegerField,
                                        PositiveSmallIntegerField)))):
            return IntegerField().db_type(connection=connection)
        return rel_field.db_type(connection=connection)
class OneToOneField(ForeignKey):
    """
    A ForeignKey that always enforces a "unique" constraint, so the
    reverse side of the relation resolves to a single object (there can
    only ever be one) rather than to a list.
    """
    description = _("One-to-one relationship")

    def __init__(self, to, to_field=None, **kwargs):
        # Force the unique constraint that makes this relation one-to-one.
        kwargs['unique'] = True
        super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)

    def contribute_to_related_class(self, cls, related):
        # Install the single-object reverse descriptor on the target model.
        descriptor = SingleRelatedObjectDescriptor(related)
        setattr(cls, related.get_accessor_name(), descriptor)

    def formfield(self, **kwargs):
        # Parent links are managed by model inheritance, not by forms.
        if self.rel.parent_link:
            return None
        return super(OneToOneField, self).formfield(**kwargs)

    def save_form_data(self, instance, data):
        # Model instances go through the descriptor (self.name); raw key
        # values are assigned to the underlying attribute (self.attname).
        target = self.name if isinstance(data, self.rel.to) else self.attname
        setattr(instance, target, data)
def create_many_to_many_intermediary_model(field, klass):
    """Build the auto-generated "through" model for ``field``, a
    ManyToManyField declared on model ``klass``.

    Returns a new Model subclass with two ForeignKeys, one to each side
    of the relation; its ``managed`` flag follows the related models.
    """
    from django.db import models
    managed = True
    if isinstance(field.rel.to, six.string_types) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
        # Lazy string reference to another model: resolve ``managed`` once
        # the target class is actually loaded.
        to_model = field.rel.to
        to = to_model.split('.')[-1]

        def set_managed(field, model, cls):
            field.rel.through._meta.managed = model._meta.managed or cls._meta.managed
        add_lazy_relation(klass, field, to_model, set_managed)
    elif isinstance(field.rel.to, six.string_types):
        # RECURSIVE_RELATIONSHIP_CONSTANT: a relation back to klass itself.
        to = klass._meta.object_name
        to_model = klass
        managed = klass._meta.managed
    else:
        to = field.rel.to._meta.object_name
        to_model = field.rel.to
        managed = klass._meta.managed or to_model._meta.managed
    name = '%s_%s' % (klass._meta.object_name, field.name)
    if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
        # Self-referential m2m: disambiguate the two FK attribute names.
        from_ = 'from_%s' % to.lower()
        to = 'to_%s' % to.lower()
    else:
        from_ = klass._meta.model_name
        to = to.lower()
    meta = type('Meta', (object,), {
        'db_table': field._get_m2m_db_table(klass._meta),
        'managed': managed,
        'auto_created': klass,
        'app_label': klass._meta.app_label,
        'db_tablespace': klass._meta.db_tablespace,
        'unique_together': (from_, to),
        'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
        'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
    })
    # Construct and return the new class.
    return type(str(name), (models.Model,), {
        'Meta': meta,
        '__module__': klass.__module__,
        from_: models.ForeignKey(klass, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint),
        to: models.ForeignKey(to_model, related_name='%s+' % name, db_tablespace=field.db_tablespace, db_constraint=field.rel.db_constraint)
    })
class ManyToManyField(RelatedField):
    """Many-to-many relation, implemented through an intermediary model
    that is either supplied by the user (``through=``) or auto-created.
    """
    description = _("Many-to-many relationship")

    def __init__(self, to, db_constraint=True, **kwargs):
        """``to`` may be a model class, a model name, or the recursive
        relationship constant; ``db_table``/``through`` are mutually
        exclusive ways to control the intermediary table.
        """
        try:
            assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
        except AttributeError:  # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
            assert isinstance(to, six.string_types), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
            # Python 2.6 and earlier require dictionary keys to be of str type,
            # not unicode and class names must be ASCII (in Python 2.x), so we
            # forcibly coerce it here (breaks early if there's a problem).
            to = str(to)
        kwargs['verbose_name'] = kwargs.get('verbose_name', None)
        kwargs['rel'] = ManyToManyRel(to,
            related_name=kwargs.pop('related_name', None),
            related_query_name=kwargs.pop('related_query_name', None),
            limit_choices_to=kwargs.pop('limit_choices_to', None),
            # Self-referential relations are symmetrical by default.
            symmetrical=kwargs.pop('symmetrical', to == RECURSIVE_RELATIONSHIP_CONSTANT),
            through=kwargs.pop('through', None),
            db_constraint=db_constraint,
        )
        self.db_table = kwargs.pop('db_table', None)
        if kwargs['rel'].through is not None:
            assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
        super(ManyToManyField, self).__init__(**kwargs)

    def _get_path_info(self, direct=False):
        """
        Called by both direct and indirect m2m traversal.
        """
        pathinfos = []
        int_model = self.rel.through
        # The two FK link fields on the intermediary model.
        linkfield1 = int_model._meta.get_field_by_name(self.m2m_field_name())[0]
        linkfield2 = int_model._meta.get_field_by_name(self.m2m_reverse_field_name())[0]
        if direct:
            join1infos = linkfield1.get_reverse_path_info()
            join2infos = linkfield2.get_path_info()
        else:
            join1infos = linkfield2.get_reverse_path_info()
            join2infos = linkfield1.get_path_info()
        pathinfos.extend(join1infos)
        pathinfos.extend(join2infos)
        return pathinfos

    def get_path_info(self):
        return self._get_path_info(direct=True)

    def get_reverse_path_info(self):
        return self._get_path_info(direct=False)

    def get_choices_default(self):
        # No blank choice: a m2m selection is a set, not a single pick.
        return Field.get_choices(self, include_blank=False)

    def _get_m2m_db_table(self, opts):
        "Function that can be curried to provide the m2m table name for this relation"
        if self.rel.through is not None:
            return self.rel.through._meta.db_table
        elif self.db_table:
            return self.db_table
        else:
            # Auto-generated name, truncated to the backend's limit.
            return util.truncate_name('%s_%s' % (opts.db_table, self.name),
                                      connection.ops.max_name_length())

    def _get_m2m_attr(self, related, attr):
        "Function that can be curried to provide the source accessor or DB column name for the m2m table"
        cache_attr = '_m2m_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        # First FK on the through model pointing back at the source model.
        for f in self.rel.through._meta.fields:
            if hasattr(f, 'rel') and f.rel and f.rel.to == related.model:
                setattr(self, cache_attr, getattr(f, attr))
                return getattr(self, cache_attr)

    def _get_m2m_reverse_attr(self, related, attr):
        "Function that can be curried to provide the related accessor or DB column name for the m2m table"
        cache_attr = '_m2m_reverse_%s_cache' % attr
        if hasattr(self, cache_attr):
            return getattr(self, cache_attr)
        found = False
        for f in self.rel.through._meta.fields:
            if hasattr(f, 'rel') and f.rel and f.rel.to == related.parent_model:
                if related.model == related.parent_model:
                    # If this is an m2m-intermediate to self,
                    # the first foreign key you find will be
                    # the source column. Keep searching for
                    # the second foreign key.
                    if found:
                        setattr(self, cache_attr, getattr(f, attr))
                        break
                    else:
                        found = True
                else:
                    setattr(self, cache_attr, getattr(f, attr))
                    break
        return getattr(self, cache_attr)

    def value_to_string(self, obj):
        """Serialize the relation as the list of related primary keys."""
        data = ''
        if obj:
            qs = getattr(obj, self.name).all()
            data = [instance._get_pk_val() for instance in qs]
        else:
            # In required many-to-many fields with only one available choice,
            # select that one available choice.
            if not self.blank:
                choices_list = self.get_choices_default()
                if len(choices_list) == 1:
                    data = [choices_list[0][0]]
        return smart_text(data)

    def contribute_to_class(self, cls, name):
        # To support multiple relations to self, it's useful to have a non-None
        # related name on symmetrical relations for internal reasons. The
        # concept doesn't make a lot of sense externally ("you want me to
        # specify *what* on my non-reversible relation?!"), so we set it up
        # automatically. The funky name reduces the chance of an accidental
        # clash.
        if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
            self.rel.related_name = "%s_rel_+" % name
        super(ManyToManyField, self).contribute_to_class(cls, name)
        # The intermediate m2m model is not auto created if:
        #  1) There is a manually specified intermediate, or
        #  2) The class owning the m2m field is abstract.
        #  3) The class owning the m2m field has been swapped out.
        if not self.rel.through and not cls._meta.abstract and not cls._meta.swapped:
            self.rel.through = create_many_to_many_intermediary_model(self, cls)
        # Add the descriptor for the m2m relation
        setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
        # Set up the accessor for the m2m table name for the relation
        self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
        # Populate some necessary rel arguments so that cross-app relations
        # work correctly.
        if isinstance(self.rel.through, six.string_types):
            def resolve_through_model(field, model, cls):
                field.rel.through = model
            add_lazy_relation(cls, self, self.rel.through, resolve_through_model)

    def contribute_to_related_class(self, cls, related):
        # Internal M2Ms (i.e., those with a related name ending with '+')
        # and swapped models don't get a related descriptor.
        if not self.rel.is_hidden() and not related.model._meta.swapped:
            setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
        # Set up the accessors for the column names on the m2m table
        self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
        self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')
        self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
        self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')
        get_m2m_rel = curry(self._get_m2m_attr, related, 'rel')
        self.m2m_target_field_name = lambda: get_m2m_rel().field_name
        get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel')
        self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name

    def set_attributes_from_rel(self):
        # Unlike FK, a m2m field does not inherit attributes from the
        # remote field; intentionally a no-op.
        pass

    def value_from_object(self, obj):
        "Returns the value of this field in the given model instance."
        return getattr(obj, self.attname).all()

    def save_form_data(self, instance, data):
        setattr(instance, self.attname, data)

    def formfield(self, **kwargs):
        """ModelMultipleChoiceField over the target queryset."""
        db = kwargs.pop('using', None)
        defaults = {
            'form_class': forms.ModelMultipleChoiceField,
            'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to)
        }
        defaults.update(kwargs)
        # If initial is passed in, it's a list of related objects, but the
        # MultipleChoiceField takes a list of IDs.
        if defaults.get('initial') is not None:
            initial = defaults['initial']
            if callable(initial):
                initial = initial()
            defaults['initial'] = [i._get_pk_val() for i in initial]
        return super(ManyToManyField, self).formfield(**defaults)
| apache-2.0 |
mahak/keystone | keystone/tests/unit/test_hacking_checks.py | 2 | 3967 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import textwrap
import pycodestyle
from keystone.tests.hacking import checks
from keystone.tests import unit
from keystone.tests.unit.ksfixtures import hacking as hacking_fixtures
class BaseStyleCheck(unit.BaseTestCase):
    """Base class wiring a single keystone hacking check into pycodestyle.

    Subclasses supply the checker via get_checker() and (optionally) the
    fixture of annotated code examples via get_fixture().
    """

    def setUp(self):
        super(BaseStyleCheck, self).setUp()
        # Fixture holding code samples and their expected errors.
        self.code_ex = self.useFixture(self.get_fixture())
        self.addCleanup(delattr, self, 'code_ex')

    def get_checker(self):
        """Return the checker to be used for tests in this class."""
        raise NotImplementedError('subclasses must provide '
                                  'a real implementation')

    def get_fixture(self):
        # Default fixture of generic hacking-check examples.
        return hacking_fixtures.HackingCode()

    def run_check(self, code):
        """Run all keystone (K-prefixed) checks over *code* and return the
        raw deferred report entries from pycodestyle.
        """
        pycodestyle.register_check(self.get_checker())
        lines = textwrap.dedent(code).strip().splitlines(True)
        # Load all keystone hacking checks, they are of the form Kddd,
        # where ddd can range from 000-999
        guide = pycodestyle.StyleGuide(select='K')
        checker = pycodestyle.Checker(lines=lines, options=guide.options)
        checker.check_all()
        checker.report._deferred_print.sort()
        return checker.report._deferred_print

    def assert_has_errors(self, code, expected_errors=None):
        # Compare (line, column, code) triples, ignoring order.
        # NOTE(review): assertItemsEqual is the py2/testtools spelling of
        # assertCountEqual -- confirm it exists on the base test class.
        actual_errors = [e[:3] for e in self.run_check(code)]
        self.assertItemsEqual(expected_errors or [], actual_errors)
class TestCheckForMutableDefaultArgs(BaseStyleCheck):
    """Exercise the mutable-default-argument hacking check."""

    def get_checker(self):
        return checks.CheckForMutableDefaultArgs

    def test(self):
        example = self.code_ex.mutable_default_args
        self.assert_has_errors(example['code'],
                               expected_errors=example['expected_errors'])
class TestBlockCommentsBeginWithASpace(BaseStyleCheck):
    """Exercise the comments-begin-with-a-space hacking check."""

    def get_checker(self):
        return checks.block_comments_begin_with_a_space

    def test(self):
        example = self.code_ex.comments_begin_with_space
        self.assert_has_errors(example['code'],
                               expected_errors=example['expected_errors'])
class TestTranslationChecks(BaseStyleCheck):
    """Exercise CheckForTranslationIssues against the translation fixtures."""

    def get_checker(self):
        return checks.CheckForTranslationIssues

    def get_fixture(self):
        # Translation-specific examples instead of the generic fixture.
        return hacking_fixtures.HackingTranslations()

    def assert_has_errors(self, code, expected_errors=None):
        # pull out the parts of the error that we'll match against
        actual_errors = (e[:3] for e in self.run_check(code))
        # adjust line numbers to make the fixture data more readable.
        import_lines = len(self.code_ex.shared_imports.split('\n')) - 1
        actual_errors = [(e[0] - import_lines, e[1], e[2])
                         for e in actual_errors]
        self.assertEqual(expected_errors or [], actual_errors)

    def test_for_translations(self):
        # Each example is checked with the shared import prelude prepended.
        for example in self.code_ex.examples:
            code = self.code_ex.shared_imports + example['code']
            errors = example['expected_errors']
            self.assert_has_errors(code, expected_errors=errors)
class TestDictConstructorWithSequenceCopy(BaseStyleCheck):
    """Exercise the dict-constructor-with-sequence-copy hacking check."""

    def get_checker(self):
        return checks.dict_constructor_with_sequence_copy

    def test(self):
        example = self.code_ex.dict_constructor
        self.assert_has_errors(example['code'],
                               expected_errors=example['expected_errors'])
| apache-2.0 |
DevipriyaSarkar/Sunshine | weather.py | 1 | 12913 | from Tkinter import *
from keys import OPEN_WEATHER_API_KEY
from urllib2 import urlopen
from io import BytesIO
from PIL import Image, ImageTk
import datetime
import tkFont
import contextlib
import json
from sys import platform as sp
import socket
import string
# check for internet connectivity
def is_connected(host=None):
    """Return True if *host* (default: the module's REMOTE_SERVER) is
    reachable on TCP port 80, False otherwise.

    Resolving the name proves DNS works; opening a connection proves the
    host is actually reachable.
    """
    if host is None:
        host = REMOTE_SERVER
    try:
        # see if we can resolve the host name -- tells us if there is a DNS listening
        addr = socket.gethostbyname(host)
        # connect to the host -- tells us if the host is actually reachable
        s = socket.create_connection((addr, 80), 2)
        # BUGFIX: the probe socket was never closed (resource leak).
        s.close()
        return True
    except socket.error:
        # BUGFIX: was a bare ``except: pass`` that swallowed *every*
        # exception; narrowed to network errors (gaierror/timeout are
        # socket.error subclasses) so programming errors surface.
        return False
class Weather:
def __init__(self, master, city):
button_city.config(state=DISABLED)
if city != "":
if is_connected(): # internet available, fetch weather data
weather_frame = Frame(master)
weather_frame.grid(row=0)
self.city_name = string.capwords(city)
self.day = 0
self.custom_font = tkFont.Font(family="Helvetica", size=12, weight="bold")
self.custom_heading_font = tkFont.Font(family="Helvetica", size=14, weight="bold")
self.current_city_id = self.get_city_id(self.city_name) # open weather map city id
if self.current_city_id != -1: # if city id found
# initialization of all member variables to None
self.t_dt = StringVar()
self.t_temp_day = StringVar()
self.t_temp_min = StringVar()
self.t_temp_max = StringVar()
self.t_pressure = StringVar()
self.t_humidity = StringVar()
self.t_weather_icon_url = None
self.t_weather_main = StringVar()
self.t_weather_desc = StringVar()
self.t_wind_speed = StringVar()
self.t_wind_dir = StringVar()
self.t_cloudiness = StringVar()
self.t_rain = StringVar()
self.label_weather_icon = Label(weather_frame)
# retrieve and display weather data
self.get_weather(self.current_city_id, self.day)
self.display_data(weather_frame)
# bottom navigation frame
bottom_frame = Frame(master, height=2, borderwidth=1, relief=FLAT)
bottom_frame.grid(row=1, columnspan=2, padx=4, pady=4)
prev_img = ImageTk.PhotoImage(file="prev.png")
self.prev_button = Button(bottom_frame, text="<<", image=prev_img, command=self.go_to_prev)
self.prev_button.image = prev_img
self.prev_button.grid(row=0, column=0, padx=4, pady=4)
self.prev_button.config(state=DISABLED)
next_img = ImageTk.PhotoImage(file="next.png")
self.next_button = Button(bottom_frame, text=">>", image=next_img, command=self.go_to_next)
self.next_button.image = next_img
self.next_button.grid(row=0, column=1, padx=4, pady=4)
else:
# city id not found
label_invalid = Label(master, text="Sorry, city not found.\nPlease try a different name.",
font=("Helvetica", 10, "bold"))
label_invalid.place(relx=0.5, rely=0.5, anchor="center")
label_invalid.pack(fill=BOTH, expand=1, padx=4, pady=4)
else:
# internet not available, display error message
label_no_internet = Label(master, text="Sorry, can not connect to internet. \n"
"Please check your connection and try again.",
font=("Helvetica", 10, "bold"))
label_no_internet.place(relx=0.5, rely=0.5, anchor="center")
label_no_internet.pack(fill=BOTH, expand=1, padx=4, pady=4)
else:
# string empty, display error message
label_empty_str = Label(master, text="Please enter a city to proceed.",
font=("Helvetica", 10, "bold"))
label_empty_str.place(relx=0.5, rely=0.5, anchor="center")
label_empty_str.pack(fill=BOTH, expand=1, padx=4, pady=4)
button_city.config(state=NORMAL)
# get the weather information of the passed city id
def get_weather(self, current_city_id, day):
current_weather_url = "http://api.openweathermap.org/data/2.5/forecast/daily?id=%d&units=metric&appid=%s" \
% (current_city_id, OPEN_WEATHER_API_KEY)
with contextlib.closing(urlopen(current_weather_url)) as response:
json_data = json.load(response)
weather_list = json_data['list']
json_data_list = weather_list[day]
print(json_data_list)
self.load_data_from_json(json_data_list)
# retrieve data from passed json object
def load_data_from_json(self, cur_json_list):
self.t_dt.set((datetime.datetime.fromtimestamp(cur_json_list['dt'])).strftime('%d-%m-%Y'))
t_temp = cur_json_list['temp']
self.t_temp_day.set("Temperature:\n %0.2f%sC" % (t_temp['day'], degree_sign.encode('utf-8')))
self.t_temp_min.set("Minimum: %0.2f%sC" % (t_temp['min'], degree_sign.encode('utf-8')))
self.t_temp_max.set("Maximum: %0.2f%sC" % (t_temp['max'], degree_sign.encode('utf-8')))
self.t_pressure.set("%0.2f hPa" % cur_json_list['pressure'])
self.t_humidity.set("%0.2f %%" % cur_json_list['humidity'])
t_weather = cur_json_list['weather'][0]
self.t_weather_main.set(t_weather['main'])
self.t_weather_desc.set(t_weather['description'].capitalize())
t_weather_icon = t_weather['icon']
self.t_weather_icon_url = "http://openweathermap.org/img/w/%s.png" % t_weather_icon
self.t_wind_speed.set("%0.2f m/s" % cur_json_list['speed'])
self.t_wind_dir.set("%0.2f degrees" % cur_json_list['deg'])
self.t_cloudiness.set("%0.2f %%" % cur_json_list['clouds'])
if 'rain' in cur_json_list:
self.t_rain.set("%0.2f mm" % cur_json_list['rain'])
else:
self.t_rain.set("No rain today.")
# find open weather map city id for the given city name
@staticmethod
def get_city_id(city_name):
with open('city.list.json', 'r') as city_list:
data_string = city_list.read()
json_data = json.loads(data_string)
for city in json_data['city']:
if city['name'] == city_name:
return city['_id']
else:
return -1 # city id not found
# display retrieved data on GUI
def display_data(self, master):
label_city = Label(master, text=self.city_name, font=self.custom_heading_font)
label_city.grid(row=0, columnspan=2, padx=4, pady=4)
label_time = Label(master, textvariable=self.t_dt, font=self.custom_font)
label_time.grid(row=1, columnspan=2, padx=2, pady=2)
self.set_weather_icon()
Label(master, textvariable=self.t_weather_main, font=self.custom_font).grid(row=2, column=1, padx=2, pady=2)
Label(master, textvariable=self.t_weather_desc).grid(row=3, column=1, padx=2, pady=2)
label_temp_day = Label(master, textvariable=self.t_temp_day, font=self.custom_font)
label_temp_day.grid(row=4, column=0, rowspan=2, padx=2, pady=2)
Label(master, textvariable=self.t_temp_min).grid(row=4, column=1, padx=2, pady=2)
Label(master, textvariable=self.t_temp_max).grid(row=5, column=1, padx=2, pady=2)
Label(master, text="Pressure").grid(row=6, column=0, padx=2, pady=2)
Label(master, textvariable=self.t_pressure).grid(row=6, column=1, padx=2, pady=2)
Label(master, text="Humidity").grid(row=7, column=0, padx=2, pady=2)
Label(master, textvariable=self.t_humidity).grid(row=7, column=1, padx=2, pady=2)
Label(master, text="Wind Speed").grid(row=8, column=0, padx=2, pady=2)
Label(master, textvariable=self.t_wind_speed).grid(row=8, column=1, padx=2, pady=2)
Label(master, text="Wind Direction").grid(row=9, column=0, padx=2, pady=2)
Label(master, textvariable=self.t_wind_dir).grid(row=9, column=1, padx=2, pady=2)
Label(master, text="Cloudiness").grid(row=10, column=0, padx=2, pady=2)
Label(master, textvariable=self.t_cloudiness).grid(row=10, column=1, padx=2, pady=2)
Label(master, text="Rain").grid(row=11, column=0, padx=2, pady=2)
Label(master, textvariable=self.t_rain).grid(row=11, column=1, padx=2, pady=2)
self.scale_widgets(master)
# scale the widgets with the master window
@staticmethod
def scale_widgets(master):
master.columnconfigure(0, weight=1)
master.columnconfigure(1, weight=1)
master.rowconfigure(0, weight=1)
master.rowconfigure(1, weight=1)
master.rowconfigure(2, weight=1)
master.rowconfigure(3, weight=1)
master.rowconfigure(4, weight=1)
master.rowconfigure(5, weight=1)
master.rowconfigure(6, weight=1)
master.rowconfigure(7, weight=1)
master.rowconfigure(8, weight=1)
master.rowconfigure(9, weight=1)
master.rowconfigure(10, weight=1)
master.rowconfigure(11, weight=1)
master.rowconfigure(12, weight=1)
# set the weather icon
def set_weather_icon(self):
with contextlib.closing(urlopen(self.t_weather_icon_url)) as raw_data:
image = Image.open(BytesIO(raw_data.read()))
weather_icon = ImageTk.PhotoImage(image)
self.label_weather_icon.configure(image=weather_icon)
self.label_weather_icon.image = weather_icon # keep a reference
# When a PhotoImage object is garbage-collected by Python
# (e.g. when you return from a function which stored an image in a local variable),
# the image is cleared even if it is being displayed by a Tkinter widget.
# To avoid this, the program must keep an extra reference to the image object.
self.label_weather_icon.grid(row=2, rowspan=2, column=0)
# get the previous day's weather
def go_to_prev(self):
    """Step back one day, refresh the forecast, and update the nav buttons."""
    print("Go to previous day")
    self.day -= 1
    if 0 <= self.day <= 6:
        # Still inside the 7-day window: fetch and render the new day.
        self.get_weather(self.current_city_id, self.day)
        self.set_weather_icon()
    elif self.day < 0:
        # Walked off the front of the window: clamp, nothing to re-fetch.
        self.day = 0
    else:
        self.day = 6
    self.button_state_check()
# get the next day's weather
def go_to_next(self):
    """Advance one day, refresh the forecast, and update the nav buttons."""
    print("Go to next day")
    self.day += 1
    if 0 <= self.day <= 6:
        # Still inside the 7-day window: fetch and render the new day.
        self.get_weather(self.current_city_id, self.day)
        self.set_weather_icon()
    elif self.day > 6:
        # Walked off the end of the window: clamp, nothing to re-fetch.
        self.day = 6
    else:
        self.day = 0
    self.button_state_check()
# update the state of the navigation buttons
def button_state_check(self):
    """Disable prev/next at the edges of the 7-day window (days 0 and 6)."""
    prev_state = DISABLED if self.day == 0 else NORMAL
    next_state = DISABLED if self.day == 6 else NORMAL
    self.prev_button.config(state=prev_state)
    self.next_button.config(state=next_state)
# called on show weather button click
def show():
    # Read the city typed by the user and rebuild the forecast panel for it.
    city = entry_city.get()
    print city
    # Destroy any previously rendered forecast widgets before re-rendering.
    for child in content_frame.winfo_children():
        child.destroy()
    # Weather(...) repopulates content_frame for the requested city
    # (constructor defined earlier in this file).
    Weather(content_frame, city)
# --- application entry: build the main window and start the event loop ---
root = Tk()
root.wm_title("Sunshine")
# Window icon: X11/macOS accept a PNG photo icon; Windows wants an .ico file.
if sp == 'linux' or sp == 'linux2' or sp == 'darwin':
    img = PhotoImage(file='sun.png')
    root.tk.call('wm', 'iconphoto', root._w, img)
else:
    root.iconbitmap(default='sun.ico')
degree_sign = u'\N{DEGREE SIGN}'
# Host used for connectivity checks (presumably pinged elsewhere in this
# file; not referenced in this fragment -- TODO confirm).
REMOTE_SERVER = "www.google.com"
# top city frame
font = tkFont.Font(family="Helvetica", size=10)
top_frame = Frame(root)
top_frame.grid(row=0, columnspan=2, padx=4, pady=4)
Label(top_frame, text="City", font=font).grid(row=0, column=0, sticky="W", padx=4, pady=4)
entry_city = Entry(top_frame, font=font)
entry_city.grid(row=0, column=1, padx=4, pady=4)
entry_city.focus_set()
# Frame that show()/Weather() fill with the forecast widgets.
content_frame = Frame(root)
content_frame.grid(row=1, columnspan=2)
button_city = Button(top_frame, text="Show Weather", command=show, font=font, relief=GROOVE)
button_city.grid(row=1, columnspan=2, padx=4, pady=4)
root.update()
# Lock the minimum window size to 200px wide by the initial layout height.
root.minsize(200, root.winfo_height())
root.bind("<Return>", lambda x: show())  # invoke function on pressing enter
# scaling
root.columnconfigure(0, weight=1)
root.columnconfigure(1, weight=1)
root.rowconfigure(0, weight=1)
root.rowconfigure(1, weight=1)
root.mainloop()
| mit |
vcordie/tgstation | tools/UnquotedListAssociations/UnquotedListAssociations.py | 123 | 4070 |
# Unquoted List Association Locater and Reporter, by RemieRichards V1.0 - 26/11/16
# list("string" = value) is valid in DM
# unfortunately, so is list(string = value) (Notice the lack of quotes? BAD BAD, it conflicts with var names!)
import sys
import os
import re
from datetime import date
#Climbs up from /tools/UnquotedListAssociations and along to ../code
scan_dir = "code" #used later to truncate log file paths
real_dir = os.path.abspath("../../"+scan_dir)
# Maps every #define name found in the codebase to True, so known defines
# can be excluded from the unquoted-association report.
define_dict = {}
# Grand total across all scanned files, accumulated by the per-file scanner.
total_unquoted_list_associations = 0
log_output = True #Set to false for mad speeeeeed (slightly faster because no text output to the window, but still full log files)
#Scan a directory, scanning any dm files it finds
def unquoted_list_associations_scan_dir(scan_dir):
    """Walk `scan_dir`, reporting unquoted list associations in every .dm file.

    Builds the #define dictionary first (defines are legal keys), then writes
    a dated report file into the current directory and accumulates the grand
    total in the module-global counter.
    """
    global total_unquoted_list_associations
    if not (os.path.exists(scan_dir) and os.path.isdir(scan_dir)):
        # Nothing to scan; mirror the original silent no-op behaviour.
        return
    build_define_dictionary(scan_dir)
    output_str = ""
    files_scanned = 0
    files_with_named_list_args = 0
    for root, dirs, files in os.walk(scan_dir):
        for f in files:
            if log_output:
                # Parenthesized print works in both Python 2 and 3.
                print(str(f))
            scan_result = scan_dm_file_for_unquoted_list_associations(os.path.join(root, f))
            files_scanned += 1
            if scan_result:
                output_str += scan_result + "\n"
                files_with_named_list_args += 1
    output_str += str(files_with_named_list_args) + "/" + str(files_scanned) + " files have Unquoted List Associations in them"
    output_str += "\nThere are " + str(total_unquoted_list_associations) + " total Unquoted List Associations"
    todays_file = str(date.today()) + "-unquoted_list_associations_log.txt"
    # "w" so it overrides existing files for today; there should only be one
    # log file per day.  Context manager guarantees the handle is closed
    # (the original code leaked it).
    with open(todays_file, "w") as output_file:
        output_file.write(output_str)
#Scan one file, returning a string as a "report" or if there are no NamedListArgs, False
def scan_dm_file_for_unquoted_list_associations(_file):
    """Return a report string for one .dm file, or False if it is clean."""
    global total_unquoted_list_associations
    if not _file.endswith(".dm"):
        return False
    with open(_file, "r") as dm_file:
        filecontents = dm_file.read()
    # Gather the bodies of every `= list(...)` literal in the file.
    list_definitions = re.findall(r"=\s*list\((.*)\)", filecontents)
    listdefs = ' '.join(list_definitions)
    # Keep key=value pairs whose key is a bare word and not a known #define.
    unquoted_list_associations = [
        matchtuple
        for matchtuple in re.findall(r"(?:list\(|,)\s*(\w+)\s*,*\s*=\s*(\w+)", listdefs)
        if not define_dict.get(matchtuple[0], False)  # defines are valid
    ]
    count = len(unquoted_list_associations)
    if not count:
        return False
    # Crop to ..\code\DIR\FILE.dm; everything before is developer specific.
    file_report = ".."+scan_dir+str(_file).split(scan_dir)[1]+" "
    for nla in unquoted_list_associations:
        file_report += "\nlist(" + nla[0] + " = " + nla[1] + ")"
    total_unquoted_list_associations += count
    file_report += "\nTotal Unquoted List Associations: "+str(count)
    return file_report
#Build a dict of defines, such that we can rule them out as NamedListArgs
def build_define_dictionary(scan_dir):
    """Populate the module-global define_dict from every .dm file under scan_dir."""
    # NOTE: the original assigned a *local* `define_dict = {}` here, which
    # was dead code -- scan_dm_file_for_defines() mutates the module-global
    # dict, and the local was never read.  The dead assignment is removed.
    for root, dirs, files in os.walk(scan_dir):
        for f in files:
            scan_dm_file_for_defines(os.path.join(root, f))
#Find all #define X Y in a file and update define_dict so that define_dict[X] = True
def scan_dm_file_for_defines(_file):
    """Record every #define name found in _file into the global define_dict."""
    if not _file.endswith(".dm"):
        return False
    with open(_file, "r") as dm_file:
        contents = dm_file.read()
    # Mutating (not rebinding) the module-global dict, so no `global` needed.
    for define_def in re.findall(r"#define\s+([\w()]+)[ \t]+[^\n]+", contents):
        define_dict[define_def] = True
# Entry point: scan the repository's ../../code tree and announce completion.
unquoted_list_associations_scan_dir(real_dir)
print "Done!"
| agpl-3.0 |
kotejante/python-moodle | examples/enrol_users.py | 1 | 1352 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Moodle Webservice
# Copyright (c) 2011 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Raimon Esteve <resteve@zikzakmedia.com>
# Jesus Martín <jmartin@zikzakmedia.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from config import *
from moodle_ws_client import moodle

# Moodle webservice client; `server` comes from config's star import --
# presumably the connection settings; TODO confirm against moodle_ws_client.
mdl = moodle.MDL()
# xmlrpc Connection
"""
Enrol users
"""
# Example payload: assign role 1 to user 3 in course 1.
enrols =[{
    'roleid': 1,
    'userid': 3,
    'courseid': 1,
    }]
print mdl.enrol_users(server, enrols)
| agpl-3.0 |
beck/django | tests/utils_tests/test_numberformat.py | 307 | 4049 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from decimal import Decimal
from sys import float_info
from unittest import TestCase
from django.utils.numberformat import format as nformat
class TestNumberFormat(TestCase):
    """Tests for django.utils.numberformat.format (imported as nformat)."""

    def test_format_number(self):
        # int/float inputs: decimal_pos pads; grouping is ignored unless
        # force_grouping is set.
        self.assertEqual(nformat(1234, '.'), '1234')
        self.assertEqual(nformat(1234.2, '.'), '1234.2')
        self.assertEqual(nformat(1234, '.', decimal_pos=2), '1234.00')
        self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=','),
                         '1234')
        self.assertEqual(nformat(1234, '.', grouping=2, thousand_sep=',',
                                 force_grouping=True), '12,34')
        self.assertEqual(nformat(-1234.33, '.', decimal_pos=1), '-1234.3')

    def test_format_string(self):
        # String inputs behave exactly like their numeric equivalents.
        self.assertEqual(nformat('1234', '.'), '1234')
        self.assertEqual(nformat('1234.2', '.'), '1234.2')
        self.assertEqual(nformat('1234', '.', decimal_pos=2), '1234.00')
        self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=','),
                         '1234')
        self.assertEqual(nformat('1234', '.', grouping=2, thousand_sep=',',
                                 force_grouping=True), '12,34')
        self.assertEqual(nformat('-1234.33', '.', decimal_pos=1), '-1234.3')
        # Multi-character thousand separators are supported.
        self.assertEqual(nformat('10000', '.', grouping=3,
                                 thousand_sep='comma', force_grouping=True),
                         '10comma000')

    def test_large_number(self):
        # Integers around sys.float_info.max must keep full precision, i.e.
        # they must not be routed through float formatting.
        most_max = ('{}179769313486231570814527423731704356798070567525844996'
                    '598917476803157260780028538760589558632766878171540458953'
                    '514382464234321326889464182768467546703537516986049910576'
                    '551282076245490090389328944075868508455133942304583236903'
                    '222948165808559332123348274797826204144723168738177180919'
                    '29988125040402618412485836{}')
        most_max2 = ('{}35953862697246314162905484746340871359614113505168999'
                     '31978349536063145215600570775211791172655337563430809179'
                     '07028764928468642653778928365536935093407075033972099821'
                     '15310256415249098018077865788815173701691026788460916647'
                     '38064458963316171186642466965495956524082894463374763543'
                     '61838599762500808052368249716736')
        int_max = int(float_info.max)
        self.assertEqual(nformat(int_max, '.'), most_max.format('', '8'))
        self.assertEqual(nformat(int_max + 1, '.'), most_max.format('', '9'))
        self.assertEqual(nformat(int_max * 2, '.'), most_max2.format(''))
        self.assertEqual(nformat(0 - int_max, '.'), most_max.format('-', '8'))
        self.assertEqual(nformat(-1 - int_max, '.'), most_max.format('-', '9'))
        self.assertEqual(nformat(-2 * int_max, '.'), most_max2.format('-'))

    def test_decimal_numbers(self):
        # Decimal inputs mirror the int/float behaviour, including tiny values.
        self.assertEqual(nformat(Decimal('1234'), '.'), '1234')
        self.assertEqual(nformat(Decimal('1234.2'), '.'), '1234.2')
        self.assertEqual(nformat(Decimal('1234'), '.', decimal_pos=2), '1234.00')
        self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=','), '1234')
        self.assertEqual(nformat(Decimal('1234'), '.', grouping=2, thousand_sep=',', force_grouping=True), '12,34')
        self.assertEqual(nformat(Decimal('-1234.33'), '.', decimal_pos=1), '-1234.3')
        self.assertEqual(nformat(Decimal('0.00000001'), '.', decimal_pos=8), '0.00000001')

    def test_decimal_subclass(self):
        # Subclass __format__ customisations must be honoured by nformat.
        class EuroDecimal(Decimal):
            """
            Wrapper for Decimal which prefixes each amount with the € symbol.
            """
            def __format__(self, specifier, **kwargs):
                amount = super(EuroDecimal, self).__format__(specifier, **kwargs)
                return '€ {}'.format(amount)

        price = EuroDecimal('1.23')
        self.assertEqual(nformat(price, ','), '€ 1,23')
| bsd-3-clause |
dendisuhubdy/tensorflow | tensorflow/contrib/autograph/converters/control_flow.py | 3 | 12364 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles control flow statements: while, for, if."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import templates
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.contrib.autograph.pyct.static_analysis import cfg
from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
class SymbolNamer(object):
  """Interface expected of the namer used by ControlFlowTransformer."""

  def new_symbol(self, name_root, reserved_locals):
    """Return a fresh, unique symbol name.

    Args:
      name_root: str; stem that the generated name is based on.
      reserved_locals: set of str; extra local names that must not be reused.

    Returns:
      str: the newly generated symbol name.
    """
    raise NotImplementedError()
class ControlFlowTransformer(transformer.Base):
  """Transforms control flow structures like loops an conditionals."""

  def _create_cond_branch(self, body_name, aliased_orig_names,
                          aliased_new_names, body, returns):
    """Build the AST for one branch function of a functionalized `if`.

    Args:
      body_name: str; name of the generated branch function.
      aliased_orig_names: tuple of symbols the branch closes over, which must
          be aliased to fresh locals inside it.
      aliased_new_names: tuple of the corresponding fresh local names.
      body: list of AST statements forming the branch body.
      returns: AST node(s) for the values the branch function returns.

    Returns:
      List of AST nodes defining the branch function.
    """
    if aliased_orig_names:
      template = """
        def body_name():
          aliased_new_names, = aliased_orig_names,
          body
          return (returns,)
      """
      return templates.replace(
          template,
          body_name=body_name,
          body=body,
          aliased_orig_names=aliased_orig_names,
          aliased_new_names=aliased_new_names,
          returns=returns)
    else:
      template = """
        def body_name():
          body
          return (returns,)
      """
      return templates.replace(
          template, body_name=body_name, body=body, returns=returns)

  def _create_cond_expr(self, results, test, body_name, orelse_name):
    """Build the run_cond call that replaces the `if` statement.

    `results` is None when the conditional produces no used outputs; in that
    case the call is emitted for its side effects only.
    """
    if results is not None:
      template = """
        results = ag__.utils.run_cond(test, body_name, orelse_name)
      """
      return templates.replace(
          template,
          test=test,
          results=results,
          body_name=body_name,
          orelse_name=orelse_name)
    else:
      template = """
        ag__.utils.run_cond(test, body_name, orelse_name)
      """
      return templates.replace(
          template, test=test, body_name=body_name, orelse_name=orelse_name)

  def visit_If(self, node):
    """Convert an `if` statement into branch functions plus a run_cond call."""
    self.generic_visit(node)

    body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
    orelse_scope = anno.getanno(node, NodeAnno.ORELSE_SCOPE)
    body_defs = body_scope.created | body_scope.modified
    orelse_defs = orelse_scope.created | orelse_scope.modified
    live = anno.getanno(node, 'live_out')

    # We'll need to check if we're closing over variables that are defined
    # elsewhere in the function
    # NOTE: we can only detect syntactic closure in the scope
    # of the code passed in. If the AutoGraph'd function itself closes
    # over other variables, this analysis won't take that into account.
    defined = anno.getanno(node, 'defined_in')

    # We only need to return variables that are
    # - modified by one or both branches
    # - live (or has a live parent) at the end of the conditional
    modified = []
    for def_ in body_defs | orelse_defs:
      def_with_parents = set((def_,)) | def_.support_set
      if live & def_with_parents:
        modified.append(def_)

    # We need to check if live created variables are balanced
    # in both branches
    created = live & (body_scope.created | orelse_scope.created)

    # The if statement is illegal if there are variables that are created,
    # that are also live, but both branches don't create them.
    if created:
      if created != (body_scope.created & live):
        raise ValueError(
            'The main branch does not create all live symbols that the else '
            'branch does.')
      if created != (orelse_scope.created & live):
        raise ValueError(
            'The else branch does not create all live symbols that the main '
            'branch does.')

    # Alias the closure variables inside the conditional functions
    # to avoid errors caused by the local variables created in the branch
    # functions.
    # We will alias variables independently for body and orelse scope,
    # because different branches might write different variables.
    aliased_body_orig_names = tuple(body_scope.modified - body_scope.created)
    aliased_orelse_orig_names = tuple(orelse_scope.modified -
                                      orelse_scope.created)
    aliased_body_new_names = tuple(
        self.context.namer.new_symbol(s.ssf(), body_scope.referenced)
        for s in aliased_body_orig_names)
    aliased_orelse_new_names = tuple(
        self.context.namer.new_symbol(s.ssf(), orelse_scope.referenced)
        for s in aliased_orelse_orig_names)

    alias_body_map = dict(zip(aliased_body_orig_names, aliased_body_new_names))
    alias_orelse_map = dict(
        zip(aliased_orelse_orig_names, aliased_orelse_new_names))

    node_body = ast_util.rename_symbols(node.body, alias_body_map)
    node_orelse = ast_util.rename_symbols(node.orelse, alias_orelse_map)

    if not modified:
      # When the cond would return no value, we leave the cond called without
      # results. That in turn should trigger the side effect guards. The
      # branch functions will return a dummy value that ensures cond
      # actually has some return value as well.
      results = None
    elif len(modified) == 1:
      results = modified[0]
    else:
      results = gast.Tuple([s.ast() for s in modified], None)

    body_name = self.context.namer.new_symbol('if_true', body_scope.referenced)
    orelse_name = self.context.namer.new_symbol('if_false',
                                                orelse_scope.referenced)
    if modified:

      def build_returns(aliased_names, alias_map, scope):
        """Builds list of return variables for a branch of a conditional."""
        returns = []
        for s in modified:
          if s in aliased_names:
            returns.append(alias_map[s])
          else:
            if s not in scope.created | defined:
              raise ValueError(
                  'Attempting to return variable "%s" from the true branch of '
                  'a conditional, but it was not closed over, or created in '
                  'this branch.' % str(s))
            else:
              returns.append(s)
        return tuple(returns)

      body_returns = build_returns(aliased_body_orig_names, alias_body_map,
                                   body_scope)
      orelse_returns = build_returns(aliased_orelse_orig_names,
                                     alias_orelse_map, orelse_scope)

    else:
      # No outputs: each branch returns a dummy tensor so tf.cond has a value.
      body_returns = orelse_returns = templates.replace('tf.ones(())')[0].value

    body_def = self._create_cond_branch(
        body_name,
        aliased_orig_names=tuple(aliased_body_orig_names),
        aliased_new_names=tuple(aliased_body_new_names),
        body=node_body,
        returns=body_returns)
    orelse_def = self._create_cond_branch(
        orelse_name,
        aliased_orig_names=tuple(aliased_orelse_orig_names),
        aliased_new_names=tuple(aliased_orelse_new_names),
        body=node_orelse,
        returns=orelse_returns)
    cond_expr = self._create_cond_expr(results, node.test, body_name,
                                       orelse_name)

    return body_def + orelse_def + cond_expr

  def visit_While(self, node):
    """Convert a `while` loop into an ag__.while_stmt call.

    Loop state is the set of symbols the body closes over and modifies; each
    is renamed to a fresh SSF name inside the generated test/body functions.
    """
    self.generic_visit(node)

    body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
    body_closure = body_scope.modified - body_scope.created
    all_referenced = body_scope.referenced

    cond_scope = anno.getanno(node, NodeAnno.COND_SCOPE)
    cond_closure = set()
    # Roots referenced by the loop condition but not created in the body are
    # extra dependencies of the generated while_stmt.
    for s in cond_scope.referenced:
      for root in s.support_set:
        if root not in body_scope.created:
          cond_closure.add(root)

    state = list(body_closure)
    if not state:
      # TODO(mdan): Implement this properly.
      # To complete this statement, we need to check whether any variable
      # created inside the body scope is used before being modified outside the
      # scope. This should be done during activity analysis, and in general
      # should cover the case where variables may not be initialized.
      raise ValueError('cannot convert while loop: no outputs')

    state_ssf = [
        self.context.namer.new_symbol(s.ssf(), all_referenced) for s in state
    ]
    ssf_map = {
        name: ssf
        for name, ssf in zip(state, state_ssf)
        if str(name) != ssf
    }

    if len(state) == 1:
      state = state[0]
      state_ssf = state_ssf[0]
      state_ast_tuple = state
    else:
      state_ast_tuple = gast.Tuple([n.ast() for n in state], None)

    node_body = ast_util.rename_symbols(node.body, ssf_map)
    test = ast_util.rename_symbols(node.test, ssf_map)

    template = """
      def test_name(state_ssf):
        return test
      def body_name(state_ssf):
        body
        return state_ssf,
      state_ast_tuple = ag__.while_stmt(
          test_name, body_name, (state,), (extra_deps,))
    """
    node = templates.replace(
        template,
        state=state,
        state_ssf=state_ssf,
        state_ast_tuple=state_ast_tuple,
        test_name=self.context.namer.new_symbol('loop_test',
                                                body_scope.referenced),
        test=test,
        body_name=self.context.namer.new_symbol('loop_body',
                                                body_scope.referenced),
        body=node_body,
        extra_deps=tuple(s.ast() for s in cond_closure),
    )

    return node

  def visit_For(self, node):
    """Convert a `for` loop into an ag__.for_stmt call.

    Mirrors visit_While; an optional 'extra_test' annotation (e.g. from a
    break statement conversion) becomes the loop's extra test function.
    """
    self.generic_visit(node)

    body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
    body_closure = body_scope.modified - body_scope.created
    all_referenced = body_scope.referenced

    state = list(body_closure)

    state_ssf = [
        self.context.namer.new_symbol(s.ssf(), all_referenced) for s in state
    ]
    ssf_map = {
        name: ssf
        for name, ssf in zip(state, state_ssf)
        if str(name) != ssf
    }

    if len(state) == 1:
      state = state[0]
      state_ssf = state_ssf[0]
      state_ast_tuple = state
    else:
      state_ast_tuple = gast.Tuple([n.ast() for n in state], None)

    node_body = ast_util.rename_symbols(node.body, ssf_map)
    if anno.hasanno(node, 'extra_test'):
      extra_test = anno.getanno(node, 'extra_test')
      extra_test = ast_util.rename_symbols(extra_test, ssf_map)
    else:
      extra_test = parser.parse_expression('True')

    template = """
      def extra_test_name(state_ssf):
        return extra_test_expr
      def body_name(iterate, state_ssf):
        body
        return state_ssf,
      state_ast_tuple = ag__.for_stmt(
          iter_, extra_test_name, body_name, (state,))
    """
    node = templates.replace(
        template,
        state=state,
        state_ssf=state_ssf,
        state_ast_tuple=state_ast_tuple,
        iter_=node.iter,
        iterate=node.target,
        extra_test_name=self.context.namer.new_symbol('extra_test',
                                                      all_referenced),
        extra_test_expr=extra_test,
        body_name=self.context.namer.new_symbol('loop_body', all_referenced),
        body=node_body)

    return node
def transform(node, context):
  """Run liveness/definedness analyses, then lower control flow in `node`."""
  cfg.run_analyses(node, cfg.Liveness(context))
  cfg.run_analyses(node, cfg.Defined(context))
  transformed = ControlFlowTransformer(context).visit(node)
  return transformed
| apache-2.0 |
sogelink/ansible | lib/ansible/plugins/lookup/nested.py | 54 | 2691 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: nested
version_added: "1.1"
short_description: composes a list with nested elements of other lists
description:
- Takes the input lists and returns a list with elements that are lists composed of the elements of the input lists
options:
_raw:
description:
- a set of lists
required: True
"""
EXAMPLES = """
- name: give users access to multiple databases
mysql_user:
name: "{{ item[0] }}"
priv: "{{ item[1] }}.*:ALL"
append_privs: yes
password: "foo"
with_nested:
- [ 'alice', 'bob' ]
- [ 'clientdb', 'employeedb', 'providerdb' ]
As with the case of 'with_items' above, you can use previously defined variables.:
- name: here, 'users' contains the above list of employees
mysql_user:
name: "{{ item[0] }}"
priv: "{{ item[1] }}.*:ALL"
append_privs: yes
password: "foo"
with_nested:
- "{{ users }}"
- [ 'clientdb', 'employeedb', 'providerdb' ]
"""
RETURN = """
_list:
description:
- A list composed of lists paring the elements of the input lists
type: list
"""
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):

    def _lookup_variables(self, terms, variables):
        """Templar-expand each term into a list, failing on undefined vars."""
        results = []
        for x in terms:
            try:
                intermediate = listify_lookup_plugin_terms(x, templar=self._templar, loader=self._loader, fail_on_undefined=True)
            except UndefinedError as e:
                raise AnsibleUndefinedVariable("One of the nested variables was undefined. The error was: %s" % e)
            results.append(intermediate)
        return results

    def run(self, terms, variables=None, **kwargs):
        """Return the flattened cartesian product of the input lists.

        Raises:
            AnsibleError: if no lists were supplied.
        """
        terms = self._lookup_variables(terms, variables)

        my_list = terms[:]
        my_list.reverse()
        if len(my_list) == 0:
            raise AnsibleError("with_nested requires at least one element in the nested list")
        # Fold the lists together pairwise.  (The original code had a dead
        # `result = []` assignment here and a pointless `result2` temp.)
        result = my_list.pop()
        while len(my_list) > 0:
            result = self._combine(result, my_list.pop())

        # Each product tuple is flattened into a single list element.
        return [self._flatten(x) for x in result]
| gpl-3.0 |
nerdvegas/rez | src/rez/package_filter.py | 1 | 16840 | from rez.packages import iter_packages
from rez.exceptions import ConfigurationError
from rez.config import config
from rez.utils.data_utils import cached_property, cached_class_property
from rez.vendor.six import six
from rez.vendor.version.requirement import VersionedObject, Requirement
from hashlib import sha1
import fnmatch
import re
basestring = six.string_types[0]
class PackageFilterBase(object):
    """Abstract interface shared by package filters and filter lists."""

    def excludes(self, package):
        """Determine if the filter excludes the given package.

        Args:
            package (`Package`): Package to filter.

        Returns:
            `Rule` object that excludes the package, or None if the package
            was not excluded.
        """
        raise NotImplementedError

    def add_exclusion(self, rule):
        """Add an exclusion rule.

        Args:
            rule (`Rule`): Rule to exclude on.
        """
        raise NotImplementedError

    def add_inclusion(self, rule):
        """Add an inclusion rule.

        Args:
            rule (`Rule`): Rule to include on.
        """
        raise NotImplementedError

    @classmethod
    def from_pod(cls, data):
        """Convert from POD types to equivalent package filter."""
        raise NotImplementedError

    def to_pod(self):
        """Convert to POD type, suitable for storing in an rxt file."""
        raise NotImplementedError

    def iter_packages(self, name, range_=None, paths=None):
        """Same as iter_packages in packages.py, but also applies this filter.

        Args:
            name (str): Name of the package, eg 'maya'.
            range_ (VersionRange or str): If provided, limits the versions
                returned to those in `range_`.
            paths (list of str, optional): paths to search for packages,
                defaults to `config.packages_path`.

        Returns:
            `Package` iterator.
        """
        for package in iter_packages(name, range_, paths):
            if self.excludes(package):
                continue
            yield package

    @property
    def sha1(self):
        """SHA-1 hex digest of this filter's string form."""
        return sha1(str(self).encode("utf-8")).hexdigest()

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, str(self))
class PackageFilter(PackageFilterBase):
    """A package filter.

    A package filter is a set of rules that hides some packages but leaves others
    visible. For example, a package filter might be used to hide all packages
    whos version ends in the string '.beta'. A package filter might also be used
    simply to act as a blacklist, hiding some specific packages that are known
    to be problematic.

    Rules can be added as 'exclusion' or 'inclusion' rules. A package is only
    excluded iff it matches one or more exclusion rules, and does not match any
    inclusion rules.
    """
    def __init__(self):
        # Both dicts map package family name (or None for family-agnostic
        # rules) to a cost-sorted list of rules.
        self._excludes = {}
        self._includes = {}

    def excludes(self, package):
        """Return the exclusion `Rule` that hides `package`, or None.

        Family-specific rules are consulted before family-agnostic (None-keyed)
        rules, and a matching inclusion rule overrides any exclusion match.
        """
        if not self._excludes:
            return None  # quick out

        def _match(rules):
            if rules:
                for rule in rules:
                    if rule.match(package):
                        return rule
            return None

        excludes = self._excludes.get(package.name)
        excl = _match(excludes)
        if not excl:
            excludes = self._excludes.get(None)
            excl = _match(excludes)

        if excl:
            includes = self._includes.get(package.name)
            incl = _match(includes)
            if incl:
                excl = None
            else:
                includes = self._includes.get(None)
                if _match(includes):
                    excl = None

        return excl

    def add_exclusion(self, rule):
        self._add_rule(self._excludes, rule)

    def add_inclusion(self, rule):
        self._add_rule(self._includes, rule)

    def copy(self):
        """Return a shallow copy of the filter.

        Adding rules to the copy will not alter the source.
        """
        other = PackageFilter.__new__(PackageFilter)
        other._excludes = self._excludes.copy()
        other._includes = self._includes.copy()
        return other

    def __and__(self, other):
        """Combine two filters.

        Note: the rule dicts map family name to a *list* of rules, so each
        list must be flattened. The previous code passed whole lists to
        add_exclusion/add_inclusion, which raised AttributeError when
        `rule.family()` was called on a list.
        """
        result = self.copy()
        for rules in other._excludes.values():
            for rule in rules:
                result.add_exclusion(rule)
        for rules in other._includes.values():
            for rule in rules:
                result.add_inclusion(rule)
        return result

    def __nonzero__(self):
        return bool(self._excludes)
    __bool__ = __nonzero__  # py3 compat

    @cached_property
    def cost(self):
        """Get the approximate cost of this filter.

        Cost is the total cost of the exclusion rules in this filter. The cost
        of family-specific filters is divided by 10.

        Returns:
            float: The approximate cost of the filter.
        """
        total = 0.0
        for family, rules in self._excludes.items():
            cost = sum(x.cost() for x in rules)
            if family:
                cost = cost / float(10)
            total += cost
        return total

    @classmethod
    def from_pod(cls, data):
        f = PackageFilter()
        for namespace, func in (("excludes", f.add_exclusion),
                                ("includes", f.add_inclusion)):
            rule_strs = data.get(namespace, [])
            if isinstance(rule_strs, basestring):
                rule_strs = [rule_strs]
            for rule_str in rule_strs:
                rule = Rule.parse_rule(rule_str)
                func(rule)
        return f

    def to_pod(self):
        data = {}
        for namespace, dict_ in (("excludes", self._excludes),
                                 ("includes", self._includes)):
            if dict_:
                rules = []
                for rules_ in dict_.values():
                    rules.extend(map(str, rules_))
                data[namespace] = rules
        return data

    def _add_rule(self, rules_dict, rule):
        # Rules within a family bucket stay sorted cheapest-first so that
        # excludes() tries cheap rules before expensive ones.
        family = rule.family()
        rules_ = rules_dict.get(family, [])
        rules_dict[family] = sorted(rules_ + [rule], key=lambda x: x.cost())
        # Rule set changed, so the cached cost is stale.
        cached_property.uncache(self, "cost")

    def __str__(self):
        def sortkey(rule_items):
            family, rules = rule_items
            if family is None:
                return ("", rules)
            return rule_items

        return str((sorted(self._excludes.items(), key=sortkey),
                    sorted(self._includes.items(), key=sortkey)))
class PackageFilterList(PackageFilterBase):
    """A list of package filters.

    A package is excluded by a filter list iff any filter within the list
    excludes it.
    """
    def __init__(self):
        self.filters = []

    def add_filter(self, package_filter):
        """Add a filter to the list.

        Args:
            package_filter (`PackageFilter`): Filter to add.
        """
        # Keep the list sorted cheapest-first so excludes() fails fast.
        filters = self.filters + [package_filter]
        self.filters = sorted(filters, key=lambda x: x.cost)

    def add_exclusion(self, rule):
        # The rule goes into the last (costliest) filter; if there are no
        # filters yet, a new one is created to hold it.
        if self.filters:
            f = self.filters[-1]
            f.add_exclusion(rule)
        else:
            f = PackageFilter()
            f.add_exclusion(rule)
            self.add_filter(f)

    def add_inclusion(self, rule):
        """
        Note:
            Adding an inclusion to a filter list applies that inclusion across
            all filters.
        """
        for f in self.filters:
            f.add_inclusion(rule)

    def excludes(self, package):
        """Return the first `Rule` from any filter that excludes `package`."""
        for f in self.filters:
            rule = f.excludes(package)
            if rule:
                return rule
        return None

    def copy(self):
        """Return a copy of the filter list.

        Adding rules to the copy will not alter the source.
        """
        other = PackageFilterList.__new__(PackageFilterList)
        other.filters = [x.copy() for x in self.filters]
        return other

    @classmethod
    def from_pod(cls, data):
        flist = PackageFilterList()
        for dict_ in data:
            f = PackageFilter.from_pod(dict_)
            flist.add_filter(f)
        return flist

    def to_pod(self):
        data = []
        for f in self.filters:
            data.append(f.to_pod())
        return data

    def __nonzero__(self):
        return any(self.filters)
    __bool__ = __nonzero__  # py3 compat

    def __str__(self):
        filters = sorted(self.filters, key=lambda x: (x.cost, str(x)))
        return str(tuple(filters))

    @cached_class_property
    def singleton(cls):
        """Filter list as configured by rezconfig.package_filter."""
        return cls.from_pod(config.package_filter)
# Module-level singleton filter list with no rules: it excludes nothing and
# evaluates falsey, so callers can use it as a do-nothing default.
no_filter = PackageFilterList()
class Rule(object):
    """Base class for package filter rules."""

    # Rule type name, overridden by subclasses (eg 'glob', 'regex', 'range').
    # NOTE: the original code had a stray bare-string "docstring" here that
    # described *cost*, not `name`; it was a misplaced no-op and is removed.
    name = None

    def match(self, package):
        """Apply the rule to the package.

        Args:
            package (`Package`): Package to filter.

        Returns:
            bool: True if the package matches the filter, False otherwise.
        """
        raise NotImplementedError

    def family(self):
        """Returns a package family string if this rule only applies to a given
        package family, otherwise None."""
        return self._family

    def cost(self):
        """Relative cost of filter. Cheaper filters are applied first."""
        raise NotImplementedError

    @classmethod
    def parse_rule(cls, txt):
        """Parse a rule from a string.

        See rezconfig.package_filter for an overview of valid strings.

        Args:
            txt (str): String to parse.

        Returns:
            `Rule` instance.
        """
        types = {"glob": GlobRule,
                 "regex": RegexRule,
                 "range": RangeRule,
                 "before": TimestampRule,
                 "after": TimestampRule}

        # parse form 'x(y)' into x, y
        label, txt = Rule._parse_label(txt)
        if label is None:
            # Unlabelled rules: a wildcard implies glob, otherwise a
            # version-range requirement.
            if '*' in txt:
                label = "glob"
            else:
                label = "range"
        elif label not in types:
            raise ConfigurationError(
                "'%s' is not a valid package filter type" % label)

        rule_cls = types[label]
        txt_ = "%s(%s)" % (label, txt)

        try:
            rule = rule_cls._parse(txt_)
        except Exception as e:
            raise ConfigurationError("Error parsing package filter '%s': %s: %s"
                                     % (txt_, e.__class__.__name__, str(e)))
        return rule

    @classmethod
    def _parse(cls, txt):
        """Create a rule from a string.

        Returns:
            `Rule` instance, or None if the string does not represent an
            instance of this rule type.
        """
        raise NotImplementedError

    @classmethod
    def _parse_label(cls, txt):
        # Split "label(body)" into (label, body); (None, txt) if unlabelled.
        m = cls.label_re.match(txt)
        if m:
            label, txt = m.groups()
            return label, txt
        else:
            return None, txt

    @classmethod
    def _extract_family(cls, txt):
        # Family prefix ends at the name/version separator; wildcards mean
        # the rule is not family-specific.
        m = cls.family_re.match(txt)
        if m:
            return m.group()[:-1]
        return None

    def __repr__(self):
        return str(self)

    family_re = re.compile("[^*?]+" + VersionedObject.sep_regex_str)
    label_re = re.compile("^([^(]+)\\(([^\\(\\)]+)\\)$")
class RegexRuleBase(Rule):
    """Shared behaviour for rules backed by a compiled regular expression."""

    def match(self, package):
        """True iff the package's qualified name matches `self.regex`."""
        return self.regex.match(package.qualified_name) is not None

    def cost(self):
        return 10

    @classmethod
    def _parse(cls, txt):
        # Strip the "label(...)" wrapper; the subclass compiles the body.
        _, body = Rule._parse_label(txt)
        return cls(body)

    def __str__(self):
        return "%s(%s)" % (self.name, self.txt)
class RegexRule(RegexRuleBase):
    """A rule that matches a package if its qualified name matches a regex string.

    For example, the package 'foo-1.beta' would match the regex rule '.*\\.beta$'.
    """
    name = "regex"

    def __init__(self, s):
        """Create a regex rule.

        Args:
            s (str): Regex pattern. Eg '.*\\.beta$'.
        """
        self.txt = s
        self.regex = re.compile(s)
        # restrict the rule to a single family when the pattern starts
        # with a literal family name
        self._family = self._extract_family(s)
class GlobRule(RegexRuleBase):
    """A rule that matches a package if its qualified name matches a glob string.

    For example, the package 'foo-1.2' would match the glob rule 'foo-*'.
    """
    name = "glob"

    def __init__(self, s):
        """Create a glob rule.

        Args:
            s (str): Glob pattern. Eg 'foo.*', '*.beta'.
        """
        self.txt = s
        # translate the glob into an equivalent regex so the shared
        # RegexRuleBase matching machinery can be reused
        self.regex = re.compile(fnmatch.translate(s))
        self._family = self._extract_family(s)
class RangeRule(Rule):
    """A rule that matches a package if that package does not conflict with a
    given requirement.

    For example, the package 'foo-1.2' would match the requirement rule 'foo<10'.
    """
    name = "range"

    def __init__(self, requirement):
        self._requirement = requirement
        # a requirement is inherently family-specific
        self._family = requirement.name

    def match(self, package):
        """True if the package does not conflict with the requirement."""
        obj = VersionedObject.construct(package.name, package.version)
        return not self._requirement.conflicts_with(obj)

    def cost(self):
        # requirement checks are cheap; fixed low cost
        return 10

    @classmethod
    def _parse(cls, txt):
        _, body = Rule._parse_label(txt)
        return cls(Requirement(body))

    def __str__(self):
        return "%s(%s)" % (self.name, str(self._requirement))
class TimestampRule(Rule):
    """A rule that matches a package if that package was released before the
    given timestamp.

    Note:
        The 'timestamp' argument used for resolves is ANDed with any package
        filters - providing a filter containing timestamp rules does not
        override the value of 'timestamp'.

    Note:
        Do NOT use a timestamp rule to mimic what the 'timestamp' resolve
        argument does. 'timestamp' is treated differently - the memcache
        caching system is aware of it, so timestamped resolves get cached.
        Non-timestamped resolves also get cached, but their cache entries are
        invalidated more often (when new packages are released).
        A legitimate use case remains: ignore all packages released after
        time X, except for some specific packages you want to let through.
        For that, create a package filter containing a timestamp rule with
        family=None, plus family-specific timestamp rules to override it.
    """
    name = "timestamp"

    def __init__(self, timestamp, family=None, reverse=False):
        """Create a timestamp rule.

        Args:
            timestamp (int): Epoch time.
            family (str): Package family to apply the rule to.
            reverse (bool): If True, reverse the logic so that packages
                released *after* the timestamp are matched.
        """
        self.timestamp = timestamp
        self.reverse = reverse
        self._family = family

    def match(self, package):
        """True if the package lies on the matching side of the timestamp."""
        released = package.timestamp
        if self.reverse:
            return released > self.timestamp
        return released <= self.timestamp

    def cost(self):
        # This is expensive because it causes a package load
        return 1000

    @classmethod
    def after(cls, timestamp, family=None):
        """Convenience constructor for an 'after' (reverse) rule."""
        return cls(timestamp, family=family, reverse=True)

    @classmethod
    def before(cls, timestamp, family=None):
        """Convenience constructor for a 'before' rule."""
        return cls(timestamp, family=family)

    @classmethod
    def _parse(cls, txt):
        label, body = Rule._parse_label(txt)
        # optional 'family:timestamp' form restricts the rule to a family
        if ':' in body:
            family, body = body.split(':', 1)
        else:
            family = None
        return cls(int(body), family=family, reverse=(label == "after"))

    def __str__(self):
        label = "after" if self.reverse else "before"
        parts = [x for x in (self._family, str(self.timestamp)) if x]
        return "%s(%s)" % (label, ':'.join(parts))
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| lgpl-3.0 |
BeyondTheClouds/nova | nova/api/openstack/compute/legacy_v2/contrib/flavor_disabled.py | 79 | 2223 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Flavor Disabled API extension."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
authorize = extensions.soft_extension_authorizer('compute', 'flavor_disabled')
class FlavorDisabledController(wsgi.Controller):
    """Controller extension adding the 'disabled' flag to flavor responses."""

    def _extend_flavors(self, req, flavors):
        # Annotate each flavor dict with '<alias>:disabled', taken from the
        # DB flavor cached on the request object.
        for flavor in flavors:
            db_flavor = req.get_db_flavor(flavor['id'])
            key = "%s:disabled" % Flavor_disabled.alias
            flavor[key] = db_flavor['disabled']

    def _show(self, req, resp_obj):
        # Soft authorization: when not authorized the extension is silently
        # skipped rather than failing the request.
        if not authorize(req.environ['nova.context']):
            return
        if 'flavor' in resp_obj.obj:
            self._extend_flavors(req, [resp_obj.obj['flavor']])

    @wsgi.extends
    def show(self, req, resp_obj, id):
        """Extend GET /flavors/{id} responses."""
        return self._show(req, resp_obj)

    @wsgi.extends(action='create')
    def create(self, req, resp_obj, body):
        """Extend flavor-create responses."""
        return self._show(req, resp_obj)

    @wsgi.extends
    def detail(self, req, resp_obj):
        """Extend GET /flavors/detail responses (all listed flavors)."""
        if not authorize(req.environ['nova.context']):
            return
        self._extend_flavors(req, list(resp_obj.obj['flavors']))
class Flavor_disabled(extensions.ExtensionDescriptor):
    """Support to show the disabled status of a flavor."""

    # Metadata consumed by the nova extension framework.
    name = "FlavorDisabled"
    alias = "OS-FLV-DISABLED"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "flavor_disabled/api/v1.1")
    updated = "2012-08-29T00:00:00Z"

    def get_controller_extensions(self):
        """Attach FlavorDisabledController to the 'flavors' resource."""
        controller = FlavorDisabledController()
        extension = extensions.ControllerExtension(self, 'flavors', controller)
        return [extension]
| apache-2.0 |
saitoha/termprop | __init__.py | 1 | 1329 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ***** BEGIN LICENSE BLOCK *****
# Copyright (C) 2012-2014, Hayaki Saito
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# ***** END LICENSE BLOCK *****
from termprop import Termprop, MockTermprop
if __name__ == "__main__":
Termprop().test()
| mit |
zzzirk/boto | tests/unit/dynamodb/test_types.py | 74 | 6285 | #!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from decimal import Decimal
from tests.compat import unittest
from boto.compat import six
from boto.dynamodb import types
from boto.dynamodb.exceptions import DynamoDBNumberError
class TestDynamizer(unittest.TestCase):
    """Round-trip tests for boto.dynamodb.types.Dynamizer and its variants."""

    def setUp(self):
        pass

    def test_encoding_to_dynamodb(self):
        # Python values must encode to DynamoDB's typed wire format.
        dynamizer = types.Dynamizer()
        self.assertEqual(dynamizer.encode('foo'), {'S': 'foo'})
        self.assertEqual(dynamizer.encode(54), {'N': '54'})
        self.assertEqual(dynamizer.encode(Decimal('1.1')), {'N': '1.1'})
        self.assertEqual(dynamizer.encode(set([1, 2, 3])),
                         {'NS': ['1', '2', '3']})
        # Set iteration order is not deterministic, so accept either ordering.
        self.assertIn(dynamizer.encode(set(['foo', 'bar'])),
                      ({'SS': ['foo', 'bar']}, {'SS': ['bar', 'foo']}))
        self.assertEqual(dynamizer.encode(types.Binary(b'\x01')),
                         {'B': 'AQ=='})
        self.assertEqual(dynamizer.encode(set([types.Binary(b'\x01')])),
                         {'BS': ['AQ==']})
        self.assertEqual(dynamizer.encode(['foo', 54, [1]]),
                         {'L': [{'S': 'foo'}, {'N': '54'}, {'L': [{'N': '1'}]}]})
        self.assertEqual(dynamizer.encode({'foo': 'bar', 'hoge': {'sub': 1}}),
                         {'M': {'foo': {'S': 'bar'}, 'hoge': {'M': {'sub': {'N': '1'}}}}})
        self.assertEqual(dynamizer.encode(None), {'NULL': True})
        self.assertEqual(dynamizer.encode(False), {'BOOL': False})

    def test_decoding_to_dynamodb(self):
        # Wire-format values must decode back to the equivalent Python values.
        dynamizer = types.Dynamizer()
        self.assertEqual(dynamizer.decode({'S': 'foo'}), 'foo')
        self.assertEqual(dynamizer.decode({'N': '54'}), 54)
        self.assertEqual(dynamizer.decode({'N': '1.1'}), Decimal('1.1'))
        self.assertEqual(dynamizer.decode({'NS': ['1', '2', '3']}),
                         set([1, 2, 3]))
        self.assertEqual(dynamizer.decode({'SS': ['foo', 'bar']}),
                         set(['foo', 'bar']))
        self.assertEqual(dynamizer.decode({'B': 'AQ=='}), types.Binary(b'\x01'))
        self.assertEqual(dynamizer.decode({'BS': ['AQ==']}),
                         set([types.Binary(b'\x01')]))
        self.assertEqual(dynamizer.decode({'L': [{'S': 'foo'}, {'N': '54'}, {'L': [{'N': '1'}]}]}),
                         ['foo', 54, [1]])
        self.assertEqual(dynamizer.decode({'M': {'foo': {'S': 'bar'}, 'hoge': {'M': {'sub': {'N': '1'}}}}}),
                         {'foo': 'bar', 'hoge': {'sub': 1}})
        self.assertEqual(dynamizer.decode({'NULL': True}), None)
        self.assertEqual(dynamizer.decode({'BOOL': False}), False)

    def test_float_conversion_errors(self):
        dynamizer = types.Dynamizer()
        # When supporting decimals, certain floats will work:
        self.assertEqual(dynamizer.encode(1.25), {'N': '1.25'})
        # And some will generate errors, which is why it's best
        # to just use Decimals directly:
        with self.assertRaises(DynamoDBNumberError):
            dynamizer.encode(1.1)

    def test_non_boolean_conversions(self):
        # NonBooleanDynamizer encodes booleans as numbers (legacy behaviour).
        dynamizer = types.NonBooleanDynamizer()
        self.assertEqual(dynamizer.encode(True), {'N': '1'})

    def test_lossy_float_conversions(self):
        dynamizer = types.LossyFloatDynamizer()
        # Just testing the differences here, specifically float conversions:
        self.assertEqual(dynamizer.encode(1.1), {'N': '1.1'})
        self.assertEqual(dynamizer.decode({'N': '1.1'}), 1.1)
        self.assertEqual(dynamizer.encode(set([1.1])),
                         {'NS': ['1.1']})
        self.assertEqual(dynamizer.decode({'NS': ['1.1', '2.2', '3.3']}),
                         set([1.1, 2.2, 3.3]))
class TestBinary(unittest.TestCase):
    """Behaviour of the types.Binary wrapper across Python 2 and Python 3."""

    def test_good_input(self):
        data = types.Binary(b'\x01')
        self.assertEqual(b'\x01', data)
        self.assertEqual(b'\x01', bytes(data))

    def test_non_ascii_good_input(self):
        # Binary data that is out of ASCII range
        data = types.Binary(b'\x88')
        self.assertEqual(b'\x88', data)
        self.assertEqual(b'\x88', bytes(data))

    @unittest.skipUnless(six.PY2, "Python 2 only")
    def test_bad_input(self):
        # On Python 2, non-string input is rejected.
        with self.assertRaises(TypeError):
            types.Binary(1)

    @unittest.skipUnless(six.PY3, "Python 3 only")
    def test_bytes_input(self):
        # On Python 3, an int is interpreted like bytes(1) -> b'\x00'.
        data = types.Binary(1)
        self.assertEqual(data, b'\x00')
        self.assertEqual(data.value, b'\x00')

    @unittest.skipUnless(six.PY2, "Python 2 only")
    def test_unicode_py2(self):
        # It's dirty. But remains for backward compatibility.
        data = types.Binary(u'\x01')
        self.assertEqual(data, b'\x01')
        self.assertEqual(bytes(data), b'\x01')
        # Delegate to built-in b'\x01' == u'\x01'
        # In Python 2.x these are considered equal
        self.assertEqual(data, u'\x01')
        # Check that the value field is of type bytes
        self.assertEqual(type(data.value), bytes)

    @unittest.skipUnless(six.PY3, "Python 3 only")
    def test_unicode_py3(self):
        # On Python 3, text input is rejected; only bytes are accepted.
        with self.assertRaises(TypeError):
            types.Binary(u'\x01')
if __name__ == '__main__':
unittest.main()
| mit |
LavyshAlexander/namebench | nb_third_party/dns/ttl.py | 248 | 2180 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS TTL conversion."""
import dns.exception
class BadTTL(dns.exception.SyntaxError):
    """Raised when a TTL string is malformed or out of the valid range."""
    pass
def from_text(text):
    """Convert the text form of a TTL to an integer.

    The BIND 8 units syntax for TTLs (e.g. '1w6d4h3m10s') is supported.

    @param text: the textual TTL
    @type text: string
    @raises dns.ttl.BadTTL: the TTL is not well-formed
    @rtype: int
    """
    # Fix: the original used the Python-2-only 'long' type and 'L' integer
    # literals, which are a SyntaxError on Python 3.  Plain ints auto-promote
    # to arbitrary precision on Python 2 as well, so using 'int' preserves
    # the original behaviour while making the function portable.
    if text.isdigit():
        total = int(text)
    else:
        if not text[0].isdigit():
            raise BadTTL
        total = 0
        current = 0
        for c in text:
            if c.isdigit():
                # accumulate a multi-digit count
                current *= 10
                current += int(c)
            else:
                c = c.lower()
                if c == 'w':
                    total += current * 604800
                elif c == 'd':
                    total += current * 86400
                elif c == 'h':
                    total += current * 3600
                elif c == 'm':
                    total += current * 60
                elif c == 's':
                    total += current
                else:
                    raise BadTTL("unknown unit '%s'" % c)
                # a unit letter terminates the current count
                current = 0
        if not current == 0:
            # trailing digits without a unit are invalid in units syntax
            raise BadTTL("trailing integer")
    # TTLs are 32-bit unsigned values capped at 2^31 - 1 (RFC 2181)
    if total < 0 or total > 2147483647:
        raise BadTTL("TTL should be between 0 and 2^31 - 1 (inclusive)")
    return total
| apache-2.0 |
barachka/odoo | addons/document/wizard/__init__.py | 444 | 1084 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import document_configuration
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kaltsimon/youtube-dl | youtube_dl/extractor/reverbnation.py | 151 | 1429 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import str_or_none
class ReverbNationIE(InfoExtractor):
    """Extractor for single songs hosted on reverbnation.com."""

    _VALID_URL = r'^https?://(?:www\.)?reverbnation\.com/.*?/song/(?P<id>\d+).*?$'
    _TESTS = [{
        'url': 'http://www.reverbnation.com/alkilados/song/16965047-mona-lisa',
        'md5': '3da12ebca28c67c111a7f8b262d3f7a7',
        'info_dict': {
            "id": "16965047",
            "ext": "mp3",
            "title": "MONA LISA",
            "uploader": "ALKILADOS",
            "uploader_id": "216429",
            "thumbnail": "re:^https://gp1\.wac\.edgecastcdn\.net/.*?\.jpg$"
        },
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        song_id = mobj.group('id')
        # Song metadata comes from ReverbNation's public JSON API.
        api_res = self._download_json(
            'https://api.reverbnation.com/song/%s' % song_id,
            song_id,
            note='Downloading information of song %s' % song_id
        )
        return {
            'id': song_id,
            'title': api_res.get('name'),
            'url': api_res.get('url'),
            'uploader': api_res.get('artist', {}).get('name'),
            'uploader_id': str_or_none(api_res.get('artist', {}).get('id')),
            # 'image' is preferred, falling back to 'thumbnail'; the API may
            # return protocol-relative URLs, hence _proto_relative_url.
            'thumbnail': self._proto_relative_url(
                api_res.get('image', api_res.get('thumbnail'))),
            'ext': 'mp3',
            'vcodec': 'none',
        }
| unlicense |
thiphariel/navitia | source/jormungandr/jormungandr/authentication.py | 9 | 6565 | # encoding: utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
import logging
import binascii
from flask_restful import reqparse, abort
import flask_restful
from flask import request, g
from functools import wraps
from jormungandr.exceptions import RegionNotFound
import datetime
import base64
from navitiacommon.models import User, Instance, db, Key
from jormungandr import cache, app as current_app
def authentication_required(func):
    """Decorator in charge of authenticating requests.

    Works for every API taking the region as a parameter.
    (Docstring translated from the original French.)
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        region = None
        if 'region' in kwargs:
            region = kwargs['region']
        # TODO: rethink how lon/lat are handled (translated from French)
        elif 'lon' in kwargs and 'lat' in kwargs:
            try:  # quick fix to avoid circular dependencies
                from jormungandr import i_manager
                region = i_manager.get_region(lon=kwargs['lon'],
                                              lat=kwargs['lat'])
            except RegionNotFound:
                pass
        elif current_app.config.get('DEFAULT_REGION'):  # if a default region is defined in config
            region = current_app.config.get('DEFAULT_REGION')  # we use it
        user = get_user(token=get_token())
        if not region:
            # we could not find any region, so we abort
            abort_request(user=user)
        # has_access aborts the request itself on failure (abort=True)
        if has_access(region, 'ALL', abort=True, user=user):
            return func(*args, **kwargs)
    return wrapper
def get_token():
    """
    Find the token in the "Authorization" HTTP header.

    Two cases are handled:
    - the token is the only value in the header
    - Basic Authentication is used and the token is in the username part.
      In this case the value of the header looks like this:
      "BASIC 54651a4ae4rae"
      The second part is the username and the password separated by a ":"
      and encoded in base64

    Returns:
        The token string, or None when no credentials were supplied.
    """
    auth = None
    if 'Authorization' in request.headers:
        auth = request.headers['Authorization']
    elif 'key' in request.args:
        auth = request.args['key']
    if not auth:
        return None
    args = auth.split(' ')
    if len(args) == 2:
        b64 = args[1]
        try:
            # Fix: base64.decodestring() was deprecated and removed in
            # Python 3.9, and required bytes input on Python 3 anyway.
            # b64decode() behaves identically for valid input on both
            # Python 2 and 3.
            decoded = base64.b64decode(b64)
            if isinstance(decoded, bytes):
                # Python 3: the decoded payload is bytes; the token is text
                decoded = decoded.decode('utf-8')
            # the username part (before ':') carries the token
            return decoded.split(':')[0]
        except (binascii.Error, UnicodeDecodeError):
            logging.getLogger(__name__).info('badly formated token %s', auth)
            flask_restful.abort(401, message="Unauthorized, invalid token", status=401)
        return None
    else:
        # single-value header: the whole value is the token
        return auth
@cache.memoize(current_app.config['CACHE_CONFIGURATION'].get('TIMEOUT_AUTHENTICATION', 300))
def has_access(region, api, abort, user):
    """
    Check the Authorization of the current user for this region and this API.
    If abort is True, the request is aborted with the appropriate HTTP code.
    Warning: this function is cached, therefore it should not be
    dependent on the request context, so keep it as a pure function.
    """
    if current_app.config.get('PUBLIC', False):
        # if jormungandr is in public mode we skip the authentication process
        return True
    if not user:
        # no user --> no need to continue, we can abort; a user is mandatory even for a free region
        abort_request(user=user)
    model_instance = Instance.get_by_name(region)
    if not model_instance:
        if abort:
            raise RegionNotFound(region)
        return False
    # free instances are open to any user allowed on free instances;
    # otherwise the user needs an explicit authorization for this instance/API
    if (model_instance.is_free and user.have_access_to_free_instances) or user.has_access(model_instance.id, api):
        return True
    else:
        if abort:
            abort_request(user=user)
        else:
            return False
@cache.memoize(current_app.config['CACHE_CONFIGURATION'].get('TIMEOUT_AUTHENTICATION', 300))
def cache_get_user(token):
    """
    Cached lookup of the User owning *token*.

    We allow this method to be cached even if it depends on the current time
    because we assume the cache time is small and the error can be tolerated.
    """
    return User.get_from_token(token, datetime.datetime.now())
@cache.memoize(current_app.config['CACHE_CONFIGURATION'].get('TIMEOUT_AUTHENTICATION', 300))
def cache_get_key(token):
    """Cached lookup of the Key record matching *token* (or None)."""
    return Key.get_by_token(token)
def get_user(token, abort_if_no_token=True):
    """
    Return the current authenticated User, or None.

    The resolved user is memoized on flask's `g` for the request's lifetime.
    """
    if hasattr(g, 'user'):
        return g.user
    else:
        if not token:
            # a token is mandatory for non-public jormungandr
            if not current_app.config.get('PUBLIC', False):
                if abort_if_no_token:
                    flask_restful.abort(401, message='no token')
                else:
                    # NOTE(review): g.user is not set on this path, so the
                    # lookup is repeated on the next call - confirm intended
                    return None
            else:  # for a public one we allow an unknown user
                g.user = User(login="unknown_user")
                g.user.id = 0
        else:
            g.user = cache_get_user(token)
        return g.user
def get_app_name(token):
    """Return the app_name registered for *token*, or None when the token
    is empty or unknown."""
    if not token:
        return None
    key = cache_get_key(token)
    if not key:
        return None
    return key.app_name
def abort_request(user=None):
    """Abort a request with the proper HTTP status for authentication issues.

    403 (forbidden) when a user is identified, 401 (unauthorized) otherwise.
    """
    status = 403 if user else 401
    flask_restful.abort(status)
| agpl-3.0 |
namhyung/uftrace | tests/t224_dynamic_lib.py | 2 | 1213 | #!/usr/bin/env python
from runtest import TestBase
class TestCase(TestBase):
def __init__(self):
TestBase.__init__(self, 'dynmain', """
# DURATION TID FUNCTION
[ 26661] | main() {
[ 26661] | lib_a() {
[ 26661] | lib_b() {
1.187 us [ 26661] | lib_c();
2.271 us [ 26661] | } /* lib_b */
2.647 us [ 26661] | } /* lib_a */
[ 26661] | lib_d() {
[ 26661] | lib_e() {
0.974 us [ 26661] | lib_f();
1.266 us [ 26661] | } /* lib_e */
1.438 us [ 26661] | } /* lib_d */
7.607 us [ 26661] | } /* main */
""")
def build(self, name, cflags='', ldflags=''):
if TestBase.build_notrace_lib(self, 'dyn1', 'libdyn1', cflags, ldflags) != 0:
return TestBase.TEST_BUILD_FAIL
if TestBase.build_notrace_lib(self, 'dyn2', 'libdyn2', cflags, ldflags) != 0:
return TestBase.TEST_BUILD_FAIL
return TestBase.build_libmain(self, name, 's-dynmain.c',
['libdyn1.so', 'libdyn2.so'],
cflags, ldflags, instrument=False)
def setup(self):
self.option = '-Pmain -P.@libdyn1.so -P.@libdyn2.so --no-libcall'
| gpl-2.0 |
goldmann/docker-scripts | docker_squash/image.py | 2 | 37525 |
import datetime
import docker
import hashlib
import json
import logging
import os
import re
import shutil
import six
import tarfile
import tempfile
import threading
from docker_squash.errors import SquashError, SquashUnnecessaryError
if not six.PY3:
import docker_squash.lib.xtarfile
class Chdir(object):
    """Context manager that temporarily switches the working directory.

    On entry the current directory is remembered and the process moves to
    the target path ('~' is expanded); on exit the remembered directory is
    restored, however the block terminated.
    """

    def __init__(self, newPath):
        # expand a leading '~' so user-relative paths work
        self.newPath = os.path.expanduser(newPath)

    def __enter__(self):
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, *exc_info):
        os.chdir(self.savedPath)
class Image(object):
"""
Base class for all Docker image formats. Contains many functions that are handy
while squashing the image.
This class should not be used directly.
"""
FORMAT = None
""" Image format version """
def __init__(self, log, docker, image, from_layer, tmp_dir=None, tag=None):
    """
    Args:
        log: Logger used to report progress.
        docker: Docker API client used to inspect/save/load images.
        image (str): ID or name of the image to squash.
        from_layer: Number of layers to squash, or the ID/name of the layer
            to squash from (resolved later in _before_squashing).
        tmp_dir (str): Optional path to use as the working directory.
        tag (str): Optional name[:tag] to give the squashed image.
    """
    self.log = log
    self.debug = self.log.isEnabledFor(logging.DEBUG)
    self.docker = docker
    self.image = image
    self.from_layer = from_layer
    self.tag = tag
    # filled in by _before_squashing / subclasses
    self.image_name = None
    self.image_tag = None
    self.squash_id = None
    # Workaround for https://play.golang.org/p/sCsWMXYxqy
    #
    # Golang doesn't add padding to microseconds when marshaling
    # microseconds in date into JSON. Python does.
    # We need to produce same output as Docker's to not generate
    # different metadata. That's why we need to strip all zeros at the
    # end of the date string...
    self.date = re.sub(
        r'0*Z$', 'Z', datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
    """ Date used in metadata, already formatted using the `%Y-%m-%dT%H:%M:%S.%fZ` format """
    self.tmp_dir = tmp_dir
    """ Main temporary directory to save all working files. This is the root directory for all other temporary files. """
def squash(self):
    """Run the full squash sequence: prepare, squash, report; return the
    format-specific squash result."""
    self._before_squashing()
    result = self._squash()
    self._after_squashing()
    return result
def _squash(self):
    # Format-specific squashing step; implemented by subclasses.
    pass

def cleanup(self):
    """ Cleanup the temporary directory """
    # ignore_errors: best-effort removal; leftovers in tmp are not fatal
    self.log.debug("Cleaning up %s temporary directory" % self.tmp_dir)
    shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _initialize_directories(self):
    """Create the temporary directory layout used while squashing."""
    # Prepare temporary directory where all the work will be executed
    # NOTE(review): the bare 'except' masks the original failure reason;
    # consider narrowing and chaining the exception.
    try:
        self.tmp_dir = self._prepare_tmp_directory(self.tmp_dir)
    except:
        raise SquashError("Preparing temporary directory failed")
    # Temporary location on the disk of the old, unpacked *image*
    self.old_image_dir = os.path.join(self.tmp_dir, "old")
    # Temporary location on the disk of the new, unpacked, squashed *image*
    self.new_image_dir = os.path.join(self.tmp_dir, "new")
    # Temporary location on the disk of the squashed *layer*
    self.squashed_dir = os.path.join(self.new_image_dir, "squashed")
    for d in self.old_image_dir, self.new_image_dir:
        os.makedirs(d)
def _squash_id(self, layer):
    """Resolve *layer* (ID or name) to the image ID to squash from.

    Returns None for the '<missing>' placeholder (resolved later);
    raises SquashError when the layer cannot be inspected or is not part
    of the image being squashed.
    """
    if layer == "<missing>":
        self.log.warn(
            "You try to squash from layer that does not have it's own ID, we'll try to find it later")
        return None
    try:
        squash_id = self.docker.inspect_image(layer)['Id']
    except:
        raise SquashError(
            "Could not get the layer ID to squash, please check provided 'layer' argument: %s" % layer)
    if squash_id not in self.old_image_layers:
        raise SquashError(
            "Couldn't find the provided layer (%s) in the %s image" % (layer, self.image))
    self.log.debug("Layer ID to squash from: %s" % squash_id)
    return squash_id
def _validate_number_of_layers(self, number_of_layers):
    """Ensure the requested number of layers to squash is usable.

    Raises:
        SquashError: when the count is not positive, or exceeds the number
            of layers the image actually contains.
    """
    available = len(self.old_image_layers)
    if number_of_layers <= 0:
        raise SquashError(
            "Number of layers to squash cannot be less or equal 0, provided: %s" % number_of_layers)
    if number_of_layers > available:
        raise SquashError(
            "Cannot squash %s layers, the %s image contains only %s layers" % (number_of_layers, self.image, available))
def _before_squashing(self):
    """Prepare everything needed for squashing: working directories, the
    resolved layer lists (to squash vs to move), and the unpacked old image.
    """
    self._initialize_directories()
    # Location of the tar archive with squashed layers
    self.squashed_tar = os.path.join(self.squashed_dir, "layer.tar")
    if self.tag:
        self.image_name, self.image_tag = self._parse_image_name(self.tag)
    # The image id or name of the image to be squashed
    # NOTE(review): docker client failures are unlikely to be SquashError;
    # confirm the intended exception type here.
    try:
        self.old_image_id = self.docker.inspect_image(self.image)['Id']
    except SquashError:
        raise SquashError(
            "Could not get the image ID to squash, please check provided 'image' argument: %s" % self.image)
    self.old_image_layers = []
    # Read all layers in the image
    self._read_layers(self.old_image_layers, self.old_image_id)
    # _read_layers returns newest-first; reverse to oldest-first order
    self.old_image_layers.reverse()
    self.log.info("Old image has %s layers", len(self.old_image_layers))
    self.log.debug("Old layers: %s", self.old_image_layers)
    # By default - squash all layers.
    if self.from_layer == None:
        self.from_layer = len(self.old_image_layers)
    # from_layer is either a layer count (int-convertible) or a layer ID/name
    try:
        number_of_layers = int(self.from_layer)
        self.log.debug(
            "We detected number of layers as the argument to squash")
    except ValueError:
        self.log.debug("We detected layer as the argument to squash")
        squash_id = self._squash_id(self.from_layer)
        if not squash_id:
            raise SquashError(
                "The %s layer could not be found in the %s image" % (self.from_layer, self.image))
        # everything *above* the squash layer gets squashed
        number_of_layers = len(self.old_image_layers) - \
            self.old_image_layers.index(squash_id) - 1
    self._validate_number_of_layers(number_of_layers)
    # split the layer list: tail is squashed, head is moved untouched
    marker = len(self.old_image_layers) - number_of_layers
    self.layers_to_squash = self.old_image_layers[marker:]
    self.layers_to_move = self.old_image_layers[:marker]
    self.log.info("Checking if squashing is necessary...")
    if len(self.layers_to_squash) < 1:
        raise SquashError(
            "Invalid number of layers to squash: %s" % len(self.layers_to_squash))
    if len(self.layers_to_squash) == 1:
        raise SquashUnnecessaryError(
            "Single layer marked to squash, no squashing is required")
    self.log.info("Attempting to squash last %s layers...",
                  number_of_layers)
    self.log.debug("Layers to squash: %s", self.layers_to_squash)
    self.log.debug("Layers to move: %s", self.layers_to_move)
    # Fetch the image and unpack it on the fly to the old image directory
    self._save_image(self.old_image_id, self.old_image_dir)
    # record the on-disk size for the before/after report
    self.size_before = self._dir_size(self.old_image_dir)
    self.log.info("Squashing image '%s'..." % self.image)
def _after_squashing(self):
    """Remove the unpacked old image and report the size difference."""
    self.log.debug("Removing from disk already squashed layers...")
    shutil.rmtree(self.old_image_dir, ignore_errors=True)
    self.size_after = self._dir_size(self.new_image_dir)
    size_before_mb = float(self.size_before)/1024/1024
    size_after_mb = float(self.size_after)/1024/1024
    self.log.info("Original image size: %.2f MB" % size_before_mb)
    self.log.info("Squashed image size: %.2f MB" % size_after_mb)
    if (size_after_mb >= size_before_mb):
        self.log.info("If the squashed image is larger than original it means that there were no meaningful files to squash and it just added metadata. Are you sure you specified correct parameters?")
    else:
        self.log.info("Image size decreased by %.2f %%" % float(
            ((size_before_mb-size_after_mb)/size_before_mb)*100))
def _dir_size(self, directory):
size = 0
for path, dirs, files in os.walk(directory):
for f in files:
size += os.path.getsize(os.path.join(path, f))
return size
def layer_paths(self):
    """
    Returns name of directories to layers in the exported tar archive.
    """
    # Format-specific (v1 vs v2 image layout); implemented by subclasses.
    pass

def export_tar_archive(self, target_tar_file):
    """Write the squashed image as a tar archive to *target_tar_file*."""
    self._tar_image(target_tar_file, self.new_image_dir)
    self.log.info("Image available at '%s'" % target_tar_file)

def load_squashed_image(self):
    """Load the squashed image into the Docker daemon and report the tag."""
    self._load_image(self.new_image_dir)
    if self.tag:
        self.log.info("Image registered in Docker daemon as %s:%s" %
                      (self.image_name, self.image_tag))
def _files_in_layers(self, layers, directory):
    """
    Prepare a list of files in all layers.

    Returns a dict mapping each layer ID to the (normalized) file names
    found in that layer's 'layer.tar' under *directory*.
    """
    files = {}
    for layer in layers:
        self.log.debug("Generating list of files in layer '%s'..." % layer)
        tar_file = os.path.join(directory, layer, "layer.tar")
        with tarfile.open(tar_file, 'r', format=tarfile.PAX_FORMAT) as tar:
            files[layer] = [self._normalize_path(
                x) for x in tar.getnames()]
        self.log.debug("Done, found %s files" % len(files[layer]))
    return files
def _prepare_tmp_directory(self, tmp_dir):
""" Creates temporary directory that is used to work on layers """
if tmp_dir:
if os.path.exists(tmp_dir):
raise SquashError(
"The '%s' directory already exists, please remove it before you proceed" % tmp_dir)
os.makedirs(tmp_dir)
else:
tmp_dir = tempfile.mkdtemp(prefix="docker-squash-")
self.log.debug("Using %s as the temporary directory" % tmp_dir)
return tmp_dir
    def _load_image(self, directory):
        """Tar up *directory* and stream the resulting archive into the Docker daemon."""
        tar_file = os.path.join(self.tmp_dir, "image.tar")
        self._tar_image(tar_file, directory)
        with open(tar_file, 'rb') as f:
            self.log.debug("Loading squashed image...")
            self.docker.load_image(f)
            self.log.debug("Image loaded!")
        # The intermediate archive is only needed for the load call
        os.remove(tar_file)
def _tar_image(self, target_tar_file, directory):
with tarfile.open(target_tar_file, 'w', format=tarfile.PAX_FORMAT) as tar:
self.log.debug("Generating tar archive for the squashed image...")
with Chdir(directory):
# docker produces images like this:
# repositories
# <layer>/json
# and not:
# ./
# ./repositories
# ./<layer>/json
for f in os.listdir("."):
tar.add(f)
self.log.debug("Archive generated")
def _layers_to_squash(self, layers, from_layer):
""" Prepares a list of layer IDs that should be squashed """
to_squash = []
to_leave = []
should_squash = True
for l in reversed(layers):
if l == from_layer:
should_squash = False
if should_squash:
to_squash.append(l)
else:
to_leave.append(l)
to_squash.reverse()
to_leave.reverse()
return to_squash, to_leave
    def _extract_tar(self, fileobj, directory):
        """Stream-extract a tar archive read from *fileobj* into *directory*.

        Mode 'r|' reads the archive as a non-seekable stream, which allows
        extracting directly from a pipe.

        NOTE(review): extractall() on an untrusted archive can write outside
        *directory* via '..' members -- presumably the input here is
        daemon-produced and trusted; confirm.
        """
        with tarfile.open(fileobj=fileobj, mode='r|') as tar:
            tar.extractall(path=directory)
    def _save_image(self, image_id, directory):
        """ Saves the image as a tar archive under specified name

        Retries up to three times; if every attempt fails, raises
        SquashError. Returns True on success.
        """
        for x in [0, 1, 2]:
            self.log.info("Saving image %s to %s directory..." %
                          (image_id, directory))
            self.log.debug("Try #%s..." % (x + 1))
            try:
                image = self.docker.get_image(image_id)
                if docker.version_info[0] < 3:
                    # Docker library prior to 3.0.0 returned the requests
                    # object directly which could be used to read from
                    self.log.debug(
                        "Extracting image using HTTPResponse object directly")
                    self._extract_tar(image, directory)
                else:
                    # Docker library >=3.0.0 returns iterator over raw data;
                    # pump the chunks through a pipe into a background
                    # extractor thread so extraction happens while we stream
                    self.log.debug(
                        "Extracting image using iterator over raw data")
                    fd_r, fd_w = os.pipe()
                    r = os.fdopen(fd_r, 'rb')
                    w = os.fdopen(fd_w, 'wb')
                    extracter = threading.Thread(
                        target=self._extract_tar, args=(r, directory))
                    extracter.start()
                    for chunk in image:
                        w.write(chunk)
                        w.flush()
                    # Closing the write end signals EOF to the extractor
                    w.close()
                    extracter.join()
                    r.close()
                self.log.info("Image saved!")
                return True
            except Exception as e:
                # Log and retry; the loop falls through to SquashError below
                # after the final attempt
                self.log.exception(e)
                self.log.warn(
                    "An error occured while saving the %s image, retrying..." % image_id)
        raise SquashError("Couldn't save %s image!" % image_id)
    def _unpack(self, tar_file, directory):
        """ Unpacks tar archive to selected directory """
        self.log.info("Unpacking %s tar file to %s directory" %
                      (tar_file, directory))
        with tarfile.open(tar_file, 'r') as tar:
            tar.extractall(path=directory)
        self.log.info("Archive unpacked!")
    def _read_layers(self, layers, image_id):
        """ Reads the JSON metadata for specified layer / image id

        Appends the id of every history entry of *image_id* to the
        caller-supplied *layers* list (mutated in place).
        """
        for layer in self.docker.history(image_id):
            layers.append(layer['Id'])
def _parse_image_name(self, image):
"""
Parses the provided image name and splits it in the
name and tag part, if possible. If no tag is provided
'latest' is used.
"""
if ':' in image and '/' not in image.split(':')[-1]:
image_tag = image.split(':')[-1]
image_name = image[:-(len(image_tag) + 1)]
else:
image_tag = "latest"
image_name = image
return (image_name, image_tag)
def _dump_json(self, data, new_line=False):
"""
Helper function to marshal object into JSON string.
Additionally a sha256sum of the created JSON string is generated.
"""
# We do not want any spaces between keys and values in JSON
json_data = json.dumps(data, separators=(',', ':'))
if new_line:
json_data = "%s\n" % json_data
# Generate sha256sum of the JSON data, may be handy
sha = hashlib.sha256(json_data.encode('utf-8')).hexdigest()
return json_data, sha
def _generate_repositories_json(self, repositories_file, image_id, name, tag):
if not image_id:
raise SquashError("Provided image id cannot be null")
if name == tag == None:
self.log.debug(
"No name and tag provided for the image, skipping generating repositories file")
return
repos = {}
repos[name] = {}
repos[name][tag] = image_id
data = json.dumps(repos, separators=(',', ':'))
with open(repositories_file, 'w') as f:
f.write(data)
f.write("\n")
def _write_version_file(self, squashed_dir):
version_file = os.path.join(squashed_dir, "VERSION")
with open(version_file, 'w') as f:
f.write("1.0")
    def _write_json_metadata(self, metadata, metadata_file):
        """Write the already-serialized JSON string *metadata* to *metadata_file*."""
        with open(metadata_file, 'w') as f:
            f.write(metadata)
    def _read_old_metadata(self, old_json_file):
        """Parse and return the original image's JSON metadata from *old_json_file*."""
        self.log.debug("Reading JSON metadata file '%s'..." % old_json_file)

        # Read original metadata
        with open(old_json_file, 'r') as f:
            metadata = json.load(f)

        return metadata
    def _move_layers(self, layers, src, dest):
        """
        This moves all the layers that should be copied as-is.
        In other words - all layers that are not meant to be squashed will be
        moved from the old image to the new image untouched.
        """
        for layer in layers:
            # Layer ids may carry a 'sha256:' digest prefix that is not part
            # of the on-disk directory name
            layer_id = layer.replace('sha256:', '')

            self.log.debug("Moving unmodified layer '%s'..." % layer_id)
            shutil.move(os.path.join(src, layer_id), dest)
def _file_should_be_skipped(self, file_name, file_paths):
# file_paths is now array of array with files to be skipped.
# First level are layers, second are files in these layers.
layer_nb = 1
for layers in file_paths:
for file_path in layers:
if file_name == file_path or file_name.startswith(file_path + "/"):
return layer_nb
layer_nb += 1
return 0
def _marker_files(self, tar, members):
"""
Searches for marker files in the specified archive.
Docker marker files are files taht have the .wh. prefix in the name.
These files mark the corresponding file to be removed (hidden) when
we start a container from the image.
"""
marker_files = {}
self.log.debug(
"Searching for marker files in '%s' archive..." % tar.name)
for member in members:
if '.wh.' in member.name:
self.log.debug("Found '%s' marker file" % member.name)
marker_files[member] = tar.extractfile(member)
self.log.debug("Done, found %s files" % len(marker_files))
return marker_files
    def _add_markers(self, markers, tar, files_in_layers, added_symlinks):
        """
        This method is responsible for adding back all markers that were not
        added to the squashed layer AND files they refer to can be found in layers
        we do not squash.

        Args:
            markers: dict of marker TarInfo -> extracted file object.
            tar: the (open, writable) squashed tar archive.
            files_in_layers: dict of layer id -> file names in the moved layers.
            added_symlinks: list of symlink paths added to the squashed layer;
                markers under those paths are dropped.
        """
        if markers:
            self.log.debug("Marker files to add: %s" %
                           [o.name for o in markers.keys()])
        else:
            # No marker files to add
            return
        # https://github.com/goldmann/docker-squash/issues/108
        # Some tar archives do have the filenames prefixed with './'
        # which does not have any effect when we unpack the tar archive,
        # but when processing tar content - we see this.
        tar_files = [self._normalize_path(x) for x in tar.getnames()]
        for marker, marker_file in six.iteritems(markers):
            # The path the marker hides: strip the '.wh.' prefix
            actual_file = marker.name.replace('.wh.', '')
            normalized_file = self._normalize_path(actual_file)
            should_be_added_back = False
            if self._file_should_be_skipped(normalized_file, added_symlinks):
                self.log.debug(
                    "Skipping '%s' marker file, this file is on a symlink path" % normalized_file)
                continue
            if normalized_file in tar_files:
                self.log.debug(
                    "Skipping '%s' marker file, this file was added earlier for some reason..." % normalized_file)
                continue
            if files_in_layers:
                # Only re-add the marker when the file it hides still exists
                # in a layer we are NOT squashing
                for files in files_in_layers.values():
                    if normalized_file in files:
                        should_be_added_back = True
                        break
            else:
                # There are no previous layers, so we need to add it back
                # In fact this shouldn't happen since having a marker file
                # where there is no previous layer does not make sense.
                should_be_added_back = True
            if should_be_added_back:
                self.log.debug(
                    "Adding '%s' marker file back..." % marker.name)
                # Marker files on AUFS are hardlinks, we need to create
                # regular files, therefore we need to recreate the tarinfo
                # object
                tar.addfile(tarfile.TarInfo(name=marker.name), marker_file)
                # Add the file name to the list too to avoid re-reading all files
                # in tar archive
                tar_files.append(normalized_file)
            else:
                self.log.debug(
                    "Skipping '%s' marker file..." % marker.name)
    def _normalize_path(self, path):
        """Normalize *path* to an absolute form rooted at '/' (e.g. './a/b' -> '/a/b')."""
        return os.path.normpath(os.path.join("/", path))
    def _add_hardlinks(self, squashed_tar, squashed_files, to_skip, skipped_hard_links):
        """Add back hard links that were deferred during squashing.

        Args:
            squashed_tar: the (open, writable) squashed tar archive.
            squashed_files: list of normalized paths already in the archive
                (mutated in place as links are added).
            to_skip: per-layer lists of paths marked to be skipped.
            skipped_hard_links: per-layer dicts of normalized name -> TarInfo
                for hard links deferred by _squash_layers.
        """
        for layer, hardlinks_in_layer in enumerate(skipped_hard_links):
            # We need to start from 1, that's why we bump it here
            current_layer = layer + 1
            for member in six.itervalues(hardlinks_in_layer):
                normalized_name = self._normalize_path(member.name)
                normalized_linkname = self._normalize_path(member.linkname)

                # Find out if the name is on the list of files to skip - if it is - get the layer number
                # where it was found
                layer_skip_name = self._file_should_be_skipped(
                    normalized_name, to_skip)

                # Do the same for linkname
                layer_skip_linkname = self._file_should_be_skipped(
                    normalized_linkname, to_skip)

                # We need to check if we should skip adding back the hard link
                # This can happen in the following situations:
                # 1. hard link is on the list of files to skip
                # 2. hard link target is on the list of files to skip
                # 3. hard link is already in squashed files
                # 4. hard link target is NOT in already squashed files
                #
                # NOTE: by operator precedence this condition groups as
                # (skip_name and newer) or (skip_linkname and newer)
                # or (already squashed) or (target not squashed)
                if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname or normalized_name in squashed_files or normalized_linkname not in squashed_files:
                    self.log.debug("Found a hard link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
                        normalized_name, normalized_linkname))
                else:
                    if self.debug:
                        self.log.debug("Adding hard link '%s' pointing to '%s' back..." % (
                            normalized_name, normalized_linkname))
                    squashed_files.append(normalized_name)
                    squashed_tar.addfile(member)
def _add_file(self, member, content, squashed_tar, squashed_files, to_skip):
normalized_name = self._normalize_path(member.name)
if normalized_name in squashed_files:
self.log.debug(
"Skipping file '%s' because it is already squashed" % normalized_name)
return
if self._file_should_be_skipped(normalized_name, to_skip):
self.log.debug(
"Skipping '%s' file because it's on the list to skip files" % normalized_name)
return
if content:
squashed_tar.addfile(member, content)
else:
# Special case: other(?) files, we skip the file
# itself
squashed_tar.addfile(member)
# We added a file to the squashed tar, so let's note it
squashed_files.append(normalized_name)
    def _add_symlinks(self, squashed_tar, squashed_files, to_skip, skipped_sym_links):
        """Add back symlinks that were deferred during squashing.

        Returns the list of symlink paths that were added (each wrapped in
        its own single-element list, matching the shape expected by
        _file_should_be_skipped).
        """
        added_symlinks = []

        for layer, symlinks_in_layer in enumerate(skipped_sym_links):
            # We need to start from 1, that's why we bump it here
            current_layer = layer + 1
            for member in six.itervalues(symlinks_in_layer):
                # Handling symlinks. This is similar to hard links with one
                # difference. Sometimes we do want to have broken symlinks
                # added anyway, because these can point to locations
                # that will become available after adding volumes for example.
                normalized_name = self._normalize_path(member.name)
                normalized_linkname = self._normalize_path(member.linkname)

                # File is already in squashed files, skipping
                if normalized_name in squashed_files:
                    self.log.debug(
                        "Found a symbolic link '%s' which is already squashed, skipping" % (normalized_name))
                    continue
                if self._file_should_be_skipped(normalized_name, added_symlinks):
                    self.log.debug(
                        "Found a symbolic link '%s' which is on a path to previously squashed symlink, skipping" % (normalized_name))
                    continue

                # Find out if the name is on the list of files to skip - if it is - get the layer number
                # where it was found
                layer_skip_name = self._file_should_be_skipped(
                    normalized_name, to_skip)

                # Do the same for linkname
                layer_skip_linkname = self._file_should_be_skipped(
                    normalized_linkname, to_skip)

                # If name or linkname was found in the lists of files to be
                # skipped or it's not found in the squashed files
                if layer_skip_name and current_layer > layer_skip_name or layer_skip_linkname and current_layer > layer_skip_linkname:
                    self.log.debug("Found a symbolic link '%s' to a file which is marked to be skipped: '%s', skipping link too" % (
                        normalized_name, normalized_linkname))
                else:
                    if self.debug:
                        self.log.debug("Adding symbolic link '%s' pointing to '%s' back..." % (
                            normalized_name, normalized_linkname))

                    added_symlinks.append([normalized_name])
                    squashed_files.append(normalized_name)
                    squashed_tar.addfile(member)

        return added_symlinks
    def _squash_layers(self, layers_to_squash, layers_to_move):
        """Squash all layers in *layers_to_squash* into a single tar archive.

        Writes the combined layer content to self.squashed_tar. Iterates the
        layers newest-first, so the first occurrence of a path wins; whiteout
        ('.wh.') markers, opaque-directory markers, symlinks and hard links
        are collected per layer and resolved at the end.

        Args:
            layers_to_squash: ids of layers to merge (mutated: reversed in place).
            layers_to_move: ids of layers kept as-is (used to decide which
                markers must be re-added).
        """
        self.log.info("Starting squashing...")
        # Reverse the layers to squash - we begin with the newest one
        # to make the tar lighter
        layers_to_squash.reverse()
        # Find all files in layers that we don't squash
        files_in_layers_to_move = self._files_in_layers(
            layers_to_move, self.old_image_dir)
        with tarfile.open(self.squashed_tar, 'w', format=tarfile.PAX_FORMAT) as squashed_tar:
            to_skip = []
            skipped_markers = {}
            skipped_hard_links = []
            skipped_sym_links = []
            skipped_files = []
            # List of filenames in the squashed archive
            squashed_files = []
            # List of opaque directories in the image
            opaque_dirs = []
            for layer_id in layers_to_squash:
                layer_tar_file = os.path.join(
                    self.old_image_dir, layer_id, "layer.tar")
                self.log.info("Squashing file '%s'..." % layer_tar_file)
                # Open the existing layer to squash
                with tarfile.open(layer_tar_file, 'r', format=tarfile.PAX_FORMAT) as layer_tar:
                    # Find all marker files for all layers
                    # We need the list of marker files upfront, so we can
                    # skip unnecessary files
                    members = layer_tar.getmembers()
                    markers = self._marker_files(layer_tar, members)
                    skipped_sym_link_files = {}
                    skipped_hard_link_files = {}
                    skipped_files_in_layer = {}
                    files_to_skip = []
                    # List of opaque directories found in this layer
                    layer_opaque_dirs = []
                    # Add it as early as possible, we will be populating
                    # 'skipped_sym_link_files' array later
                    skipped_sym_links.append(skipped_sym_link_files)
                    # Add it as early as possible, we will be populating
                    # 'files_to_skip' array later
                    to_skip.append(files_to_skip)
                    # Iterate over marker files found for this particular
                    # layer and if a file in the squashed layers file corresponding
                    # to the marker file is found, then skip both files
                    for marker, marker_file in six.iteritems(markers):
                        # We have an opaque directory marker file
                        # https://github.com/opencontainers/image-spec/blob/master/layer.md#opaque-whiteout
                        if marker.name.endswith('.wh..wh..opq'):
                            opaque_dir = os.path.dirname(marker.name)
                            self.log.debug(
                                "Found opaque directory: '%s'" % opaque_dir)
                            layer_opaque_dirs.append(opaque_dir)
                        else:
                            files_to_skip.append(
                                self._normalize_path(marker.name.replace('.wh.', '')))
                            skipped_markers[marker] = marker_file
                    # Copy all the files to the new tar
                    for member in members:
                        normalized_name = self._normalize_path(member.name)
                        # 'opaque_dirs' holds dirs from NEWER layers only --
                        # this layer's own opaque dirs are merged in below
                        if self._is_in_opaque_dir(member, opaque_dirs):
                            self.log.debug(
                                "Skipping file '%s' because it is in an opaque directory" % normalized_name)
                            continue
                        # Skip all symlinks, we'll investigate them later
                        if member.issym():
                            skipped_sym_link_files[normalized_name] = member
                            continue
                        if member in six.iterkeys(skipped_markers):
                            self.log.debug(
                                "Skipping '%s' marker file, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
                            continue
                        if self._file_should_be_skipped(normalized_name, skipped_sym_links):
                            self.log.debug(
                                "Skipping '%s' file because it's on a symlink path, at the end of squashing we'll see if it's necessary to add it back" % normalized_name)
                            # Keep the member (and content for regular files)
                            # so it can be re-added after symlink resolution
                            if member.isfile():
                                f = (member, layer_tar.extractfile(member))
                            else:
                                f = (member, None)
                            skipped_files_in_layer[normalized_name] = f
                            continue
                        # Skip files that are marked to be skipped
                        if self._file_should_be_skipped(normalized_name, to_skip):
                            self.log.debug(
                                "Skipping '%s' file because it's on the list to skip files" % normalized_name)
                            continue
                        # Check if file is already added to the archive
                        if normalized_name in squashed_files:
                            # File already exist in the squashed archive, skip it because
                            # file want to add is older than the one already in the archive.
                            # This is true because we do reverse squashing - from
                            # newer to older layer
                            self.log.debug(
                                "Skipping '%s' file because it's older than file already added to the archive" % normalized_name)
                            continue
                        # Hard links are processed after everything else
                        if member.islnk():
                            skipped_hard_link_files[normalized_name] = member
                            continue
                        content = None
                        if member.isfile():
                            content = layer_tar.extractfile(member)
                        self._add_file(member, content,
                                       squashed_tar, squashed_files, to_skip)
                    skipped_hard_links.append(skipped_hard_link_files)
                    skipped_files.append(skipped_files_in_layer)
                    opaque_dirs += layer_opaque_dirs
            self._add_hardlinks(squashed_tar, squashed_files,
                                to_skip, skipped_hard_links)
            added_symlinks = self._add_symlinks(
                squashed_tar, squashed_files, to_skip, skipped_sym_links)
            for layer in skipped_files:
                for member, content in six.itervalues(layer):
                    self._add_file(member, content, squashed_tar,
                                   squashed_files, added_symlinks)
            if files_in_layers_to_move:
                self._reduce(skipped_markers)
                self._add_markers(skipped_markers, squashed_tar,
                                  files_in_layers_to_move, added_symlinks)
        self.log.info("Squashing finished!")
def _is_in_opaque_dir(self, member, dirs):
"""
If the member we investigate is an opaque directory
or if the member is located inside of the opaque directory,
we copy these files as-is. Any other layer that has content
on the opaque directory will be ignored!
"""
for opaque_dir in dirs:
if member.name == opaque_dir or member.name.startswith("%s/" % opaque_dir):
self.log.debug("Member '%s' found to be part of opaque directory '%s'" % (
member.name, opaque_dir))
return True
return False
    def _reduce(self, markers):
        """
        This function is responsible for reducing marker files
        that are scheduled to be added at the end of squashing to
        minimum.

        In some cases, one marker file will overlap
        with others making others not necessary.

        This is not only about adding less marker files, but
        if we try to add a marker file for a file or directory
        deeper in the hierarchy of already marked directory,
        the image will not be successfully loaded back into Docker
        daemon.

        Passed dictionary containing markers is altered *in-place*.

        Args:
            markers (dict): Dictionary of markers scheduled to be added.
        """
        self.log.debug("Reducing marker files to be added back...")
        # Prepare a list of files (or directories) based on the marker
        # files scheduled to be added
        marked_files = list(map(lambda x: self._normalize_path(
            x.name.replace('.wh.', '')), markers.keys()))
        # List of markers that should be not added back to tar file
        to_remove = []
        for marker in markers.keys():
            self.log.debug(
                "Investigating '{}' marker file".format(marker.name))
            path = self._normalize_path(marker.name.replace('.wh.', ''))
            # Iterate over the path hierarchy, but starting with the
            # root directory. This will make it possible to remove
            # marker files based on the highest possible directory level
            for directory in reversed(self._path_hierarchy(path)):
                if directory in marked_files:
                    self.log.debug(
                        "Marker file '{}' is superseded by higher-level marker file: '{}'".format(marker.name, directory))
                    to_remove.append(marker)
                    break
        self.log.debug("Removing {} marker files".format(len(to_remove)))
        # Removal is deferred until after iteration: mutating 'markers'
        # while iterating its keys would be unsafe
        if to_remove:
            for marker in to_remove:
                self.log.debug("Removing '{}' marker file".format(marker.name))
                markers.pop(marker)
        self.log.debug("Marker files reduced")
def _path_hierarchy(self, path):
"""
Creates a full hierarchy of directories for a given path.
For a particular path, a list will be returned
containing paths from the path specified, through all levels
up to the root directory.
Example:
Path '/opt/testing/some/dir/structure/file'
will return:
['/opt/testing/some/dir/structure', '/opt/testing/some/dir', '/opt/testing/some', '/opt/testing', '/opt', '/']
"""
if not path:
raise SquashError("No path provided to create the hierarchy for")
hierarchy = []
dirname = os.path.dirname(path)
hierarchy.append(dirname)
# If we are already at root level, stop
if dirname != '/':
hierarchy.extend(self._path_hierarchy(dirname))
return hierarchy
# license: mit
# RasPlex/plex-home-theatre/plex/Third-Party/gtest/test/gtest_filter_unittest.py
#!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.

# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' nor 'False'.
# (NOTE: this file is Python 2 era -- the probe uses a 'print' statement.)
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)

# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)

# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)

# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'

# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'

# The command line flag for including disabled tests.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'

# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')

# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')

# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')

# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')

# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'

# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
    [COMMAND, LIST_TESTS_FLAG]).output

# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
    'SeqP/ParamTest.TestX/0',
    'SeqP/ParamTest.TestX/1',
    'SeqP/ParamTest.TestY/0',
    'SeqP/ParamTest.TestY/1',
    'SeqQ/ParamTest.TestX/0',
    'SeqQ/ParamTest.TestX/1',
    'SeqQ/ParamTest.TestY/0',
    'SeqQ/ParamTest.TestY/1',
    ]

DISABLED_TESTS = [
    'BarTest.DISABLED_TestFour',
    'BarTest.DISABLED_TestFive',
    'BazTest.DISABLED_TestC',
    'DISABLED_FoobarTest.Test1',
    'DISABLED_FoobarTest.DISABLED_Test2',
    'DISABLED_FoobarbazTest.TestA',
    ]

if SUPPORTS_DEATH_TESTS:
  DEATH_TESTS = [
    'HasDeathTest.Test1',
    'HasDeathTest.Test2',
    ]
else:
  DEATH_TESTS = []

# All the non-disabled tests.
ACTIVE_TESTS = [
    'FooTest.Abc',
    'FooTest.Xyz',

    'BarTest.TestOne',
    'BarTest.TestTwo',
    'BarTest.TestThree',

    'BazTest.TestOne',
    'BazTest.TestA',
    'BazTest.TestB',
    ] + DEATH_TESTS + PARAM_TESTS

# Set lazily in setUp() once the binary has been probed.
param_tests_present = None

# Utilities.

# Module-level snapshot of the environment; mutated by SetEnvVar and
# friends instead of os.environ so the process's own env stays clean.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""

  if value is None:
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def RunAndReturnOutput(args = None):
  """Runs the test program with *args* (list of flags) and returns its output."""

  return gtest_test_utils.Subprocess([COMMAND] + (args or []),
                                     env=environ).output
def RunAndExtractTestList(args = None):
  """Runs the test program and returns its exit code and a list of tests run."""

  p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)

  tests_run = []
  test_case = ''
  for line in p.output.split('\n'):
    # A test-case banner updates the current test case; a [ RUN ] line
    # records one executed test under that case.
    case_match = TEST_CASE_REGEX.match(line)
    if case_match is not None:
      test_case = case_match.group(1)
      continue

    test_match = TEST_REGEX.match(line)
    if test_match is not None:
      tests_run.append(test_case + '.' + test_match.group(1))

  return (tests_run, p.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
  """Runs the given function and arguments in a modified environment.

  The module-level 'environ' dict is updated with *extra_env* for the
  duration of the call and restored afterwards.
  """

  # Snapshot the environment BEFORE entering the try block: if the copy
  # were inside and failed, the finally clause would raise UnboundLocalError
  # on original_env, masking the real exception.
  original_env = environ.copy()
  try:
    environ.update(extra_env)
    return function(*args, **kwargs)
  finally:
    environ.clear()
    environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
  """Runs a test program shard and returns exit code and a list of tests run."""

  # Sharding is controlled purely through environment variables.
  extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
               TOTAL_SHARDS_ENV_VAR: str(total_shards)}
  return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
  def AssertSetEqual(self, lhs, rhs):
    """Asserts that two sets are equal (mutual containment, order ignored)."""

    for elem in lhs:
      self.assert_(elem in rhs, '%s in %s' % (elem, rhs))

    for elem in rhs:
      self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
  def AssertPartitionIsValid(self, set_var, list_of_sets):
    """Asserts that list_of_sets is a valid partition of set_var.

    Valid means: same total element count (no duplicates across slices)
    and the union of the slices equals set_var.
    """

    full_partition = []
    for slice_var in list_of_sets:
      full_partition.extend(slice_var)
    self.assertEqual(len(set_var), len(full_partition))
    self.assertEqual(sets.Set(set_var), sets.Set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
else:
return tests_to_run
  def RunAndVerify(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for a given filter.

    Args:
      gtest_filter: The filter to apply (None means "not supplied").
      tests_to_run: The set of tests expected to run.
    """

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # First, tests using the environment variable.

    # Windows removes empty variables from the environment when passing it
    # to a new process. This means it is impossible to pass an empty filter
    # into a process using the environment variable. However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      tests_run = RunAndExtractTestList()[0]
      SetEnvVar(FILTER_ENV_VAR, None)
      self.AssertSetEqual(tests_run, tests_to_run)
    # pylint: enable-msg=C6403

    # Next, tests using the command line flag.

    if gtest_filter is None:
      args = []
    else:
      args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)
  def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
                               args=None, check_exit_0=False):
    """Checks that binary runs correct tests for the given filter and shard.

    Runs all shards of gtest_filter_unittest_ with the given filter, and
    verifies that the right set of tests were run. The union of tests run
    on each shard should be identical to tests_to_run, without duplicates.

    Args:
      gtest_filter: A filter to apply to the tests.
      total_shards: A total number of shards to split test run into.
      tests_to_run: A set of tests expected to run.
      args: Arguments to pass to the test binary.
      check_exit_0: When set to a true value, make sure that all shards
                    return 0.
    """

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Windows removes empty variables from the environment when passing it
    # to a new process. This means it is impossible to pass an empty filter
    # into a process using the environment variable. However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      partition = []
      for i in range(0, total_shards):
        (tests_run, exit_code) = RunWithSharding(total_shards, i, args)
        if check_exit_0:
          self.assertEqual(0, exit_code)
        partition.append(tests_run)

      self.AssertPartitionIsValid(tests_to_run, partition)
      SetEnvVar(FILTER_ENV_VAR, None)
    # pylint: enable-msg=C6403
  def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for the given filter.

    Runs gtest_filter_unittest_ with the given filter, and enables
    disabled tests. Verifies that the right set of tests were run.

    Args:
      gtest_filter: A filter to apply to the tests.
      tests_to_run: A set of tests expected to run.
    """

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Construct the command line: always enable disabled tests, then add
    # the filter flag only when a filter was supplied.
    args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG]
    if gtest_filter is not None:
      args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)
  def setUp(self):
    """Sets up test case.

    Determines whether value-parameterized tests are enabled in the binary and
    sets the flags accordingly.
    """

    global param_tests_present
    # Probe the binary only once; all test methods share the result.
    if param_tests_present is None:
      param_tests_present = PARAM_TEST_REGEX.search(
          RunAndReturnOutput()) is not None
  def testDefaultBehavior(self):
    """Tests the behavior of not specifying the filter."""

    # With no filter every non-disabled test should run.
    self.RunAndVerify(None, ACTIVE_TESTS)
  def testDefaultBehaviorWithShards(self):
    """Tests the behavior without the filter, with sharding enabled."""
    # Shard counts below, at, and above the number of active tests; the
    # union of all shards must always equal ACTIVE_TESTS.
    self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
  def testEmptyFilter(self):
    """Tests an empty filter."""
    # An empty filter matches no tests, sharded or not.
    self.RunAndVerify('', [])
    self.RunAndVerifyWithSharding('', 1, [])
    self.RunAndVerifyWithSharding('', 2, [])
  def testBadFilter(self):
    """Tests a filter that matches nothing."""
    # A non-matching pattern runs nothing, even with disabled tests enabled.
    self.RunAndVerify('BadFilter', [])
    self.RunAndVerifyAllowingDisabled('BadFilter', [])
  def testFullName(self):
    """Tests filtering by full name."""
    # An exact 'TestCase.TestName' filter selects that one test in every mode.
    self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
  def testUniversalFilters(self):
    """Tests filters that match everything."""
    # '*' and '*.*' are equivalent universal matches; disabled tests only
    # appear when explicitly allowed.
    self.RunAndVerify('*', ACTIVE_TESTS)
    self.RunAndVerify('*.*', ACTIVE_TESTS)
    self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
    self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
    self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
  def testFilterByTestCase(self):
    """Tests filtering by test case name."""
    # '<TestCase>.*' selects every test in that case; the disabled member
    # only shows up when disabled tests are allowed.
    self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
    BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
    self.RunAndVerify('BazTest.*', BAZ_TESTS)
    self.RunAndVerifyAllowingDisabled('BazTest.*',
                                      BAZ_TESTS + ['BazTest.DISABLED_TestC'])
  def testFilterByTest(self):
    """Tests filtering by test name."""
    # '*.<TestName>' matches that test name across all test cases.
    self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
  def testFilterDisabledTests(self):
    """Select only the disabled tests to run."""
    # Disabled tests never run unless the also-run-disabled flag is set,
    # even when the filter names them explicitly.
    self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
                                      ['DISABLED_FoobarTest.Test1'])
    self.RunAndVerify('*DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
    self.RunAndVerify('*.DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
        'BarTest.DISABLED_TestFour',
        'BarTest.DISABLED_TestFive',
        'BazTest.DISABLED_TestC',
        'DISABLED_FoobarTest.DISABLED_Test2',
        ])
    self.RunAndVerify('DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_*', [
        'DISABLED_FoobarTest.Test1',
        'DISABLED_FoobarTest.DISABLED_Test2',
        'DISABLED_FoobarbazTest.TestA',
        ])
  def testWildcardInTestCaseName(self):
    """Tests using wildcard in the test case name."""
    # '*a*.*' matches every case whose name contains 'a' (Bar, Baz, plus
    # the death-test and value-parameterized cases).
    self.RunAndVerify('*a*.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
  def testWildcardInTestName(self):
    """Tests using wildcard in the test name."""
    # Case-sensitive match: only names containing a capital 'A' qualify.
    self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
  def testFilterWithoutDot(self):
    """Tests a filter that has no '.' in it."""
    # A dotless pattern is matched against the full '<TestCase>.<TestName>'
    # string, so '*z*' picks up a 'z' in either part.
    self.RunAndVerify('*z*', [
        'FooTest.Xyz',
        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB',
        ])
  def testTwoPatterns(self):
    """Tests filters that consist of two patterns."""
    # ':' separates alternatives; a test runs if it matches any pattern.
    self.RunAndVerify('Foo*.*:*A*', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BazTest.TestA',
        ])
    # An empty pattern + a non-empty one
    self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
  def testThreePatterns(self):
    """Tests filters that consist of three patterns."""
    # Empty patterns in the ':'-separated list match nothing and are
    # effectively ignored.
    self.RunAndVerify('*oo*:*A*:*One', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BazTest.TestOne',
        'BazTest.TestA',
        ])
    # The 2nd pattern is empty.
    self.RunAndVerify('*oo*::*One', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BazTest.TestOne',
        ])
    # The last 2 patterns are empty.
    self.RunAndVerify('*oo*::', [
        'FooTest.Abc',
        'FooTest.Xyz',
        ])
  def testNegativeFilters(self):
    """Tests filters with negative patterns following a '-' separator."""
    # Everything after '-' is subtracted from the positive selection.
    self.RunAndVerify('*-BazTest.TestOne', [
        'FooTest.Abc',
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        'BazTest.TestA',
        'BazTest.TestB',
        ] + DEATH_TESTS + PARAM_TESTS)
    self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
        'FooTest.Xyz',
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)
    self.RunAndVerify('BarTest.*-BarTest.TestOne', [
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ])
    # Tests without leading '*'.
    self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)
    # Value parameterized tests.
    self.RunAndVerify('*/*', PARAM_TESTS)
    # Value parameterized tests filtering by the sequence name.
    self.RunAndVerify('SeqP/*', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestX/1',
        'SeqP/ParamTest.TestY/0',
        'SeqP/ParamTest.TestY/1',
        ])
    # Value parameterized tests filtering by the test name.
    self.RunAndVerify('*/0', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestY/0',
        'SeqQ/ParamTest.TestX/0',
        'SeqQ/ParamTest.TestY/0',
        ])
  def testFlagOverridesEnvVar(self):
    """Tests that the filter flag overrides the filtering env. variable."""
    # Set a conflicting filter through the environment, then pass a
    # different one on the command line; the flag must win.
    SetEnvVar(FILTER_ENV_VAR, 'Foo*')
    args = ['--%s=%s' % (FILTER_FLAG, '*One')]
    tests_run = RunAndExtractTestList(args)[0]
    SetEnvVar(FILTER_ENV_VAR, None)
    self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
  def testShardStatusFileIsCreated(self):
    """Tests that the shard file is created if specified in the environment."""
    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file')
    self.assert_(not os.path.exists(shard_status_file))
    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
    finally:
      # The binary must create the file as a side effect; remove it so
      # later tests start from a clean slate.
      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
output = InvokeWithModifiedEnv(extra_env,
RunAndReturnOutput,
[LIST_TESTS_FLAG])
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
self.assert_('[==========]' not in output,
'Unexpected output during test enumeration.\n'
'Please ensure that LIST_TESTS_FLAG is assigned the\n'
'correct flag value for listing Google Test tests.')
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
  if SUPPORTS_DEATH_TESTS:
    def testShardingWorksWithDeathTests(self):
      """Tests integration with death tests and sharding."""
      # Verify the sharded partition is valid and all shards exit 0 under
      # both death-test styles.
      gtest_filter = 'HasDeathTest.*:SeqP/*'
      expected_tests = [
          'HasDeathTest.Test1',
          'HasDeathTest.Test2',
          'SeqP/ParamTest.TestX/0',
          'SeqP/ParamTest.TestX/1',
          'SeqP/ParamTest.TestY/0',
          'SeqP/ParamTest.TestY/1',
        ]
      for flag in ['--gtest_death_test_style=threadsafe',
                   '--gtest_death_test_style=fast']:
        self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
                                      check_exit_0=True, args=[flag])
        self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
                                      check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
| gpl-2.0 |
ayumilong/rethinkdb | external/v8_3.30.33.16/tools/run-valgrind.py | 92 | 2874 | #!/usr/bin/env python
#
# Copyright 2009 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Simple wrapper for running valgrind and checking the output on
# stderr for memory leaks.
import subprocess
import sys
import re
VALGRIND_ARGUMENTS = [
  'valgrind',
  '--error-exitcode=1',
  '--leak-check=full',
  '--smc-check=all'
]

# Compute the command line.
command = VALGRIND_ARGUMENTS + sys.argv[1:]

# Run valgrind and collect its stderr.  communicate() reads the pipe while
# the child runs; the previous wait()-then-readlines() order could deadlock
# if valgrind filled the stderr pipe buffer before exiting.
process = subprocess.Popen(command, stderr=subprocess.PIPE)
errors = process.communicate()[1].splitlines(True)
code = process.returncode

# If valgrind produced an error, we report that to the user.
if code != 0:
  sys.stderr.writelines(errors)
  sys.exit(code)

# Look through the leak details and make sure that we don't
# have any definitely, indirectly, and possibly lost bytes.
LEAK_RE = r"(?:definitely|indirectly|possibly) lost: "
LEAK_LINE_MATCHER = re.compile(LEAK_RE)
LEAK_OKAY_MATCHER = re.compile(r"lost: 0 bytes in 0 blocks")
leaks = []
for line in errors:
  if LEAK_LINE_MATCHER.search(line):
    leaks.append(line)
    # Any leak summary line that is not "0 bytes in 0 blocks" is a failure.
    if not LEAK_OKAY_MATCHER.search(line):
      sys.stderr.writelines(errors)
      sys.exit(1)

# Make sure we found between 2 and 3 leak lines.
if len(leaks) < 2 or len(leaks) > 3:
  sys.stderr.writelines(errors)
  sys.stderr.write('\n\n#### Malformed valgrind output.\n#### Exiting.\n')
  sys.exit(1)

# No leaks found.
sys.exit(0)
| agpl-3.0 |
suneeth51/neutron | neutron/api/v2/router.py | 20 | 5105 | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
import routes as routes_mapper
import six
import six.moves.urllib.parse as urlparse
import webob
import webob.dec
import webob.exc
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron import manager
from neutron import policy
from neutron.quota import resource_registry
from neutron import wsgi
LOG = logging.getLogger(__name__)
# Maps each core resource (singular) to its API collection name (plural).
RESOURCES = {'network': 'networks',
             'subnet': 'subnets',
             'subnetpool': 'subnetpools',
             'port': 'ports'}
# No core sub-resources are registered here by default.
SUB_RESOURCES = {}
# Controller actions exposed on collections vs. individual members.
COLLECTION_ACTIONS = ['index', 'create']
MEMBER_ACTIONS = ['show', 'update', 'delete']
# Route requirements: resource ids must look like UUIDs; format is json.
REQUIREMENTS = {'id': attributes.UUID_PATTERN, 'format': 'json'}
class Index(wsgi.Application):
    """WSGI application that renders the API index (resource listing)."""

    def __init__(self, resources):
        self.resources = resources

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Build one entry per resource, each carrying a self-link rooted
        # at the request URL.
        layout = []
        for name, collection in six.iteritems(self.resources):
            entry = {'name': name,
                     'collection': collection,
                     'links': [{'rel': 'self',
                                'href': urlparse.urljoin(req.path_url,
                                                         collection)}]}
            layout.append(entry)
        content_type = req.best_match_content_type()
        body = wsgi.Serializer(metadata={}).serialize({'resources': layout},
                                                      content_type)
        return webob.Response(body=body, content_type=content_type)
class APIRouter(wsgi.Router):
    """Router wiring the v2 core API resources to their WSGI controllers."""
    @classmethod
    def factory(cls, global_config, **local_config):
        # App-factory hook: builds the router from deployment config.
        return cls(**local_config)
    def __init__(self, **local_config):
        mapper = routes_mapper.Mapper()
        plugin = manager.NeutronManager.get_plugin()
        # Extensions must enrich RESOURCE_ATTRIBUTE_MAP before controllers
        # are created below, so that every attribute is routable.
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        ext_mgr.extend_resources("2.0", attributes.RESOURCE_ATTRIBUTE_MAP)
        col_kwargs = dict(collection_actions=COLLECTION_ACTIONS,
                          member_actions=MEMBER_ACTIONS)
        def _map_resource(collection, resource, params, parent=None):
            # Creates a controller for one resource and registers its routes;
            # sub-resources get a '/<parent>/{parent_id}/<collection>' prefix.
            allow_bulk = cfg.CONF.allow_bulk
            allow_pagination = cfg.CONF.allow_pagination
            allow_sorting = cfg.CONF.allow_sorting
            controller = base.create_resource(
                collection, resource, plugin, params, allow_bulk=allow_bulk,
                parent=parent, allow_pagination=allow_pagination,
                allow_sorting=allow_sorting)
            path_prefix = None
            if parent:
                path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'],
                                                  parent['member_name'],
                                                  collection)
            mapper_kwargs = dict(controller=controller,
                                 requirements=REQUIREMENTS,
                                 path_prefix=path_prefix,
                                 **col_kwargs)
            return mapper.collection(collection, resource,
                                     **mapper_kwargs)
        mapper.connect('index', '/', controller=Index(RESOURCES))
        for resource in RESOURCES:
            _map_resource(RESOURCES[resource], resource,
                          attributes.RESOURCE_ATTRIBUTE_MAP.get(
                              RESOURCES[resource], dict()))
            resource_registry.register_resource_by_name(resource)
        for resource in SUB_RESOURCES:
            _map_resource(SUB_RESOURCES[resource]['collection_name'], resource,
                          attributes.RESOURCE_ATTRIBUTE_MAP.get(
                              SUB_RESOURCES[resource]['collection_name'],
                              dict()),
                          SUB_RESOURCES[resource]['parent'])
        # Certain policy checks require that the extensions are loaded
        # and the RESOURCE_ATTRIBUTE_MAP populated before they can be
        # properly initialized. This can only be claimed with certainty
        # once this point in the code has been reached. In the event
        # that the policies have been initialized before this point,
        # calling reset will cause the next policy check to
        # re-initialize with all of the required data in place.
        policy.reset()
        super(APIRouter, self).__init__(mapper)
| apache-2.0 |
jiahaoliang/group-based-policy | gbpservice/nfp/configurator/drivers/loadbalancer/v2/haproxy/octavia_lib/common/utils.py | 1 | 2359 | # Copyright 2011, VMware, Inc., 2014 A10 Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Borrowed from nova code base, more utilities will be added/borrowed as and
# when needed.
"""Utilities and helper functions."""
import base64
import datetime
import functools
import hashlib
import random
import socket

from oslo_log import log as logging
from oslo_utils import excutils
LOG = logging.getLogger(__name__)
def get_hostname():
    """Return the local machine's hostname via socket.gethostname()."""
    return socket.gethostname()
def get_random_string(length):
    """Get a random hex string of the specified length.

    based on Cinder library
    cinder/transfer/api.py

    :param length: number of lowercase hex characters to return
    :returns: a string of `length` hex digits
    """
    # NOTE: do not reseed the module-level RNG here.  The previous
    # implementation called random.seed(datetime.now().microsecond), which
    # both clobbered global random state and collapsed the seed space to one
    # million values, so two calls landing on the same microsecond produced
    # identical "random" strings.
    rndstr = ""
    while len(rndstr) < length:
        rndstr += hashlib.sha224(
            str(random.random()).encode('ascii')
        ).hexdigest()
    return rndstr[0:length]
def base64_sha1_string(string_to_hash):
    """Return the base64-encoded SHA-1 of *string_to_hash*.

    The standard base64 characters '+' and '/' are replaced with '_' and
    '-' respectively, and the result is returned as text.
    """
    digest = hashlib.sha1(string_to_hash.encode('utf-8')).digest()
    encoded = base64.b64encode(digest, b'_-')
    return encoded.decode('UTF-8')
class exception_logger(object):
    """Wrap a function and log raised exception

    :param logger: the logger to log the exception default is LOG.exception
    :returns: origin value if no exception raised; re-raise the exception if
              any occurred
    """
    def __init__(self, logger=None):
        self.logger = logger

    def __call__(self, func):
        if self.logger is None:
            # Default to the exception() method of a logger named after the
            # wrapped function's module.  Avoid rebinding the name 'LOG',
            # which previously shadowed the module-level logger.
            self.logger = logging.getLogger(func.__module__).exception

        # functools.wraps preserves the wrapped function's metadata
        # (__name__, __doc__, ...), which the original wrapper lost.
        @functools.wraps(func)
        def call(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                # Log, then re-raise the original exception with its
                # traceback intact.
                with excutils.save_and_reraise_exception():
                    self.logger(e)
        return call
| apache-2.0 |
Archenemy-xiatian/foursquared | util/oget.py | 262 | 3416 | #!/usr/bin/python
"""
Pull a oAuth protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
# Host (and port) every API request is sent to.
SERVER = 'api.foursquare.com:80'
# All oauth POST bodies are form-encoded.
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
# Requests are signed with HMAC-SHA1.
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
  """Extract (oauth_token, oauth_token_secret) from an XML auth response."""
  token_match = re.search('<oauth_token>(.*)</oauth_token>', auth_response)
  secret_match = re.search('<oauth_token_secret>(.*)</oauth_token_secret>',
                           auth_response)
  return (token_match.groups()[0], secret_match.groups()[0])
def create_signed_oauth_request(username, password, consumer):
  # Builds a POST request against the authexchange endpoint carrying the
  # foursquare credentials, then signs it with HMAC-SHA1 (no token yet).
  oauth_request = oauth.OAuthRequest.from_consumer_and_token(
      consumer, http_method='POST', http_url=AUTHEXCHANGE_URL,
      parameters=dict(fs_username=username, fs_password=password))
  oauth_request.sign_request(SIGNATURE_METHOD, consumer, None)
  return oauth_request
def main():
  # Fetch the oauth-protected URL given on the command line, performing a
  # username/password token exchange first if no token is cached in ~/.oget.
  url = urlparse.urlparse(sys.argv[1])
  # Nevermind that the query can have repeated keys.
  parameters = dict(urlparse.parse_qsl(url.query))
  # ~/.oget holds 4 lines (no cached token) or 6 (token + secret appended).
  password_file = open(os.path.join(user.home, '.oget'))
  lines = [line.strip() for line in password_file.readlines()]
  if len(lines) == 4:
    cons_key, cons_key_secret, username, password = lines
    access_token = None
  else:
    cons_key, cons_key_secret, username, password, token, secret = lines
    access_token = oauth.OAuthToken(token, secret)
  consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)
  if not access_token:
    # No cached token: exchange credentials for one and persist it back to
    # ~/.oget so subsequent runs skip the exchange.
    oauth_request = create_signed_oauth_request(username, password, consumer)
    connection = httplib.HTTPConnection(SERVER)
    headers = {'Content-Type' :'application/x-www-form-urlencoded'}
    connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
                       body=oauth_request.to_postdata(), headers=headers)
    auth_response = connection.getresponse().read()
    token = parse_auth_response(auth_response)
    access_token = oauth.OAuthToken(*token)
    open(os.path.join(user.home, '.oget'), 'w').write('\n'.join((
        cons_key, cons_key_secret, username, password, token[0], token[1])))
  # Sign the actual resource request with the (possibly fresh) access token.
  oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
      access_token, http_method='POST', http_url=url.geturl(),
      parameters=parameters)
  oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)
  connection = httplib.HTTPConnection(SERVER)
  connection.request(oauth_request.http_method, oauth_request.to_url(),
      body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
  print connection.getresponse().read()
  #print minidom.parse(connection.getresponse()).toprettyxml(indent=' ')
| apache-2.0 |
mhallsmoore/qstrader | tests/unit/broker/portfolio/test_position_handler.py | 1 | 4684 | from collections import OrderedDict
import numpy as np
import pandas as pd
import pytz
from qstrader.broker.portfolio.position_handler import PositionHandler
from qstrader.broker.transaction.transaction import Transaction
def test_transact_position_new_position():
    """
    Transacting a brand new asset should create a Position whose
    quantities, direction and average price reflect the single buy.
    """
    handler = PositionHandler()
    symbol = 'EQ:AMZN'
    txn = Transaction(
        symbol,
        quantity=100,
        dt=pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC),
        price=960.0,
        order_id=123,
        commission=26.83
    )
    handler.transact_position(txn)
    # The single buy fully determines the resulting position state.
    position = handler.positions[symbol]
    assert position.buy_quantity == 100
    assert position.sell_quantity == 0
    assert position.net_quantity == 100
    assert position.direction == 1
    assert position.avg_price == 960.2683000000001
def test_transact_position_current_position():
    """
    Tests the 'transact_position' method for a transaction
    with a current asset and checks that all objects are
    set correctly.
    """
    # Create the PositionHandler, Transaction and
    # carry out a transaction
    ph = PositionHandler()
    asset = 'EQ:AMZN'
    dt = pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC)
    new_dt = pd.Timestamp('2015-05-06 16:00:00', tz=pytz.UTC)
    transaction_long = Transaction(
        asset,
        quantity=100,
        dt=dt,
        price=960.0,
        order_id=123,
        commission=26.83
    )
    ph.transact_position(transaction_long)
    # A second buy in the same asset must aggregate into the existing
    # position rather than create a new one.
    transaction_long_again = Transaction(
        asset,
        quantity=200,
        dt=new_dt,
        price=990.0,
        order_id=234,
        commission=18.53
    )
    ph.transact_position(transaction_long_again)
    # Check that the position object is set correctly
    pos = ph.positions[asset]
    assert pos.buy_quantity == 300
    assert pos.sell_quantity == 0
    assert pos.net_quantity == 300
    assert pos.direction == 1
    # (100*960 + 200*990 + 26.83 + 18.53) / 300 = 980.1512
    assert np.isclose(pos.avg_price, 980.1512)
def test_transact_position_quantity_zero():
    """
    Tests the 'transact_position' method for a transaction
    with net zero quantity after the transaction to ensure
    deletion of the position.
    """
    # Create the PositionHandler, Transaction and
    # carry out a transaction
    ph = PositionHandler()
    asset = 'EQ:AMZN'
    dt = pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC)
    new_dt = pd.Timestamp('2015-05-06 16:00:00', tz=pytz.UTC)
    transaction_long = Transaction(
        asset,
        quantity=100,
        dt=dt,
        price=960.0,
        order_id=123, commission=26.83
    )
    ph.transact_position(transaction_long)
    # Selling the full quantity should bring the position to net zero.
    transaction_close = Transaction(
        asset,
        quantity=-100,
        dt=new_dt,
        price=980.0,
        order_id=234,
        commission=18.53
    )
    ph.transact_position(transaction_close)
    # Go long and then close, then check that the
    # positions OrderedDict is empty
    assert ph.positions == OrderedDict()
def test_total_values_for_no_transactions():
    """
    Ensure a freshly created (empty) PositionHandler reports zero
    market value, unrealised PnL, realised PnL and total PnL.
    """
    handler = PositionHandler()
    totals = (
        handler.total_market_value,
        handler.total_unrealised_pnl,
        handler.total_realised_pnl,
        handler.total_pnl,
    )
    for total in totals:
        assert total() == 0.0
def test_total_values_for_two_separate_transactions():
    """
    Tests 'total_market_value', 'total_unrealised_pnl',
    'total_realised_pnl' and 'total_pnl' for single
    transactions in two separate assets.
    """
    ph = PositionHandler()
    # Asset 1
    asset1 = 'EQ:AMZN'
    dt1 = pd.Timestamp('2015-05-06 15:00:00', tz=pytz.UTC)
    trans_pos_1 = Transaction(
        asset1,
        quantity=75,
        dt=dt1,
        price=483.45,
        order_id=1,
        commission=15.97
    )
    ph.transact_position(trans_pos_1)
    # Asset 2
    asset2 = 'EQ:MSFT'
    dt2 = pd.Timestamp('2015-05-07 15:00:00', tz=pytz.UTC)
    trans_pos_2 = Transaction(
        asset2,
        quantity=250,
        dt=dt2,
        price=142.58,
        order_id=2,
        commission=8.35
    )
    ph.transact_position(trans_pos_2)
    # Check all total values
    # Market value: 75*483.45 + 250*142.58 = 71903.75; the unrealised loss
    # equals the total commission paid (15.97 + 8.35).
    assert ph.total_market_value() == 71903.75
    assert np.isclose(ph.total_unrealised_pnl(), -24.31999999999971)
    assert ph.total_realised_pnl() == 0.0
    assert np.isclose(ph.total_pnl(), -24.31999999999971)
| mit |
tjsavage/full_nonrel_starter | django/contrib/auth/admin.py | 153 | 6848 | from django.db import transaction
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.forms import UserCreationForm, UserChangeForm, AdminPasswordChangeForm
from django.contrib.auth.models import User, Group
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils.html import escape
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
csrf_protect_m = method_decorator(csrf_protect)
class GroupAdmin(admin.ModelAdmin):
    """Admin options for auth Groups: searchable and ordered by name, with
    a two-pane widget for assigning permissions."""
    search_fields = ('name',)
    ordering = ('name',)
    filter_horizontal = ('permissions',)
class UserAdmin(admin.ModelAdmin):
    """Admin options and views for the auth User model.

    Uses a reduced two-field form for the add view and exposes an extra
    admin view for changing a user's password.
    """
    add_form_template = 'admin/auth/user/add_form.html'
    change_user_password_template = None
    fieldsets = (
        (None, {'fields': ('username', 'password')}),
        (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser', 'user_permissions')}),
        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
        (_('Groups'), {'fields': ('groups',)}),
    )
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('username', 'password1', 'password2')}
        ),
    )
    form = UserChangeForm
    add_form = UserCreationForm
    change_password_form = AdminPasswordChangeForm
    list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
    list_filter = ('is_staff', 'is_superuser', 'is_active')
    search_fields = ('username', 'first_name', 'last_name', 'email')
    ordering = ('username',)
    filter_horizontal = ('user_permissions',)
    def __call__(self, request, url):
        # this should not be here, but must be due to the way __call__ routes
        # in ModelAdmin.
        if url is None:
            return self.changelist_view(request)
        if url.endswith('password'):
            return self.user_change_password(request, url.split('/')[0])
        return super(UserAdmin, self).__call__(request, url)
    def get_fieldsets(self, request, obj=None):
        # The add view (obj is None) only shows the minimal username/password
        # fieldsets; the change view shows the full set.
        if not obj:
            return self.add_fieldsets
        return super(UserAdmin, self).get_fieldsets(request, obj)
    def get_form(self, request, obj=None, **kwargs):
        """
        Use special form during user creation
        """
        defaults = {}
        if obj is None:
            defaults.update({
                'form': self.add_form,
                'fields': admin.util.flatten_fieldsets(self.add_fieldsets),
            })
        defaults.update(kwargs)
        return super(UserAdmin, self).get_form(request, obj, **defaults)
    def get_urls(self):
        # Prepend the per-user change-password view to the default admin URLs.
        from django.conf.urls.defaults import patterns
        return patterns('',
            (r'^(\d+)/password/$', self.admin_site.admin_view(self.user_change_password))
        ) + super(UserAdmin, self).get_urls()
    @csrf_protect_m
    @transaction.commit_on_success
    def add_view(self, request, form_url='', extra_context=None):
        # It's an error for a user to have add permission but NOT change
        # permission for users. If we allowed such users to add users, they
        # could create superusers, which would mean they would essentially have
        # the permission to change users. To avoid the problem entirely, we
        # disallow users from adding users if they don't have change
        # permission.
        if not self.has_change_permission(request):
            if self.has_add_permission(request) and settings.DEBUG:
                # Raise Http404 in debug mode so that the user gets a helpful
                # error message.
                raise Http404('Your user does not have the "Change user" permission. In order to add users, Django requires that your user account have both the "Add user" and "Change user" permissions set.')
            raise PermissionDenied
        if extra_context is None:
            extra_context = {}
        defaults = {
            'auto_populated_fields': (),
            'username_help_text': self.model._meta.get_field('username').help_text,
        }
        extra_context.update(defaults)
        return super(UserAdmin, self).add_view(request, form_url, extra_context)
    def user_change_password(self, request, id):
        # Dedicated admin view that validates and saves a new password for
        # the given user id, re-rendering the form on validation errors.
        if not self.has_change_permission(request):
            raise PermissionDenied
        user = get_object_or_404(self.model, pk=id)
        if request.method == 'POST':
            form = self.change_password_form(user, request.POST)
            if form.is_valid():
                new_user = form.save()
                msg = ugettext('Password changed successfully.')
                messages.success(request, msg)
                return HttpResponseRedirect('..')
        else:
            form = self.change_password_form(user)
        fieldsets = [(None, {'fields': form.base_fields.keys()})]
        adminForm = admin.helpers.AdminForm(form, fieldsets, {})
        return render_to_response(self.change_user_password_template or 'admin/auth/user/change_password.html', {
            'title': _('Change password: %s') % escape(user.username),
            'adminForm': adminForm,
            'form': form,
            'is_popup': '_popup' in request.REQUEST,
            'add': True,
            'change': False,
            'has_delete_permission': False,
            'has_change_permission': True,
            'has_absolute_url': False,
            'opts': self.model._meta,
            'original': user,
            'save_as': False,
            'show_save': True,
            'root_path': self.admin_site.root_path,
        }, context_instance=RequestContext(request))
    def response_add(self, request, obj, post_url_continue='../%s/'):
        """
        Determines the HttpResponse for the add_view stage. It mostly defers to
        its superclass implementation but is customized because the User model
        has a slightly different workflow.
        """
        # We should allow further modification of the user just added i.e. the
        # 'Save' button should behave like the 'Save and continue editing'
        # button except in two scenarios:
        # * The user has pressed the 'Save and add another' button
        # * We are adding a user in a popup
        if '_addanother' not in request.POST and '_popup' not in request.POST:
            request.POST['_continue'] = 1
        return super(UserAdmin, self).response_add(request, obj, post_url_continue)
admin.site.register(Group, GroupAdmin)
admin.site.register(User, UserAdmin)
| bsd-3-clause |
elimence/edx-platform | common/djangoapps/student/management/commands/set_staff.py | 1 | 1445 | from optparse import make_option
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand, CommandError
import re
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--unset',
action='store_true',
dest='unset',
default=False,
help='Set is_staff to False instead of True'),
)
args = '<user|email> [user|email ...]>'
help = """
This command will set is_staff to true for one or more users.
Lookup by username or email address, assumes usernames
do not look like email addresses.
"""
def handle(self, *args, **options):
if len(args) < 1:
raise CommandError('Usage is set_staff {0}'.format(self.args))
for user in args:
if re.match('[^@]+@[^@]+\.[^@]+', user):
try:
v = User.objects.get(email=user)
except:
raise CommandError("User {0} does not exist".format(user))
else:
try:
v = User.objects.get(username=user)
except:
raise CommandError("User {0} does not exist".format(user))
if options['unset']:
v.is_staff = False
else:
v.is_staff = True
v.save()
print 'Success!'
| agpl-3.0 |
soundarmoorthy/omaha-me | tools/generate_omaha3_idl.py | 64 | 3345 | #!/usr/bin/python2.4
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Generates IDL file Omaha3 internfaces."""
import commands
import getopt
import os
import sys
def _GetStatusOutput(cmd):
  """Return (status, output) of executing cmd in a shell."""
  if os.name == "nt":
    # commands.getstatusoutput is POSIX-only, so emulate it with os.popen
    # on Windows, merging stderr into stdout and stripping one trailing
    # newline to match the POSIX behavior.
    pipe = os.popen(cmd + " 2>&1", "r")
    text = pipe.read()
    sts = pipe.close()
    if sts is None: sts = 0
    if text[-1:] == "\n": text = text[:-1]
    return sts, text
  else:
    return commands.getstatusoutput(cmd)
def _GenerateGuid():
  # Shells out to uuidgen.exe and returns the GUID text; raises SystemError
  # when the tool exits non-zero (the output then holds the error message).
  (status, guid) = _GetStatusOutput("uuidgen.exe /c")
  if status != 0:
    raise SystemError("Failed to get GUID: %s" % guid)
  return guid
def _GenerateIDLText(idl_template):
guid_placehold_marker = "___AUTO_GENERATED_GUID___"
while guid_placehold_marker in idl_template:
idl_template = idl_template.replace(guid_placehold_marker,
_GenerateGuid(),
1)
return idl_template
def _GenerateIDLFile(idl_template_filename, idl_output_filename):
  """Read the IDL template, substitute GUID placeholders, write the output.

  The output file starts with an autogenerated-file banner followed by the
  substituted template text.
  """
  # ``with`` guarantees the handles are closed even if reading/writing
  # raises; the original open()/close() pairs leaked on exception.
  with open(idl_template_filename, "r") as f_in:
    idl_template = f_in.read()
  idl_output = _GenerateIDLText(idl_template)
  with open(idl_output_filename, "w") as f_out:
    f_out.write("// *** AUTOGENERATED FILE. DO NOT HAND-EDIT ***\n\n")
    f_out.write(idl_output)
def _Usage():
  """Prints out script usage information."""
  # NOTE: Python 2 print statement; the triple-quoted text is emitted verbatim.
  print """
generate_omaha3_idl.py: Write out the given IDL file.
Usage:
  generate_omaha3_idl.py [--help
                          | --idl_template_file filename
                          --idl_output_file filename]
Options:
  --help                         Show this information.
  --idl_output_file filename     Path/name of output IDL filename.
  --idl_template_file filename   Path/name of input IDL template.
"""
def _Main():
  """Generates IDL file."""
  # use getopt to parse the option and argument list; this may raise, but
  # don't catch it
  argument_list = ["help", "idl_template_file=", "idl_output_file="]
  (opts, unused_args) = getopt.getopt(sys.argv[1:], "", argument_list)
  # No options at all, or an explicit --help, prints usage and exits cleanly.
  if not opts or ("--help", "") in opts:
    _Usage()
    sys.exit()
  idl_template_filename = ""
  idl_output_filename = ""
  for (o, v) in opts:
    if o == "--idl_template_file":
      idl_template_filename = v
    if o == "--idl_output_file":
      idl_output_filename = v
  # make sure we have work to do
  # NOTE(review): StandardError is Python 2 only; this script targets py2.
  if not idl_template_filename:
    raise StandardError("no idl_template_filename specified")
  if not idl_output_filename:
    raise StandardError("no idl_output_filename specified")
  _GenerateIDLFile(idl_template_filename, idl_output_filename)
  sys.exit()
if __name__ == "__main__":
_Main()
| apache-2.0 |
epssy/hue | desktop/core/ext-py/Django-1.6.10/tests/model_forms_regress/models.py | 109 | 2375 | from __future__ import unicode_literals
import os
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils._os import upath
class Person(models.Model):
    """Minimal model with a single name field."""
    name = models.CharField(max_length=100)
class Triple(models.Model):
    """Model exercising overlapping unique_together constraints."""
    left = models.IntegerField()
    middle = models.IntegerField()
    right = models.IntegerField()
    class Meta:
        # Two constraints that share the ``middle`` column.
        unique_together = (('left', 'middle'), ('middle', 'right'))
class FilePathModel(models.Model):
    """Model exercising FilePathField limited to .py files in this directory."""
    # Raw string: ``\.`` is a regex escape, not a (deprecated) string escape.
    # The runtime bytes of the pattern are unchanged.
    path = models.FilePathField(path=os.path.dirname(upath(__file__)), match=r".*\.py$", blank=True)
@python_2_unicode_compatible
class Publication(models.Model):
    """A publication with a title and publication date."""
    title = models.CharField(max_length=30)
    date_published = models.DateField()
    def __str__(self):
        return self.title
@python_2_unicode_compatible
class Article(models.Model):
    """An article that may appear in many publications (M2M)."""
    headline = models.CharField(max_length=100)
    publications = models.ManyToManyField(Publication)
    def __str__(self):
        return self.headline
class CustomFileField(models.FileField):
    """FileField that asserts save_form_data is invoked at most once."""
    def save_form_data(self, instance, data):
        # Flag lives on the field instance; a second call trips the assert.
        been_here = getattr(self, 'been_saved', False)
        assert not been_here, "save_form_data called more than once"
        setattr(self, 'been_saved', True)
class CustomFF(models.Model):
    """Model using the single-save CustomFileField above."""
    f = CustomFileField(upload_to='unused', blank=True)
class RealPerson(models.Model):
    """Model whose clean() rejects the name 'anonymous' (case-insensitive)."""
    name = models.CharField(max_length=100)
    def clean(self):
        if self.name.lower() == 'anonymous':
            raise ValidationError("Please specify a real name.")
class Author(models.Model):
    """Author with an optional one-to-one link to a Publication."""
    publication = models.OneToOneField(Publication, null=True, blank=True)
    full_name = models.CharField(max_length=255)
class Author1(models.Model):
    """Same as Author but the one-to-one link is required (null=False)."""
    publication = models.OneToOneField(Publication, null=False)
    full_name = models.CharField(max_length=255)
class Homepage(models.Model):
    """Single-URL model exercising URLField validation."""
    url = models.URLField()
class Document(models.Model):
    """Model with an optional plain FileField."""
    myfile = models.FileField(upload_to='unused', blank=True)
class Edition(models.Model):
    """Edition of a publication; exercises FK + unique constraints."""
    author = models.ForeignKey(Person)
    publication = models.ForeignKey(Publication)
    edition = models.IntegerField()
    isbn = models.CharField(max_length=13, unique=True)
    class Meta:
        unique_together = (('author', 'publication'), ('publication', 'edition'),)
| apache-2.0 |
lakshayg/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/sugar_test.py | 157 | 4205 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import ops
from tensorflow.contrib.labeled_tensor.python.ops import sugar
from tensorflow.contrib.labeled_tensor.python.ops import test_util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class Base(test_util.Base):
  """Shared test fixture: provides a tiny one-element labeled tensor."""
  def setUp(self):
    super(Base, self).setUp()
    # Single-element LabeledTensor with one axis 'x' of size 1.
    self.small_lt = core.LabeledTensor(constant_op.constant([1]), [('x', 1)])
class ReshapeCoderTest(Base):
  """Tests for sugar.ReshapeCoder: a bijective reshape of labeled axes."""
  def setUp(self):
    # Build a 5-axis labeled tensor (batch, row, column, channel, mask)
    # filled with consecutive integers; every test below uses it as input.
    super(ReshapeCoderTest, self).setUp()
    self.batch_size = 8
    self.num_rows = 50
    self.num_columns = 100
    self.channels = ['red', 'green', 'blue']
    self.masks = [False, True]
    tensor = math_ops.range(0,
                            self.batch_size * self.num_rows * self.num_columns *
                            len(self.channels) * len(self.masks))
    tensor = array_ops.reshape(tensor, [
        self.batch_size, self.num_rows, self.num_columns, len(self.channels),
        len(self.masks)
    ])
    self.batch_axis = ('batch', range(self.batch_size))
    self.row_axis = ('row', range(self.num_rows))
    self.column_axis = ('column', range(self.num_columns))
    self.channel_axis = ('channel', self.channels)
    self.mask_axis = ('mask', self.masks)
    axes = [
        self.batch_axis, self.row_axis, self.column_axis, self.channel_axis,
        self.mask_axis
    ]
    self.masked_image_lt = core.LabeledTensor(tensor, axes)
  def test_name(self):
    # Encoded/decoded tensors carry the reshape op-name markers.
    rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
    encode_lt = rc.encode(self.masked_image_lt)
    decode_lt = rc.decode(encode_lt)
    self.assertIn('lt_reshape_encode', encode_lt.name)
    self.assertIn('lt_reshape_decode', decode_lt.name)
  def test_bijection_flat(self):
    # Merging ('channel', 'mask') into one unlabeled 'depth' axis and back
    # must round-trip to the original labeled tensor.
    rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
    encode_lt = rc.encode(self.masked_image_lt)
    golden_axes = core.Axes([
        self.batch_axis, self.row_axis, self.column_axis,
        ('depth', len(self.channels) * len(self.masks))
    ])
    self.assertEqual(encode_lt.axes, golden_axes)
    decode_lt = rc.decode(encode_lt)
    self.assertLabeledTensorsEqual(decode_lt, self.masked_image_lt)
  def test_bijection_with_labels(self):
    # Same round-trip, but the target axes carry explicit labels.
    depth_axis = core.Axis('depth', range(len(self.channels) * len(self.masks)))
    rc = sugar.ReshapeCoder(['channel', 'mask'],
                            [depth_axis, ('other', ['label'])])
    encode_lt = rc.encode(self.masked_image_lt)
    golden_axes = core.Axes([
        self.batch_axis, self.row_axis, self.column_axis, depth_axis,
        ('other', ['label'])
    ])
    self.assertEqual(encode_lt.axes, golden_axes)
    decode_lt = rc.decode(encode_lt)
    self.assertLabeledTensorsEqual(decode_lt, self.masked_image_lt)
  def test_invalid_input(self):
    # decode() before any encode(), and encode() of a tensor whose axes do
    # not match the previous encode, must both raise ValueError.
    with self.assertRaises(ValueError):
      rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
      rc.decode(self.masked_image_lt)
    with self.assertRaises(ValueError):
      rc = sugar.ReshapeCoder(['channel', 'mask'], ['depth'])
      rc.encode(self.masked_image_lt)
      rc.encode(ops.select(self.masked_image_lt, {'channel': 'red'}))
if __name__ == '__main__':
test.main()
| apache-2.0 |
cisco-open-source/kodi | lib/libUPnP/Platinum/Build/Tools/Scripts/XCodeMake.py | 262 | 2126 | #! /usr/bin/env python
"""
XCode Build Script
$Id: XCodeMake.py 655 2010-09-29 22:40:22Z soothe $
"""
import os
import sys
import getopt
import subprocess
# ------------------------------------------------------------
# usage
# ------------------------------------------------------------
def usage(errMsg):
    """Print an optional error message followed by command-line usage.

    errMsg may be an unbound name at the call site (see the NameError
    handler at the bottom of this script); the try/except tolerates that.
    """
    try:
        print 'Error: %s' % (errMsg)
    except NameError:
        pass
    print 'Usage: '
    print '    %s -p <path to project> -b [Release|Debug|etc.] -t [All|Platinum|PlatinumFramework|etc.] -s [macosx|iphoneos]' % (sys.argv[0])
    print ''
    print '  REQUIRED OPTIONS'
    print '\t-p <project>'
    print '\t-b <configuration>'
    print '\t-t <target>'
    print '\t-s <sdk>'
    print ''
    print '  BUILD OPTIONS'
    print '\t-c\tMake clean'
# ------------------------------------------------------------
# main
# ------------------------------------------------------------
# Parse command-line flags; any unknown flag prints usage and exits.
# NOTE: ``except Exc, (a, b)`` is Python 2 only syntax.
try:
    opts, args = getopt.getopt(sys.argv[1:], "p:b:t:s:c")
except getopt.GetoptError, (msg, opt):
    # print 'Error: invalid argument, %s: %s' % (opt, msg)
    usage('invalid argument, %s: %s' % (opt, msg))
    sys.exit(2)
# Build options
# NOTE(review): rebuildAll and doingBuild are set but never read below.
doingBuild = False
rebuildAll = False
makeClean = False
for opt, arg in opts:
    if opt == '-p':
        projectFile = arg
        doingBuild = True
    elif opt == '-b':
        buildName = arg
        doingBuild = True
    elif opt == '-t':
        targetName = arg
    elif opt == '-s':
        sdk = arg
    elif opt == '-c':
        makeClean = True
# Assemble and run the xcodebuild command. If a required flag was omitted,
# referencing its unassigned variable raises NameError, caught below.
try:
    buildSwitch = 'build'
    if makeClean: buildSwitch = 'clean'
    cmd_list = ['xcodebuild', '-project', '%s' % projectFile, '-target', '%s' % targetName, '-sdk', '%s' % sdk, '-configuration', '%s' % buildName, '%s' % buildSwitch]
    cmd = " ".join(cmd_list)
    print 'Executing:'
    print cmd
    retVal = subprocess.call(cmd_list)
    # only the least sig 8 bits are the real return value
    if retVal != 0:
        print cmd
        print '** BUILD FAILURE **'
        sys.exit(retVal)
except NameError, (name):
    usage('missing argument %s' % (name))
    sys.exit(2)
| gpl-2.0 |
f171a9a3497c8b/fractals | fractals/lsys.py | 1 | 7311 | #!/usr/bin/python3
"""FRACTALS: LINDENMAYER SYSTEM"""
import numpy as np
import matplotlib.pyplot as plt
PRECISION = np.float32
def calc_rot_matrix(angle):
    """Return the 2x2 rotation matrix for *angle* as PRECISION floats.

    *angle* is in radians. A positive angle rotates counter-clockwise
    (turns left); a negative angle rotates clockwise (turns right).
    """
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    return np.array([[cos_a, -sin_a],
                     [sin_a, cos_a]], dtype=PRECISION)
def generate_pattern(lvl, states, rewrite_rules):
"""
Inputs:
lvl
-- integer number
-- the number of times (iterations) rewrite rules will be applied
states -- string, the initial state (axiom) of the system
rewrite_rules
-- dictionary
-- keys (character) -> symbols
-- values (string) -> replacement rules
Returns string of symbols.
"""
# In each iteration: check every character in states, replace valid symbol
# with rewrite rule or copy character, and update states
for _ in range(lvl + 1):
states = ''.join([rewrite_rules.get(symbol, symbol) for symbol in states])
# Clean states form rewrite rule flags/symbols
drawing_rules = 'F+-'
states = ''.join([symbol for symbol in states if symbol in drawing_rules])
return states
def generate_points(alpha, theta, length, states):
    """
    Inputs:
    alpha -- number; angle (degrees) between the positive x axis and the
             initial displacement vector
    theta -- number; angle (degrees) of a single rotation
    length -- number; length of the displacement vector for one step
    states -- string of symbols; only 'F' emits a new point, '+'/'-'
              rotate the current direction

    Returns a (2, number_of_F + 1) numpy array of point coordinates.

    Notes:
    ** The initial point is always the origin of the coordinate system.
    """
    # Convert angles from degrees to radians
    alpha = np.radians(alpha)
    theta = np.radians(theta)
    # Displacement vector, 2x1 numpy array
    vec = np.array([[np.cos(alpha)], [np.sin(alpha)]], dtype=PRECISION)
    vec_len = np.sqrt(vec[0] ** 2 + vec[1] ** 2)
    # Rescale displacement vector
    vec = vec / vec_len * length
    # Rotation matrices for positive and negative angles
    rot_left = calc_rot_matrix(theta)
    rot_right = calc_rot_matrix(-theta)
    # Container to store xy components/coordinates of points on a plane
    # (column 0 stays (0, 0): the starting point).
    points = np.zeros(shape=(2, states.count('F') + 1), dtype=PRECISION)
    point_index = 1
    for st in states:
        if st == '+':
            # '+' rotates clockwise (right turn).
            vec = np.dot(rot_right, vec)
        elif st == '-':
            # '-' rotates counter-clockwise (left turn).
            vec = np.dot(rot_left, vec)
        else:
            # Any other symbol (in practice 'F') steps forward one vector.
            points[:, point_index] = points[:, point_index - 1] + vec[:, 0]
            point_index += 1
    return points
def lindemayer(lvl, length, init_angle, angle, init_state,
               title='LINDENMAYER FRACTAL', color='#0080FF', **rewrite_rules):
    """Render an L-system fractal with matplotlib.

    Inputs:
    lvl -- integer; number of rewrite iterations (see generate_pattern)
    length -- number; step length for each 'F'
    init_angle -- number; initial heading in degrees from the +x axis
    angle -- number; turn angle in degrees (positive turns left,
             negative turns right)
    init_state -- string; the axiom of the system
    title -- string; window title of the plot
    color -- string; any valid matplotlib color
    rewrite_rules -- keyword arguments mapping a symbol to its
             replacement string

    Displays the plot of the calculated sequence of points.
    This function does not return any value.
    """
    states = generate_pattern(lvl, init_state, rewrite_rules)
    points = generate_points(init_angle, angle, length, states)
    # Non-interactive mode; the window blocks on plt.show() below.
    plt.ioff()
    plt.figure(num=title, facecolor='white', frameon=False, clear=True)
    plt.style.use('fivethirtyeight')
    # Bare equal-aspect canvas: no grid, no axes.
    plt.grid(False)
    plt.axis('off')
    plt.axis('equal')
    plot_options = {
        'color': color,
        'alpha': 0.5,
        'linestyle': '-',
        'linewidth': 1.3,
        'marker': '',
        'antialiased': False,
    }
    plt.plot(points[0, :], points[1, :], **plot_options)
    plt.show()
# ----- Stock curves: thin wrappers that feed a known axiom, turn angle and
# rewrite rules into lindemayer(). Only *lvl* (detail level) is required.
def heighway_dragon(lvl, length=1, init_angle=0, angle=90):
    lindemayer(lvl, length, init_angle, angle, 'FX', 'HEIGHWAY DRAGON',
               X='X+YF+', Y='-FX-Y')
def twin_dragon(lvl, length=1, init_angle=0, angle=90):
    lindemayer(lvl, length, init_angle, angle, 'FX+FX+', 'TWIN DRAGON',
               X='X+YF', Y='FX-Y')
def tetra_dragon(lvl, length=1, init_angle=0, angle=120):
    lindemayer(lvl, length, init_angle, angle, 'F', 'TETRA DRAGON',
               F='F+F-F')
def levy_dragon(lvl, length=1, init_angle=90, angle=45):
    lindemayer(lvl, length, init_angle, angle, 'F', 'LEVY DRAGON',
               F='+F--F+')
def koch_snowflake(lvl, length=1, init_angle=0, angle=60):
    lindemayer(lvl, length, init_angle, angle, 'F++F++F', 'KOCH SNOWFLAKE',
               F='F-F++F-F')
def koch_curve(lvl, length=1, init_angle=0, angle=90):
    lindemayer(lvl, length, init_angle, angle, 'F+F+F+F', 'KOCH CURVE',
               F='F+F-F-FF+F+F-F')
def sierpinski_triangle(lvl, length=1, init_angle=0, angle=120):
    lindemayer(lvl, length, init_angle, angle, 'F+F+F', 'SIERPINSKI TRIANGLE',
               F='F+F-F-F+F')
def hilbert_curve(lvl, length=1, init_angle=0, angle=90):
    lindemayer(lvl, length, init_angle, angle, 'X', 'HILBERT CURVE',
               X='-YF+XFX+FY-', Y='+XF-YFY-FX+')
def moor_curve(lvl, length=1, init_angle=0, angle=90):
    lindemayer(lvl, length, init_angle, angle, 'XFX+F+XFX', 'MOOR CURVE',
               X='-YF+XFX+FY-', Y='+XF-YFY-FX+')
def peano_curve(lvl, length=1, init_angle=0, angle=90):
    lindemayer(lvl, length, init_angle, angle, 'X', 'PEANO CURVE',
               X='XFYFX+F+YFXFY-F-XFYFX', Y='YFXFY-F-XFYFX+F+YFXFY')
def tiles(lvl, length=1, init_angle=0, angle=90):
    lindemayer(lvl, length, init_angle, angle, 'F+F+F+F', 'TILES',
               F='FF+F-F+F+FF')
def pentadendryt(lvl, length=2, init_angle=0, angle=72):
    lindemayer(lvl, length, init_angle, angle, 'F', 'PENTADENDRYT',
               F='F+F-F--F+F+F')
if __name__ == '__main__':
heighway_dragon(7)
# twin_dragon(10)
# tetra_dragon(6)
# levy_dragon(13)
koch_snowflake(2)
# koch_curve(2)
# sierpinski_triangle(5)
# hilbert_curve(5)
moor_curve(4)
# peano_curve(3)
# tiles(2)
# pentadendryt(4)
| mit |
joopert/home-assistant | homeassistant/components/rflink/binary_sensor.py | 7 | 3339 | """Support for Rflink binary sensors."""
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA,
BinarySensorDevice,
)
from homeassistant.const import CONF_DEVICE_CLASS, CONF_FORCE_UPDATE, CONF_NAME
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.event as evt
from . import CONF_ALIASES, CONF_DEVICES, RflinkDevice
CONF_OFF_DELAY = "off_delay"
DEFAULT_FORCE_UPDATE = False
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_DEVICES, default={}): {
cv.string: vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(
CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE
): cv.boolean,
vol.Optional(CONF_OFF_DELAY): cv.positive_int,
vol.Optional(CONF_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
}
},
extra=vol.ALLOW_EXTRA,
)
def devices_from_config(domain_config):
    """Build an RflinkBinarySensor for every configured device and return them."""
    return [
        RflinkBinarySensor(device_id, **config)
        for device_id, config in domain_config[CONF_DEVICES].items()
    ]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Rflink platform."""
    # Sensors come solely from YAML config; discovery_info is unused here.
    async_add_entities(devices_from_config(config))
class RflinkBinarySensor(RflinkDevice, BinarySensorDevice):
    """Representation of an Rflink binary sensor."""
    def __init__(
        self, device_id, device_class=None, force_update=False, off_delay=None, **kwargs
    ):
        """Handle sensor specific args and super init.

        off_delay -- seconds after an 'on' event before the sensor is
        automatically switched back off (None disables the behavior).
        """
        self._state = None
        self._device_class = device_class
        self._force_update = force_update
        self._off_delay = off_delay
        # Handle for the pending auto-off timer, if any.
        self._delay_listener = None
        super().__init__(device_id, **kwargs)
    def _handle_event(self, event):
        """Domain specific event handler."""
        command = event["command"]
        if command == "on":
            self._state = True
        elif command == "off":
            self._state = False
        if self._state and self._off_delay is not None:
            def off_delay_listener(now):
                """Switch device off after a delay."""
                self._delay_listener = None
                self._state = False
                self.async_schedule_update_ha_state()
            # Cancel any previously scheduled auto-off before rescheduling,
            # so repeated 'on' events restart the countdown.
            if self._delay_listener is not None:
                self._delay_listener()
            self._delay_listener = evt.async_call_later(
                self.hass, self._off_delay, off_delay_listener
            )
    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        return self._state
    @property
    def device_class(self):
        """Return the class of this sensor."""
        return self._device_class
    @property
    def force_update(self):
        """Force update."""
        return self._force_update
| apache-2.0 |
CarlosCardosoV/IA | villanos2.py | 1 | 4388 | #BSME: Beltran Sarmiento Mario Edwin
#CVCE: Cardoso Valencia Carlos ErIc
#MPVS: Martinez Pozos Victor Sebastian
#MI: Marin Ivan
import random
def granSuperVillano(numero_villanos,numero_superVillanos):
	"""Run the whole genetic-style pipeline and print the best super-villain."""
	habilidades = ["MALDAD","VOLAR","FUERZA","EGO","MONOLOGAR","TRIUNFOS","INGENIO","PODER MENTAL","AUDACIA","PODER ECONOMICO"]
	columnas = len(habilidades)
	renglones = numero_villanos
	tabla = primeraGeneracion(renglones,columnas) #STEP 1: generate a first (random) generation
	#Print the skill table of the first generation of villains
	print habilidades
	for i in tabla:
		print i
	megamente = [None]*columnas #Holds the skills of every "Megamente"
	sedusa = [None]*columnas #Holds the skills of every "Sedusa"
	sedumente = [None]*columnas #Holds the skills of every "Sedumente"
	aleatorio = [None]*columnas
	#Repeat steps 2, 3, 4 to produce the requested number of Sedumentes (STEP 5)
	for i in range(numero_superVillanos):
		megamente[i] = crearNuevoVillano(tabla,renglones,columnas) #STEP 2: breed villain 1 from the first generation
		print "Habilidades Megamente %d" %(i+1)
		print megamente[i]
		sedusa[i] = crearNuevoVillano(tabla,renglones,columnas) #STEP 3: breed villain 2 from the first generation
		print "Habilidades Sedusa %d" %(i+1)
		print sedusa[i]
		[ sedumente[i],aleatorio[i] ]= mezclarVillanos(megamente[i],sedusa[i],columnas) #STEP 4: cross the two villains at a
		#random cut point to create a new super-villain
		#Megamente[i] + Sedusa[i] = Sedumente[i]
		print "Habilidades Sedumente[%d]. (Mezcla de Megamente %d y Sedusa %d con aleatorio %d)" %((i+1),(i+1),(i+1),aleatorio[i])
		print sedumente[i]
	#STEP 6: apply steps 2, 3, 4 again over the population of Sedumentes
	super_Villano1 = crearNuevoVillano(sedumente,numero_superVillanos,columnas) #Great super-villain 1 from the Sedumente table
	super_Villano2 = crearNuevoVillano(sedumente,numero_superVillanos,columnas) #Great super-villain 2 from the Sedumente table
	[granVillano,aleatorio] = mezclarVillanos(super_Villano1,super_Villano2,columnas) #Cross the two great super-villains
	print "Gran Supervillano1 dada la generacion de Sedumentes"
	print super_Villano1
	print "Gran Supervillano2 dada la generacion de Sedumentes"
	print super_Villano2
	print "El mejor Supervillano (Gran Supervillano1 + Gran Supervillano2)con Aleatorio = %d es:" %(aleatorio) #The GREAT VILLAIN (child of superVillano1 + superVillano2)
	print granVillano
#Crea una primera generacion de villanos con habilidades aleatorias del 0 al 9.
def primeraGeneracion(renglones, columnas):
    """Build the first generation: a renglones x columnas table of random
    skill values in [0, 9]. Values are drawn row by row, left to right."""
    return [
        [random.randrange(10) for _ in range(columnas)]
        for _ in range(renglones)
    ]
#Crea un nuevo villano a partir de los datos de una tabla
def crearNuevoVillano(tabla, renglones, columnas):
    """Breed a new villain from *tabla*: each skill column is taken from a
    randomly chosen row, so the child mixes skills of the whole generation."""
    return [tabla[random.randrange(renglones)][col] for col in range(columnas)]
#Genera un supervillano, la mezcla entre dos villanos que se generaron de la primera generacion
def mezclarVillanos(habilidades_villano1, habilidades_villano2, columnas):
    """Cross two villains at a random cut point in [1, 10].

    Skill slots before the cut come from villain 1, the rest from
    villain 2. Returns (child_skills, cut_point).
    """
    corte = random.randint(1, 10)
    mezcla = [
        habilidades_villano1[i] if i < corte else habilidades_villano2[i]
        for i in range(columnas)
    ]
    return mezcla, corte
numero_villanos = input("Numero de villanos iniciales: ")
numero_superVillanos = input("Numero de supervillano(Sedumentes)")
granSuperVillano(numero_villanos,numero_superVillanos) | gpl-3.0 |
loretoparisi/nupic | nupic/encoders/scalarspace.py | 40 | 1946 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from nupic.encoders.base import Encoder
from nupic.encoders.delta import DeltaEncoder
from nupic.encoders.adaptivescalar import AdaptiveScalarEncoder
class ScalarSpaceEncoder(Encoder):
  """Factory for scalar encoders operating in different value spaces.

  Instantiating this class returns an AdaptiveScalarEncoder (absolute
  space) or a DeltaEncoder (delta space) depending on ``space``; a
  ScalarSpaceEncoder instance itself is never created.
  """

  SPACE_ABSOLUTE = "absolute"
  SPACE_DELTA = "delta"

  def __init__(self):
    pass

  def __new__(cls, w, minval=None, maxval=None, periodic=False, n=0, radius=0,
              resolution=0, name=None, verbosity=0, clipInput=False,
              space="absolute", forced=False):
    # Compare against the declared class constant instead of a repeated
    # string literal so the two cannot drift apart. Any unrecognized value
    # falls through to delta space, matching the original behavior.
    # (The original also set a stray ``_encoder`` class attribute here;
    # nothing reads it, so it is dropped.)
    if space == cls.SPACE_ABSOLUTE:
      return AdaptiveScalarEncoder(w, minval, maxval, periodic, n, radius,
                                   resolution, name, verbosity, clipInput,
                                   forced=forced)
    return DeltaEncoder(w, minval, maxval, periodic, n, radius, resolution,
                        name, verbosity, clipInput, forced=forced)
| agpl-3.0 |
MiLk/mineos | stock_profiles.py | 1 | 2041 | """A python script to manage minecraft servers
Designed for use with MineOS: http://minecraft.codeemo.com
"""
__author__ = "William Dizon"
__license__ = "GNU GPL v3.0"
__version__ = "0.6.0"
__email__ = "wdchromium@gmail.com"
# Built-in server profiles. Each entry names a downloadable server jar:
# 'url' is fetched and stored as 'save_as'; 'run_as' is the jar launched.
STOCK_PROFILES = {
    'vanilla179': {
        'name': 'vanilla179',
        'type': 'standard_jar',
        'url': 'https://s3.amazonaws.com/Minecraft.Download/versions/1.7.9/minecraft_server.1.7.9.jar',
        'save_as': 'minecraft_server.1.7.9.jar',
        'run_as': 'minecraft_server.1.7.9.jar',
        'ignore': '',
        'desc': 'official minecraft_server.jar'
        },
    'vanilla164': {
        'name': 'vanilla164',
        'type': 'standard_jar',
        'url': 'https://s3.amazonaws.com/Minecraft.Download/versions/1.6.4/minecraft_server.1.6.4.jar',
        'save_as': 'minecraft_server.jar',
        'run_as': 'minecraft_server.jar',
        'ignore': '',
        'desc': 'official minecraft_server.jar'
        },
    # Bukkit channels: recommended (stable), beta, and dev builds.
    'bukkit-recommended': {
        'name': 'bukkit-recommended',
        'type': 'standard_jar',
        'url': 'http://dl.bukkit.org/latest-rb/craftbukkit.jar',
        'save_as': 'craftbukkit.jar',
        'run_as': 'craftbukkit.jar',
        'ignore': '',
        },
    'bukkit-beta': {
        'name': 'bukkit-beta',
        'type': 'standard_jar',
        'url': 'http://dl.bukkit.org/latest-beta/craftbukkit.jar',
        'save_as': 'craftbukkit.jar',
        'run_as': 'craftbukkit.jar',
        'ignore': '',
        },
    'bukkit-dev': {
        'name': 'bukkit-dev',
        'type': 'standard_jar',
        'url': 'http://dl.bukkit.org/latest-dev/craftbukkit.jar',
        'save_as': 'craftbukkit.jar',
        'run_as': 'craftbukkit.jar',
        'ignore': '',
        },
    'spigot': {
        'name': 'spigot',
        'type': 'standard_jar',
        'url': 'http://ci.md-5.net/job/Spigot/lastSuccessfulBuild/artifact/Spigot-Server/target/spigot.jar',
        'save_as': 'spigot.jar',
        'run_as': 'spigot.jar',
        'ignore': '',
        }
    }
| gpl-3.0 |
lkumar93/Deep_Learning_Crazyflie | src/vision_opencv/opencv_tests/nodes/broadcast.py | 3 | 2918 | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# Copyright (c) 2016, Tal Regev.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import time
import math
import rospy
import cv2
import sensor_msgs.msg
from cv_bridge import CvBridge
# Send each image by iterate it from given array of files names to a given topic,
# as a regular and compressed ROS Images msgs.
class Source:
    """Publish the given image files, round-robin at one per second, as both
    raw and compressed ROS Image messages on *topic* (and *topic*/compressed).
    """

    def __init__(self, topic, filenames):
        self.pub = rospy.Publisher(topic, sensor_msgs.msg.Image)
        self.pub_compressed = rospy.Publisher(topic + "/compressed", sensor_msgs.msg.CompressedImage)
        self.filenames = filenames

    def spin(self):
        # Give subscribers a moment to connect before the first publish.
        time.sleep(1.0)
        cvb = CvBridge()
        while not rospy.core.is_shutdown():
            # Bug fix: OpenCV has no cv2.imload; cv2.imread loads an image.
            cvim = cv2.imread(self.filenames[0])
            self.pub.publish(cvb.cv2_to_imgmsg(cvim))
            self.pub_compressed.publish(cvb.cv2_to_compressed_imgmsg(cvim))
            # Rotate the list so the next iteration publishes the next file.
            self.filenames = self.filenames[1:] + [self.filenames[0]]
            time.sleep(1)
def main(args):
    """Start a Source node publishing args[2:] image files on topic args[1]."""
    s = Source(args[1], args[2:])
    rospy.init_node('Source')
    try:
        # s.spin() publishes forever; rospy.spin() is only reached on return.
        s.spin()
        rospy.spin()
        outcome = 'test completed'
    except KeyboardInterrupt:
        print "shutting down"
        outcome = 'keyboard interrupt'
    rospy.core.signal_shutdown(outcome)
if __name__ == '__main__':
main(sys.argv)
| mit |
guillermooo/dart-sublime-bundle-releases | format.py | 3 | 1070 | # Copyright (c) 2014, Guillermo López-Anglada. Please see the AUTHORS file for details.
# All rights reserved. Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.)
from subprocess import PIPE
from subprocess import Popen
import sublime
import sublime_plugin
from Dart.sublime_plugin_lib import PluginLogger
from Dart.sublime_plugin_lib.plat import supress_window
from Dart import analyzer
from Dart.lib.sdk import DartFormat
_logger = PluginLogger(__name__)
class DartFormatCommand(sublime_plugin.WindowCommand):
    '''Formats the selected text in Sublime Text using `dartfmt`.
    Notes:
    - Can be used as a build system.
    '''
    def run(self, **kwargs):
        # Nothing to format when no view has focus.
        view = self.window.active_view()
        if not view:
            return
        # Delegate the actual formatting to the running analysis server.
        analyzer.g_server.send_format_file(view)
class DartReplaceRegion(sublime_plugin.TextCommand):
    """Replace a (begin, end) region of the view with *text*, then reindent."""
    def run(self, edit, region, text):
        # *region* arrives as a serializable pair; rebuild the Region object.
        reg = sublime.Region(*region)
        self.view.replace(edit, reg, text)
        self.view.run_command('reindent')
| bsd-3-clause |
naokimiyasaka/sublime-text | Backup/20130904110636/PHP Getters and Setters/getter-setter.py | 1 | 11621 | import sublime
import sublime_plugin
import re
import os
import json
def msg(msg):
    """Print a console message prefixed with the plugin name.

    NOTE(review): the parameter shadows the function name, so msg() cannot
    recurse; harmless here but worth renaming.
    """
    print "[PHP Getters and Setters] %s" % msg
class Prefs:
    """
    Plugin preferences
    """
    @staticmethod
    def load():
        # Read settings and cache them as class attributes
        # (Prefs.typeHintIgnore, Prefs.templates, Prefs.style).
        settings = sublime.load_settings('php-getters-setters.sublime-settings')
        Prefs.typeHintIgnore = settings.get('type_hint_ignore')
        templatePath = settings.get('templates')
        # A relative template path is resolved inside this plugin's package dir.
        if False == os.path.isabs(templatePath) :
            templatePath = os.path.join(sublime.packages_path(), "PHP Getters and Setters", templatePath)
        Prefs.templates = templatePath
        Prefs.style = settings.get('style')
        msg ("ignored type hinting var types %s" % Prefs.typeHintIgnore)
        msg ("code style is %s" % Prefs.style)
        msg ("templates are in %s" % Prefs.templates)
# Populate the preferences once at plugin load time.
Prefs.load()
class Template(object):
    """A text template loaded from Prefs.templates/<style>/<name>.

    The file content uses %-style placeholders, filled in by replace().
    """

    def __init__(self, name, style = "camelCase"):
        path = os.path.join(Prefs.templates, style, name)
        msg("opening template %s" % path)
        # Context manager closes the handle promptly; the original leaked
        # the file object returned by open().
        with open(path) as handle:
            self.content = handle.read()

    def replace(self, args):
        """Return the template text with % placeholders substituted from *args*."""
        return self.content % args
class DocBlock(object):
    """Parsed representation of a PHP docblock: a free-text description
    plus a mapping of @tag names to their values."""

    def __init__(self):
        self.tags = {}
        self.description = ''

    def hasTag(self, name):
        """Return True when tag *name* was parsed."""
        return name in self.tags

    def hasDescription(self):
        """Return True when a non-empty description was parsed."""
        return len(self.description) > 0

    def addTag(self, name, value):
        """Record tag *name* with *value* (later values overwrite)."""
        self.tags[name] = value

    def getTag(self, name):
        """Return the value of tag *name*, or None when absent."""
        return self.tags.get(name)

    def setDescription(self, value):
        self.description = value

    def getDescription(self):
        return self.description

    def fromText(self, content):
        """Populate tags and description by parsing raw docblock text."""
        description_lines = []
        for raw_line in content.split("\n"):
            # Drop the leading/trailing comment decoration (spaces, * and /).
            text = raw_line.strip(' */')
            if text.startswith('@'):
                found = re.findall('\@(\w+) (:?.*)[ ]?.*', text)
                if found:
                    tag_name, tag_value = found[0]
                    self.addTag(tag_name.strip('@'), tag_value)
                else:
                    msg("Error: could not parse line %s" % text)
            elif text:
                description_lines.append(text)
        self.setDescription("\n".join(description_lines).rstrip("\n"))
class Parser(object):
    """
    Parses PHP source text to extract class variables (so that the
    getter/setter generation magic can happen).
    """

    def __init__(self, content):
        self.content = content
        # %-template filled with a function name to detect its presence
        self.functionRegExp = ".*function.*%s"
        # visibility keyword followed by a $variable declaration
        self.variableRegExp = '((?:private|public|protected)[ ]{0,}(?:final|static)?[ ]{0,}(?:\$.*?)[ |=|;].*)\n'

    def getContent(self):
        return self.content

    def hasFunction(self, name):
        """
        returns true if the function with the name _name_ is found in the code
        """
        regExp = self.functionRegExp % name
        return re.search(regExp, self.getContent()) is not None

    def _getDockBlockOfVariable(self, line):
        """Return the /** ... */ block directly above *line*, or ''."""
        content = self.getContent()
        matchPos = content.find(line)
        # Walk backwards from the declaration looking for the comment pair.
        lineByLine = content[:matchPos].split("\n")
        lineByLine.reverse()
        commentStart = 0
        commentEnd = 0
        for n in range(len(lineByLine)) :
            line = lineByLine[n].strip()
            if "\n" == line :
                continue
            elif "\r\n" == line :
                continue
            elif "" == line :
                continue
            elif '*/' == line :
                commentStart = n +1
            elif '/**' == line :
                commentEnd = n
                break
            elif 0 == commentStart:
                # hit code before any '*/': no docblock directly above
                break
        if commentStart == commentEnd :
            return ""
        if (commentStart == 0) or (commentEnd == 0) :
            return ""
        result = lineByLine[commentStart:commentEnd]
        result.reverse()
        return "\n".join(result)

    def _processVariable(self, line):
        """
        Returns a Variable object populated from the parsed code
        """
        nameMatches = re.findall('\$(.*?)[ |=|;]', line)
        name = "Unknown"
        # BUGFIX: the original tested `len(nameMatches) >= 0`, which is
        # always true, so a declaration the regex could not split raised
        # IndexError instead of falling back to "Unknown".
        if len(nameMatches) > 0 :
            name = nameMatches[0]
        docblock = DocBlock()
        docblock.fromText(self._getDockBlockOfVariable(line))
        typeName = 'mixed'
        if docblock.hasTag('var'):
            typeName = docblock.getTag('var')
        return Variable(name = name, typeName = typeName, description = docblock.getDescription())

    def getClassVariables(self):
        """
        returns a list of Variable objects, created from the parsed code
        """
        variablesList = []
        for match in re.findall(self.variableRegExp, self.getContent(), re.IGNORECASE):
            variablesList.append(self._processVariable(match))
        return variablesList
class Variable(object):
    """A PHP class member variable discovered by Parser."""

    def __init__(self, name, typeName = None, description=None):
        self.name = name
        self.type = typeName
        self.description = description

    def getName(self):
        return self.name

    def getDescription(self):
        # Falls back to a generated description; note the generated text
        # is cached back onto the instance.
        if self.description is None or "" == self.description:
            self.description = 'value of %s' % self.getName()
        return self.description

    def getPartialFunctionName(self):
        """Return the variable name shaped for use inside an accessor name."""
        name = self.getName()
        if 'camelCase' == Prefs.style :
            return name[0].upper() + name[1:]
        return name

    def getGetterFunctionName(self):
        if 'camelCase' == Prefs.style :
            return "get%s" % self.getPartialFunctionName()
        return "get_%s" % self.getPartialFunctionName()

    def getSetterFunctionName(self):
        if 'camelCase' == Prefs.style :
            return "set%s" % self.getPartialFunctionName()
        return "set_%s" % self.getPartialFunctionName()

    def getType(self):
        return self.type

    def GetTypeHint(self):
        # No hint for ignored (scalar) types or for compound declarations
        # such as "int|string" or "Foo Bar".
        if self.type in Prefs.typeHintIgnore :
            return ''
        if self.type.find(" ") > -1 or self.type.find("|") > -1:
            # BUGFIX: log message typo ("thatn" -> "than")
            msg("'%s' is more than one type, switching to no type hint" % self.type)
            return ""
        return self.type
class Base(sublime_plugin.TextCommand):
    """Shared plumbing for all getter/setter generation commands."""

    def __init__(self, arg):
        sublime_plugin.TextCommand.__init__(self, arg)
        self.variables = None
        self.parser = None

    def getContent(self):
        """Return the full text of the current buffer."""
        return self.view.substr(sublime.Region(0, self.view.size()))

    def getParser(self, content = ''):
        self.parser = Parser(content)
        return self.parser

    def findLastBracket(self):
        """Return the offset of the last '}' in the view.

        NOTE(review): `long` is Python 2 only, and if the buffer contains
        no '}' at all then `lastPos` stays 0 (an int) and `.begin()`
        raises AttributeError -- confirm intended behaviour.
        """
        view = self.view
        pos = long(0)
        lastPos = 0
        while pos is not None:
            pos = view.find('\}', pos);
            if pos is not None:
                lastPos = pos
            if type(pos) == sublime.Region:
                pos = pos.end()
        return lastPos.begin()

    def getVariables(self, parser):
        # NOTE(review): the `parser` argument is ignored; the file on disk
        # (not the unsaved buffer) is parsed -- confirm this is intended.
        filename = self.view.file_name()
        # BUGFIX: close the file handle deterministically instead of
        # leaking it until garbage collection.
        with open(filename) as source_file:
            parser = self.getParser(source_file.read())
        self.variables = parser.getClassVariables()
        return self.variables

    def generateFunctionCode(self, template, variable):
        """Fill *template* with the substitution values of *variable*."""
        substitutions = {
            "name": variable.getName(),
            "type": variable.getType(),
            "normalizedName": variable.getPartialFunctionName(),
            "description": variable.getDescription(),
            "typeHint": variable.GetTypeHint()
        }
        return template.replace(substitutions)

    def generateGetterFunction(self, parser, variable):
        if parser.hasFunction(variable.getGetterFunctionName()):
            msg("function %s already present, skipping" % variable.getGetterFunctionName())
            return ''
        template = Template('getter.tpl', Prefs.style)
        return self.generateFunctionCode(template, variable)

    def generateSetterFunction(self, parser, variable):
        if parser.hasFunction(variable.getSetterFunctionName()):
            msg("function %s already present, skipping" % variable.getSetterFunctionName())
            return ''
        code = self.generateFunctionCode(Template('setter.tpl', Prefs.style), variable)
        # if type hinting is not to be shown we get "( " instead of (
        code = code.replace('( ', '(')
        return code

    def writeAtEnd(self, edit, text):
        """Insert *text* just before the buffer's last closing bracket."""
        lastPos = self.findLastBracket()
        self.view.insert(edit, lastPos, text)

    def isPhpSyntax(self):
        return re.search(".*\PHP.tmLanguage", self.view.settings().get('syntax')) is not None

    def is_enabled(self):
        return self.isPhpSyntax()

    def is_visible(self):
        return self.is_enabled()
class PhpGenerateFor(Base):
    """Quick-panel driven generation of a single accessor; subclasses set
    ``what`` to 'getter' or 'setter'."""
    what = 'getter'

    def run(self, edit):
        self.edit = edit
        parser = self.getParser(self.getContent())
        self.vars = []
        for variable in parser.getClassVariables():
            self.vars.append([variable.getName(), variable.getDescription()])
        self.view.window().show_quick_panel(self.vars, self.write)

    def write(self, index):
        # BUGFIX: Sublime invokes the callback with -1 when the panel is
        # cancelled; the original fell through and generated code for the
        # *last* variable (self.vars[-1]).
        if index == -1:
            return
        name = self.vars[index][0]
        parser = self.getParser(self.getContent())
        for variable in parser.getClassVariables():
            if name == variable.getName():
                if 'getter' == self.what :
                    code = self.generateGetterFunction(parser, variable)
                else:
                    code = self.generateSetterFunction(parser, variable)
                self.writeAtEnd(self.edit, code)
class PhpGenerateGetterForCommand(PhpGenerateFor):
    """Quick-panel command: generate a getter for the chosen variable."""
    what = 'getter'
class PhpGenerateSetterForCommand(PhpGenerateFor):
    """Quick-panel command: generate a setter for the chosen variable."""
    what = 'setter'
class PhpGenerateGettersCommand(Base):
    """Generate getters for every class variable in the buffer."""

    def run(self, edit):
        parser = self.getParser(self.getContent())
        snippets = [self.generateGetterFunction(parser, variable)
                    for variable in parser.getClassVariables()]
        self.writeAtEnd(edit, ''.join(snippets))
class PhpGenerateSettersCommand(Base):
    """Generate setters for every class variable in the buffer."""

    def run(self, edit):
        parser = self.getParser(self.getContent())
        snippets = [self.generateSetterFunction(parser, variable)
                    for variable in parser.getClassVariables()]
        self.writeAtEnd(edit, ''.join(snippets))
class PhpGenerateGettersSettersCommand(Base):
    """Generate a getter followed by a setter for every class variable."""

    def run(self, edit):
        parser = self.getParser(self.getContent())
        pieces = []
        for variable in parser.getClassVariables():
            pieces.append(self.generateGetterFunction(parser, variable))
            pieces.append(self.generateSetterFunction(parser, variable))
        self.writeAtEnd(edit, ''.join(pieces))
class PhpGenerateGettersSetterUnavailable(Base):
    """Disabled placeholder command shown for non-PHP buffers."""
    def run(self, edit):
        pass
    def is_enabled(self):
        return False
    def is_visible(self):
        # Visible exactly when the real commands are hidden.
        return not self.isPhpSyntax()
def description(self):
return "Only available for PHP syntax buffers" | mit |
rahushen/ansible | lib/ansible/modules/system/ufw.py | 35 | 11228 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2014, Ahti Kitsik <ak@ahtik.com>
# Copyright: (c) 2014, Jarno Keskikangas <jarno.keskikangas@gmail.com>
# Copyright: (c) 2013, Aleksey Ovcharenko <aleksey.ovcharenko@gmail.com>
# Copyright: (c) 2013, James Martin <jmartin@basho.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ufw
short_description: Manage firewall with UFW
description:
- Manage firewall with UFW.
version_added: 1.6
author:
- Aleksey Ovcharenko (@ovcharenko)
- Jarno Keskikangas (@pyykkis)
- Ahti Kitsik (@ahtik)
notes:
- See C(man ufw) for more examples.
requirements:
- C(ufw) package
options:
state:
description:
- C(enabled) reloads firewall and enables firewall on boot.
- C(disabled) unloads firewall and disables firewall on boot.
- C(reloaded) reloads firewall.
- C(reset) disables and resets firewall to installation defaults.
choices: [ disabled, enabled, reloaded, reset ]
policy:
description:
- Change the default policy for incoming or outgoing traffic.
aliases: [ default ]
choices: [ allow, deny, reject ]
direction:
description:
- Select direction for a rule or default policy command.
choices: [ in, incoming, out, outgoing, routed ]
logging:
description:
- Toggles logging. Logged packets use the LOG_KERN syslog facility.
choices: [ on, off, low, medium, high, full ]
insert:
description:
- Insert the corresponding rule as rule number NUM
rule:
description:
- Add firewall rule
choices: ['allow', 'deny', 'limit', 'reject']
log:
description:
- Log new connections matched to this rule
type: bool
from_ip:
description:
- Source IP address.
aliases: [ from, src ]
default: any
from_port:
description:
- Source port.
to_ip:
description:
- Destination IP address.
aliases: [ dest, to]
default: any
to_port:
description:
- Destination port.
aliases: [ port ]
proto:
description:
- TCP/IP protocol.
choices: [ any, tcp, udp, ipv6, esp, ah ]
name:
description:
- Use profile located in C(/etc/ufw/applications.d).
aliases: [ app ]
delete:
description:
- Delete rule.
type: bool
interface:
description:
- Specify interface for rule.
aliases: [ if ]
route:
description:
- Apply the rule to routed/forwarded packets.
type: bool
comment:
description:
- Add a comment to the rule. Requires UFW version >=0.35.
version_added: "2.4"
'''
EXAMPLES = '''
- name: Allow everything and enable UFW
ufw:
state: enabled
policy: allow
- name: Set logging
ufw:
logging: on
# Sometimes it is desirable to let the sender know when traffic is
# being denied, rather than simply ignoring it. In these cases, use
# reject instead of deny. In addition, log rejected connections:
- ufw:
rule: reject
port: auth
log: yes
# ufw supports connection rate limiting, which is useful for protecting
# against brute-force login attacks. ufw will deny connections if an IP
# address has attempted to initiate 6 or more connections in the last
# 30 seconds. See http://www.debian-administration.org/articles/187
# for details. Typical usage is:
- ufw:
rule: limit
port: ssh
proto: tcp
# Allow OpenSSH. (Note that as ufw manages its own state, simply removing
# a rule=allow task can leave those ports exposed. Either use delete=yes
# or a separate state=reset task)
- ufw:
rule: allow
name: OpenSSH
- name: Delete OpenSSH rule
ufw:
rule: allow
name: OpenSSH
delete: yes
- name: Deny all access to port 53
ufw:
rule: deny
port: 53
- name: Allow port range 60000-61000
ufw:
rule: allow
port: 60000:61000
- name: Allow all access to tcp port 80
ufw:
rule: allow
port: 80
proto: tcp
- name: Allow all access from RFC1918 networks to this host
ufw:
rule: allow
src: '{{ item }}'
with_items:
- 10.0.0.0/8
- 172.16.0.0/12
- 192.168.0.0/16
- name: Deny access to udp port 514 from host 1.2.3.4 and include a comment
ufw:
rule: deny
proto: udp
src: 1.2.3.4
port: 514
comment: Block syslog
- name: Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
ufw:
rule: allow
interface: eth0
direction: in
proto: udp
src: 1.2.3.5
from_port: 5469
dest: 1.2.3.4
to_port: 5469
# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
- name: Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host
ufw:
rule: deny
proto: tcp
src: 2001:db8::/32
port: 25
# Can be used to further restrict a global FORWARD policy set to allow
- name: Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24
ufw:
rule: deny
route: yes
src: 1.2.3.0/24
dest: 4.5.6.0/24
'''
import re
from operator import itemgetter
from ansible.module_utils.basic import AnsibleModule
def main():
    """Ansible ufw module entry point: validate the task arguments, build
    the corresponding ufw command line(s), run them, and report whether
    the firewall state or rules changed."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
            default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
            logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
            direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
            delete=dict(type='bool', default=False),
            route=dict(type='bool', default=False),
            insert=dict(type='str'),
            rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
            interface=dict(type='str', aliases=['if']),
            log=dict(type='bool', default=False),
            from_ip=dict(type='str', default='any', aliases=['from', 'src']),
            from_port=dict(type='str'),
            to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
            to_port=dict(type='str', aliases=['port']),
            proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp']),
            app=dict(type='str', aliases=['name']),
            comment=dict(type='str'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ['app', 'proto', 'logging']
        ],
    )

    cmds = []

    def execute(cmd):
        # cmd is a list of [condition, text] pairs: keep the text of every
        # pair whose condition is truthy, join into one command, run it.
        cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))

        cmds.append(cmd)
        (rc, out, err) = module.run_command(cmd)

        if rc != 0:
            module.fail_json(msg=err or out)

    def ufw_version():
        """
        Returns the major and minor version of ufw installed on the system.
        """
        rc, out, err = module.run_command("%s --version" % ufw_bin)
        if rc != 0:
            module.fail_json(
                msg="Failed to get ufw version.", rc=rc, out=out, err=err
            )

        lines = [x for x in out.split('\n') if x.strip() != '']
        if len(lines) == 0:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
        if matches is None:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        # Convert version to numbers
        major = int(matches.group(1))
        minor = int(matches.group(2))
        rev = 0
        if matches.group(3) is not None:
            rev = int(matches.group(3))

        return major, minor, rev

    params = module.params

    # Ensure at least one of the command arguments are given
    command_keys = ['state', 'default', 'rule', 'logging']
    commands = dict((key, params[key]) for key in command_keys if params[key])

    if len(commands) < 1:
        # BUGFIX: report the recognised argument names; the original
        # interpolated `commands`, which is always {} on this path.
        module.fail_json(msg="Not any of the command arguments %s given" % command_keys)

    if (params['interface'] is not None and params['direction'] is None):
        module.fail_json(msg="Direction must be specified when creating a rule on an interface")

    # Ensure ufw is available
    ufw_bin = module.get_bin_path('ufw', True)

    # Save the pre state and rules in order to recognize changes
    (_, pre_state, _) = module.run_command(ufw_bin + ' status verbose')
    (_, pre_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules")

    # Execute commands
    for (command, value) in commands.items():
        cmd = [[ufw_bin], [module.check_mode, '--dry-run']]

        if command == 'state':
            states = {'enabled': 'enable', 'disabled': 'disable',
                      'reloaded': 'reload', 'reset': 'reset'}
            # -f answers ufw's interactive confirmation prompts
            execute(cmd + [['-f'], [states[value]]])

        elif command == 'logging':
            execute(cmd + [[command], [value]])

        elif command == 'default':
            execute(cmd + [[command], [value], [params['direction']]])

        elif command == 'rule':
            # Rules are constructed according to the long format
            #
            # ufw [--dry-run] [delete] [insert NUM] [route] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
            #     [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
            #     [proto protocol] [app application] [comment COMMENT]
            cmd.append([module.boolean(params['delete']), 'delete'])
            cmd.append([module.boolean(params['route']), 'route'])
            cmd.append([params['insert'], "insert %s" % params['insert']])
            cmd.append([value])
            cmd.append([params['direction'], "%s" % params['direction']])
            cmd.append([params['interface'], "on %s" % params['interface']])
            cmd.append([module.boolean(params['log']), 'log'])

            for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
                                    ('to_ip', "to %s"), ('to_port', "port %s"),
                                    ('proto', "proto %s"), ('app', "app '%s'")]:
                value = params[key]
                cmd.append([value, template % (value)])

            ufw_major, ufw_minor, _ = ufw_version()
            # comment is supported only in ufw version after 0.35
            if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
                cmd.append([params['comment'], "comment '%s'" % params['comment']])

            execute(cmd)

    # Get the new state
    (_, post_state, _) = module.run_command(ufw_bin + ' status verbose')
    (_, post_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user.rules /lib/ufw/user6.rules /etc/ufw/user.rules /etc/ufw/user6.rules")
    changed = (pre_state != post_state) or (pre_rules != post_rules)

    # exit_json raises, so nothing runs after this call.
    return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
| gpl-3.0 |
fmfi-svt/votr | aisikl/components/actionablecontrol.py | 1 | 1040 |
from .control import Control
class ActionableControl(Control):
    """A control that may be bound, via its 'an' property, to an action
    component of the owning dialog."""

    def __init__(self, dialog, id, type, parent_id, properties, element):
        super().__init__(dialog, id, type, parent_id, properties, element)
        # name of the associated action component, if any
        self.action_name = properties.get('an')

    @property
    def action(self):
        # Resolve the action component lazily from the owning dialog.
        if not self.action_name:
            return None
        return self.dialog.components[self.action_name]

    def try_execute_action(self, params=None):
        # Execute the bound action if there is one; report whether we did.
        action = self.action
        if not action:
            return False
        action.execute(self.id, params)
        return True

    def is_really_enabled(self):
        return (super().is_really_enabled() and
                self.get_enabled_by_owner_container())

    def get_enabled_by_owner_container(self):
        # Enabled unless the chain action -> action list -> owner container
        # resolves to a container that is itself disabled.
        action = self.action
        if not action:
            return True
        action_list = action.parent
        if not action_list:
            return True
        owner_container = action_list.owner_container
        if not owner_container:
            return True
        return owner_container.is_really_enabled()
| apache-2.0 |
olivergs/lotroassist | src/plugins/xpcount/__init__.py | 1 | 2435 | # -*- coding: utf-8 -*-
###############################################################################
# (C) 2010 Oliver Gutiérrez <ogutsua@gmail.com>
# LOTROAssist experience plugin
###############################################################################
# Python Imports
import re
# GTK Imports
import gobject
import gtk
# EVOGTK Imports
from evogtk.gui import GUIClass
class Plugin(GUIClass):
    """
    LOTROAssist experience plugin: watches the game log for XP gains and
    level-up messages.
    """
    metadata={
        'PLUGIN_NAME': 'XP Counter',
        'PLUGIN_CODENAME': 'xpcount',
        'PLUGIN_VERSION': '0.1',
        'PLUGIN_DESC': 'Lord Of The Rings Online Assistant plugin for XP counting',
        'PLUGIN_COPYRIGHT': '(C) 2010 Oliver Gutiérrez <ogutsua@gmail.com>',
        'PLUGIN_WEBSITE': 'http://www.evosistemas.com',
        'PLUGIN_DOCK': 'status',
    }

    def initialize(self):
        """
        Compile the log-line patterns this plugin recognises.
        """
        self.regexp = re.compile(r'You\'ve earned (?P<gain>\d+) XP for a total of (?P<total>\d*(,)*\d*) XP\.$')
        self.refellowlevup = re.compile(r'Your fellow, (?P<player>.*), is now level (?P<level>\d+)\.$')
        self.reselflevup = re.compile(r'Your level has changed to (?P<level>\d+)\.$')

    def newLine(self, line):
        """
        Offer a new log line to each handler until one claims it.
        """
        return (self.gotxp(line) or self.fellowlevup(line) or self.selflevup(line))

    def gotxp(self, line):
        # XP gain lines update the running total shown in the UI.
        match = self.regexp.search(line)
        if not match:
            return None
        self.ui.lblTotalXP = match.group('total')
        return True

    def fellowlevup(self, line):
        # A fellowship member levelled up: pop a desktop notification.
        match = self.refellowlevup.search(line)
        if not match:
            return None
        player = match.group('player')
        level = match.group('level')
        self.maingui.showNotification('%s is now level %s' % (player,level),icon='plugins/xpcount/pixmaps/levelup.png')
        return True

    def selflevup(self, line):
        # The player levelled up: pop a desktop notification.
        match = self.reselflevup.search(line)
        if not match:
            return None
        level = match.group('level')
        self.maingui.showNotification('Reached level %s' % (level),icon='plugins/xpcount/pixmaps/levelup.png')
        return True
| mit |
MattBlack85/tic-tac-toe | tic_tac_toe.py | 1 | 4692 | import itertools
import random
# 3x3 board template; cell centres sit at the 1-based offsets listed in
# Table.CELL_OFFSETS (each row of the art is 17 characters + newline).
ASCII_TABLE = "     |     |     \n" \
              "     |     |     \n" \
              "_____|_____|_____\n" \
              "     |     |     \n" \
              "     |     |     \n" \
              "_____|_____|_____\n" \
              "     |     |     \n" \
              "     |     |     \n" \
              "     |     |     \n" \
              "\n" \
              "   TIC-TAC-TOE\n"


class Table(object):
    """The 3x3 board: tracks which cells are free and renders the grid."""

    # 1-based index into the ASCII art (as consumed by _replace_cell) of
    # the centre character of each cell 1..9.
    CELL_OFFSETS = {1: 21, 2: 27, 3: 33,
                    4: 75, 5: 81, 6: 87,
                    7: 129, 8: 135, 9: 141}

    def __init__(self):
        self.grid = list(range(1, 10))  # cells still free
        self.table_representation = ASCII_TABLE
        self.win_combinations = [
            [1, 2, 3],
            [1, 4, 7],
            [1, 5, 9],
            [2, 5, 8],
            [3, 6, 9],
            [3, 5, 7],
            [4, 5, 6],
            [7, 8, 9],
        ]

    def mark_ascii_cell(self, grid_place, player_mark):
        """Return {grid_place: new_representation} for the marked cell.

        Kept dict-shaped so the ``.get(grid_place)`` call site still works,
        but now renders only the requested cell: the original eagerly built
        a dict of all nine possible renderings on every move.
        """
        offset = self.CELL_OFFSETS.get(grid_place)
        if offset is None:
            return {}
        return {grid_place: self._replace_cell(offset, grid_place, player_mark)}

    def _replace_cell(self, ascii_place, grid_place, player_mark):
        # Splice the player's mark into the art at the 1-based ascii_place.
        return self.table_representation[:ascii_place - 1] + \
            player_mark + self.table_representation[ascii_place:]

    def occupy_cell(self, grid_place, player_mark):
        """Claim a free cell for player_mark and refresh the rendering.

        Raises ValueError if the cell is not free.
        """
        self.grid.remove(grid_place)
        self.table_representation = self.mark_ascii_cell(grid_place, player_mark).get(grid_place)
class AbstractPlayer(object):
    """Common player behaviour; subclasses decide how a cell is chosen.

    Relies on the module-level ``table`` global created by main().
    """

    def __init__(self, mark, name=None, human=False):
        self.human = human
        self.owned_cells = []        # cells this player has claimed
        self.name = name or "AI" + str(random.randint(1, 10))
        self.mark = mark             # 'X' or 'O'

    def _check_if_win(self):
        # BUGFIX: the original skipped this check when table.grid was empty,
        # so a win on the ninth (board-filling) move was reported as a draw.
        for combination in itertools.combinations(self.owned_cells, 3):
            if list(combination) in table.win_combinations:
                print("Player " + self.name + " wins!")
                print(table.table_representation)
                exit(0)

    def move(self, N=None):
        # BUGFIX: record the cell that was actually taken.  The original
        # always appended the caller-supplied N, which (a) stored None for
        # AI players -- so they could never be detected as winners -- and
        # (b) recorded invalid human choices, corrupting owned_cells.
        taken = self._occupy_cell(N)
        if taken is not None:
            self._finish_move(taken)

    def _occupy_cell(self, N):
        """Claim a cell; return the claimed cell number, or None on failure."""
        raise NotImplementedError("You must define the way"
                                  "to occupy a cell on the"
                                  "grid")

    def _finish_move(self, N):
        self.owned_cells.append(N)
        self.owned_cells.sort()
        self._check_if_win()
        print(table.table_representation)


class HumanPlayer(AbstractPlayer):
    def _occupy_cell(self, N):
        # Accept only a currently-free cell in 1..9.
        if N in table.grid and 0 < N < 10:
            table.occupy_cell(N, self.mark)
            return N
        print("You are choosing an occupied cell or a non existing one")
        return None


class AIPlayer(AbstractPlayer):
    def _occupy_cell(self, N):
        auto_choice = random.choice(table.grid)
        # BUGFIX: the original omitted the mark argument, so every AI move
        # raised TypeError inside Table.occupy_cell.
        table.occupy_cell(auto_choice, self.mark)
        print("Player " + self.name + " chooses " + str(auto_choice) + ".")
        return auto_choice
def main():
    """Run one interactive game: ask for the player count, then alternate
    turns until someone wins (move() exits the process) or the board fills."""
    global table
    table = Table()
    start = None
    print(ASCII_TABLE)
    # Keep asking until a valid player count (0-2) is supplied.
    # NOTE(review): a non-numeric answer raises ValueError here -- confirm
    # whether input validation is wanted.
    while not start:
        players = int(input("How many human players will play? (0-2): "))
        if 0 <= players < 3:
            if players == 0:
                player1 = AIPlayer(mark='X')
                player2 = AIPlayer(mark='O')
            elif players == 1:
                name = input("Please type the name of the player: ")
                player1 = HumanPlayer('X', name, True)
                player2 = AIPlayer(mark='O')
            elif players == 2:
                name = input("Please type the name of the first player: ")
                player1 = HumanPlayer('X', name, True)
                name = input("Please type the name of the second player: ")
                player2 = HumanPlayer('O', name, True)
            start = True
    # Nine turns fill the board; a win inside move() exits the process,
    # so falling out of the loop means a draw.
    for turn in range(9):
        if turn % 2 == 0:
            N = int(input("Please choose a cell " + player1.name + ": ")
                    ) if player1.human else None
            player1.move(N)
        else:
            N = int(input("Please choose a cell " + player2.name + ": ")
                    ) if player2.human else None
            player2.move(N)
    print("What a match, we have a draw!")
    print(table.table_representation)
main()
| mit |
Dev-Cloud-Platform/Dev-Cloud | dev_cloud/cc1/src/clm/views/guest/message.py | 2 | 1473 | # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2010-2014] Institute of Nuclear Physics PAN, Krakow, Poland
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""@package src.clm.views.guest.message
@alldecoratedby{src.clm.utils.decorators.guest_log}
"""
from clm.models.message import Message
from clm.models.user import User
from clm.utils.exception import CLMException
from clm.utils.decorators import guest_log
from clm.utils import mail
@guest_log(log=True)
def add(request):
    """
    Creates and saves new message.
    @clmview_guest
    @param_post{request}
    """
    data = request.data
    # Failures of these VM/farm operations are serious enough to notify
    # every superuser by mail.
    if data['code'] in ['farm_create', 'vm_create', 'vm_save', 'vm_destroy']:
        for admin in User.objects.filter(is_superuser__gte=1):
            mail.send(admin.email, 'VM failed, do something!', 'VM failed')
    m = Message.create(data)
    try:
        m.save()
    except Exception:
        # BUGFIX: narrowed from a bare `except:` so system-exiting
        # exceptions (KeyboardInterrupt, SystemExit) are not converted;
        # persistence errors are still reported as a CLMException.
        raise CLMException('message_add')
| apache-2.0 |
yg257/Pangea | templates/root/ec2/lib/boto-2.34.0/tests/integration/dynamodb/test_layer1.py | 114 | 11776 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Tests for Layer1 of DynamoDB
"""
import time
import base64
from tests.unit import unittest
from boto.dynamodb.exceptions import DynamoDBKeyNotFoundError
from boto.dynamodb.exceptions import DynamoDBConditionalCheckFailedError
from boto.dynamodb.exceptions import DynamoDBValidationError
from boto.dynamodb.layer1 import Layer1
class DynamoDBLayer1Test(unittest.TestCase):
dynamodb = True
def setUp(self):
self.dynamodb = Layer1()
self.table_name = 'test-%d' % int(time.time())
self.hash_key_name = 'forum_name'
self.hash_key_type = 'S'
self.range_key_name = 'subject'
self.range_key_type = 'S'
self.read_units = 5
self.write_units = 5
self.schema = {'HashKeyElement': {'AttributeName': self.hash_key_name,
'AttributeType': self.hash_key_type},
'RangeKeyElement': {'AttributeName': self.range_key_name,
'AttributeType': self.range_key_type}}
self.provisioned_throughput = {'ReadCapacityUnits': self.read_units,
'WriteCapacityUnits': self.write_units}
def tearDown(self):
pass
def create_table(self, table_name, schema, provisioned_throughput):
result = self.dynamodb.create_table(table_name, schema, provisioned_throughput)
self.addCleanup(self.dynamodb.delete_table, table_name)
return result
def test_layer1_basic(self):
print('--- running DynamoDB Layer1 tests ---')
c = self.dynamodb
# First create a table
table_name = self.table_name
hash_key_name = self.hash_key_name
hash_key_type = self.hash_key_type
range_key_name = self.range_key_name
range_key_type = self.range_key_type
read_units = self.read_units
write_units = self.write_units
schema = self.schema
provisioned_throughput = self.provisioned_throughput
result = self.create_table(table_name, schema, provisioned_throughput)
assert result['TableDescription']['TableName'] == table_name
result_schema = result['TableDescription']['KeySchema']
assert result_schema['HashKeyElement']['AttributeName'] == hash_key_name
assert result_schema['HashKeyElement']['AttributeType'] == hash_key_type
assert result_schema['RangeKeyElement']['AttributeName'] == range_key_name
assert result_schema['RangeKeyElement']['AttributeType'] == range_key_type
result_thruput = result['TableDescription']['ProvisionedThroughput']
assert result_thruput['ReadCapacityUnits'] == read_units
assert result_thruput['WriteCapacityUnits'] == write_units
# Wait for table to become active
result = c.describe_table(table_name)
while result['Table']['TableStatus'] != 'ACTIVE':
time.sleep(5)
result = c.describe_table(table_name)
# List tables and make sure new one is there
result = c.list_tables()
assert table_name in result['TableNames']
# Update the tables ProvisionedThroughput
new_read_units = 10
new_write_units = 5
new_provisioned_throughput = {'ReadCapacityUnits': new_read_units,
'WriteCapacityUnits': new_write_units}
result = c.update_table(table_name, new_provisioned_throughput)
# Wait for table to be updated
result = c.describe_table(table_name)
while result['Table']['TableStatus'] == 'UPDATING':
time.sleep(5)
result = c.describe_table(table_name)
result_thruput = result['Table']['ProvisionedThroughput']
assert result_thruput['ReadCapacityUnits'] == new_read_units
assert result_thruput['WriteCapacityUnits'] == new_write_units
# Put an item
item1_key = 'Amazon DynamoDB'
item1_range = 'DynamoDB Thread 1'
item1_data = {
hash_key_name: {hash_key_type: item1_key},
range_key_name: {range_key_type: item1_range},
'Message': {'S': 'DynamoDB thread 1 message text'},
'LastPostedBy': {'S': 'User A'},
'Views': {'N': '0'},
'Replies': {'N': '0'},
'Answered': {'N': '0'},
'Tags': {'SS': ["index", "primarykey", "table"]},
'LastPostDateTime': {'S': '12/9/2011 11:36:03 PM'}
}
result = c.put_item(table_name, item1_data)
# Now do a consistent read and check results
key1 = {'HashKeyElement': {hash_key_type: item1_key},
'RangeKeyElement': {range_key_type: item1_range}}
result = c.get_item(table_name, key=key1, consistent_read=True)
for name in item1_data:
assert name in result['Item']
# Try to get an item that does not exist.
invalid_key = {'HashKeyElement': {hash_key_type: 'bogus_key'},
'RangeKeyElement': {range_key_type: item1_range}}
self.assertRaises(DynamoDBKeyNotFoundError,
c.get_item, table_name, key=invalid_key)
# Try retrieving only select attributes
attributes = ['Message', 'Views']
result = c.get_item(table_name, key=key1, consistent_read=True,
attributes_to_get=attributes)
for name in result['Item']:
assert name in attributes
# Try to delete the item with the wrong Expected value
expected = {'Views': {'Value': {'N': '1'}}}
self.assertRaises(DynamoDBConditionalCheckFailedError,
c.delete_item, table_name, key=key1,
expected=expected)
# Now update the existing object
attribute_updates = {'Views': {'Value': {'N': '5'},
'Action': 'PUT'},
'Tags': {'Value': {'SS': ['foobar']},
'Action': 'ADD'}}
result = c.update_item(table_name, key=key1,
attribute_updates=attribute_updates)
# Try and update an item, in a fashion which makes it too large.
# The new message text is the item size limit minus 32 bytes and
# the current object is larger than 32 bytes.
item_size_overflow_text = 'Text to be padded'.zfill(64 * 1024 - 32)
attribute_updates = {'Message': {'Value': {'S': item_size_overflow_text},
'Action': 'PUT'}}
self.assertRaises(DynamoDBValidationError,
c.update_item, table_name, key=key1,
attribute_updates=attribute_updates)
# Put a few more items into the table
item2_key = 'Amazon DynamoDB'
item2_range = 'DynamoDB Thread 2'
item2_data = {
hash_key_name: {hash_key_type: item2_key},
range_key_name: {range_key_type: item2_range},
'Message': {'S': 'DynamoDB thread 2 message text'},
'LastPostedBy': {'S': 'User A'},
'Views': {'N': '0'},
'Replies': {'N': '0'},
'Answered': {'N': '0'},
'Tags': {'SS': ["index", "primarykey", "table"]},
'LastPostDateTime': {'S': '12/9/2011 11:36:03 PM'}
}
result = c.put_item(table_name, item2_data)
key2 = {'HashKeyElement': {hash_key_type: item2_key},
'RangeKeyElement': {range_key_type: item2_range}}
item3_key = 'Amazon S3'
item3_range = 'S3 Thread 1'
item3_data = {
hash_key_name: {hash_key_type: item3_key},
range_key_name: {range_key_type: item3_range},
'Message': {'S': 'S3 Thread 1 message text'},
'LastPostedBy': {'S': 'User A'},
'Views': {'N': '0'},
'Replies': {'N': '0'},
'Answered': {'N': '0'},
'Tags': {'SS': ['largeobject', 'multipart upload']},
'LastPostDateTime': {'S': '12/9/2011 11:36:03 PM'}
}
result = c.put_item(table_name, item3_data)
key3 = {'HashKeyElement': {hash_key_type: item3_key},
'RangeKeyElement': {range_key_type: item3_range}}
# Try a few queries
result = c.query(table_name, {'S': 'Amazon DynamoDB'},
{'AttributeValueList': [{'S': 'DynamoDB'}],
'ComparisonOperator': 'BEGINS_WITH'})
assert 'Count' in result
assert result['Count'] == 2
# Try a few scans
result = c.scan(table_name,
{'Tags': {'AttributeValueList': [{'S': 'table'}],
'ComparisonOperator': 'CONTAINS'}})
assert 'Count' in result
assert result['Count'] == 2
# Now delete the items
result = c.delete_item(table_name, key=key1)
result = c.delete_item(table_name, key=key2)
result = c.delete_item(table_name, key=key3)
print('--- tests completed ---')
    def test_binary_attributes(self):
        # End-to-end check of DynamoDB's binary ('B') attribute type:
        # create a table, store an item holding base64-encoded bytes, then
        # read it back with a consistent read and compare.
        c = self.dynamodb
        result = self.create_table(self.table_name, self.schema,
                                   self.provisioned_throughput)
        # Wait for table to become active
        result = c.describe_table(self.table_name)
        while result['Table']['TableStatus'] != 'ACTIVE':
            time.sleep(5)
            result = c.describe_table(self.table_name)
        # Put an item
        item1_key = 'Amazon DynamoDB'
        item1_range = 'DynamoDB Thread 1'
        item1_data = {
            self.hash_key_name: {self.hash_key_type: item1_key},
            self.range_key_name: {self.range_key_type: item1_range},
            'Message': {'S': 'DynamoDB thread 1 message text'},
            'LastPostedBy': {'S': 'User A'},
            'Views': {'N': '0'},
            'Replies': {'N': '0'},
            'BinaryData': {'B': base64.b64encode(b'\x01\x02\x03\x04').decode('utf-8')},
            'Answered': {'N': '0'},
            'Tags': {'SS': ["index", "primarykey", "table"]},
            'LastPostDateTime': {'S': '12/9/2011 11:36:03 PM'}
        }
        result = c.put_item(self.table_name, item1_data)
        # Now do a consistent read and check results
        key1 = {'HashKeyElement': {self.hash_key_type: item1_key},
                'RangeKeyElement': {self.range_key_type: item1_range}}
        result = c.get_item(self.table_name, key=key1, consistent_read=True)
        # The service hands the binary payload back still base64-encoded,
        # so compare against the encoded form.
        self.assertEqual(result['Item']['BinaryData'],
                         {'B': base64.b64encode(b'\x01\x02\x03\x04').decode('utf-8')})
| apache-2.0 |
nirmeshk/oh-mainline | vendor/packages/sphinx/sphinx/directives/code.py | 16 | 7800 | # -*- coding: utf-8 -*-
"""
sphinx.directives.code
~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import codecs
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from sphinx import addnodes
from sphinx.util import parselinenos
from sphinx.util.nodes import set_source_info
class Highlight(Directive):
    """
    Directive to set the highlighting language for code blocks, as well
    as the threshold for line numbers.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'linenothreshold': directives.unchanged,
    }
    def run(self):
        # Default is effectively "never show line numbers"; a present but
        # malformed threshold falls back to 10.
        threshold = sys.maxint
        if 'linenothreshold' in self.options:
            try:
                threshold = int(self.options['linenothreshold'])
            except Exception:
                threshold = 10
        language = self.arguments[0].strip()
        return [addnodes.highlightlang(lang=language,
                                       linenothreshold=threshold)]
class CodeBlock(Directive):
    """
    Directive for a code block with special highlighting or line numbering
    settings.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'linenos': directives.flag,
        'emphasize-lines': directives.unchanged_required,
    }
    def run(self):
        code = u'\n'.join(self.content)
        linespec = self.options.get('emphasize-lines')
        if linespec:
            try:
                nlines = len(self.content)
                # parselinenos yields 0-based indices; the highlighter
                # expects 1-based line numbers.
                hl_lines = [x+1 for x in parselinenos(linespec, nlines)]
            # "except E as err" works on Python 2.6+ and Python 3, unlike
            # the old "except E, err" form which is Python-2-only.
            except ValueError as err:
                document = self.state.document
                return [document.reporter.warning(str(err), line=self.lineno)]
        else:
            hl_lines = None
        literal = nodes.literal_block(code, code)
        literal['language'] = self.arguments[0]
        literal['linenos'] = 'linenos' in self.options
        if hl_lines is not None:
            literal['highlight_args'] = {'hl_lines': hl_lines}
        set_source_info(self, literal)
        return [literal]
class LiteralInclude(Directive):
    """
    Like ``.. include:: :literal:``, but only warns if the include file is
    not found, and does not raise errors. Also has several options for
    selecting what to include: a Python object, explicit line numbers,
    start/end sentinel strings, and text to prepend or append.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {
        'linenos': directives.flag,
        'tab-width': int,
        'language': directives.unchanged_required,
        'encoding': directives.encoding,
        'pyobject': directives.unchanged_required,
        'lines': directives.unchanged_required,
        'start-after': directives.unchanged_required,
        'end-before': directives.unchanged_required,
        'prepend': directives.unchanged_required,
        'append': directives.unchanged_required,
        'emphasize-lines': directives.unchanged_required,
    }
    def run(self):
        document = self.state.document
        if not document.settings.file_insertion_enabled:
            return [document.reporter.warning('File insertion disabled',
                                              line=self.lineno)]
        env = document.settings.env
        rel_filename, filename = env.relfn2path(self.arguments[0])
        # 'pyobject' and 'lines' both select a line range; combining them
        # would be ambiguous, so it is rejected up front.
        if 'pyobject' in self.options and 'lines' in self.options:
            return [document.reporter.warning(
                'Cannot use both "pyobject" and "lines" options',
                line=self.lineno)]
        encoding = self.options.get('encoding', env.config.source_encoding)
        codec_info = codecs.lookup(encoding)
        f = None
        try:
            f = codecs.StreamReaderWriter(open(filename, 'rb'),
                    codec_info[2], codec_info[3], 'strict')
            lines = f.readlines()
        except (IOError, OSError):
            return [document.reporter.warning(
                'Include file %r not found or reading it failed' % filename,
                line=self.lineno)]
        except UnicodeError:
            return [document.reporter.warning(
                'Encoding %r used for reading included file %r seems to '
                'be wrong, try giving an :encoding: option' %
                (encoding, filename))]
        finally:
            if f is not None:
                f.close()
        # Narrow down to a named Python object (function/class), using the
        # tag ranges produced by sphinx.pycode's module analyzer.
        objectname = self.options.get('pyobject')
        if objectname is not None:
            from sphinx.pycode import ModuleAnalyzer
            analyzer = ModuleAnalyzer.for_file(filename, '')
            tags = analyzer.find_tags()
            if objectname not in tags:
                return [document.reporter.warning(
                    'Object named %r not found in include file %r' %
                    (objectname, filename), line=self.lineno)]
            else:
                lines = lines[tags[objectname][1]-1 : tags[objectname][2]-1]
        # Narrow down to an explicit line selection, e.g. "1,3,5-10".
        linespec = self.options.get('lines')
        if linespec is not None:
            try:
                linelist = parselinenos(linespec, len(lines))
            except ValueError, err:
                return [document.reporter.warning(str(err), line=self.lineno)]
            # just ignore nonexisting lines
            nlines = len(lines)
            lines = [lines[i] for i in linelist if i < nlines]
            if not lines:
                return [document.reporter.warning(
                    'Line spec %r: no lines pulled from include file %r' %
                    (linespec, filename), line=self.lineno)]
        linespec = self.options.get('emphasize-lines')
        if linespec:
            try:
                # parselinenos is 0-based; highlighting is 1-based.
                hl_lines = [x+1 for x in parselinenos(linespec, len(lines))]
            except ValueError, err:
                return [document.reporter.warning(str(err), line=self.lineno)]
        else:
            hl_lines = None
        # Narrow down to the span between the start-after and end-before
        # sentinel strings (both exclusive).
        startafter = self.options.get('start-after')
        endbefore = self.options.get('end-before')
        prepend = self.options.get('prepend')
        append = self.options.get('append')
        if startafter is not None or endbefore is not None:
            use = not startafter
            res = []
            for line in lines:
                if not use and startafter and startafter in line:
                    use = True
                elif use and endbefore and endbefore in line:
                    use = False
                    break
                elif use:
                    res.append(line)
            lines = res
        if prepend:
            lines.insert(0, prepend + '\n')
        if append:
            lines.append(append + '\n')
        text = ''.join(lines)
        if self.options.get('tab-width'):
            text = text.expandtabs(self.options['tab-width'])
        retnode = nodes.literal_block(text, text, source=filename)
        set_source_info(self, retnode)
        if self.options.get('language', ''):
            retnode['language'] = self.options['language']
        if 'linenos' in self.options:
            retnode['linenos'] = True
        if hl_lines is not None:
            retnode['highlight_args'] = {'hl_lines': hl_lines}
        # Rebuild the document when the included file changes.
        env.note_dependency(rel_filename)
        return [retnode]
# Register the directives with docutils under their public names.
directives.register_directive('highlight', Highlight)
directives.register_directive('highlightlang', Highlight) # old name, kept for backwards compatibility
directives.register_directive('code-block', CodeBlock)
directives.register_directive('sourcecode', CodeBlock)
directives.register_directive('literalinclude', LiteralInclude)
| agpl-3.0 |
carlmod/Analys24h | analys24tim/wsgi.py | 1 | 1144 | """
WSGI config for analys24tim project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# Fall back to the project settings module unless the environment already
# names one.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "analys24tim.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
# Module-level ``application`` callable is the WSGI entry point.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| agpl-3.0 |
angr/angr | angr/simos/userland.py | 1 | 7356 | import logging
from typing import Dict, Tuple
from ..calling_conventions import SYSCALL_CC, SimCCSyscall
from ..errors import AngrUnsupportedSyscallError, SimSolverError
from ..procedures import SIM_PROCEDURES as P
from .simos import SimOS
_l = logging.getLogger(name=__name__)
class SimUserland(SimOS):
    """
    This is a base class for any SimOS that wants to support syscalls.

    It uses the CLE kernel object to provide addresses for syscalls. Syscalls will be emulated as a jump to one of these
    addresses, where a SimProcedure from the syscall library provided at construction time will be executed.
    """
    def __init__(self, project, syscall_library=None, syscall_addr_alignment=4, **kwargs):
        super(SimUserland, self).__init__(project, **kwargs)
        # Copy the library so per-project mutations don't leak into the shared
        # SimSyscallLibrary. The default is None (syscall_from_number handles a
        # missing library explicitly), so guard the copy instead of
        # unconditionally calling .copy() and crashing with AttributeError.
        self.syscall_library = syscall_library.copy() if syscall_library is not None else None
        self.syscall_addr_alignment = syscall_addr_alignment
        self.kernel_base = None
        self.unknown_syscall_number = None
        self.syscall_abis: Dict[str,Tuple[int,int,int]] = {}
        # syscall_abis is a dict of tuples {name: (base_number, min_number, max_number)}
        # min_number and max_number are just cached from SimSyscallLibrary.{min,max}imum_syscall_number
        # base_number is used to map the syscalls into the syscall address space - it's a "base address"
        # but a number. to convert from syscall number to address it's (number - min_num + base_num) * alignment + kernel_base

    def configure_project(self, abi_list=None): # pylint: disable=arguments-differ
        if abi_list is None:
            abi_list = list(self.syscall_library.syscall_number_mapping)
            assert len(abi_list) == 1, "More than one ABI is available for this target - you need to specify which ones are valid"
        self.kernel_base = self.project.loader.kernel_object.mapped_base
        base_no = 0
        for abi in abi_list:
            assert abi in self.syscall_library.syscall_number_mapping
            min_no = self.syscall_library.minimum_syscall_number(abi)
            max_no = self.syscall_library.maximum_syscall_number(abi)
            self.syscall_abis[abi] = (base_no, min_no, max_no)
            base_no += max_no - min_no + 1 # since max is the actual max and not the array length
        self.unknown_syscall_number = base_no
        # configure_project() involves lightweight symbolic execution, which may end up using syscall ABIs. hence, we
        # need to fill in self.syscall_abis before calling configure_project().
        super().configure_project()

    def syscall_cc(self, state) -> SimCCSyscall:
        """
        Return the syscall calling convention for ``state``'s arch/OS pair,
        falling back to the arch's default syscall CC with a warning.
        """
        if state.os_name in SYSCALL_CC[state.arch.name]:
            cc = SYSCALL_CC[state.arch.name][state.os_name](state.arch)
        else:
            # Use the default syscall calling convention - it may bring problems
            _l.warning("No syscall calling convention available for %s/%s", state.arch.name, state.os_name)
            cc = SYSCALL_CC[state.arch.name]['default'](state.arch)
        return cc

    def syscall(self, state, allow_unsupported=True):
        """
        Given a state, return the procedure corresponding to the current syscall.
        This procedure will have .syscall_number, .display_name, and .addr set.

        :param state: The state to get the syscall number from
        :param allow_unsupported: Whether to return a "dummy" sycall instead of raising an unsupported exception
        """
        abi = self.syscall_abi(state)
        cc = self.syscall_cc(state)
        sym_num = cc.syscall_num(state)
        try:
            num = state.solver.eval_one(sym_num)
        except SimSolverError:
            # Symbolic (or unsatisfiable) syscall number: map to the stub slot
            # if permitted, otherwise explain exactly why we failed.
            if allow_unsupported:
                num = self.unknown_syscall_number
            else:
                if not state.solver.satisfiable():
                    raise AngrUnsupportedSyscallError("The program state is not satisfiable")
                else:
                    raise AngrUnsupportedSyscallError("Got a symbolic syscall number")
        proc = self.syscall_from_number(num, allow_unsupported=allow_unsupported, abi=abi)
        if proc.cc is not None:
            cc.func_ty = proc.cc.func_ty
        proc.cc = cc
        return proc

    def syscall_abi(self, state): # pylint: disable=unused-argument,no-self-use
        """
        Optionally, override this function to determine which abi is being used for the state's current syscall.
        """
        return None

    def is_syscall_addr(self, addr):
        """
        Return whether or not the given address corresponds to a syscall implementation.
        """
        if self.kernel_base is None or addr < self.kernel_base:
            return False
        addr -= self.kernel_base
        if addr % self.syscall_addr_alignment != 0:
            return False
        addr //= self.syscall_addr_alignment
        # <= (not <) so the "unknown syscall" stub slot also counts.
        return addr <= self.unknown_syscall_number

    def syscall_from_addr(self, addr, allow_unsupported=True):
        """
        Get a syscall SimProcedure from an address.

        :param addr: The address to convert to a syscall SimProcedure
        :param allow_unsupported: Whether to return a dummy procedure for an unsupported syscall instead of raising an
                                  exception.
        :return: The SimProcedure for the syscall, or None if the address is not a syscall address.
        """
        if not self.is_syscall_addr(addr):
            return None
        number = (addr - self.kernel_base) // self.syscall_addr_alignment
        # Undo the base_no/min_no mapping described in __init__ to recover the
        # ABI-local syscall number.
        for abi in self.syscall_abis:
            baseno, minno, maxno = self.syscall_abis[abi]
            if baseno <= number <= baseno + maxno - minno:
                number += minno
                number -= baseno
                break
        else:
            abi = None
        return self.syscall_from_number(number, allow_unsupported=allow_unsupported, abi=abi)

    def syscall_from_number(self, number, allow_unsupported=True, abi=None):
        """
        Get a syscall SimProcedure from its number.

        :param number: The syscall number
        :param allow_unsupported: Whether to return a "stub" syscall for unsupported numbers instead of throwing an error
        :param abi: The name of the abi to use. If None, will assume that the abis have disjoint
                    numbering schemes and pick the right one.
        :return: The SimProcedure for the syscall
        """
        abilist = self.syscall_abis if abi is None else [abi]
        if self.syscall_library is None:
            if not allow_unsupported:
                raise AngrUnsupportedSyscallError("%s does not have a library of syscalls implemented" % self.name)
            proc = P['stubs']['syscall']()
        elif not allow_unsupported and not self.syscall_library.has_implementation(number, self.arch, abilist):
            raise AngrUnsupportedSyscallError("No implementation for syscall %d" % number)
        else:
            proc = self.syscall_library.get(number, self.arch, abilist)
        # Re-apply the forward mapping (number -> kernel-space address).
        if proc.abi is not None:
            baseno, minno, _ = self.syscall_abis[proc.abi]
            mapno = number - minno + baseno
        else:
            mapno = self.unknown_syscall_number
        proc.addr = mapno * self.syscall_addr_alignment + self.kernel_base
        return proc
| bsd-2-clause |
rvanharen/wrfpy | wrfpy/cylc/wps_post.py | 2 | 1722 | #!/usr/bin/env python
import argparse
import datetime
import time
from wrfpy import utils
from wrfpy.config import config
import os
#from urb import urb
import shutil
import glob
class wps_post(config):
    '''
    Post-process a finished WPS run:
      - copy the freshly generated met_em* files from the WPS working
        directory into the WRF run directory
      - remove intermediate WPS products (met_em*, FILE*, PFILE*, GRIBFILE*)
        from the WPS working directory
    All work happens in the constructor, mirroring how the other wrfpy
    steps are invoked.
    '''
    def __init__(self):
        config.__init__(self)
        rundir = self.config['filesystem']['wrf_run_dir']
        wpsdir = os.path.join(self.config['filesystem']['work_dir'], 'wps')
        # copy new met_em files into the WRF run directory
        for filename in glob.glob(os.path.join(wpsdir, 'met_em*')):
            shutil.copyfile(filename,
                            os.path.join(rundir, os.path.basename(filename)))
        # clean up intermediate files in the WPS working directory
        # (silentremove ignores files that have already disappeared)
        for pattern in ('met_em*', 'FILE*', 'PFILE*', 'GRIBFILE*'):
            for filename in glob.glob(os.path.join(wpsdir, pattern)):
                utils.silentremove(filename)
if __name__=="__main__":
    # Instantiating wps_post performs the whole post-processing step.
    wps_post()
| apache-2.0 |
vmon/demystegofier | src/features/feature_1grams.py | 1 | 1358 | """
For each packet, compute all 1grams of 256 characters and return it
as 256 element dict
AUTHORS::
- Vmon (vmon@equalit.ie)
-2012 Initial version
-2013 adopted for demystegofier
"""
from demystegofier_feature import DemystegofierFeature
class FeatureAverage1Grams(DemystegofierFeature):
    def __init__(self):
        """
        Simply calls the parent constructor
        """
        DemystegofierFeature.__init__(self)
        # Each feature needs to have a unique index used as the field number
        # in ip_feature_db.
        self._FEATURE_INDEX = 1

    def compute(self, pcap_packet_buf):
        """
        Compute the average 1-gram statistics (that is the relative quantity
        of occurrences of each byte value) and return it in the form of a dict
        consisting of 256 {(feature_index, char_no): average} items.

        :param pcap_packet_buf: non-empty packet payload (string/bytes)
        :raises ValueError: if the packet buffer is empty
        """
        if not len(pcap_packet_buf):
            # raise Exception(msg) is valid in both Python 2 and 3, unlike
            # the old "raise Exception, msg" statement form.
            raise ValueError("Can not compute the 1-garms stat of an empty packet")
        # init the dict to zero for each character
        stat_result_dict = {(self._FEATURE_INDEX, k): 0 for k in range(0, 256)}
        for i in pcap_packet_buf:
            stat_result_dict[(self._FEATURE_INDEX, ord(i))] += 1
        # normalize counts by the packet length to get relative frequencies
        for i in range(0, 256):
            stat_result_dict[(self._FEATURE_INDEX, i)] /= float(len(pcap_packet_buf))
        return stat_result_dict
| gpl-3.0 |
graphite/TeX4Web-INVENIO | modules/websubmit/lib/functions/Get_Recid.py | 6 | 12683 | ## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Get the recid of a record with a given report-number (from the global 'rn'),
and store it into the global 'sysno'.
"""
__revision__ = "$Id$"
from os import access, rename, F_OK, R_OK
import re
from invenio.search_engine import \
record_exists, \
search_pattern, \
get_field_tags
from invenio.websubmit_config import \
InvenioWebSubmitFunctionStop, \
InvenioWebSubmitFunctionError
## JavaScript action and message to be passed to "InvenioWebSubmitFunctionStop"
## when a document recid for the given report-number cannot be found:
CFG_ALERT_DOCUMENT_NOT_FOUND = """\n<script type="text/javascript">
document.forms[0].action="/submit";
document.forms[0].curpage.value=1;
document.forms[0].step.value=0;
user_must_confirm_before_leaving_page = false;
alert('The document with report-number [%s] cannot be found in our """ \
"""database.\\nPerhaps it has not yet been integrated?\\nYou can choose """ \
"""another report number or retry this action in a few minutes.');\n
document.forms[0].submit();
</script>"""
## JavaScript action and message to be passed to "InvenioWebSubmitFunctionStop"
## when multiple document recids for the given report-number are found found:
CFG_ALERT_MULTIPLE_DOCUMENTS_FOUND = """\n<script type="text/javascript">
document.forms[0].action="/submit";
document.forms[0].curpage.value=1;
document.forms[0].step.value=0;
user_must_confirm_before_leaving_page = false;
alert('Multiple documents with the report number [%s] have been found.\\n""" \
"""You can choose another report number or retry this action in a few """ \
"""minutes.');\n
document.forms[0].submit();
</script>"""
## JavaScript action and message to be passed to "InvenioWebSubmitFunctionStop"
## when the recid found doesn't match the type of document that should be
## handled by this submission
CFG_ALERT_WRONG_RECORD_FOR_THIS_SUBMISSION = """
<script type="text/javascript">
document.forms[0].action="/submit";
document.forms[0].curpage.value=1;
document.forms[0].step.value=0;
user_must_confirm_before_leaving_page = false;
document.forms[0].submit();
alert('This document can not be handled using this submission interface.\\n""" \
"""You can choose another report number or retry this action in a few """ \
"""minutes.');\n</script>"""
def Get_Recid(parameters, curdir, form, user_info=None):
    """
    Given the report number of a record (the global "rn"), retrieve the
    "recid" (001) and store it into the global 'sysno'.

    The function first of all checks for the existence of the file "SN" in
    the current submission's working directory. If it exists, it is read in
    and used as the "recid".
    Otherwise, this function will contact the database in order to obtain the
    recid of a record. In this case, a check will be done in order to assure
    that the recid can be handled by this submission.

    Parameters: record_search_pattern - this enforces restrictions on which
       type of documents can be modified via a certain submission interface.
       If the record_search_pattern is not defined, no restriction will be
       enforced. The record_search_pattern can be anything that can be used
       by search_pattern to search for. Also, one can use variables stored
       locally, like <comboDEMOJRN> to denote the category or subcategory.
       Ex:
          reportnumber:DEMO-<comboDEMOJRN>-*
          collection:ATLANTISTIMESNEWS
          reportnumber:DEMO-<comboDEMOJRN>-* | collection:ATLANTISTIMESNEWS
       As a note, you can test your pattern using the search engine
       and see if it retrieves the expected results.

    Unless file curdir/SN exists, the function depends upon the global
    value 'rn' having been set (for eg. by calling Get_Report_Number
    prior to this function). It will use this value when searching for
    a record. Note: If 'rn' is empty, the search for the document will
    not be conducted.

    Exceptions raised:
       + InvenioWebSubmitFunctionError:
          - if unable to open curdir/SN for reading;
          - if unable to open curdir/SN for writing;
       + InvenioWebSubmitFunctionStop:
          - if the global 'rn' is empty (no rn to search with);
          - if no recid found for 'rn' value;
          - if multiple recids found for 'rn' value;
          - if recid should not be handled by the current submission;
    """
    global rn, sysno
    ## initialize sysno
    sysno = ""
    if access("%s/SN" % curdir, F_OK|R_OK):
        ## SN exists and should contain the recid; get it from there.
        try:
            fptr = open("%s/SN" % curdir, "r")
        except IOError:
            ## Unable to read the SN file's contents
            msg = """Unable to correctly read the current submission's recid"""
            raise InvenioWebSubmitFunctionError(msg)
        else:
            ## read in the submission details:
            sysno = fptr.read().strip()
            fptr.close()
    else:
        ## SN doesn't exist; Check the DB for a record with this reportnumber.
        ## First, if rn is empty, don't conduct the search:
        if rn.strip() in ("", None):
            ## No report-numer provided:
            raise InvenioWebSubmitFunctionStop(CFG_ALERT_DOCUMENT_NOT_FOUND \
                                               % "NO REPORT NUMBER PROVIDED")
        ## Get a list of recids of LIVE records associated with the report num
        recids = get_existing_records_for_reportnumber(rn)
        ## There should only be 1 _existing_ record for the report-number:
        if len(recids) == 1:
            ## Only one record found - save it to a text file called SN
            ## in the current submission's working directory:
            try:
                fptr = open("%s/SN" % curdir, "w")
            except IOError:
                ## Unable to read the SN file's contents
                msg = """Unable to save the recid for report [%s]""" \
                      % rn
                raise InvenioWebSubmitFunctionError(msg)
            else:
                ## Save recid to SN and to the global scope:
                sysno = recids[0]
                fptr.write("%s" % sysno)
                fptr.flush()
                fptr.close()
        elif len(recids) < 1:
            ## No recid found for this report number:
            msg = CFG_ALERT_DOCUMENT_NOT_FOUND % rn
            raise InvenioWebSubmitFunctionStop(msg)
        else:
            ## Multiple recids found for this report-number:
            msg = CFG_ALERT_MULTIPLE_DOCUMENTS_FOUND % rn
            raise InvenioWebSubmitFunctionStop(msg)
    ## Everything seems to have run smoothly:
    ## check if the record needs to comply to any restriction
    ## basically checks if this record can/should be handled by this submission
    if parameters['record_search_pattern']:
        if not is_record_matching_pattern(parameters['record_search_pattern'], sysno, curdir):
            # delete the SN file and reset the sysno,
            # because this record is not the good record to be hadled by this submission
            rename("%s/SN" % curdir, "%s/SN_WRONG" % curdir)
            sysno = ""
            raise InvenioWebSubmitFunctionStop(CFG_ALERT_WRONG_RECORD_FOR_THIS_SUBMISSION)
    return ""
def get_existing_records_for_reportnumber(reportnum):
    """Given a report number, return a list of recids of real (live) records
    that are associated with it.
    That's to say if the record does not exist (perhaps deleted, for example)
    its recid will not be returned in the list.

    @param reportnum: the report number for which recids are to be returned.
    @type reportnum: string
    @return: list of recids.
    @rtype: list
    @note: If reportnum was not found in phrase indexes, the function searches
        directly in bibxxx tables via MARC tags, so that the record does not
        have to be phrase-indexed.
    """
    ## First try the phrase indexes:
    candidates = list(search_pattern(req=None,
                                     p=reportnum,
                                     f="reportnumber",
                                     m="e"))
    if not candidates:
        ## Not indexed yet? Fall back to a direct bibxxx search on every
        ## MARC tag associated with "reportnumber":
        for tag in get_field_tags("reportnumber"):
            candidates.extend(search_pattern(req=None,
                                             p=reportnum,
                                             f=tag,
                                             m="e"))
        candidates = dict.fromkeys(candidates).keys() # Remove duplicates
    ## Keep only the recids that belong to LIVE records (record_exists()
    ## returns 1 for an existing, non-deleted record):
    return [recid for recid in candidates if record_exists(recid) == 1]
def is_record_matching_pattern(record_pattern, recid, curdir):
    """Given a pattern and a recid, returns True if the recid
    can be retrieved using the record_pattern. This enforces
    restrictions on which type of documents can be modified via a
    certain submission interface.
    The record_pattern can be anything that can be used by
    search_pattern to search for.
    Also, one can use variables stored locally, like <comboDEMOJRN>
    to denote the category or subcategory.
    Ex:
        reportnumber:DEMO-<comboDEMOJRN>-*
        collection:ATLANTISTIMESNEWS
        reportnumber:DEMO-<comboDEMOJRN>-* | collection:ATLANTISTIMESNEWS
    As a note, you can test your pattern, using the search engine
    and see if it retrieves the expected results.

    @param record_pattern: search pattern, possibly with <file> placeholders
    @param recid: the record id to check against the pattern
    @param curdir: submission working directory holding the placeholder files
    @return: True if no pattern is configured or recid matches it
    """
    # if no pattern is configured, then do not do any checks
    if not record_pattern:
        return True
    # check for local variables embedded in the pattern (ex: <comboXYZ>)
    # and replace them with the value read from the corresponding file
    # (raw string: '\w' must reach the regex engine unescaped)
    pattern_local_variables = r'<\w+>'
    local_vars = re.findall(pattern_local_variables, record_pattern)
    final_record_pattern = record_pattern
    if local_vars:
        for local_var in local_vars:
            if record_pattern.find(local_var) > -1:
                file_name = local_var[1:-1].strip()
                try:
                    f = open("%s/%s" % (curdir, file_name), "r")
                    local_variable_content = f.read().strip()
                    final_record_pattern = final_record_pattern.replace(local_var, local_variable_content)
                    f.close()
                except IOError:
                    msg = "Record pattern badly defined. There is no local file: %s." % file_name
                    raise InvenioWebSubmitFunctionError(msg)
    # check to see if nested <> tags were used, in this case throw an error -not supported
    if final_record_pattern.find('<') > -1 or final_record_pattern.find('>') > -1:
        # Bug fix: the original applied "% file_name" to a message with no
        # placeholder (a TypeError at runtime) and could reference file_name
        # before assignment; build the message without interpolation.
        msg = "Record pattern badly defined -> the local variables tags should be revised."
        raise InvenioWebSubmitFunctionError(msg)
    # get the list of records that match the final record pattern and
    # check whether our recid is part of it
    reclist = list(search_pattern(p=final_record_pattern))
    return recid in reclist
| gpl-2.0 |
RTHMaK/RPGOne | deep_qa-master/tests/layers/vector_matrix_split_test.py | 1 | 3608 | # pylint: disable=no-self-use,invalid-name
import numpy
from keras.layers import Input
from keras.models import Model
from deep_qa.layers.time_distributed_embedding import TimeDistributedEmbedding
from deep_qa.layers.vector_matrix_split import VectorMatrixSplit
from deep_qa.layers.wrappers.output_mask import OutputMask
class TestVectorMatrixSplit:
    def _check_word_character_split(self, split_axis):
        """Build a model splitting (words, characters) off the last axis of a
        sentence tensor and check both outputs against numpy slicing.
        Shared by the positive- and negative-axis tests below."""
        vocabulary_size = 10
        sentence_length = 10
        word_length = 5
        num_sentences = 7
        sentence_input = Input(shape=(sentence_length, word_length), dtype='int32')
        split_layer = VectorMatrixSplit(split_axis=split_axis)
        words, characters = split_layer(sentence_input)
        model = Model(inputs=[sentence_input], outputs=[words, characters])
        sentence_tensor = numpy.random.randint(0, vocabulary_size, (num_sentences, sentence_length, word_length))
        word_tensor, character_tensor = model.predict([sentence_tensor])
        assert numpy.array_equal(word_tensor, sentence_tensor[:, :, 0])
        assert numpy.array_equal(character_tensor, sentence_tensor[:, :, 1:])

    def test_split_works_correctly_on_word_indices(self):
        self._check_word_character_split(split_axis=2)

    def test_split_works_correctly_with_negative_axis(self):
        # axis -1 is the same axis as 2 for a rank-3 input
        self._check_word_character_split(split_axis=-1)

    def test_split_works_correctly_on_word_embeddings_with_masking(self):
        vocabulary_size = 10
        sentence_length = 10
        word_length = 5
        embedding_dim = 10
        num_sentences = 7
        sentence_input = Input(shape=(sentence_length, word_length), dtype='int32')
        embedding = TimeDistributedEmbedding(input_dim=vocabulary_size, output_dim=embedding_dim, mask_zero=True)
        embedded_sentence = embedding(sentence_input)  # (batch_size, sentence_length, word_length, embedding_dim)
        sentence_mask = OutputMask()(embedded_sentence)
        # Note that this mask_split_axis doesn't make practical sense; I'm just testing the code
        # with a different axis for the mask and the input.
        split_layer = VectorMatrixSplit(split_axis=2, mask_split_axis=1)
        words, characters = split_layer(embedded_sentence)
        word_mask = OutputMask()(words)
        character_mask = OutputMask()(characters)
        outputs = [embedded_sentence, words, characters, sentence_mask, word_mask, character_mask]
        model = Model(inputs=[sentence_input], outputs=outputs)
        sentence_tensor = numpy.random.randint(0, vocabulary_size, (num_sentences, sentence_length, word_length))
        actual_outputs = model.predict([sentence_tensor])
        sentence_tensor, word_tensor, character_tensor, sentence_mask, word_mask, character_mask = actual_outputs
        assert numpy.array_equal(word_tensor, sentence_tensor[:, :, 0, :])
        assert numpy.array_equal(character_tensor, sentence_tensor[:, :, 1:, :])
        assert numpy.array_equal(word_mask, sentence_mask[:, 0, :])
        assert numpy.array_equal(character_mask, sentence_mask[:, 1:, :])
| apache-2.0 |
pgiri/asyncoro | examples/chat_server.py | 1 | 1752 | #!/usr/bin/env python
import asyncoro, socket, sys, time
def client_send(clients, conn, coro=None):
    """Per-client coroutine: read messages from 'conn' and broadcast each
    one to every other connected client in 'clients'."""
    coro.set_daemon()
    asyncoro.logger.debug('%s/%s started with %s', coro.name, id(coro), conn._fileno)
    while True:
        line = yield conn.recv_msg()
        if not line:
            # Empty/None message means the peer disconnected; unregister it.
            asyncoro.logger.debug('removing %s', conn._fileno)
            clients.discard(conn)
            break
        # asyncoro.logger.debug('got line "%s"', line)
        for client in clients:
            if client != conn:
                # asyncoro.logger.debug('sending "%s" to %s', line, client._fileno)
                yield client.send_msg(line)
def chat(host='localhost', port=1234, coro=None):
    """Accept-loop coroutine: listens on (host, port) and spawns one
    client_send coroutine per accepted connection.
    """
    coro.set_daemon()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Wrap the plain socket so accept() can be yielded on in this coroutine.
    sock = asyncoro.AsynCoroSocket(sock)
    sock.bind((host, port))
    sock.listen(128)
    asyncoro.logger.debug('server at %s', str(sock.getsockname()))
    clients = set()
    try:
        while True:
            conn, addr = yield sock.accept()
            clients.add(conn)
            asyncoro.Coro(client_send, clients, conn)
    except:
        # Coroutine terminated (e.g. GeneratorExit from the scheduler):
        # shut every client connection down, then re-raise so the
        # termination propagates.
        for client in clients:
            asyncoro.logger.debug('closing %s', client._fileno)
            client.shutdown(socket.SHUT_RDWR)
            client.close()
        raise
if __name__ == '__main__':
    # Start the chat server coroutine and block on stdin until the operator
    # types 'quit'/'exit' (or sends EOF / hits an input error).
    asyncoro.logger.setLevel(asyncoro.Logger.debug)
    asyncoro.Coro(chat)
    # Python 2 compatibility: `raw_input` is the Py2 spelling of `input`.
    read_input = input if sys.version_info.major > 2 else raw_input
    while True:
        try:
            if read_input().strip().lower() in ('quit', 'exit'):
                break
        except:
            break
| mit |
littlstar/chromium.src | tools/perf/benchmarks/media.py | 32 | 3214 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import media
import page_sets
from telemetry import benchmark
from telemetry.page import page_test
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
class _MSEMeasurement(page_test.PageTest):
  """Reads Media Source Extensions metrics exposed by the test page.

  The page stores its results in `window.__testMetrics`: an optional 'id'
  used as a trace label, and a 'metrics' mapping from metric name to either
  a single value or a list of values (milliseconds).
  """

  def ValidateAndMeasurePage(self, page, tab, results):
    """Extracts window.__testMetrics from `tab` and records scalar values."""
    media_metric = tab.EvaluateJavaScript('window.__testMetrics')
    trace = media_metric['id'] if 'id' in media_metric else None
    # Fix: default to an empty dict rather than an empty list -- `metrics` is
    # used as a mapping below (`metrics[m]`), so the fallback should have the
    # same type. Behavior is unchanged (both defaults skip the loop).
    metrics = media_metric['metrics'] if 'metrics' in media_metric else {}
    for m in metrics:
      trace_name = '%s.%s' % (m, trace)
      if isinstance(metrics[m], list):
        results.AddValue(list_of_scalar_values.ListOfScalarValues(
            results.current_page, trace_name, units='ms',
            values=[float(v) for v in metrics[m]],
            important=True))
      else:
        results.AddValue(scalar.ScalarValue(
            results.current_page, trace_name, units='ms',
            value=float(metrics[m]), important=True))
# Android coverage is provided by MediaAndroid below.
@benchmark.Disabled('android')
class Media(benchmark.Benchmark):
  """Obtains media metrics for key user scenarios."""
  test = media.Media
  page_set = page_sets.ToughVideoCasesPageSet
# Disabled on all platforms.
@benchmark.Disabled
class MediaNetworkSimulation(benchmark.Benchmark):
  """Obtains media metrics under different network simulations."""
  test = media.Media
  page_set = page_sets.MediaCnsCasesPageSet
@benchmark.Enabled('android')
@benchmark.Disabled('l')  # Disabled on Android L.
class MediaAndroid(benchmark.Benchmark):
  """Obtains media metrics for key user scenarios on Android."""
  test = media.Media
  tag = 'android'
  page_set = page_sets.ToughVideoCasesPageSet
  # Exclude is_4k and 50 fps media files (garden* & crowd*).
  options = {'page_label_filter_exclude': 'is_4k,is_50fps'}
@benchmark.Enabled('chromeos')
class MediaChromeOS4kOnly(benchmark.Benchmark):
  """Benchmark for media performance on ChromeOS using only is_4k test content.
  """
  test = media.Media
  tag = 'chromeOS4kOnly'
  page_set = page_sets.ToughVideoCasesPageSet
  options = {
    # Run only the 4k-labelled pages; MediaChromeOS covers the rest.
    'page_label_filter': 'is_4k',
    # Exclude is_50fps test files: crbug/331816
    'page_label_filter_exclude': 'is_50fps'
  }
@benchmark.Enabled('chromeos')
class MediaChromeOS(benchmark.Benchmark):
  """Benchmark for media performance on all ChromeOS platforms.

  This benchmark does not run is_4k content, there's a separate benchmark for
  that.
  """
  test = media.Media
  tag = 'chromeOS'
  page_set = page_sets.ToughVideoCasesPageSet
  # Exclude is_50fps test files: crbug/331816
  options = {'page_label_filter_exclude': 'is_4k,is_50fps'}
class MediaSourceExtensions(benchmark.Benchmark):
  """Obtains media metrics for key media source extensions functions."""
  test = _MSEMeasurement
  page_set = page_sets.MseCasesPageSet

  def CustomizeBrowserOptions(self, options):
    """Adds the browser flags the MSE test pages require."""
    # Needed to allow XHR requests to return stream objects.
    options.AppendExtraBrowserArgs(
        ['--enable-experimental-web-platform-features',
         '--disable-gesture-requirement-for-media-playback'])
| bsd-3-clause |
JorgeCoock/django | tests/auth_tests/test_middleware.py | 217 | 2176 | from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.auth.models import User
from django.http import HttpRequest
from django.test import TestCase
class TestSessionAuthenticationMiddleware(TestCase):
    """Checks how a password change interacts with an authenticated session,
    with and without SessionAuthenticationMiddleware installed.
    """

    def setUp(self):
        # Create and log a user in, then build an HttpRequest that carries
        # the authenticated session for the middleware to process.
        self.user_password = 'test_password'
        self.user = User.objects.create_user('test_user',
                                             'test@example.com',
                                             self.user_password)
        self.middleware = AuthenticationMiddleware()
        self.assertTrue(self.client.login(
            username=self.user.username,
            password=self.user_password,
        ))
        self.request = HttpRequest()
        self.request.session = self.client.session

    def test_changed_password_doesnt_invalidate_session(self):
        """
        Changing a user's password shouldn't invalidate the session if session
        verification isn't activated.
        """
        session_key = self.request.session.session_key
        self.middleware.process_request(self.request)
        self.assertIsNotNone(self.request.user)
        self.assertFalse(self.request.user.is_anonymous())
        # After password change, user should remain logged in.
        self.user.set_password('new_password')
        self.user.save()
        self.middleware.process_request(self.request)
        self.assertIsNotNone(self.request.user)
        self.assertFalse(self.request.user.is_anonymous())
        # The session key is untouched as well.
        self.assertEqual(session_key, self.request.session.session_key)

    def test_changed_password_invalidates_session_with_middleware(self):
        # With SessionAuthenticationMiddleware appended, session verification
        # is active and the password change must log the user out.
        with self.modify_settings(MIDDLEWARE_CLASSES={'append': ['django.contrib.auth.middleware.SessionAuthenticationMiddleware']}):
            # After password change, user should be anonymous
            self.user.set_password('new_password')
            self.user.save()
            self.middleware.process_request(self.request)
            self.assertIsNotNone(self.request.user)
            self.assertTrue(self.request.user.is_anonymous())
            # session should be flushed
            self.assertIsNone(self.request.session.session_key)
| bsd-3-clause |
heia-fr/sirano | sirano/plugins/actions/sip_identity.py | 1 | 3783 | # -*- coding: utf-8 -*-
#
# This file is a part of Sirano.
#
# Copyright (C) 2015 HES-SO // HEIA-FR
# Copyright (C) 2015 Loic Gremaud <loic.gremaud@grelinfo.ch>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import re
from sirano.action import Action
from sirano.exception import UnsupportedFormatException
class SIPIdentityAction(Action):
    """Action plugin for the field Identity of the SIP protocol"""

    name = 'sip-identity'

    # Parses a SIP identity such as '"Bob" <sip:bob@example.com:5060>;tag=x'.
    # Captured groups: display (display name), user, host, maddr, devicename;
    # the long alternation swallows known URI/header parameters
    # (e.g. ;reason=unconditional;privacy=off;screen=yes).
    re_sip_identity = re.compile(
        r'^"?(?P<display>.+?)??"?\s*<?sips?:(?:(?P<user>.+)@)?(?P<host>[^;\?]+?)(?::\d{1,6})?>?\s*(?:;(?:'
        r'tag|epid|expires|transport|user|\+av-dse-enh|[^;\?]*instance|[^;\?]*model|[^;\?=]*devicename|video|audio|'
        r'ms-opaque|privacy|screen|reason|counter'
        r')[^;\?]*|;maddr=(?P<maddr>[\d\.]+)|;[^;\?=]*="(?P<devicename>[^;\?]*)"|>)*$',
        re.IGNORECASE)

    def __init__(self, app):
        super(SIPIdentityAction, self).__init__(app)

    def anonymize(self, value):
        """Replaces every identifying component of `value` (display name,
        user, host, maddr, devicename) with its replacement and returns the
        rewritten identity.

        Raises UnsupportedFormatException when `value` is not a SIP identity.
        """
        match = self.re_sip_identity.match(value)
        if match is None:
            raise UnsupportedFormatException("The regular expression does not match")
        display = match.group('display')
        user = match.group('user')
        host = match.group('host')
        maddr = match.group('maddr')
        devicename = match.group('devicename')
        if display is not None:
            value = value.replace(display, self.app.manager.data.get_replacement(display))
        if devicename is not None:
            value = value.replace(devicename, self.app.manager.data.get_data('name').get_replacement(devicename))
        if user is not None:
            value = value.replace(user, self.app.manager.data.get_replacement(user))
        if host is not None:
            value = value.replace(host, self.app.manager.data.get_replacement(host))
        else:
            # Bug fix: the exception was instantiated but never raised, so a
            # host-less identity silently slipped through unanonymized.
            raise UnsupportedFormatException("The SIP identity should have an host part")
        if maddr is not None:
            value = value.replace(maddr, self.app.manager.data.get_data('ip').get_replacement(maddr))
        return value

    def discover(self, value):
        """Registers every identifying component of `value` with the data
        manager so replacements can be generated later.

        Raises UnsupportedFormatException when `value` is not a SIP identity.
        """
        match = self.re_sip_identity.match(value)
        if match is None:
            raise UnsupportedFormatException("The regular expression does not match")
        display = match.group('display')
        user = match.group('user')
        host = match.group('host')
        maddr = match.group('maddr')
        devicename = match.group('devicename')
        if devicename is not None:
            self.app.manager.data.get_data('name').add_value(devicename)
        if display is not None:
            self.app.manager.data.add_value(display)
        if user is not None:
            self.app.manager.data.add_value(user)
        if host is not None:
            self.app.manager.data.add_value(host)
        else:
            # Bug fix: same missing `raise` as in anonymize().
            raise UnsupportedFormatException("The SIP identity should have an host part")
        if maddr is not None:
            self.app.manager.data.get_data('ip').add_value(maddr)
        return value
| gpl-2.0 |
jos4uke/getSeqFlankBlatHit | lib/python2.7/site-packages/numpy/ma/mrecords.py | 45 | 27945 | """:mod:`numpy.ma.mrecords`
Defines the equivalent of :class:`numpy.recarrays` for masked arrays,
where fields can be accessed as attributes.
Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
and the masking of individual fields.
:author: Pierre Gerard-Marchant
"""
from __future__ import division, absolute_import, print_function
#!!!: * We should make sure that no field is called '_mask','mask','_fieldmask',
#!!!: or whatever restricted keywords.
#!!!: An idea would be to no bother in the first place, and then rename the
#!!!: invalid fields with a trailing underscore...
#!!!: Maybe we could just overload the parser function ?
__author__ = "Pierre GF Gerard-Marchant"
import sys
import warnings
import numpy as np
import numpy.core.numerictypes as ntypes
from numpy.compat import basestring
from numpy import (
bool_, dtype, ndarray, recarray, array as narray
)
from numpy.core.records import (
fromarrays as recfromarrays, fromrecords as recfromrecords
)
_byteorderconv = np.core.records._byteorderconv
_typestr = ntypes._typestr
import numpy.ma as ma
from numpy.ma import MAError, MaskedArray, masked, nomask, masked_array, \
getdata, getmaskarray, filled
_check_fill_value = ma.core._check_fill_value
__all__ = ['MaskedRecords', 'mrecarray',
'fromarrays', 'fromrecords', 'fromtextfile', 'addfield',
]
reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype']
def _getformats(data):
"Returns the formats of each array of arraylist as a comma-separated string."
if hasattr(data, 'dtype'):
return ",".join([desc[1] for desc in data.dtype.descr])
formats = ''
for obj in data:
obj = np.asarray(obj)
formats += _typestr[obj.dtype.type]
if issubclass(obj.dtype.type, ntypes.flexible):
formats += repr(obj.itemsize)
formats += ','
return formats[:-1]
def _checknames(descr, names=None):
    """Checks that the field names of the descriptor ``descr`` are not some
    reserved keywords. If this is the case, a default 'f%i' is substituted.
    If the argument `names` is not None, updates the field names to valid names.
    """
    nfields = len(descr)
    default_names = ['f%i' % i for i in range(nfields)]
    if names is None:
        new_names = default_names
    else:
        # Accept a sequence of names or a comma-separated string.
        if isinstance(names, (tuple, list)):
            new_names = names
        elif isinstance(names, str):
            new_names = names.split(',')
        else:
            raise NameError("illegal input names %s" % repr(names))
        # Pad missing trailing names with the defaults.
        provided = len(new_names)
        if provided < nfields:
            new_names += default_names[provided:]
    cleaned = []
    for (name, fallback, field) in zip(new_names, default_names, descr.descr):
        if name not in reserved_fields:
            cleaned.append((name, field[1]))
        elif field[0] in reserved_fields:
            # Both the requested and the original name are reserved:
            # fall back to the default 'f%i'.
            cleaned.append((fallback, field[1]))
        else:
            cleaned.append(field)
    return np.dtype(cleaned)
def _get_fieldmask(self):
mdescr = [(n, '|b1') for n in self.dtype.names]
fdmask = np.empty(self.shape, dtype=mdescr)
fdmask.flat = tuple([False] * len(mdescr))
return fdmask
class MaskedRecords(MaskedArray, object):
    """Masked equivalent of :class:`numpy.recarray`: fields are accessible
    as attributes, and each field carries its own boolean mask.

    *IVariables*:
    _data : {recarray}
        Underlying data, as a record array.
    _mask : {boolean array}
        Mask of the records. A record is masked when all its fields are masked.
    _fieldmask : {boolean recarray}
        Record array of booleans, setting the mask of each individual field of each record.
    _fill_value : {record}
        Filling values for each field.
    """
    #............................................
    def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None,
                formats=None, names=None, titles=None,
                byteorder=None, aligned=False,
                mask=nomask, hard_mask=False, fill_value=None, keep_mask=True,
                copy=False,
                **options):
        # Build the bare recarray first; the mask is attached afterwards.
        self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset,
                                strides=strides, formats=formats, names=names,
                                titles=titles, byteorder=byteorder,
                                aligned=aligned,)
        # Mask dtype: one boolean per field, same structure as the data dtype.
        mdtype = ma.make_mask_descr(self.dtype)
        if mask is nomask or not np.size(mask):
            if not keep_mask:
                self._mask = tuple([False] * len(mdtype))
        else:
            mask = np.array(mask, copy=copy)
            if mask.shape != self.shape:
                # Accept a scalar mask or a flat mask of matching size.
                (nd, nm) = (self.size, mask.size)
                if nm == 1:
                    mask = np.resize(mask, self.shape)
                elif nm == nd:
                    mask = np.reshape(mask, self.shape)
                else:
                    msg = "Mask and data not compatible: data size is %i, " + \
                          "mask size is %i."
                    raise MAError(msg % (nd, nm))
                copy = True
            if not keep_mask:
                self.__setmask__(mask)
                self._sharedmask = True
            else:
                if mask.dtype == mdtype:
                    _mask = mask
                else:
                    # Broadcast a plain boolean mask to every field.
                    _mask = np.array([tuple([m] * len(mdtype)) for m in mask],
                                     dtype=mdtype)
                self._mask = _mask
        return self
    #......................................................
    def __array_finalize__(self, obj):
        # Make sure we have a _fieldmask by default ..
        _mask = getattr(obj, '_mask', None)
        if _mask is None:
            objmask = getattr(obj, '_mask', nomask)
            _dtype = ndarray.__getattribute__(self, 'dtype')
            if objmask is nomask:
                _mask = ma.make_mask_none(self.shape, dtype=_dtype)
            else:
                # Expand the source's plain mask to a per-field record mask.
                mdescr = ma.make_mask_descr(_dtype)
                _mask = narray([tuple([m] * len(mdescr)) for m in objmask],
                               dtype=mdescr).view(recarray)
        # Update some of the attributes
        _dict = self.__dict__
        _dict.update(_mask=_mask)
        self._update_from(obj)
        if _dict['_baseclass'] == ndarray:
            _dict['_baseclass'] = recarray
        return

    def _getdata(self):
        "Returns the data as a recarray."
        return ndarray.view(self, recarray)
    _data = property(fget=_getdata)

    def _getfieldmask(self):
        "Alias to mask"
        return self._mask
    _fieldmask = property(fget=_getfieldmask)

    def __len__(self):
        "Returns the length"
        # We have more than one record
        if self.ndim:
            return len(self._data)
        # We have only one record: return the nb of fields
        return len(self.dtype)

    def __getattribute__(self, attr):
        """Regular attribute access, falling back to field access: reading
        `rec.fieldname` returns the field as a MaskedArray sharing the mask.
        """
        try:
            return object.__getattribute__(self, attr)
        except AttributeError:
            # attr must be a fieldname
            pass
        fielddict = ndarray.__getattribute__(self, 'dtype').fields
        try:
            res = fielddict[attr][:2]
        except (TypeError, KeyError):
            raise AttributeError("record array has no attribute %s" % attr)
        # So far, so good...
        _localdict = ndarray.__getattribute__(self, '__dict__')
        _data = ndarray.view(self, _localdict['_baseclass'])
        obj = _data.getfield(*res)
        if obj.dtype.fields:
            raise NotImplementedError("MaskedRecords is currently limited to"\
                                      "simple records...")
        # Get some special attributes
        # Reset the object's mask
        hasmasked = False
        _mask = _localdict.get('_mask', None)
        if _mask is not None:
            try:
                _mask = _mask[attr]
            except IndexError:
                # Couldn't find a mask: use the default (nomask)
                pass
            # NOTE(review): np.bool here is the legacy alias of builtin bool
            # (removed in NumPy >= 1.24); this is pre-removal NumPy source.
            hasmasked = _mask.view((np.bool, (len(_mask.dtype) or 1))).any()
        if (obj.shape or hasmasked):
            obj = obj.view(MaskedArray)
            obj._baseclass = ndarray
            obj._isfield = True
            obj._mask = _mask
            # Reset the field values
            _fill_value = _localdict.get('_fill_value', None)
            if _fill_value is not None:
                try:
                    obj._fill_value = _fill_value[attr]
                except ValueError:
                    obj._fill_value = None
        else:
            # 0-d unmasked result: return a plain Python scalar.
            obj = obj.item()
        return obj

    def __setattr__(self, attr, val):
        "Sets the attribute attr to the value val."
        # Should we call __setmask__ first ?
        if attr in ['mask', 'fieldmask']:
            self.__setmask__(val)
            return
        # Create a shortcut (so that we don't have to call getattr all the time)
        _localdict = object.__getattribute__(self, '__dict__')
        # Check whether we're creating a new field
        newattr = attr not in _localdict
        try:
            # Is attr a generic attribute ?
            ret = object.__setattr__(self, attr, val)
        except:
            # Not a generic attribute: exit if it's not a valid field
            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
            optinfo = ndarray.__getattribute__(self, '_optinfo') or {}
            if not (attr in fielddict or attr in optinfo):
                exctype, value = sys.exc_info()[:2]
                raise exctype(value)
        else:
            # Get the list of names ......
            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
            # Check the attribute
            if attr not in fielddict:
                return ret
            if newattr:
                # We just added this one, or this setattr worked on an
                # internal attribute: undo it so the field machinery below
                # handles the assignment instead.
                try:
                    object.__delattr__(self, attr)
                except:
                    return ret
        # Let's try to set the field
        try:
            res = fielddict[attr][:2]
        except (TypeError, KeyError):
            raise AttributeError("record array has no attribute %s" % attr)
        #
        if val is masked:
            # Assigning `masked`: keep/choose a fill value and mask the field.
            _fill_value = _localdict['_fill_value']
            if _fill_value is not None:
                dval = _localdict['_fill_value'][attr]
            else:
                dval = val
            mval = True
        else:
            dval = filled(val)
            mval = getmaskarray(val)
        obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res)
        _localdict['_mask'].__setitem__(attr, mval)
        return obj

    def __getitem__(self, indx):
        """Returns all the fields sharing the same fieldname base.
        The fieldname base is either `_data` or `_mask`."""
        _localdict = self.__dict__
        _mask = ndarray.__getattribute__(self, '_mask')
        _data = ndarray.view(self, _localdict['_baseclass'])
        # We want a field ........
        if isinstance(indx, basestring):
            #!!!: Make sure _sharedmask is True to propagate back to _fieldmask
            #!!!: Don't use _set_mask, there are some copies being made...
            #!!!: ...that break propagation
            #!!!: Don't force the mask to nomask, that wrecks easy masking
            obj = _data[indx].view(MaskedArray)
            obj._mask = _mask[indx]
            obj._sharedmask = True
            fval = _localdict['_fill_value']
            if fval is not None:
                obj._fill_value = fval[indx]
            # Force to masked if the mask is True
            if not obj.ndim and obj._mask:
                return masked
            return obj
        # We want some elements ..
        # First, the data ........
        obj = np.array(_data[indx], copy=False).view(mrecarray)
        obj._mask = np.array(_mask[indx], copy=False).view(recarray)
        return obj
    #....
    def __setitem__(self, indx, value):
        "Sets the given record to value."
        MaskedArray.__setitem__(self, indx, value)
        # Field assignment also has to update the per-field mask.
        if isinstance(indx, basestring):
            self._mask[indx] = ma.getmaskarray(value)

    def __str__(self):
        "Calculates the string representation."
        if self.size > 1:
            mstr = ["(%s)" % ",".join([str(i) for i in s])
                    for s in zip(*[getattr(self, f) for f in self.dtype.names])]
            return "[%s]" % ", ".join(mstr)
        else:
            mstr = ["%s" % ",".join([str(i) for i in s])
                    for s in zip([getattr(self, f) for f in self.dtype.names])]
            return "(%s)" % ", ".join(mstr)
    #
    def __repr__(self):
        "Calculates the repr representation."
        _names = self.dtype.names
        # Right-align the field names on the longest one.
        fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,)
        reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names]
        reprstr.insert(0, 'masked_records(')
        reprstr.extend([fmt % (' fill_value', self.fill_value),
                        ' )'])
        return str("\n".join(reprstr))
    # #......................................................
    def view(self, dtype=None, type=None):
        """Returns a view of the mrecarray."""
        # OK, basic copy-paste from MaskedArray.view...
        if dtype is None:
            if type is None:
                output = ndarray.view(self)
            else:
                output = ndarray.view(self, type)
        # Here again...
        elif type is None:
            try:
                if issubclass(dtype, ndarray):
                    output = ndarray.view(self, dtype)
                    dtype = None
                else:
                    output = ndarray.view(self, dtype)
            # OK, there's the change
            except TypeError:
                dtype = np.dtype(dtype)
                # we need to revert to MaskedArray, but keeping the possibility
                # ...of subclasses (eg, TimeSeriesRecords), so we'll force a type
                # ...set to the first parent
                if dtype.fields is None:
                    basetype = self.__class__.__bases__[0]
                    output = self.__array__().view(dtype, basetype)
                    output._update_from(self)
                else:
                    output = ndarray.view(self, dtype)
                output._fill_value = None
        else:
            output = ndarray.view(self, dtype, type)
        # Update the mask, just like in MaskedArray.view
        if (getattr(output, '_mask', nomask) is not nomask):
            mdtype = ma.make_mask_descr(output.dtype)
            output._mask = self._mask.view(mdtype, ndarray)
            output._mask.shape = output.shape
        return output

    def harden_mask(self):
        "Forces the mask to hard"
        self._hardmask = True

    def soften_mask(self):
        "Forces the mask to soft"
        self._hardmask = False

    def copy(self):
        """Returns a copy of the masked record."""
        _localdict = self.__dict__  # (kept for symmetry; not used below)
        copied = self._data.copy().view(type(self))
        copied._mask = self._mask.copy()
        return copied

    def tolist(self, fill_value=None):
        """Copy the data portion of the array to a hierarchical python
        list and returns that list.

        Data items are converted to the nearest compatible Python
        type. Masked values are converted to fill_value. If
        fill_value is None, the corresponding entries in the output
        list will be ``None``.
        """
        if fill_value is not None:
            return self.filled(fill_value).tolist()
        # Fill first, then blank out the masked entries with None.
        result = narray(self.filled().tolist(), dtype=object)
        mask = narray(self._mask.tolist())
        result[mask] = None
        return result.tolist()
    #--------------------------------------------
    # Pickling
    def __getstate__(self):
        """Return the internal state of the masked array, for pickling purposes.

        """
        state = (1,
                 self.shape,
                 self.dtype,
                 self.flags.fnc,
                 self._data.tobytes(),
                 self._mask.tobytes(),
                 self._fill_value,
                 )
        return state
    #
    def __setstate__(self, state):
        """Restore the internal state of the masked array, for pickling purposes.
        ``state`` is typically the output of the ``__getstate__`` output, and is a
        5-tuple:

        - class name
        - a tuple giving the shape of the data
        - a typecode for the data
        - a binary string for the data
        - a binary string for the mask.
        """
        (ver, shp, typ, isf, raw, msk, flv) = state
        ndarray.__setstate__(self, (shp, typ, isf, raw))
        # Rebuild the per-field boolean mask dtype from the data dtype.
        mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr])
        self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))
        self.fill_value = flv
    #
    def __reduce__(self):
        """Return a 3-tuple for pickling a MaskedArray.

        """
        return (_mrreconstruct,
                (self.__class__, self._baseclass, (0,), 'b',),
                self.__getstate__())
def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
    """Internal function that builds a new MaskedArray from the
    information stored in a pickle.

    Called by ``MaskedRecords.__reduce__``; the real state is restored
    afterwards through ``__setstate__``.
    """
    _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
    # _data._mask = ndarray.__new__(ndarray, baseshape, 'b1')
    # return _data
    _mask = ndarray.__new__(ndarray, baseshape, 'b1')
    return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
mrecarray = MaskedRecords
#####---------------------------------------------------------------------------
#---- --- Constructors ---
#####---------------------------------------------------------------------------
def fromarrays(arraylist, dtype=None, shape=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None,
fill_value=None):
"""Creates a mrecarray from a (flat) list of masked arrays.
Parameters
----------
arraylist : sequence
A list of (masked) arrays. Each element of the sequence is first converted
to a masked array if needed. If a 2D array is passed as argument, it is
processed line by line
dtype : {None, dtype}, optional
Data type descriptor.
shape : {None, integer}, optional
Number of records. If None, shape is defined from the shape of the
first array in the list.
formats : {None, sequence}, optional
Sequence of formats for each individual field. If None, the formats will
be autodetected by inspecting the fields and selecting the highest dtype
possible.
names : {None, sequence}, optional
Sequence of the names of each field.
fill_value : {None, sequence}, optional
Sequence of data to be used as filling values.
Notes
-----
Lists of tuples should be preferred over lists of lists for faster processing.
"""
datalist = [getdata(x) for x in arraylist]
masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist]
_array = recfromarrays(datalist,
dtype=dtype, shape=shape, formats=formats,
names=names, titles=titles, aligned=aligned,
byteorder=byteorder).view(mrecarray)
_array._mask.flat = list(zip(*masklist))
if fill_value is not None:
_array.fill_value = fill_value
return _array
#..............................................................................
def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None,
                titles=None, aligned=False, byteorder=None,
                fill_value=None, mask=nomask):
    """Creates a MaskedRecords from a list of records.

    Parameters
    ----------
    reclist : sequence
        A list of records. Each element of the sequence is first converted
        to a masked array if needed. If a 2D array is passed as argument, it is
        processed line by line
    dtype : {None, dtype}, optional
        Data type descriptor.
    shape : {None,int}, optional
        Number of records. If None, ``shape`` is defined from the shape of the
        first array in the list.
    formats : {None, sequence}, optional
        Sequence of formats for each individual field. If None, the formats will
        be autodetected by inspecting the fields and selecting the highest dtype
        possible.
    names : {None, sequence}, optional
        Sequence of the names of each field.
    fill_value : {None, sequence}, optional
        Sequence of data to be used as filling values.
    mask : {nomask, sequence}, optional.
        External mask to apply on the data.

    Notes
    -----
    Lists of tuples should be preferred over lists of lists for faster processing.
    """
    # Grab the initial _fieldmask, if needed:
    _mask = getattr(reclist, '_mask', None)
    # Get the list of records.....
    # (nfields is computed for its side effect of validating reclist[0];
    # the value itself is currently unused.)
    try:
        nfields = len(reclist[0])
    except TypeError:
        nfields = len(reclist[0].dtype)
    if isinstance(reclist, ndarray):
        # Make sure we don't have some hidden mask
        if isinstance(reclist, MaskedArray):
            reclist = reclist.filled().view(ndarray)
        # Grab the initial dtype, just in case
        if dtype is None:
            dtype = reclist.dtype
        reclist = reclist.tolist()
    mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
                          names=names, titles=titles,
                          aligned=aligned, byteorder=byteorder).view(mrecarray)
    # Set the fill_value if needed
    if fill_value is not None:
        mrec.fill_value = fill_value
    # Now, let's deal w/ the mask
    if mask is not nomask:
        mask = np.array(mask, copy=False)
        maskrecordlength = len(mask.dtype)
        if maskrecordlength:
            # Structured mask: assign record by record.
            mrec._mask.flat = mask
        elif len(mask.shape) == 2:
            # 2D plain mask: one row of booleans per record.
            mrec._mask.flat = [tuple(m) for m in mask]
        else:
            mrec.__setmask__(mask)
    if _mask is not None:
        mrec._mask[:] = _mask
    return mrec
def _guessvartypes(arr):
"""Tries to guess the dtypes of the str_ ndarray `arr`, by testing element-wise
conversion. Returns a list of dtypes.
The array is first converted to ndarray. If the array is 2D, the test is performed
on the first line. An exception is raised if the file is 3D or more.
"""
vartypes = []
arr = np.asarray(arr)
if len(arr.shape) == 2 :
arr = arr[0]
elif len(arr.shape) > 2:
raise ValueError("The array should be 2D at most!")
# Start the conversion loop .......
for f in arr:
try:
int(f)
except ValueError:
try:
float(f)
except ValueError:
try:
val = complex(f)
except ValueError:
vartypes.append(arr.dtype)
else:
vartypes.append(np.dtype(complex))
else:
vartypes.append(np.dtype(float))
else:
vartypes.append(np.dtype(int))
return vartypes
def openfile(fname):
    "Opens the file handle of file `fname`"
    # Anything that already behaves like a file is returned untouched.
    if hasattr(fname, 'readline'):
        return fname
    # Try to open the file and guess its type
    try:
        stream = open(fname)
    except IOError:
        raise IOError("No such file: '%s'" % fname)
    # Peek at the first line to detect a binary payload, then rewind.
    first = stream.readline()
    if first[:2] == "\\x":
        stream.close()
        raise NotImplementedError("Wow, binary file")
    stream.seek(0, 0)
    return stream
def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
                 varnames=None, vartypes=None):
    """Creates a mrecarray from data stored in the file `filename`.

    Parameters
    ----------
    filename : {file name/handle}
        Handle of an opened file.
    delimitor : {None, string}, optional
        Alphanumeric character used to separate columns in the file.
        If None, any (group of) white spacestring(s) will be used.
    commentchar : {'#', string}, optional
        Alphanumeric character used to mark the start of a comment.
    missingchar : {'', string}, optional
        String indicating missing data, and used to create the masks.
    varnames : {None, sequence}, optional
        Sequence of the variable names. If None, a list will be created from
        the first non empty line of the file.
    vartypes : {None, sequence}, optional
        Sequence of the variables dtypes. If None, it will be estimated from
        the first non-commented line.

    Ultra simple: the varnames are in the header, one line"""
    # Try to open the file ......................
    f = openfile(fname)
    # Get the first non-empty line as the varnames
    while True:
        line = f.readline()
        # Strip any trailing comment before splitting into names.
        firstline = line[:line.find(commentchar)].strip()
        _varnames = firstline.split(delimitor)
        if len(_varnames) > 1:
            break
    if varnames is None:
        varnames = _varnames
    # Get the data ..............................
    # Every remaining non-comment, non-empty line becomes one row of strings.
    _variables = masked_array([line.strip().split(delimitor) for line in f
                               if line[0] != commentchar and len(line) > 1])
    (_, nfields) = _variables.shape
    f.close()
    # Try to guess the dtype ....................
    if vartypes is None:
        vartypes = _guessvartypes(_variables[0])
    else:
        vartypes = [np.dtype(v) for v in vartypes]
        if len(vartypes) != nfields:
            # Mismatched explicit dtypes: warn and fall back to guessing.
            msg = "Attempting to %i dtypes for %i fields!"
            msg += " Reverting to default."
            warnings.warn(msg % (len(vartypes), nfields))
            vartypes = _guessvartypes(_variables[0])
    # Construct the descriptor ..................
    mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
    mfillv = [ma.default_fill_value(f) for f in vartypes]
    # Get the data and the mask .................
    # We just need a list of masked_arrays. It's easier to create it like that:
    _mask = (_variables.T == missingchar)
    _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
                 for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]
    return fromarrays(_datalist, dtype=mdescr)
#....................................................................
def addfield(mrecord, newfield, newfieldname=None):
    """Adds a new field to the masked record array, using `newfield` as data
    and `newfieldname` as name. If `newfieldname` is None, the new field name is
    set to 'fi', where `i` is the number of existing fields.

    Returns a new MaskedRecords; `mrecord` itself is left untouched.
    """
    _data = mrecord._data
    _mask = mrecord._mask
    if newfieldname is None or newfieldname in reserved_fields:
        newfieldname = 'f%i' % len(_data.dtype)
    newfield = ma.array(newfield)
    # Get the new data ............
    # Create a new empty recarray
    newdtype = np.dtype(_data.dtype.descr + [(newfieldname, newfield.dtype)])
    newdata = recarray(_data.shape, newdtype)
    # Add the existing fields (copied field by field).
    [newdata.setfield(_data.getfield(*f), *f)
     for f in _data.dtype.fields.values()]
    # Add the new field
    newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
    newdata = newdata.view(MaskedRecords)
    # Get the new mask .............
    # Create a new empty recarray
    newmdtype = np.dtype([(n, bool_) for n in newdtype.names])
    newmask = recarray(_data.shape, newmdtype)
    # Add the old masks
    [newmask.setfield(_mask.getfield(*f), *f)
     for f in _mask.dtype.fields.values()]
    # Add the mask of the new field
    newmask.setfield(getmaskarray(newfield),
                     *newmask.dtype.fields[newfieldname])
    newdata._mask = newmask
    return newdata
| gpl-2.0 |
SymbiFlow/pycapnp | test/test_capability_context.py | 2 | 8444 | import os
import pytest
import capnp
this_dir = os.path.dirname(__file__)
# flake8: noqa: E501
@pytest.fixture
def capability():
    """Loads a fresh copy of the test_capability.capnp schema for each test."""
    # Reset the global parser so repeated loads don't collide across tests.
    capnp.cleanup_global_schema_parser()
    return capnp.load(os.path.join(this_dir, 'test_capability.capnp'))
class Server:
    """Plain-Python server implementing TestInterface via *_context methods."""

    def __init__(self, val=1):
        self.val = val

    def foo_context(self, context):
        # x = i * 5 + val, plus one when the optional flag `j` is truthy.
        params = context.params
        bonus = 1 if params.j else 0
        context.results.x = str(params.i * 5 + bonus + self.val)

    def buz_context(self, context):
        # Echo the host field of the struct parameter with a fixed suffix.
        context.results.x = context.params.i.host + '_test'
class PipelineServer:
    """Server used to exercise promise pipelining."""

    def getCap_context(self, context):
        # Chain on inCap.foo(...) and fill in the results once it resolves.
        def _then(response):
            context.results.s = response.x + '_foo'
            # NOTE(review): `capability` is a pytest fixture invoked directly
            # here; pytest forbids that, which is why test_pipeline_context
            # below is marked xfail.
            context.results.outBox.cap = capability().TestInterface._new_server(Server(100))
        return context.params.inCap.foo(i=context.params.n).then(_then)
def test_client_context(capability):
    """Exercise the low-level request-building API on TestInterface."""
    iface = capability.TestInterface._new_client(Server())
    # Build a request via the generic _request entry point.
    request = iface._request('foo')
    request.i = 5
    answer = request.send().wait()
    assert answer.x == '26'
    # Same call through the generated foo_request helper.
    request = iface.foo_request()
    request.i = 5
    answer = request.send().wait()
    assert answer.x == '26'
    # Unknown method names are rejected.
    with pytest.raises(AttributeError):
        iface.foo2_request()
    # Assigning the wrong type to a parameter is rejected.
    bad = iface.foo_request()
    with pytest.raises(Exception):
        bad.i = 'foo'
    # Unknown parameter names are rejected.
    bad = iface.foo_request()
    with pytest.raises(AttributeError):
        bad.baz = 1
def test_simple_client_context(capability):
    """Exercise the convenience call API with positional and keyword args."""
    iface = capability.TestInterface._new_client(Server())
    # Generic _send entry point.
    assert iface._send('foo', i=5).wait().x == '26'
    # Keyword, mixed, and positional spellings of the same call.
    assert iface.foo(i=5).wait().x == '26'
    assert iface.foo(i=5, j=True).wait().x == '27'
    assert iface.foo(5).wait().x == '26'
    assert iface.foo(5, True).wait().x == '27'
    assert iface.foo(5, j=True).wait().x == '27'
    # Struct-typed parameter.
    host_msg = capability.TestSturdyRefHostId.new_message(host='localhost')
    assert iface.buz(host_msg).wait().x == 'localhost_test'
    # Too many positional arguments.
    with pytest.raises(Exception):
        iface.foo(5, 10)
    with pytest.raises(Exception):
        iface.foo(5, True, 100)
    # Wrong parameter type.
    with pytest.raises(Exception):
        iface.foo(i='foo')
    # Unknown method name.
    with pytest.raises(AttributeError):
        iface.foo2(i=5)
    # Unknown parameter name.
    with pytest.raises(Exception):
        iface.foo(baz=5)
@pytest.mark.xfail
def test_pipeline_context(capability):
    """Pipelined calls on a promised capability resolve before the promise.

    Marked xfail: PipelineServer.getCap_context calls the ``capability``
    fixture function directly, which newer pytest versions forbid
    (KjException wrapping ``Fixture "capability" called directly``); see
    https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly
    """
    client = capability.TestPipeline._new_client(PipelineServer())
    foo_client = capability.TestInterface._new_client(Server())
    remote = client.getCap(n=5, inCap=foo_client)
    # Use the pipelined capability before the outer call has completed.
    outCap = remote.outBox.cap
    pipelinePromise = outCap.foo(i=10)
    response = pipelinePromise.wait()
    assert response.x == '150'  # Server(100): 10 * 5 + 100
    response = remote.wait()
    assert response.s == '26_foo'
class BadServer:
    """Server whose foo assigns an undeclared result field to trigger an error."""

    def __init__(self, val=1):
        self.val = val

    def foo_context(self, context):
        results = context.results
        results.x = str(context.params.i * 5 + self.val)
        results.x2 = 5  # raises exception
def test_exception_client_context(capability):
    """A server assigning an unknown result field surfaces a KjException."""
    failing = capability.TestInterface._new_client(BadServer())
    promise = failing._send('foo', i=5)
    with pytest.raises(capnp.KjException):
        promise.wait()
class BadPipelineServer:
    """Pipeline server whose error handler converts failures to a marker."""

    def getCap_context(self, context):
        def on_success(response):
            context.results.s = response.x + '_foo'
            context.results.outBox.cap = capability().TestInterface._new_server(Server(100))

        def on_error(error):
            raise Exception('test was a success')

        return context.params.inCap.foo(i=context.params.n).then(on_success, on_error)
def test_exception_chain_context(capability):
    """An error raised inside a .then error handler surfaces through wait()."""
    pipeline = capability.TestPipeline._new_client(BadPipelineServer())
    failing = capability.TestInterface._new_client(BadServer())
    promise = pipeline.getCap(n=5, inCap=failing)
    try:
        promise.wait()
    except Exception as exc:
        assert 'test was a success' in str(exc)
def test_pipeline_exception_context(capability):
    """When the callee errors, pipelined calls and the promise both fail."""
    pipeline = capability.TestPipeline._new_client(BadPipelineServer())
    failing = capability.TestInterface._new_client(BadServer())
    promise = pipeline.getCap(n=5, inCap=failing)
    chained = promise.outBox.cap.foo(i=10)
    with pytest.raises(Exception):
        chained.wait()
    with pytest.raises(Exception):
        promise.wait()
def test_casting_context(capability):
    """upcast/cast_as work along the schema hierarchy; unrelated casts fail."""
    derived = capability.TestExtends._new_client(Server())
    base = derived.upcast(capability.TestInterface)
    _ = base.cast_as(capability.TestInterface)
    # TestPipeline is not an ancestor of TestExtends.
    with pytest.raises(Exception):
        derived.upcast(capability.TestPipeline)
class TailCallOrder:
    """Returns an incrementing sequence number on each getCallSequence call."""

    def __init__(self):
        # Starts at -1 so the first call reports 0.
        self.count = -1

    def getCallSequence_context(self, context):
        self.count = self.count + 1
        context.results.n = self.count
class TailCaller:
    """Caller that forwards foo to the callee via a capnp tail call."""

    def __init__(self):
        self.count = 0

    def foo_context(self, context):
        self.count += 1
        forwarded = context.params.callee.foo_request(i=context.params.i,
                                                      t='from TailCaller')
        return context.tail_call(forwarded)
class TailCallee:
    """Callee that echoes the request and attaches a TestCallOrder capability."""

    def __init__(self):
        self.count = 0

    def foo_context(self, context):
        self.count += 1
        res = context.results
        res.i = context.params.i
        res.t = context.params.t
        res.c = capability().TestCallOrder._new_server(TailCallOrder())
@pytest.mark.xfail
def test_tail_call(capability):
    """Tail calls dispatch through caller and callee exactly once each.

    Marked xfail: TailCallee.foo_context calls the ``capability`` fixture
    function directly, which newer pytest versions forbid (KjException
    wrapping ``Fixture "capability" called directly``); see
    https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly
    """
    callee_server = TailCallee()
    caller_server = TailCaller()
    callee = capability.TestTailCallee._new_client(callee_server)
    caller = capability.TestTailCaller._new_client(caller_server)
    promise = caller.foo(i=456, callee=callee)
    # Pipeline a call on the eventual result before waiting.
    dependent_call1 = promise.c.getCallSequence()
    response = promise.wait()
    assert response.i == 456
    # NOTE(review): duplicated assertion — probably intended to check
    # response.t == 'from TailCaller'; confirm against upstream capnp tests.
    assert response.i == 456
    dependent_call2 = response.c.getCallSequence()
    dependent_call3 = response.c.getCallSequence()
    result = dependent_call1.wait()
    assert result.n == 0
    result = dependent_call2.wait()
    assert result.n == 1
    result = dependent_call3.wait()
    assert result.n == 2
    # The tail call must not re-enter either server a second time.
    assert callee_server.count == 1
    assert caller_server.count == 1
| bsd-2-clause |
aksaxena80/test | tensorflow/python/summary/event_accumulator.py | 5 | 15424 | """Takes a generator of values, and accumulates them for a frontend."""
import collections
import threading
from tensorflow.python.platform import gfile
from tensorflow.python.platform import logging
from tensorflow.python.summary.impl import directory_watcher
from tensorflow.python.summary.impl import event_file_loader
from tensorflow.python.summary.impl import reservoir
# Short alias so the event-record definitions below read compactly.
namedtuple = collections.namedtuple

# Lightweight immutable records handed to the frontend, one per event kind.
ScalarEvent = namedtuple('ScalarEvent',
                         ['wall_time', 'step', 'value'])
CompressedHistogramEvent = namedtuple('CompressedHistogramEvent',
                                      ['wall_time', 'step',
                                       'compressed_histogram_values'])
CompressedHistogramValue = namedtuple('CompressedHistogramValue',
                                      ['basis_point', 'value'])
HistogramEvent = namedtuple('HistogramEvent',
                            ['wall_time', 'step', 'histogram_value'])
HistogramValue = namedtuple('HistogramValue',
                            ['min', 'max', 'num', 'sum', 'sum_squares',
                             'bucket_limit', 'bucket'])
ImageEvent = namedtuple('ImageEvent',
                        ['wall_time', 'step', 'encoded_image_string',
                         'width', 'height'])
## The tagTypes below are just arbitrary strings chosen to pass the type
## information of the tag from the backend to the frontend
COMPRESSED_HISTOGRAMS = 'compressedHistograms'
HISTOGRAMS = 'histograms'
IMAGES = 'images'
SCALARS = 'scalars'
GRAPH = 'graph'
## normal CDF for std_devs: (-Inf, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, Inf)
## naturally gives bands around median of width 1 std dev, 2 std dev, 3 std dev,
## and then the long tail.
NORMAL_HISTOGRAM_BPS = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000)
# Default per-tag retention limits; bounded so a long run cannot OOM clients.
DEFAULT_SIZE_GUIDANCE = {
    COMPRESSED_HISTOGRAMS: 500,
    IMAGES: 4,
    SCALARS: 10000,
    HISTOGRAMS: 1,
}
# A size of 0 means "keep every event" (useful for tests or offline analysis).
STORE_EVERYTHING_SIZE_GUIDANCE = {
    COMPRESSED_HISTOGRAMS: 0,
    IMAGES: 0,
    SCALARS: 0,
    HISTOGRAMS: 0,
}
def IsTensorFlowEventsFile(path):
  """Check the path name to see if it is probably a TF Events file."""
  # Event files are conventionally named *.tfevents.<timestamp>.<hostname>.
  return path.find('tfevents') != -1
class EventAccumulator(object):
  """An `EventAccumulator` takes an event generator, and accumulates the values.

  The `EventAccumulator` is intended to provide a convenient Python interface
  for loading Event data written during a TensorFlow run. TensorFlow writes out
  `Event` protobuf objects, which have a timestamp and step number, and often
  contain a `Summary`. Summaries can have different kinds of data like an image,
  a scalar value, or a histogram. The Summaries also have a tag, which we use to
  organize logically related data. The `EventAccumulator` supports retrieving
  the `Event` and `Summary` data by its tag.

  Calling `Tags()` gets a map from `tagType` (e.g. `'images'`,
  `'compressedHistograms'`, `'scalars'`, etc) to the associated tags for those
  data types. Then, various functional endpoints (eg
  `Accumulator.Scalars(tag)`) allow for the retrieval of all data
  associated with that tag.

  Before usage, the `EventAccumulator` must be activated via `Reload()` or
  `AutoUpdate(interval)`.

  If activated via `Reload()`, it loads synchronously, so calls to `Values` or
  `Tags` will block until all outstanding events are processed. Afterwards,
  `Reload()` may be called again to load any new data.

  If activated via `AutoUpdate(interval)`, it loads asynchronously, so calls to
  `Values` or `Tags` will immediately return a valid subset of the outstanding
  event data. It reloads new data every `interval` seconds.

  Histograms and images are very large, so storing all of them is not
  recommended.

  @@Reload
  @@AutoUpdate
  @@Tags
  @@Scalars
  @@Graph
  @@Histograms
  @@CompressedHistograms
  @@Images
  """

  def __init__(self, path, size_guidance=DEFAULT_SIZE_GUIDANCE,
               compression_bps=NORMAL_HISTOGRAM_BPS):
    """Construct the `EventAccumulator`.

    Args:
      path: A file path to a directory containing tf events files, or a single
        tf events file. The accumulator will load events from this path.
      size_guidance: Information on how much data the EventAccumulator should
        store in memory. The DEFAULT_SIZE_GUIDANCE tries not to store too much
        so as to avoid OOMing the client. The size_guidance should be a map
        from a `tagType` string to an integer representing the number of
        items to keep per tag for items of that `tagType`. If the size is 0,
        all events are stored.
      compression_bps: Information on how the `EventAccumulator` should compress
        histogram data for the `CompressedHistograms` tag (for details see
        `ProcessCompressedHistogram`).
    """
    # Fill any tag types the caller did not specify with the defaults.
    sizes = {}
    for key in DEFAULT_SIZE_GUIDANCE:
      if key in size_guidance:
        sizes[key] = size_guidance[key]
      else:
        sizes[key] = DEFAULT_SIZE_GUIDANCE[key]
    self._scalars = reservoir.Reservoir(size=sizes[SCALARS])
    self._graph = None
    self._histograms = reservoir.Reservoir(size=sizes[HISTOGRAMS])
    self._compressed_histograms = reservoir.Reservoir(
        size=sizes[COMPRESSED_HISTOGRAMS])
    self._images = reservoir.Reservoir(size=sizes[IMAGES])
    # Serializes access to the underlying event generator.
    self._generator_mutex = threading.Lock()
    self._generator = _GeneratorFromPath(path)
    self._is_autoupdating = False
    self._activated = False
    self._compression_bps = compression_bps

  def Reload(self):
    """Loads all events added since the last call to `Reload`.

    If `Reload` was never called, loads all events in the file.
    Calling `Reload` activates the `EventAccumulator`.

    Returns:
      The `EventAccumulator`.
    """
    self._activated = True
    with self._generator_mutex:
      for event in self._generator.Load():
        if event.HasField('graph_def'):
          if self._graph is not None:
            logging.warn(('Found more than one graph event per run. '
                          'Overwriting the graph with the newest event'))
          self._graph = event.graph_def
        elif event.HasField('summary'):
          for value in event.summary.value:
            if value.HasField('simple_value'):
              self._ProcessScalar(value.tag, event.wall_time, event.step,
                                  value.simple_value)
            elif value.HasField('histo'):
              self._ProcessHistogram(value.tag, event.wall_time, event.step,
                                     value.histo)
              self._ProcessCompressedHistogram(value.tag, event.wall_time,
                                               event.step, value.histo)
            elif value.HasField('image'):
              self._ProcessImage(value.tag, event.wall_time, event.step,
                                 value.image)
    return self

  def AutoUpdate(self, interval=60):
    """Asynchronously load all events, and periodically reload.

    Calling this function is not thread safe.
    Calling this function activates the `EventAccumulator`.

    Args:
      interval: how many seconds after each successful reload to load new events
        (default 60)

    Returns:
      The `EventAccumulator`.
    """
    if self._is_autoupdating:
      return
    self._is_autoupdating = True
    self._activated = True
    def Update():
      self.Reload()
      logging.info('EventAccumulator update triggered')
      # Schedule the next reload after this one has finished.
      t = threading.Timer(interval, Update)
      t.daemon = True
      t.start()
    # Asynchronously start the update process, so that the accumulator can
    # immediately serve data, even if there is a very large event file to parse
    t = threading.Timer(0, Update)
    t.daemon = True
    t.start()
    return self

  def Tags(self):
    """Return all tags found in the value stream.

    Raises:
      RuntimeError: If the `EventAccumulator` has not been activated.

    Returns:
      A `{tagType: ['list', 'of', 'tags']}` dictionary.
    """
    self._VerifyActivated()
    return {IMAGES: self._images.Keys(),
            HISTOGRAMS: self._histograms.Keys(),
            SCALARS: self._scalars.Keys(),
            COMPRESSED_HISTOGRAMS: self._compressed_histograms.Keys(),
            GRAPH: self._graph is not None}

  def Scalars(self, tag):
    """Given a summary tag, return all associated `ScalarEvent`s.

    Args:
      tag: A string tag associated with the events.

    Raises:
      KeyError: If the tag is not found.
      RuntimeError: If the `EventAccumulator` has not been activated.

    Returns:
      An array of `ScalarEvent`s.
    """
    self._VerifyActivated()
    return self._scalars.Items(tag)

  def Graph(self):
    """Return the graph definition, if there is one.

    Raises:
      ValueError: If there is no graph for this run.
      RuntimeError: If the `EventAccumulator` has not been activated.

    Returns:
      The `graph_def` proto.
    """
    self._VerifyActivated()
    if self._graph is None:
      raise ValueError('There is no graph in this EventAccumulator')
    return self._graph

  def Histograms(self, tag):
    """Given a summary tag, return all associated histograms.

    Args:
      tag: A string tag associated with the events.

    Raises:
      KeyError: If the tag is not found.
      RuntimeError: If the `EventAccumulator` has not been activated.

    Returns:
      An array of `HistogramEvent`s.
    """
    self._VerifyActivated()
    return self._histograms.Items(tag)

  def CompressedHistograms(self, tag):
    """Given a summary tag, return all associated compressed histograms.

    Args:
      tag: A string tag associated with the events.

    Raises:
      KeyError: If the tag is not found.
      RuntimeError: If the `EventAccumulator` has not been activated.

    Returns:
      An array of `CompressedHistogramEvent`s.
    """
    self._VerifyActivated()
    return self._compressed_histograms.Items(tag)

  def Images(self, tag):
    """Given a summary tag, return all associated images.

    Args:
      tag: A string tag associated with the events.

    Raises:
      KeyError: If the tag is not found.
      RuntimeError: If the `EventAccumulator` has not been activated.

    Returns:
      An array of `ImageEvent`s.
    """
    self._VerifyActivated()
    return self._images.Items(tag)

  def _VerifyActivated(self):
    """Raise unless `Reload` or `AutoUpdate` has been called at least once."""
    if not self._activated:
      raise RuntimeError('Accumulator must be activated before it may be used.')

  def _ProcessScalar(self, tag, wall_time, step, scalar):
    """Processes a simple value by adding it to accumulated state."""
    sv = ScalarEvent(wall_time=wall_time, step=step, value=scalar)
    self._scalars.AddItem(tag, sv)

  def _ProcessHistogram(self, tag, wall_time, step, histo):
    """Processes a histogram by adding it to accumulated state."""
    histogram_value = HistogramValue(
        min=histo.min,
        max=histo.max,
        num=histo.num,
        sum=histo.sum,
        sum_squares=histo.sum_squares,
        # convert from proto repeated to list
        bucket_limit=list(histo.bucket_limit),
        bucket=list(histo.bucket),
    )
    histogram_event = HistogramEvent(
        wall_time=wall_time,
        step=step,
        histogram_value=histogram_value,
    )
    self._histograms.AddItem(tag, histogram_event)

  def _Remap(self, x, x0, x1, y0, y1):
    """Linearly map from [x0, x1] unto [y0, y1]."""
    return y0 + (x - x0) * float(y1 - y0)/(x1 - x0)

  def _Percentile(self, compression_bps, bucket_limit, cumsum_weights,
                  histo_min, histo_max, histo_num):
    """Linearly interpolates a histogram weight for a particular basis point.

    Uses clamping methods on `histo_min` and `histo_max` to produce tight
    linear estimates of the histogram weight at a particular basis point.

    Args:
      compression_bps: The desired basis point at which to estimate the weight
      bucket_limit: An array of the RHS histogram bucket limits
      cumsum_weights: A cumulative sum of the fraction of weights in each
        histogram bucket, represented in basis points.
      histo_min: The minimum weight observed in the weight histogram
      histo_max: The maximum weight observed in the weight histogram
      histo_num: The number of items in the weight histogram

    Returns:
      A linearly interpolated value of the histogram weight estimate.
    """
    if histo_num == 0: return 0
    for i, cumsum in enumerate(cumsum_weights):
      if cumsum >= compression_bps:
        cumsum_prev = cumsum_weights[i-1] if i > 0 else 0
        # Prevent cumsum = 0, cumsum_prev = 0, lerp divide by zero.
        if cumsum == cumsum_prev: continue
        # Calculate the lower bound of interpolation
        lhs = bucket_limit[i-1] if (i > 0 and cumsum_prev > 0) else histo_min
        lhs = max(lhs, histo_min)
        # Calculate the upper bound of interpolation
        rhs = bucket_limit[i]
        rhs = min(rhs, histo_max)
        weight = self._Remap(compression_bps, cumsum_prev, cumsum, lhs, rhs)
        return weight
    ## We have not exceeded cumsum, so return the max observed.
    return histo_max

  def _ProcessCompressedHistogram(self, tag, wall_time, step, histo):
    """Processes a histogram by adding a compression to accumulated state.

    Adds a compressed histogram by linearly interpolating histogram buckets to
    represent the histogram weight at multiple compression points. Uses
    self._compression_bps (passed to EventAccumulator constructor) as the
    compression points (represented in basis points, 1/100ths of a percent).

    Args:
      tag: A string name of the tag for which histograms are retrieved.
      wall_time: Time in seconds since epoch
      step: Number of steps that have passed
      histo: proto2 histogram Object
    """
    def _CumulativeSum(arr):
      # Single-pass running total; the previous slicing version was O(n^2).
      total = 0
      sums = []
      for x in arr:
        total += x
        sums.append(total)
      return sums

    # Convert from proto repeated field into a Python list.
    bucket = list(histo.bucket)
    bucket_limit = list(histo.bucket_limit)
    bucket_total = sum(bucket)
    if bucket_total == 0:
      # An empty histogram carries no weight anywhere; avoid dividing by zero.
      fraction_weights = [0.0 for _ in bucket]
    else:
      fraction_weights = [float(10000*x)/bucket_total for x in bucket]
    cumsum_weights = _CumulativeSum(fraction_weights)
    percentiles = [
        self._Percentile(bps, bucket_limit, cumsum_weights, histo.min,
                         histo.max, histo.num) for bps in self._compression_bps
    ]
    compressed_histogram_values = [CompressedHistogramValue(
        basis_point=bps,
        value=value) for bps, value in zip(self._compression_bps, percentiles)]
    histogram_event = CompressedHistogramEvent(
        wall_time=wall_time,
        step=step,
        compressed_histogram_values=compressed_histogram_values)
    self._compressed_histograms.AddItem(tag, histogram_event)

  def _ProcessImage(self, tag, wall_time, step, image):
    """Processes an image by adding it to accumulated state."""
    event = ImageEvent(
        wall_time=wall_time,
        step=step,
        encoded_image_string=image.encoded_image_string,
        width=image.width,
        height=image.height
    )
    self._images.AddItem(tag, event)
def _GeneratorFromPath(path):
  """Create an event generator for file or directory at given path string."""
  loader_factory = event_file_loader.EventFileLoader
  if not gfile.IsDirectory(path):
    # A single events file: load it directly.
    return loader_factory(path)
  # A directory: watch it for (possibly several) events files.
  return directory_watcher.DirectoryWatcher(path, loader_factory,
                                            IsTensorFlowEventsFile)
| apache-2.0 |
Jenselme/servo | tests/wpt/css-tests/tools/py/py/_code/_assertionnew.py | 217 | 12384 | """
Find intermediate evalutation results in assert statements through builtin AST.
This should replace _assertionold.py eventually.
"""
import sys
import ast
import py
from py._code.assertion import _format_explanation, BuiltinAssertionError
if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
    # See http://bugs.jython.org/issue1497: old Jython releases lack the
    # common ast.expr/ast.stmt base classes, so node kinds must be
    # classified by explicitly enumerating every expression/statement class.
    _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
              "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
              "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
              "List", "Tuple")
    _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
              "AugAssign", "Print", "For", "While", "If", "With", "Raise",
              "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
              "Exec", "Global", "Expr", "Pass", "Break", "Continue")
    _expr_nodes = set(getattr(ast, name) for name in _exprs)
    _stmt_nodes = set(getattr(ast, name) for name in _stmts)
    def _is_ast_expr(node):
        # True if the node is one of the enumerated expression classes.
        return node.__class__ in _expr_nodes
    def _is_ast_stmt(node):
        # True if the node is one of the enumerated statement classes.
        return node.__class__ in _stmt_nodes
else:
    # CPython (and fixed Jython): the ast base classes can be used directly.
    def _is_ast_expr(node):
        return isinstance(node, ast.expr)
    def _is_ast_stmt(node):
        return isinstance(node, ast.stmt)
class Failure(Exception):
    """Error found while interpreting AST.

    Captures the currently-handled exception (via sys.exc_info()) together
    with the textual explanation built so far.
    """

    def __init__(self, explanation=""):
        self.explanation = explanation
        self.cause = sys.exc_info()
def interpret(source, frame, should_fail=False):
    """Re-evaluate *source* in *frame* and return a failure explanation."""
    tree = ast.parse(source)
    try:
        DebugInterpreter(frame).visit(tree)
    except Failure:
        return getfailure(sys.exc_info()[1])
    # The statement unexpectedly succeeded on re-execution.
    if should_fail:
        return ("(assertion failed, but when it was re-run for "
                "printing intermediate values, it did not fail. Suggestions: "
                "compute assert expression before the assert or use --no-assert)")
def run(offending_line, frame=None):
    """Interpret *offending_line*, defaulting to the caller's frame."""
    target = frame if frame is not None else py.code.Frame(sys._getframe(1))
    return interpret(offending_line, target)
def getfailure(failure):
    """Render a Failure as an assertion-error message string."""
    explanation = _format_explanation(failure.explanation)
    value = failure.cause[1]
    if str(value):
        # Append the exception value to the first explanation line.
        lines = explanation.splitlines() or [""]
        lines[0] += " << %s" % (value,)
        explanation = "\n".join(lines)
    text = "%s: %s" % (failure.cause[0].__name__, explanation)
    # Drop the redundant "AssertionError: " prefix (16 characters).
    if text.startswith("AssertionError: assert "):
        text = text[16:]
    return text
# Source-text spellings for binary/comparison operator AST node classes,
# used when rebuilding the textual explanation of a failing expression.
operator_map = {
    ast.BitOr : "|",
    ast.BitXor : "^",
    ast.BitAnd : "&",
    ast.LShift : "<<",
    ast.RShift : ">>",
    ast.Add : "+",
    ast.Sub : "-",
    ast.Mult : "*",
    ast.Div : "/",
    ast.FloorDiv : "//",
    ast.Mod : "%",
    ast.Eq : "==",
    ast.NotEq : "!=",
    ast.Lt : "<",
    ast.LtE : "<=",
    ast.Gt : ">",
    ast.GtE : ">=",
    ast.Pow : "**",
    ast.Is : "is",
    ast.IsNot : "is not",
    ast.In : "in",
    ast.NotIn : "not in"
}

# %-style templates for unary operator nodes; the operand's explanation is
# substituted into the placeholder.
unary_map = {
    ast.Not : "not %s",
    ast.Invert : "~%s",
    ast.USub : "-%s",
    ast.UAdd : "+%s"
}
class DebugInterpreter(ast.NodeVisitor):
    """Interpret AST nodes to glean useful debugging information.

    Re-evaluates sub-expressions of a failed assert inside the original
    frame, building a textual explanation of each intermediate value.
    Visitors return an ``(explanation, result)`` pair; evaluation errors
    are wrapped in :class:`Failure` carrying the explanation so far.
    """
    def __init__(self, frame):
        # Frame whose locals/globals are used to re-evaluate expressions.
        self.frame = frame
    def generic_visit(self, node):
        # Fallback when we don't have a special implementation.
        if _is_ast_expr(node):
            # Expressions are compiled in 'eval' mode and their repr is used
            # as the explanation.
            mod = ast.Expression(node)
            co = self._compile(mod)
            try:
                result = self.frame.eval(co)
            except Exception:
                raise Failure()
            explanation = self.frame.repr(result)
            return explanation, result
        elif _is_ast_stmt(node):
            # Statements are executed for their side effects only.
            mod = ast.Module([node])
            co = self._compile(mod, "exec")
            try:
                self.frame.exec_(co)
            except Exception:
                raise Failure()
            return None, None
        else:
            raise AssertionError("can't handle %s" %(node,))
    def _compile(self, source, mode="eval"):
        # Marker filename shows up in tracebacks from re-evaluated code.
        return compile(source, "<assertion interpretation>", mode)
    def visit_Expr(self, expr):
        return self.visit(expr.value)
    def visit_Module(self, mod):
        for stmt in mod.body:
            self.visit(stmt)
    def visit_Name(self, name):
        explanation, result = self.generic_visit(name)
        # See if the name is local.
        source = "%r in locals() is not globals()" % (name.id,)
        co = self._compile(source)
        try:
            local = self.frame.eval(co)
        except Exception:
            # have to assume it isn't
            local = False
        if not local:
            # Globals/builtins read fine as their bare name; only locals
            # need the evaluated repr as explanation.
            return name.id, result
        return explanation, result
    def visit_Compare(self, comp):
        left = comp.left
        left_explanation, left_result = self.visit(left)
        # Walk a (possibly chained) comparison left to right, stopping at
        # the first falsy link, mirroring Python's own semantics.
        for op, next_op in zip(comp.ops, comp.comparators):
            next_explanation, next_result = self.visit(next_op)
            op_symbol = operator_map[op.__class__]
            explanation = "%s %s %s" % (left_explanation, op_symbol,
                                        next_explanation)
            source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
            co = self._compile(source)
            try:
                result = self.frame.eval(co, __exprinfo_left=left_result,
                                         __exprinfo_right=next_result)
            except Exception:
                raise Failure(explanation)
            try:
                if not result:
                    break
            except KeyboardInterrupt:
                raise
            except:
                # Truthiness itself may raise for exotic objects; treat as
                # the end of the chain.
                break
            left_explanation, left_result = next_explanation, next_result
        # Allow a plugin hook (py.code._reprcompare) to supply a richer
        # comparison representation (e.g. diffs).
        rcomp = py.code._reprcompare
        if rcomp:
            res = rcomp(op_symbol, left_result, next_result)
            if res:
                explanation = res
        return explanation, result
    def visit_BoolOp(self, boolop):
        is_or = isinstance(boolop.op, ast.Or)
        explanations = []
        # Short-circuit exactly like 'and'/'or' would.
        for operand in boolop.values:
            explanation, result = self.visit(operand)
            explanations.append(explanation)
            if result == is_or:
                break
        name = is_or and " or " or " and "
        explanation = "(" + name.join(explanations) + ")"
        return explanation, result
    def visit_UnaryOp(self, unary):
        pattern = unary_map[unary.op.__class__]
        operand_explanation, operand_result = self.visit(unary.operand)
        explanation = pattern % (operand_explanation,)
        co = self._compile(pattern % ("__exprinfo_expr",))
        try:
            result = self.frame.eval(co, __exprinfo_expr=operand_result)
        except Exception:
            raise Failure(explanation)
        return explanation, result
    def visit_BinOp(self, binop):
        left_explanation, left_result = self.visit(binop.left)
        right_explanation, right_result = self.visit(binop.right)
        symbol = operator_map[binop.op.__class__]
        explanation = "(%s %s %s)" % (left_explanation, symbol,
                                      right_explanation)
        source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
        co = self._compile(source)
        try:
            result = self.frame.eval(co, __exprinfo_left=left_result,
                                     __exprinfo_right=right_result)
        except Exception:
            raise Failure(explanation)
        return explanation, result
    def visit_Call(self, call):
        func_explanation, func = self.visit(call.func)
        arg_explanations = []
        # Evaluated sub-values are passed into the re-evaluated call through
        # uniquely named __exprinfo_* bindings.
        ns = {"__exprinfo_func" : func}
        arguments = []
        for arg in call.args:
            arg_explanation, arg_result = self.visit(arg)
            arg_name = "__exprinfo_%s" % (len(ns),)
            ns[arg_name] = arg_result
            arguments.append(arg_name)
            arg_explanations.append(arg_explanation)
        for keyword in call.keywords:
            arg_explanation, arg_result = self.visit(keyword.value)
            arg_name = "__exprinfo_%s" % (len(ns),)
            ns[arg_name] = arg_result
            keyword_source = "%s=%%s" % (keyword.arg)
            arguments.append(keyword_source % (arg_name,))
            arg_explanations.append(keyword_source % (arg_explanation,))
        if call.starargs:
            arg_explanation, arg_result = self.visit(call.starargs)
            arg_name = "__exprinfo_star"
            ns[arg_name] = arg_result
            arguments.append("*%s" % (arg_name,))
            arg_explanations.append("*%s" % (arg_explanation,))
        if call.kwargs:
            arg_explanation, arg_result = self.visit(call.kwargs)
            arg_name = "__exprinfo_kwds"
            ns[arg_name] = arg_result
            arguments.append("**%s" % (arg_name,))
            arg_explanations.append("**%s" % (arg_explanation,))
        args_explained = ", ".join(arg_explanations)
        explanation = "%s(%s)" % (func_explanation, args_explained)
        args = ", ".join(arguments)
        source = "__exprinfo_func(%s)" % (args,)
        co = self._compile(source)
        try:
            result = self.frame.eval(co, **ns)
        except Exception:
            raise Failure(explanation)
        # Show the call's result alongside the call expression itself.
        pattern = "%s\n{%s = %s\n}"
        rep = self.frame.repr(result)
        explanation = pattern % (rep, rep, explanation)
        return explanation, result
    def _is_builtin_name(self, name):
        # A name absent from both globals and locals must be a builtin.
        pattern = "%r not in globals() and %r not in locals()"
        source = pattern % (name.id, name.id)
        co = self._compile(source)
        try:
            return self.frame.eval(co)
        except Exception:
            return False
    def visit_Attribute(self, attr):
        if not isinstance(attr.ctx, ast.Load):
            # Stores/deletes are handled generically.
            return self.generic_visit(attr)
        source_explanation, source_result = self.visit(attr.value)
        explanation = "%s.%s" % (source_explanation, attr.attr)
        source = "__exprinfo_expr.%s" % (attr.attr,)
        co = self._compile(source)
        try:
            result = self.frame.eval(co, __exprinfo_expr=source_result)
        except Exception:
            raise Failure(explanation)
        explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
                                              self.frame.repr(result),
                                              source_explanation, attr.attr)
        # Check if the attr is from an instance.
        source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
        source = source % (attr.attr,)
        co = self._compile(source)
        try:
            from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
        except Exception:
            from_instance = True
        if from_instance:
            rep = self.frame.repr(result)
            pattern = "%s\n{%s = %s\n}"
            explanation = pattern % (rep, rep, explanation)
        return explanation, result
    def visit_Assert(self, assrt):
        test_explanation, test_result = self.visit(assrt.test)
        if test_explanation.startswith("False\n{False =") and \
           test_explanation.endswith("\n"):
            # Strip the redundant "False = ..." wrapper around the test.
            test_explanation = test_explanation[15:-2]
        explanation = "assert %s" % (test_explanation,)
        if not test_result:
            try:
                raise BuiltinAssertionError
            except Exception:
                raise Failure(explanation)
        return explanation, test_result
    def visit_Assign(self, assign):
        value_explanation, value_result = self.visit(assign.value)
        explanation = "... = %s" % (value_explanation,)
        # Re-run the assignment with the already-evaluated RHS substituted
        # in, so the targets are bound exactly once.
        name = ast.Name("__exprinfo_expr", ast.Load(),
                        lineno=assign.value.lineno,
                        col_offset=assign.value.col_offset)
        new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
                                col_offset=assign.col_offset)
        mod = ast.Module([new_assign])
        co = self._compile(mod, "exec")
        try:
            self.frame.exec_(co, __exprinfo_expr=value_result)
        except Exception:
            raise Failure(explanation)
        return explanation, value_result
| mpl-2.0 |
tturowski/gwide | gwide/gwidePlot.py | 1 | 8934 | #!/usr/bin/env python
import argparse
from argparse import RawTextHelpFormatter
import os
import gwide.methods as gtm
from gwide.Classes.gwidePlot_class import GenomeWidePlot
def plot():
    """Command-line entry point for gwide plotting.

    Works with the concat file generated by the pileupsToConcat.py script:
    parses the CLI options, reads the concat/CSV input into a
    GenomeWidePlot object and dispatches to the plotting / table / GTF
    routine selected with -o. Can plot introns and peaks found by the
    pypeaks script.
    """
    # --- setup option parser ---
    usage = "Usage: gwide function -i input -o output [options]"
    parser = argparse.ArgumentParser(usage=usage)
    files = parser.add_argument_group('Options for input files')
    files.add_argument("-g", "--gtf_file", dest="gtf_file", help="Provide the path to your gtf file.",
                       type=str, default=None)
    files.add_argument("-i", "--input_file", dest="input_file", help="Provide the path to your input file. Required.",
                       metavar="FILE", default=None, required=True)
    # files.add_argument("--input_type", dest="input_type", choices=['concat'], help="Type of input file. Default: concat",
    #                    type=str, default='concat')
    files.add_argument("--5flank", dest="five_prime_flank", type=int, help="Set up 5 prime flank. Default = 250", default=250)
    files.add_argument("--3flank", dest="three_prime_flank", type=int, help="Set up 3 prime flank. Default = 250", default=250)
    files.add_argument("-l", "--list_file", dest="list_file", help="Provide the path to your (tab) file genes.list. Only listed genes will be plotted. Can be aligned as second column",
                       type=str)
    peaks = parser.add_argument_group('Option for peaks finder (pypeaks')
    peaks.add_argument("--lookahead", dest="lookahead", type=int, help="Set up lookahead parameter for pypeaks function. Default = 20", default=20)
    universal = parser.add_argument_group('Universal options')
    universal.add_argument("-t", "--hits_threshold", dest="hits_threshold", type=int, help="Set up threshold for pileup. Default 100 reads. Genes with highest peak below are not included",
                           default=0)
    universal.add_argument("-r", "--readthrough", dest="readthrough", type=int, help="Set up nt when readthrough should start countin. Default: 15",
                           default=15)
    universal.add_argument("-n", "--normalized", dest="normalized", action="store_true", help="to work on data normalized 'reads per Milion'. Default: False", default=False)
    output = parser.add_argument_group('Options for output files')
    output.add_argument("-p", "--prefix", dest="out_prefix", type=str, help="Prefix for output files. Default to standard output. Not supported for -o ratio.", default=None)
    output.add_argument("-o", "--output", dest="output", choices=["std", "ratio", "aligner", "RTendalign", "table", "Tdensity", "makeGTF", "transcript_length", "makeRTGTF"], help="Select from following options:"+'\n'
                        "(1) std - 5` and 3` end aligned only; (2) ratio - plot gwide ratio a exp / b exp"+'\n'
                        "(3) aligner - std plus chosen aligner from file (-l option)"+'\n'
                        "(4) RTendalign - std and aligned to 3` end of read-through (-l option). -e works to choose experiment to align and filter"+'\n'
                        "(5) table - make *.csv file to plot heatmaps; (6) Tdensity - calculate p-value for non-canonical termination"+'\n'
                        "(7) makeGTF - make GTF file with transcripts length ; (8) transcript_length - save *.txt file with trancripts length for all experiment; "
                        "(9) makeRTGTF - make GTF with tRNA extensions only", default="std")
    special = parser.add_argument_group('Special options for some -o choices')
    special.add_argument("--ntotal", dest="ntotal", action="store_true", help="Normalize to sum of all reads (sum = 1). Default: False", default=False)
    special.add_argument("--nmax", dest="nmax", action="store_true", help="Normalize to maximal value (max = 1). Default: False", default=False)
    special.add_argument("--publish", dest="publish", action="store_true", help="Print plots as separate figures in publication quality. Works with -o ratio and std", default=False)
    special.add_argument("--LRR", dest="left_right_ratio", action="store_true", help="Print ratio between left and right part of the metaprofiles (before and after aligning line)", default=False)
    special.add_argument("-f", dest="filter", type=str, help="Filter in results factor_above_value; type i.e. RT_above_0.25 or a_below_1.5. To chose: RT, a, b, i, e, f, intron", default=None)
    special.add_argument("-e", dest="experiment", type=str, help="Filter according to values from one experiment only", default=None)
    special.add_argument("-a", dest="to_divide", type=str, help="experiment to divide by -b (-o ratio)",
                         default=None)
    special.add_argument("-b", dest="divisor", type=str, help="experiment being divisor for -a (-o ratio)", default=None)
    special.add_argument("--select", dest="select", type=str, help="To print additional plot with selecter area and no titles keep form 200_300 (range from 200 to 300)", default=None)
    special.add_argument("--peak_min", dest="peak_min", type=int, help="minimum of peak average for -o Tdensity. Default = 300", default=300)
    special.add_argument("--peak_size", dest="peak_size", type=int, help="peak size for -o Tdensity. Default = 20", default=20)
    options = parser.parse_args()

    gtf_file = gtm.getGTF(options.gtf_file)
    list_file = options.list_file

    # --- preparing naming of output files ---
    # Output names encode the readthrough start, lookahead and threshold
    # so runs with different parameters do not overwrite each other.
    if options.out_prefix:
        prefix = options.out_prefix+'_'
        filename = options.out_prefix+'_rt'+str(options.readthrough)+'_l'+str(options.lookahead)+'_t'+str(options.hits_threshold)+'.list'
    else:
        prefix = str()
        filename = 'rt'+str(options.readthrough)+'_l'+str(options.lookahead)+'_t'+str(options.hits_threshold)+'.list'
    if options.normalized == True:
        prefix = 'nRpM_'+prefix

    # Build the plotting object that holds flanks, thresholds and flags.
    data = GenomeWidePlot(gtf_file=gtf_file, five_prime_flank=options.five_prime_flank, readthrough_start=options.readthrough,
                          three_prime_flank=options.three_prime_flank, hits_threshold=options.hits_threshold, lookahead=options.lookahead,
                          prefix=prefix, normalized=options.normalized, publish=options.publish, left_right_ratio=options.left_right_ratio)

    # --- setting up dependencies ---
    # Ratio plots only make sense on normalized data, so force the flag.
    if options.output == "ratio":
        options.normalized = True

    # --- reading csv file ---
    data.read_csv(options.input_file, skip_nucleotide=True)

    # --- plotting: dispatch on the chosen -o mode ---
    if options.output == 'std':
        data.calculate(details=False, ntotal=True, nmax=True)
        data.std(filter=options.filter, experiment_to_filter=options.experiment)
        if options.ntotal == True:
            data.std(filter=options.filter, experiment_to_filter=options.experiment, exp_to_use='_ntotal')
        if options.nmax == True:
            data.std(filter=options.filter, experiment_to_filter=options.experiment, exp_to_use='_nmax')

    if options.output == 'aligner':
        # Aligning requires the gene list that carries the alignment column.
        if not list_file:
            print "Please provide path how to align files using -l file.list"
        else:
            data.calculate(details=True, ntotal=False, nmax=False)
            data.read_list(list_file)
            data.aligner(file=os.path.basename(list_file), filter=options.filter, experiment_to_filter=options.experiment)

    if options.output == 'RTendalign':
        data.calculate(details=True, ntotal=False, nmax=False)
        data.RT_aligner(filter=options.filter, experiment_to_align=options.experiment)

    if options.output == "ratio":
        # pscounts adds pseudocounts so the division is defined everywhere.
        data.calculate(details=False, ntotal=True, nmax=True, pscounts=True)
        if options.ntotal == True:
            data.ratio(to_divide=options.to_divide, divisor=options.divisor, exp_to_use='_ntotal', filter=options.filter)
            if options.select:
                data.ratio(to_divide=options.to_divide, divisor=options.divisor, exp_to_use='_ntotal', select=options.select, filter=options.filter)
        if options.nmax == True:
            data.ratio(to_divide=options.to_divide, divisor=options.divisor, exp_to_use='_nmax', filter=options.filter)
        data.ratio(to_divide=options.to_divide, divisor=options.divisor, filter=options.filter)

    if options.output == "makeRTGTF":
        data.find_peaks()
        data.makeRTGTF()

    if options.output == "table":
        data.table(filter=options.filter, experiment_to_filter=options.experiment)

    if options.output == "Tdensity":
        data.find_peaks()
        # data.calculate()
        data.Tdensity(peak_min=options.peak_min, size=options.peak_size)

    if options.output == "makeGTF":
        data.find_peaks()
        data.maketranscriptGTF()

    if options.output == "transcript_length":
        data.find_peaks()
        data.printTrancriptLength()

    print '# Done.'
| apache-2.0 |
gauribhoite/personfinder | env/google_appengine/lib/django-1.5/django/contrib/admin/templatetags/admin_modify.py | 101 | 2428 | from django import template
# Registry that the @register.inclusion_tag / @register.filter decorators
# below attach their template tags and filters to.
register = template.Library()
@register.inclusion_tag('admin/prepopulated_fields_js.html', takes_context=True)
def prepopulated_fields_js(context):
    """
    Collects the prepopulated_fields that should render Javascript, for
    both the main admin form and its inlines, into the template context.
    """
    fields = []
    # The main form's prepopulated fields only apply on the add page.
    if context['add'] and 'adminform' in context:
        fields.extend(context['adminform'].prepopulated_fields)
    if 'inline_admin_formsets' in context:
        # Only new (unsaved) inline forms contribute prepopulated fields.
        for formset in context['inline_admin_formsets']:
            for inline_form in formset:
                if inline_form.original is None:
                    fields.extend(inline_form.prepopulated_fields)
    context.update({'prepopulated_fields': fields})
    return context
@register.inclusion_tag('admin/submit_line.html', takes_context=True)
def submit_row(context):
    """
    Displays the row of buttons for delete and save.
    """
    opts = context['opts']
    change = context['change']
    is_popup = context['is_popup']
    save_as = context['save_as']
    # The order-form submit hook is only attached when editing an ordered
    # model on a change page.
    if opts.get_ordered_objects() and change:
        onclick_attrib = 'onclick="submitOrderForm();"'
    else:
        onclick_attrib = ''
    # Deletion is offered only on a non-popup change page, when permitted
    # and not explicitly suppressed via context['show_delete'].
    show_delete_link = (not is_popup and context['has_delete_permission']
                        and change and context.get('show_delete', True))
    ctx = {
        'opts': opts,
        'onclick_attrib': onclick_attrib,
        'show_delete_link': show_delete_link,
        'show_save_as_new': not is_popup and change and save_as,
        'show_save_and_add_another': (context['has_add_permission'] and
                                      not is_popup and
                                      (not save_as or context['add'])),
        'show_save_and_continue': (not is_popup and
                                   context['has_change_permission']),
        'is_popup': is_popup,
        'show_save': True,
    }
    original = context.get('original')
    if original is not None:
        ctx['original'] = original
    return ctx
@register.filter
def cell_count(inline_admin_form):
    """Returns the number of cells used in a tabular inline"""
    # One hidden cell always carries the hidden 'id' field.
    total = 1
    for fieldset in inline_admin_form:
        for line in fieldset:
            # Each field in the line occupies one table cell.
            total += sum(1 for field in line)
    if inline_admin_form.formset.can_delete:
        # One extra cell for the delete checkbox.
        total += 1
    return total
| apache-2.0 |
smartdata-x/robots | pylib/Twisted/twisted/cred/portal.py | 60 | 5339 | # -*- test-case-name: twisted.test.test_newcred -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
The point of integration of application and authentication.
"""
from twisted.internet import defer
from twisted.internet.defer import maybeDeferred
from twisted.python import failure, reflect
from twisted.cred import error
from zope.interface import providedBy, Interface
class IRealm(Interface):
    """
    The realm connects application-specific objects to the
    authentication system.
    """
    # NOTE: this is a zope.interface declaration, so methods are declared
    # without a ``self`` argument.
    def requestAvatar(avatarId, mind, *interfaces):
        """
        Return avatar which provides one of the given interfaces.

        @param avatarId: a string that identifies an avatar, as returned by
            L{ICredentialsChecker.requestAvatarId<twisted.cred.checkers.ICredentialsChecker.requestAvatarId>}
            (via a Deferred).  Alternatively, it may be
            C{twisted.cred.checkers.ANONYMOUS}.
        @param mind: usually None.  See the description of mind in
            L{Portal.login}.
        @param interfaces: the interface(s) the returned avatar should
            implement, e.g.  C{IMailAccount}.  See the description of
            L{Portal.login}.
        @returns: a deferred which will fire a tuple of (interface,
            avatarAspect, logout), or the tuple itself.  The interface will
            be one of the interfaces passed in the 'interfaces' argument.
            The 'avatarAspect' will implement that interface.  The 'logout'
            object is a callable which will detach the mind from the avatar.
        """
class Portal:
    """
    A mediator between clients and a realm.

    A portal is associated with one Realm and zero or more credentials
    checkers.  When a login is attempted, the portal finds the appropriate
    credentials checker for the credentials given, invokes it, and if the
    credentials are valid, retrieves the appropriate avatar from the Realm.

    This class is not intended to be subclassed.  Customization should be
    done in the realm object and in the credentials checker objects.
    """

    def __init__(self, realm, checkers=()):
        """
        Create a Portal to a L{IRealm}.

        @param realm: the realm avatars are requested from.
        @param checkers: an optional iterable of credentials checkers that
            are registered immediately.
        """
        self.realm = realm
        self.checkers = {}
        for checker in checkers:
            self.registerChecker(checker)

    def listCredentialsInterfaces(self):
        """
        Return list of credentials interfaces that can be used to login.
        """
        return self.checkers.keys()

    def registerChecker(self, checker, *credentialInterfaces):
        """
        Register a credentials checker for one or more credential
        interfaces.  When no interfaces are given explicitly, the ones the
        checker itself advertises (C{checker.credentialInterfaces}) are
        used.
        """
        interfaces = credentialInterfaces or checker.credentialInterfaces
        for credentialInterface in interfaces:
            self.checkers[credentialInterface] = checker

    def login(self, credentials, mind, *interfaces):
        """
        Authenticate the given credentials and request an avatar.

        @param credentials: an implementor of
            L{twisted.cred.credentials.ICredentials}.
        @param mind: an object which implements a client-side interface
            for your particular realm.  In many cases this may be None, so
            if the word 'mind' confuses you, just ignore it.
        @param interfaces: list of interfaces for the perspective that the
            mind wishes to attach to, e.g. (IMailAccount,).  Whatever the
            realm returns must implement one of these interfaces.
        @return: A deferred which will fire a tuple of (interface,
            avatarAspect, logout).  The interface will be one of the
            interfaces passed in the 'interfaces' argument.  The
            'avatarAspect' will implement that interface.  The 'logout'
            object is a callable which will detach the mind from the
            avatar; it must be called when the user has conceptually
            disconnected from the service.
        """
        # Find the first registered checker whose credential interface the
        # supplied credentials provide, and let it produce the avatar id.
        for credentialInterface in self.checkers:
            if credentialInterface.providedBy(credentials):
                checker = self.checkers[credentialInterface]
                d = maybeDeferred(checker.requestAvatarId, credentials)
                return d.addCallback(
                    self.realm.requestAvatar, mind, *interfaces)
        # No checker matched: fail with a description of what was offered.
        ifac = providedBy(credentials)
        names = ', '.join(map(reflect.qual, ifac))
        return defer.fail(failure.Failure(
            error.UnhandledCredentials("No checker for %s" % names)))
| apache-2.0 |
semanticize/semanticizer | semanticizer/wpm/namespace.py | 3 | 5589 | # Copyright 2012-2013, University of Amsterdam. This program is free software:
# you can redistribute it and/or modify it under the terms of the GNU Lesser
# General Public License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class WpmNS:
    """Builds the key names used by the Wikipedia-Miner key/value store.

    Every key is a colon-separated path beginning with the language code;
    version-dependent keys embed the cache version right after it.
    """

    def __init__(self, db, langcode, version=None):
        # Separator between key segments.
        self.sep = ':'
        self.lc = langcode
        self.db = db
        # When given, overrides the version looked up from the database.
        self.manual_version = version

    def _versioned(self, *parts):
        """Join *parts* beneath the '<langcode>:<version>' prefix."""
        return self.sep.join((self.lc, self.version()) + parts)

    def version(self):
        """Return the cache version: the manual override when set,
        otherwise the value stored under db_version().

        Raises Exception when neither source yields a version.
        """
        if self.manual_version:
            return self.manual_version
        stored = self.db.get(self.db_version())
        if not stored:
            raise Exception("No database version")
        return stored

    def db_version(self):
        """key <langcode>:db:version -> string(cache version)"""
        return self.sep.join((self.lc, "db", "version"))

    def wiki_language_name(self):
        """key <langcode>:<version>:wiki:lname -> string(wiki name)"""
        return self._versioned("wiki", "lname")

    def wiki_path(self):
        """key <langcode>:<version>:wiki:path -> string(wiki path)"""
        return self._versioned("wiki", "path")

    def wiki_stats(self, statName):
        """key <langcode>:<version>:wiki:stats:<statName> -> string(stats)"""
        return self._versioned("wiki", "stats", statName)

    def label(self, name):
        """key <langcode>:<version>:label:<name> ->
        list(LinkOccCount, LinkDocCount, TextOccCount, TextDocCount,
        SenseId, SenseId, ...)"""
        return self._versioned("label", name)

    def label_sense(self, name, senseid):
        """key <langcode>:<version>:label:<name>:<senseid> ->
        list(sLinkDocCount, sLinkOccCount, FromTitle, FromRedirect)"""
        return self._versioned("label", name, senseid)

    def normalized(self, name):
        """key <langcode>:<version>:norm:<name> -> set(name, name, ...)"""
        return self._versioned("norm", name)

    def translation_sense(self, senseid):
        """key <langcode>:<version>:trnsl:<senseid> ->
        list(langcode, langcode, ...)"""
        return self._versioned("trnsl", senseid)

    def translation_sense_language(self, senseid, langcode):
        """key <langcode>:<version>:trnsl:<senseid>:<langcode> ->
        string(name)"""
        return self._versioned("trnsl", senseid, langcode)

    def page_id(self, name):
        """key <langcode>:<version>:page:id:<name> -> string(id)"""
        return self._versioned("page", "id", name)

    def page_title(self, pageid):
        """key <langcode>:<version>:page:<pageid>:name -> string(name)"""
        return self._versioned("page", pageid, "name")

    def page_labels(self, pageid):
        """key <langcode>:<version>:page:<pageid>:labels ->
        list(json([title, occurances, fromRedirect, fromTitle, isPrimary,
        proportion]), ...)"""
        return self._versioned("page", pageid, "labels")

    def page_definition(self, pageid):
        """key <langcode>:<version>:page:<pageid>:definition ->
        string(synopsis)"""
        return self._versioned("page", pageid, "definition")

    def page_inlinks(self, pageid):
        """key <langcode>:<version>:page:<pageid>:inlinks ->
        list(pageid, pageid, ...)"""
        return self._versioned("page", pageid, "inlinks")

    def page_outlinks(self, pageid):
        """key <langcode>:<version>:page:<pageid>:outlinks ->
        list(pageid, pageid, ...)"""
        return self._versioned("page", pageid, "outlinks")

    def page_categories(self, pageid):
        """key <langcode>:<version>:page:<pageid>:categories ->
        list(category, category, ...)"""
        return self._versioned("page", pageid, "categories")

    def ngramscore(self, n):
        """key <langcode>:<version>:<n>grms -> zset(words{score}, ...)"""
        return self._versioned("%sgrms" % n)
| gpl-3.0 |
szeged/servo | tests/wpt/web-platform-tests/tools/third_party/h2/examples/twisted/twisted-server.py | 25 | 5431 | # -*- coding: utf-8 -*-
"""
twisted-server.py
~~~~~~~~~~~~~~~~~
A fully-functional HTTP/2 server written for Twisted.
"""
import functools
import mimetypes
import os
import os.path
import sys
from OpenSSL import crypto
from twisted.internet.defer import Deferred, inlineCallbacks
from twisted.internet.protocol import Protocol, Factory
from twisted.internet import endpoints, reactor, ssl
from h2.config import H2Configuration
from h2.connection import H2Connection
from h2.events import (
RequestReceived, DataReceived, WindowUpdated
)
def close_file(fileobj, failure_or_result):
    """Close *fileobj*; used as a Deferred errback for file sends.

    Registered via ``functools.partial(close_file, f)``, so the second
    positional argument receives the Failure.  It is not returned, so the
    error does not propagate further down the errback chain —
    NOTE(review): confirm swallowing the failure is intended.

    Renamed the first parameter from ``file`` to avoid shadowing the
    builtin; callers pass both arguments positionally, so this is
    backward-compatible.
    """
    fileobj.close()


# Number of bytes read from disk per chunk when streaming a file.
READ_CHUNK_SIZE = 8192
class H2Protocol(Protocol):
    """A Twisted protocol that serves static GET requests over HTTP/2.

    One instance handles one connection; the h2 state machine held in
    ``self.conn`` multiplexes the individual request streams.
    """

    def __init__(self, root):
        config = H2Configuration(client_side=False)
        self.conn = H2Connection(config=config)
        # Flipped to True on the first dataReceived call.
        self.known_proto = None
        # Directory against which request paths are resolved.
        self.root = root
        # Maps stream_id -> Deferred fired when that stream's (or the
        # connection's) flow-control window is opened.
        self._flow_control_deferreds = {}

    def connectionMade(self):
        """Start the HTTP/2 handshake and flush the connection preamble."""
        self.conn.initiate_connection()
        self.transport.write(self.conn.data_to_send())

    def dataReceived(self, data):
        """Feed incoming bytes to h2 and dispatch the resulting events."""
        if not self.known_proto:
            self.known_proto = True

        events = self.conn.receive_data(data)
        # BUG FIX: the original tested the bound method
        # ``self.conn.data_to_send`` (always truthy) instead of calling
        # it; call it once and only write when there is actual output.
        outbound = self.conn.data_to_send()
        if outbound:
            self.transport.write(outbound)

        for event in events:
            if isinstance(event, RequestReceived):
                self.requestReceived(event.headers, event.stream_id)
            elif isinstance(event, DataReceived):
                self.dataFrameReceived(event.stream_id)
            elif isinstance(event, WindowUpdated):
                self.windowUpdated(event)

    def requestReceived(self, headers, stream_id):
        """Answer a GET request with the file it names, or a 404."""
        # NOTE(review): collapsing to a dict drops duplicate header names
        # (the original marked this "Invalid conversion, fix later").
        headers = dict(headers)
        assert headers[b':method'] == b'GET'

        path = headers[b':path'].lstrip(b'/')
        full_path = os.path.join(self.root, path)

        if not os.path.exists(full_path):
            response_headers = (
                (':status', '404'),
                ('content-length', '0'),
                ('server', 'twisted-h2'),
            )
            self.conn.send_headers(
                stream_id, response_headers, end_stream=True
            )
            self.transport.write(self.conn.data_to_send())
        else:
            self.sendFile(full_path, stream_id)

        return

    def dataFrameReceived(self, stream_id):
        """Reject request bodies by resetting the sending stream."""
        self.conn.reset_stream(stream_id)
        self.transport.write(self.conn.data_to_send())

    def sendFile(self, file_path, stream_id):
        """Send response headers for *file_path*, then stream its bytes."""
        filesize = os.stat(file_path).st_size
        content_type, content_encoding = mimetypes.guess_type(file_path)
        response_headers = [
            (':status', '200'),
            ('content-length', str(filesize)),
            ('server', 'twisted-h2'),
        ]
        if content_type:
            response_headers.append(('content-type', content_type))
        if content_encoding:
            response_headers.append(('content-encoding', content_encoding))

        self.conn.send_headers(stream_id, response_headers)
        self.transport.write(self.conn.data_to_send())

        f = open(file_path, 'rb')
        d = self._send_file(f, stream_id)
        # Ensure the file is closed even if sending fails mid-stream.
        d.addErrback(functools.partial(close_file, f))

    def windowUpdated(self, event):
        """
        Handle a WindowUpdated event by firing any waiting data sending
        callbacks.
        """
        stream_id = event.stream_id

        if stream_id and stream_id in self._flow_control_deferreds:
            # Window opened on one specific stream.
            d = self._flow_control_deferreds.pop(stream_id)
            d.callback(event.delta)
        elif not stream_id:
            # Connection-level update: wake every waiting stream.
            for d in self._flow_control_deferreds.values():
                d.callback(event.delta)

            self._flow_control_deferreds = {}

        return

    @inlineCallbacks
    def _send_file(self, fileobj, stream_id):
        """
        Stream *fileobj* on *stream_id*, yielding whenever flow control
        forbids sending; a short read signals EOF, which ends the stream
        and closes the file.  (A redundant trailing ``break`` from the
        original was removed; the loop condition already terminates it.)
        """
        keep_reading = True
        while keep_reading:
            # Wait (asynchronously) until the peer grants us some window.
            while not self.conn.remote_flow_control_window(stream_id):
                yield self.wait_for_flow_control(stream_id)

            chunk_size = min(
                self.conn.remote_flow_control_window(stream_id),
                READ_CHUNK_SIZE
            )
            data = fileobj.read(chunk_size)
            keep_reading = len(data) == chunk_size
            self.conn.send_data(stream_id, data, not keep_reading)
            self.transport.write(self.conn.data_to_send())

        fileobj.close()

    def wait_for_flow_control(self, stream_id):
        """
        Returns a Deferred that fires when the flow control window is opened.
        """
        d = Deferred()
        self._flow_control_deferreds[stream_id] = d
        return d
class H2Factory(Factory):
    """Protocol factory that spawns one H2Protocol per connection."""

    def __init__(self, root):
        # Directory whose contents the spawned protocols will serve.
        self.root = root

    def buildProtocol(self, addr):
        """Return a fresh H2Protocol bound to this factory's root."""
        return H2Protocol(self.root)
# Directory to serve, given as the first command-line argument.
root = sys.argv[1]

# Load the PEM-encoded certificate and private key from the working dir.
with open('server.crt', 'r') as f:
    cert_data = f.read()
with open('server.key', 'r') as f:
    key_data = f.read()

cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_data)
key = crypto.load_privatekey(crypto.FILETYPE_PEM, key_data)
# Advertise only HTTP/2 ("h2") during the TLS handshake.
options = ssl.CertificateOptions(
    privateKey=key,
    certificate=cert,
    acceptableProtocols=[b'h2'],
)

# Listen for TLS connections on port 8080 and run the reactor forever.
endpoint = endpoints.SSL4ServerEndpoint(reactor, 8080, options, backlog=128)
endpoint.listen(H2Factory(root))
reactor.run()
| mpl-2.0 |
bsmrstu-warriors/Moytri--The-Drone-Aider | Lib/subprocess.py | 41 | 54544 | # subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.
r"""subprocess - Subprocesses with accessible I/O streams
This module allows you to spawn processes, connect to their
input/output/error pipes, and obtain their return codes. This module
intends to replace several other, older modules and functions, like:
os.system
os.spawn*
os.popen*
popen2.*
commands.*
Information about how the subprocess module can be used to replace these
modules and functions can be found below.
Using the subprocess module
===========================
This module defines one class called Popen:
class Popen(args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
Arguments are:
args should be a string, or a sequence of program arguments. The
program to execute is normally the first item in the args sequence or
string, but can be explicitly set by using the executable argument.
On UNIX, with shell=False (default): In this case, the Popen class
uses os.execvp() to execute the child program. args should normally
be a sequence. A string will be treated as a sequence with the string
as the only item (the program to execute).
On UNIX, with shell=True: If args is a string, it specifies the
command string to execute through the shell. If args is a sequence,
the first item specifies the command string, and any additional items
will be treated as additional shell arguments.
On Windows: the Popen class uses CreateProcess() to execute the child
program, which operates on strings. If args is a sequence, it will be
converted to a string using the list2cmdline method. Please note that
not all MS Windows applications interpret the command line the same
way: The list2cmdline is designed for applications using the same
rules as the MS C runtime.
bufsize, if given, has the same meaning as the corresponding argument
to the built-in open() function: 0 means unbuffered, 1 means line
buffered, any other positive value means use a buffer of
(approximately) that size. A negative bufsize means to use the system
default, which usually means fully buffered. The default value for
bufsize is 0 (unbuffered).
stdin, stdout and stderr specify the executed programs' standard
input, standard output and standard error file handles, respectively.
Valid values are PIPE, an existing file descriptor (a positive
integer), an existing file object, and None. PIPE indicates that a
new pipe to the child should be created. With None, no redirection
will occur; the child's file handles will be inherited from the
parent. Additionally, stderr can be STDOUT, which indicates that the
stderr data from the applications should be captured into the same
file handle as for stdout.
If preexec_fn is set to a callable object, this object will be called
in the child process just before the child is executed.
If close_fds is true, all file descriptors except 0, 1 and 2 will be
closed before the child process is executed.
if shell is true, the specified command will be executed through the
shell.
If cwd is not None, the current directory will be changed to cwd
before the child is executed.
If env is not None, it defines the environment variables for the new
process.
If universal_newlines is true, the file objects stdout and stderr are
opened as a text files, but lines may be terminated by any of '\n',
the Unix end-of-line convention, '\r', the Macintosh convention or
'\r\n', the Windows convention. All of these external representations
are seen as '\n' by the Python program. Note: This feature is only
available if Python is built with universal newline support (the
default). Also, the newlines attribute of the file objects stdout,
stdin and stderr are not updated by the communicate() method.
The startupinfo and creationflags, if given, will be passed to the
underlying CreateProcess() function. They can specify things such as
appearance of the main window and priority for the new process.
(Windows only)
This module also defines some shortcut functions:
call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete, then
return the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
retcode = call(["ls", "-l"])
check_call(*popenargs, **kwargs):
Run command with arguments. Wait for command to complete. If the
exit code was zero then return, otherwise raise
CalledProcessError. The CalledProcessError object will have the
return code in the returncode attribute.
The arguments are the same as for the Popen constructor. Example:
check_call(["ls", "-l"])
check_output(*popenargs, **kwargs):
Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
output = check_output(["ls", "-l", "/dev/null"])
Exceptions
----------
Exceptions raised in the child process, before the new program has
started to execute, will be re-raised in the parent. Additionally,
the exception object will have one extra attribute called
'child_traceback', which is a string containing traceback information
from the child's point of view.
The most common exception raised is OSError. This occurs, for
example, when trying to execute a non-existent file. Applications
should prepare for OSErrors.
A ValueError will be raised if Popen is called with invalid arguments.
check_call() and check_output() will raise CalledProcessError, if the
called process returns a non-zero return code.
Security
--------
Unlike some other popen functions, this implementation will never call
/bin/sh implicitly. This means that all characters, including shell
metacharacters, can safely be passed to child processes.
Popen objects
=============
Instances of the Popen class have the following methods:
poll()
Check if child process has terminated. Returns returncode
attribute.
wait()
Wait for child process to terminate. Returns returncode attribute.
communicate(input=None)
Interact with process: Send data to stdin. Read data from stdout
and stderr, until end-of-file is reached. Wait for process to
terminate. The optional input argument should be a string to be
sent to the child process, or None, if no data should be sent to
the child.
communicate() returns a tuple (stdout, stderr).
Note: The data read is buffered in memory, so do not use this
method if the data size is large or unlimited.
The following attributes are also available:
stdin
If the stdin argument is PIPE, this attribute is a file object
that provides input to the child process. Otherwise, it is None.
stdout
If the stdout argument is PIPE, this attribute is a file object
that provides output from the child process. Otherwise, it is
None.
stderr
If the stderr argument is PIPE, this attribute is file object that
provides error output from the child process. Otherwise, it is
None.
pid
The process ID of the child process.
returncode
The child return code. A None value indicates that the process
hasn't terminated yet. A negative value -N indicates that the
child was terminated by signal N (UNIX only).
Replacing older functions with the subprocess module
====================================================
In this section, "a ==> b" means that b can be used as a replacement
for a.
Note: All functions in this section fail (more or less) silently if
the executed program cannot be found; this module raises an OSError
exception.
In the following examples, we assume that the subprocess module is
imported with "from subprocess import *".
Replacing /bin/sh shell backquote
---------------------------------
output=`mycmd myarg`
==>
output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
Replacing shell pipe line
-------------------------
output=`dmesg | grep hda`
==>
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
output = p2.communicate()[0]
Replacing os.system()
---------------------
sts = os.system("mycmd" + " myarg")
==>
p = Popen("mycmd" + " myarg", shell=True)
pid, sts = os.waitpid(p.pid, 0)
Note:
* Calling the program through the shell is usually not required.
* It's easier to look at the returncode attribute than the
exitstatus.
A more real-world example would look like this:
try:
retcode = call("mycmd" + " myarg", shell=True)
if retcode < 0:
print >>sys.stderr, "Child was terminated by signal", -retcode
else:
print >>sys.stderr, "Child returned", retcode
except OSError, e:
print >>sys.stderr, "Execution failed:", e
Replacing os.spawn*
-------------------
P_NOWAIT example:
pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
==>
pid = Popen(["/bin/mycmd", "myarg"]).pid
P_WAIT example:
retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
==>
retcode = call(["/bin/mycmd", "myarg"])
Vector example:
os.spawnvp(os.P_NOWAIT, path, args)
==>
Popen([path] + args[1:])
Environment example:
os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
==>
Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
Replacing os.popen*
-------------------
pipe = os.popen("cmd", mode='r', bufsize)
==>
pipe = Popen("cmd", shell=True, bufsize=bufsize, stdout=PIPE).stdout
pipe = os.popen("cmd", mode='w', bufsize)
==>
pipe = Popen("cmd", shell=True, bufsize=bufsize, stdin=PIPE).stdin
(child_stdin, child_stdout) = os.popen2("cmd", mode, bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
(child_stdin,
child_stdout,
child_stderr) = os.popen3("cmd", mode, bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
(child_stdin,
child_stdout,
child_stderr) = (p.stdin, p.stdout, p.stderr)
(child_stdin, child_stdout_and_stderr) = os.popen4("cmd", mode,
bufsize)
==>
p = Popen("cmd", shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
On Unix, os.popen2, os.popen3 and os.popen4 also accept a sequence as
the command to execute, in which case arguments will be passed
directly to the program without shell intervention. This usage can be
replaced as follows:
(child_stdin, child_stdout) = os.popen2(["/bin/ls", "-l"], mode,
bufsize)
==>
p = Popen(["/bin/ls", "-l"], bufsize=bufsize, stdin=PIPE, stdout=PIPE)
(child_stdin, child_stdout) = (p.stdin, p.stdout)
Return code handling translates as follows:
pipe = os.popen("cmd", 'w')
...
rc = pipe.close()
if rc is not None and rc % 256:
print "There were some errors"
==>
process = Popen("cmd", 'w', shell=True, stdin=PIPE)
...
process.stdin.close()
if process.wait() != 0:
print "There were some errors"
Replacing popen2.*
------------------
(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
==>
p = Popen(["somestring"], shell=True, bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
On Unix, popen2 also accepts a sequence as the command to execute, in
which case arguments will be passed directly to the program without
shell intervention. This usage can be replaced as follows:
(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize,
mode)
==>
p = Popen(["mycmd", "myarg"], bufsize=bufsize,
stdin=PIPE, stdout=PIPE, close_fds=True)
(child_stdout, child_stdin) = (p.stdout, p.stdin)
The popen2.Popen3 and popen2.Popen4 classes basically work like subprocess.Popen,
except that:
* subprocess.Popen raises an exception if the execution fails
* the capturestderr argument is replaced with the stderr argument.
* stdin=PIPE and stdout=PIPE must be specified.
* popen2 closes all filedescriptors by default, but you have to specify
close_fds=True with subprocess.Popen.
"""
import sys, os
mswindows = (sys.platform == "win32" or (sys.platform == "cli" and os.name == 'nt'))
import types
import traceback
import gc
import signal
import errno
# Exception classes used by this module.
class CalledProcessError(Exception):
    """Raised by check_call() and check_output() when the spawned
    process exits with a non-zero status.

    Attributes:
        returncode -- exit status of the child process
        cmd        -- the command that was run
        output     -- captured output (set by check_output() only)
    """
    def __init__(self, returncode, cmd, output=None):
        self.returncode = returncode
        self.cmd = cmd
        self.output = output

    def __str__(self):
        message = "Command '%s' returned non-zero exit status %d"
        return message % (self.cmd, self.returncode)
# Platform-specific imports and helpers.  On Windows we rely on the
# _subprocess C extension; on POSIX we use select/fcntl plus pickle for
# shuttling exec failures from child to parent.
if mswindows:
    import threading
    import msvcrt
    import _subprocess
    class STARTUPINFO:
        # Minimal stand-in for the win32 STARTUPINFO structure.
        dwFlags = 0
        hStdInput = None
        hStdOutput = None
        hStdError = None
        wShowWindow = 0
    class pywintypes:
        # Stand-in so "except pywintypes.error" works without pywin32.
        error = IOError
else:
    import select
    _has_poll = hasattr(select, 'poll')
    import fcntl
    import pickle

    # When select or poll has indicated that the file is writable,
    # we can write up to _PIPE_BUF bytes without risk of blocking.
    # POSIX defines PIPE_BUF as >= 512.
    _PIPE_BUF = getattr(select, 'PIPE_BUF', 512)

__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call",
           "check_output", "CalledProcessError"]

if mswindows:
    # Re-export the win32 process-creation constants for convenience.
    from _subprocess import (CREATE_NEW_CONSOLE, CREATE_NEW_PROCESS_GROUP,
                             STD_INPUT_HANDLE, STD_OUTPUT_HANDLE,
                             STD_ERROR_HANDLE, SW_HIDE,
                             STARTF_USESTDHANDLES, STARTF_USESHOWWINDOW)

    __all__.extend(["CREATE_NEW_CONSOLE", "CREATE_NEW_PROCESS_GROUP",
                    "STD_INPUT_HANDLE", "STD_OUTPUT_HANDLE",
                    "STD_ERROR_HANDLE", "SW_HIDE",
                    "STARTF_USESTDHANDLES", "STARTF_USESHOWWINDOW"])

# Upper bound on file descriptors to close in _close_fds().
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except:
    MAXFD = 256

# Popen instances that were garbage-collected while their child was
# still running; _cleanup() reaps them later.
_active = []
def _cleanup():
    # Reap Popen instances that were garbage-collected while their child
    # was still running (see Popen.__del__).  Called from the Popen
    # constructor so zombie children are eventually collected.
    for inst in _active[:]:
        res = inst._internal_poll(_deadstate=sys.maxint)
        if res is not None and res >= 0:
            try:
                _active.remove(inst)
            except ValueError:
                # This can happen if two threads create a new Popen instance.
                # It's harmless that it was already removed, so ignore.
                pass
        else:
            # NOTE(review): this looks like leftover debugging output -- it
            # writes to stdout for every still-running tracked child.
            # Confirm whether this print is intentional in this fork.
            print inst.args
# Sentinel values accepted by Popen's stdin/stdout/stderr arguments:
# PIPE requests a new pipe to/from the child; STDOUT (valid for stderr
# only) redirects the child's stderr into its stdout stream.
PIPE = -1
STDOUT = -2
def _eintr_retry_call(func, *args):
while True:
try:
return func(*args)
except OSError, e:
if e.errno == errno.EINTR:
continue
raise
def call(*popenargs, **kwargs):
    """Run a command, wait for it to complete, and return its
    returncode attribute.

    The arguments are the same as for the Popen constructor.  Example:

    retcode = call(["ls", "-l"])
    """
    process = Popen(*popenargs, **kwargs)
    return process.wait()
def check_call(*popenargs, **kwargs):
    """Run a command and wait for it to complete.  Return if the exit
    code was zero, otherwise raise CalledProcessError, whose
    returncode attribute carries the exit code.

    The arguments are the same as for the Popen constructor.  Example:

    check_call(["ls", "-l"])
    """
    status = call(*popenargs, **kwargs)
    if not status:
        return 0
    command = kwargs.get("args")
    if command is None:
        command = popenargs[0]
    raise CalledProcessError(status, command)
def check_output(*popenargs, **kwargs):
    r"""Run a command and return its standard output as a byte string.

    A non-zero exit code raises CalledProcessError, whose returncode
    attribute holds the exit code and whose output attribute holds the
    captured output.

    The arguments are the same as for the Popen constructor.  Example:

    >>> check_output(["ls", "-l", "/dev/null"])
    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

    The stdout argument may not be supplied, because it is used
    internally.  To fold standard error into the result, pass
    stderr=STDOUT:

    >>> check_output(["/bin/sh", "-c",
    ...               "ls -l non_existent_file ; exit 0"],
    ...              stderr=STDOUT)
    'ls: non_existent_file: No such file or directory\n'
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    proc = Popen(stdout=PIPE, *popenargs, **kwargs)
    captured, _ = proc.communicate()
    status = proc.poll()
    if status:
        command = kwargs.get("args")
        if command is None:
            command = popenargs[0]
        raise CalledProcessError(status, command, output=captured)
    return captured
def list2cmdline(seq):
    """
    Translate a sequence of arguments into a single command-line
    string, following the quoting rules of the MS C runtime:

    1) Arguments are delimited by white space (a space or a tab).

    2) A string wrapped in double quotation marks counts as a single
       argument regardless of the white space it contains; a quoted
       string may be embedded inside an argument.

    3) A double quotation mark preceded by a backslash is a literal
       double quotation mark.

    4) Backslashes are literal unless they immediately precede a
       double quotation mark.

    5) When backslashes immediately precede a double quotation mark,
       each pair becomes one literal backslash; an odd trailing
       backslash escapes the quotation mark as in rule 3.
    """

    # See
    # http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
    # or search http://msdn.microsoft.com for
    # "Parsing C++ Command-Line Arguments"
    pieces = []
    for token in seq:
        pending_bs = []

        # Separate this argument from the previous one.
        if pieces:
            pieces.append(' ')

        quoted = (" " in token) or ("\t" in token) or not token
        if quoted:
            pieces.append('"')

        for ch in token:
            if ch == '\\':
                # Defer: whether to double depends on what follows.
                pending_bs.append(ch)
            elif ch == '"':
                # Backslashes before a quote are doubled, then the
                # quote itself is escaped.
                pieces.append('\\' * len(pending_bs) * 2)
                pending_bs = []
                pieces.append('\\"')
            else:
                # Ordinary character: flush deferred backslashes as-is.
                if pending_bs:
                    pieces.extend(pending_bs)
                    pending_bs = []
                pieces.append(ch)

        # Trailing backslashes are literal...
        pieces.extend(pending_bs)
        if quoted:
            # ...but must be doubled when a closing quote follows.
            pieces.extend(pending_bs)
            pieces.append('"')

    return ''.join(pieces)
class Popen(object):
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
"""Create new Popen instance."""
_cleanup()
self.args = args
self._child_created = False
if not isinstance(bufsize, (int, long)):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows "
"platforms")
if close_fds and (stdin is not None or stdout is not None or
stderr is not None):
raise ValueError("close_fds is not supported on Windows "
"platforms if you redirect stdin/stdout/stderr")
else:
# POSIX
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows "
"platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows "
"platforms")
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
# Input and output objects. The general principle is like
# this:
#
# Parent Child
# ------ -----
# p2cwrite ---stdin---> p2cread
# c2pread <--stdout--- c2pwrite
# errread <--stderr--- errwrite
#
# On POSIX, the child objects are file descriptors. On
# Windows, these are Windows file handles. The parent objects
# are file descriptors on both platforms. The parent objects
# are None when not using PIPEs. The child objects are None
# when not redirecting.
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
self._execute_child(args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
if mswindows:
if p2cwrite is not None:
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if c2pread is not None:
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if errread is not None:
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if p2cwrite is not None:
self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
if c2pread is not None:
if universal_newlines:
self.stdout = os.fdopen(c2pread, 'rU', bufsize)
else:
self.stdout = os.fdopen(c2pread, 'rb', bufsize)
if errread is not None:
if universal_newlines:
self.stderr = os.fdopen(errread, 'rU', bufsize)
else:
self.stderr = os.fdopen(errread, 'rb', bufsize)
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
def __del__(self, _maxint=sys.maxint, _active=_active):
if not self._child_created:
# We didn't get to successfully create a child process.
return
# In case the child hasn't been waited on, check if it's done.
self._internal_poll(_deadstate=_maxint)
if self.returncode is None and _active is not None:
# Child is still running, keep us alive until we can wait on it.
_active.append(self)
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr)."""
# Optimization: If we are only using one pipe, or no pipe at
# all, using select() or threads is unnecessary.
if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
stdout = None
stderr = None
if self.stdin:
if input:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
raise
self.stdin.close()
elif self.stdout:
stdout = self.stdout.read()
self.stdout.close()
elif self.stderr:
stderr = self.stderr.read()
self.stderr.close()
self.wait()
return (stdout, stderr)
return self._communicate(input)
def poll(self):
return self._internal_poll()
if mswindows:
#
# Windows methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
if stdin is None and stdout is None and stderr is None:
return (None, None, None, None, None, None)
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
p2cread = _subprocess.GetStdHandle(_subprocess.STD_INPUT_HANDLE)
if p2cread is None:
p2cread, _ = _subprocess.CreatePipe(None, 0)
elif stdin == PIPE:
p2cread, p2cwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
# Assuming file-like object
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = _subprocess.GetStdHandle(_subprocess.STD_OUTPUT_HANDLE)
if c2pwrite is None:
_, c2pwrite = _subprocess.CreatePipe(None, 0)
elif stdout == PIPE:
c2pread, c2pwrite = _subprocess.CreatePipe(None, 0)
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
# Assuming file-like object
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = _subprocess.GetStdHandle(_subprocess.STD_ERROR_HANDLE)
if errwrite is None:
_, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == PIPE:
errread, errwrite = _subprocess.CreatePipe(None, 0)
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
# Assuming file-like object
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
"""Return a duplicate of handle, which is inheritable"""
return _subprocess.DuplicateHandle(_subprocess.GetCurrentProcess(),
handle, _subprocess.GetCurrentProcess(), 0, 1,
_subprocess.DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
"""Find and return absolut path to w9xpopen.exe"""
w9xpopen = os.path.join(
os.path.dirname(_subprocess.GetModuleFileName(0)),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
# Eeek - file-not-found - possibly an embedding
# situation - see if we can locate it in sys.exec_prefix
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
"w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is "
"needed for Popen to work with your "
"shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (MS Windows version)"""
if not isinstance(args, types.StringTypes):
args = list2cmdline(args)
# Process startup details
if startupinfo is None:
startupinfo = STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= _subprocess.STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = _subprocess.SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = '{} /c "{}"'.format (comspec, args)
if (_subprocess.GetVersion() >= 0x80000000 or
os.path.basename(comspec).lower() == "command.com"):
# Win9x, or using command.com on NT. We need to
# use the w9xpopen intermediate program. For more
# information, see KB Q150956
# (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
# Not passing CREATE_NEW_CONSOLE has been known to
# cause random failures on win9x. Specifically a
# dialog: "Your program accessed mem currently in
# use at xxx" and a hopeful warning about the
# stability of your system. Cost is Ctrl+C wont
# kill children.
creationflags |= _subprocess.CREATE_NEW_CONSOLE
# Start the process
try:
hp, ht, pid, tid = _subprocess.CreateProcess(executable, args,
# no special security
None, None,
int(not close_fds),
creationflags,
env,
cwd,
startupinfo)
except pywintypes.error, e:
# Translate pywintypes.error to WindowsError, which is
# a subclass of OSError. FIXME: We should really
# translate errno using _sys_errlist (or similar), but
# how can this be done from Python?
raise WindowsError(*e.args)
finally:
# Child is launched. Close the parent's copy of those pipe
# handles that only the child should have open. You need
# to make sure that no handles to the write end of the
# output pipe are maintained in this process or else the
# pipe will not close when the child process exits and the
# ReadFile will hang.
if p2cread is not None:
p2cread.Close()
if c2pwrite is not None:
c2pwrite.Close()
if errwrite is not None:
errwrite.Close()
# Retain the process handle, but close the thread handle
self._child_created = True
self._handle = hp
self.pid = pid
ht.Close()
def _internal_poll(self, _deadstate=None,
_WaitForSingleObject=_subprocess.WaitForSingleObject,
_WAIT_OBJECT_0=_subprocess.WAIT_OBJECT_0,
_GetExitCodeProcess=_subprocess.GetExitCodeProcess):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it can only refer to objects
in its local scope.
"""
if self.returncode is None:
if _WaitForSingleObject(self._handle, 0) == _WAIT_OBJECT_0:
self.returncode = _GetExitCodeProcess(self._handle)
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
_subprocess.WaitForSingleObject(self._handle,
_subprocess.INFINITE)
self.returncode = _subprocess.GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
def _communicate(self, input):
stdout = None # Return
stderr = None # Return
if self.stdout:
stdout = []
stdout_thread = threading.Thread(target=self._readerthread,
args=(self.stdout, stdout))
stdout_thread.setDaemon(True)
stdout_thread.start()
if self.stderr:
stderr = []
stderr_thread = threading.Thread(target=self._readerthread,
args=(self.stderr, stderr))
stderr_thread.setDaemon(True)
stderr_thread.start()
if self.stdin:
if input is not None:
try:
self.stdin.write(input)
except IOError as e:
if e.errno != errno.EPIPE:
raise
self.stdin.close()
if self.stdout:
stdout_thread.join()
if self.stderr:
stderr_thread.join()
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
def send_signal(self, sig):
"""Send a signal to the process
"""
if sig == signal.SIGTERM:
self.terminate()
elif sig == signal.CTRL_C_EVENT:
os.kill(self.pid, signal.CTRL_C_EVENT)
elif sig == signal.CTRL_BREAK_EVENT:
os.kill(self.pid, signal.CTRL_BREAK_EVENT)
else:
raise ValueError("Unsupported signal: {}".format(sig))
def terminate(self):
"""Terminates the process
"""
_subprocess.TerminateProcess(self._handle, 1)
kill = terminate
else:
#
# POSIX methods
#
def _get_handles(self, stdin, stdout, stderr):
"""Construct and return tuple with IO objects:
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
"""
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif isinstance(stdin, int):
p2cread = stdin
else:
# Assuming file-like object
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
# Assuming file-like object
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = stderr
else:
# Assuming file-like object
errwrite = stderr.fileno()
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _set_cloexec_flag(self, fd, cloexec=True):
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
old = fcntl.fcntl(fd, fcntl.F_GETFD)
if cloexec:
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
else:
fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
def _close_fds(self, but):
if hasattr(os, 'closerange'):
os.closerange(3, but)
os.closerange(but + 1, MAXFD)
else:
for i in xrange(3, MAXFD):
if i == but:
continue
try:
os.close(i)
except:
pass
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
"""Execute program (POSIX version)"""
if isinstance(args, types.StringTypes):
args = [args]
else:
args = list(args)
if shell:
args = ["/bin/sh", "-c"] + args
if executable:
args[0] = executable
if executable is None:
executable = args[0]
# For transferring possible exec failure from child to parent
# The first char specifies the exception type: 0 means
# OSError, 1 means some other error.
errpipe_read, errpipe_write = os.pipe()
try:
try:
self._set_cloexec_flag(errpipe_write)
gc_was_enabled = gc.isenabled()
# Disable gc to avoid bug where gc -> file_dealloc ->
# write to stderr -> hang. http://bugs.python.org/issue1336
gc.disable()
try:
self.pid = os.fork()
except:
if gc_was_enabled:
gc.enable()
raise
self._child_created = True
if self.pid == 0:
# Child
try:
# Close parent's pipe ends
if p2cwrite is not None:
os.close(p2cwrite)
if c2pread is not None:
os.close(c2pread)
if errread is not None:
os.close(errread)
os.close(errpipe_read)
# Dup fds for child
def _dup2(a, b):
# dup2() removes the CLOEXEC flag but
# we must do it ourselves if dup2()
# would be a no-op (issue #10806).
if a == b:
self._set_cloexec_flag(a, False)
elif a is not None:
os.dup2(a, b)
_dup2(p2cread, 0)
_dup2(c2pwrite, 1)
_dup2(errwrite, 2)
# Close pipe fds. Make sure we don't close the
# same fd more than once, or standard fds.
closed = { None }
for fd in [p2cread, c2pwrite, errwrite]:
if fd not in closed and fd > 2:
os.close(fd)
closed.add(fd)
# Close all other fds, if asked for
if close_fds:
self._close_fds(but=errpipe_write)
if cwd is not None:
os.chdir(cwd)
if preexec_fn:
preexec_fn()
if env is None:
os.execvp(executable, args)
else:
os.execvpe(executable, args, env)
except:
exc_type, exc_value, tb = sys.exc_info()
# Save the traceback and attach it to the exception object
exc_lines = traceback.format_exception(exc_type,
exc_value,
tb)
exc_value.child_traceback = ''.join(exc_lines)
os.write(errpipe_write, pickle.dumps(exc_value))
# This exitcode won't be reported to applications, so it
# really doesn't matter what we return.
os._exit(255)
# Parent
if gc_was_enabled:
gc.enable()
finally:
# be sure the FD is closed no matter what
os.close(errpipe_write)
if p2cread is not None and p2cwrite is not None:
os.close(p2cread)
if c2pwrite is not None and c2pread is not None:
os.close(c2pwrite)
if errwrite is not None and errread is not None:
os.close(errwrite)
# Wait for exec to fail or succeed; possibly raising exception
# Exception limited to 1M
data = _eintr_retry_call(os.read, errpipe_read, 1048576)
finally:
# be sure the FD is closed no matter what
os.close(errpipe_read)
if data != "":
try:
_eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
child_exception = pickle.loads(data)
for fd in (p2cwrite, c2pread, errread):
if fd is not None:
os.close(fd)
raise child_exception
def _handle_exitstatus(self, sts, _WIFSIGNALED=os.WIFSIGNALED,
_WTERMSIG=os.WTERMSIG, _WIFEXITED=os.WIFEXITED,
_WEXITSTATUS=os.WEXITSTATUS):
# This method is called (indirectly) by __del__, so it cannot
# refer to anything outside of its local scope."""
if _WIFSIGNALED(sts):
self.returncode = -_WTERMSIG(sts)
elif _WIFEXITED(sts):
self.returncode = _WEXITSTATUS(sts)
else:
# Should never happen
raise RuntimeError("Unknown child exit status!")
def _internal_poll(self, _deadstate=None, _waitpid=os.waitpid,
_WNOHANG=os.WNOHANG, _os_error=os.error):
"""Check if child process has terminated. Returns returncode
attribute.
This method is called by __del__, so it cannot reference anything
outside of the local scope (nor can any methods it calls).
"""
if self.returncode is None:
try:
pid, sts = _waitpid(self.pid, _WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except _os_error:
if _deadstate is not None:
self.returncode = _deadstate
return self.returncode
def wait(self):
"""Wait for child process to terminate. Returns returncode
attribute."""
if self.returncode is None:
try:
pid, sts = _eintr_retry_call(os.waitpid, self.pid, 0)
except OSError as e:
if e.errno != errno.ECHILD:
raise
# This happens if SIGCLD is set to be ignored or waiting
# for child processes has otherwise been disabled for our
# process. This child is dead, we can't get the status.
sts = 0
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input):
if self.stdin:
# Flush stdio buffer. This might block, if the user has
# been writing to .stdin in an uncontrolled fashion.
self.stdin.flush()
if not input:
self.stdin.close()
if _has_poll:
stdout, stderr = self._communicate_with_poll(input)
else:
stdout, stderr = self._communicate_with_select(input)
# All data exchanged. Translate lists into strings.
if stdout is not None:
stdout = ''.join(stdout)
if stderr is not None:
stderr = ''.join(stderr)
# Translate newlines, if requested. We cannot let the file
# object do the translation: It is based on stdio, which is
# impossible to combine with select (unless forcing no
# buffering).
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
def _communicate_with_poll(self, input):
stdout = None # Return
stderr = None # Return
fd2file = {}
fd2output = {}
poller = select.poll()
def register_and_append(file_obj, eventmask):
poller.register(file_obj.fileno(), eventmask)
fd2file[file_obj.fileno()] = file_obj
def close_unregister_and_remove(fd):
poller.unregister(fd)
fd2file[fd].close()
fd2file.pop(fd)
if self.stdin and input:
register_and_append(self.stdin, select.POLLOUT)
select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI
if self.stdout:
register_and_append(self.stdout, select_POLLIN_POLLPRI)
fd2output[self.stdout.fileno()] = stdout = []
if self.stderr:
register_and_append(self.stderr, select_POLLIN_POLLPRI)
fd2output[self.stderr.fileno()] = stderr = []
input_offset = 0
while fd2file:
try:
ready = poller.poll()
except select.error, e:
if e.args[0] == errno.EINTR:
continue
raise
for fd, mode in ready:
if mode & select.POLLOUT:
chunk = input[input_offset : input_offset + _PIPE_BUF]
try:
input_offset += os.write(fd, chunk)
except OSError as e:
if e.errno == errno.EPIPE:
close_unregister_and_remove(fd)
else:
raise
else:
if input_offset >= len(input):
close_unregister_and_remove(fd)
elif mode & select_POLLIN_POLLPRI:
data = os.read(fd, 4096)
if not data:
close_unregister_and_remove(fd)
fd2output[fd].append(data)
else:
# Ignore hang up or errors.
close_unregister_and_remove(fd)
return (stdout, stderr)
def _communicate_with_select(self, input):
    """Exchange data with the child using select.select().

    Fallback for platforms without poll().  Same contract as
    _communicate_with_poll(): returns (stdout, stderr) as lists of
    read chunks, or None for pipes that were not open.
    """
    read_set = []
    write_set = []
    stdout = None # Return
    stderr = None # Return
    if self.stdin and input:
        write_set.append(self.stdin)
    if self.stdout:
        read_set.append(self.stdout)
        stdout = []
    if self.stderr:
        read_set.append(self.stderr)
        stderr = []
    input_offset = 0
    while read_set or write_set:
        try:
            rlist, wlist, xlist = select.select(read_set, write_set, [])
        except select.error, e:
            # Retry if the select() call was interrupted by a signal.
            if e.args[0] == errno.EINTR:
                continue
            raise
        if self.stdin in wlist:
            # Write at most _PIPE_BUF bytes so the write cannot block.
            chunk = input[input_offset : input_offset + _PIPE_BUF]
            try:
                bytes_written = os.write(self.stdin.fileno(), chunk)
            except OSError as e:
                # Child closed its stdin; stop feeding it input.
                if e.errno == errno.EPIPE:
                    self.stdin.close()
                    write_set.remove(self.stdin)
                else:
                    raise
            else:
                input_offset += bytes_written
                if input_offset >= len(input):
                    self.stdin.close()
                    write_set.remove(self.stdin)
        if self.stdout in rlist:
            data = os.read(self.stdout.fileno(), 1024)
            if data == "":
                # EOF: child closed its stdout.
                self.stdout.close()
                read_set.remove(self.stdout)
            stdout.append(data)
        if self.stderr in rlist:
            data = os.read(self.stderr.fileno(), 1024)
            if data == "":
                # EOF: child closed its stderr.
                self.stderr.close()
                read_set.remove(self.stderr)
            stderr.append(data)
    return (stdout, stderr)
def send_signal(self, sig):
    """Deliver signal `sig` to the child process via os.kill()."""
    os.kill(self.pid, sig)
def terminate(self):
    """Ask the child process to exit by sending it SIGTERM."""
    self.send_signal(signal.SIGTERM)
def kill(self):
    """Forcibly kill the child process by sending it SIGKILL."""
    self.send_signal(signal.SIGKILL)
def _demo_posix():
    """Demonstrate Popen usage on POSIX systems (run as a script)."""
    #
    # Example 1: Simple redirection: Get process list
    #
    plist = Popen(["ps"], stdout=PIPE).communicate()[0]
    print "Process list:"
    print plist
    #
    # Example 2: Change uid before executing child
    #
    if os.getuid() == 0:
        p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
        p.wait()
    #
    # Example 3: Connecting several subprocesses
    #
    print "Looking for 'hda'..."
    p1 = Popen(["dmesg"], stdout=PIPE)
    p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
    print repr(p2.communicate()[0])
    #
    # Example 4: Catch execution error
    #
    print
    print "Trying a weird file..."
    try:
        print Popen(["/this/path/does/not/exist"]).communicate()
    except OSError, e:
        if e.errno == errno.ENOENT:
            print "The file didn't exist. I thought so..."
            print "Child traceback:"
            print e.child_traceback
        else:
            print "Error", e.errno
    else:
        # Reaching here means exec of a bogus path did not fail.
        print >>sys.stderr, "Gosh. No error."
def _demo_windows():
    """Demonstrate Popen usage on Windows (run as a script)."""
    #
    # Example 1: Connecting several subprocesses
    #
    print "Looking for 'PROMPT' in set output..."
    p1 = Popen("set", stdout=PIPE, shell=True)
    p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
    print repr(p2.communicate()[0])
    #
    # Example 2: Simple execution of program
    #
    print "Executing calc..."
    p = Popen("calc")
    p.wait()
# Script entry point: run the platform-appropriate demo.
if __name__ == "__main__":
    if mswindows:
        _demo_windows()
    else:
        _demo_posix()
| gpl-3.0 |
jaharkes/home-assistant | homeassistant/components/light/wemo.py | 28 | 4460 | """
Support for Belkin WeMo lights.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/light.wemo/
"""
import logging
from datetime import timedelta
import homeassistant.util as util
import homeassistant.util.color as color_util
from homeassistant.components.light import (
Light, ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_RGB_COLOR, ATTR_TRANSITION,
ATTR_XY_COLOR, SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP, SUPPORT_RGB_COLOR,
SUPPORT_TRANSITION, SUPPORT_XY_COLOR)
DEPENDENCIES = ['wemo']
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
_LOGGER = logging.getLogger(__name__)
SUPPORT_WEMO = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_RGB_COLOR |
SUPPORT_TRANSITION | SUPPORT_XY_COLOR)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup WeMo bridges and register connected lights."""
    import pywemo.discovery as discovery

    # Only discovery-based setup is supported for this platform.
    if discovery_info is None:
        return
    # discovery_info carries (…, location, mac) at indices 2 and 3.
    device = discovery.device_from_description(
        discovery_info[2], discovery_info[3])
    if device:
        setup_bridge(device, add_devices)
def setup_bridge(bridge, add_devices):
    """Setup a WeMo link."""
    # light_id -> WemoLight; closed over by update_lights so repeated
    # scans only create entities for newly-seen lights.
    lights = {}

    @util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
    def update_lights():
        """Update the WeMo led objects with latest info from the bridge."""
        bridge.bridge_update()
        new_lights = []
        for light_id, device in bridge.Lights.items():
            if light_id not in lights:
                lights[light_id] = WemoLight(device, update_lights)
                new_lights.append(lights[light_id])
        if new_lights:
            add_devices(new_lights)

    # Initial scan registers the lights present at startup.
    update_lights()
class WemoLight(Light):
    """Representation of a WeMo light."""

    def __init__(self, device, update_lights):
        """Initialize the light.

        device: the pywemo light object wrapped by this entity.
        update_lights: throttled callback that refreshes all lights
            on the owning bridge (see setup_bridge).
        """
        self.light_id = device.name
        self.device = device
        self.update_lights = update_lights

    @property
    def unique_id(self):
        """Return the ID of this light."""
        deviceid = self.device.uniqueID
        return '{}.{}'.format(self.__class__, deviceid)

    @property
    def name(self):
        """Return the name of the light."""
        return self.device.name

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self.device.state.get('level', 255)

    @property
    def xy_color(self):
        """Return the XY color values of this light."""
        return self.device.state.get('color_xy')

    @property
    def color_temp(self):
        """Return the color temperature of this light in mireds."""
        return self.device.state.get('temperature_mireds')

    @property
    def is_on(self):
        """True if device is on."""
        return self.device.state['onoff'] != 0

    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_WEMO

    def turn_on(self, **kwargs):
        """Turn the light on."""
        transitiontime = int(kwargs.get(ATTR_TRANSITION, 0))
        # Resolve the requested color: explicit XY wins over RGB.
        if ATTR_XY_COLOR in kwargs:
            xycolor = kwargs[ATTR_XY_COLOR]
        elif ATTR_RGB_COLOR in kwargs:
            xycolor = color_util.color_RGB_to_xy(
                *(int(val) for val in kwargs[ATTR_RGB_COLOR]))
            # The RGB conversion also yields a brightness component;
            # use it unless the caller passed an explicit brightness.
            kwargs.setdefault(ATTR_BRIGHTNESS, xycolor[2])
        else:
            xycolor = None
        if xycolor is not None:
            self.device.set_color(xycolor, transition=transitiontime)
        if ATTR_COLOR_TEMP in kwargs:
            colortemp = kwargs[ATTR_COLOR_TEMP]
            self.device.set_temperature(mireds=colortemp,
                                        transition=transitiontime)
        # Color/temperature are applied first; turn_on is always last.
        if ATTR_BRIGHTNESS in kwargs:
            brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness or 255)
            self.device.turn_on(level=brightness, transition=transitiontime)
        else:
            self.device.turn_on(transition=transitiontime)

    def turn_off(self, **kwargs):
        """Turn the light off."""
        transitiontime = int(kwargs.get(ATTR_TRANSITION, 0))
        self.device.turn_off(transition=transitiontime)

    def update(self):
        """Synchronize state with bridge."""
        self.update_lights(no_throttle=True)
| mit |
kierangraham/dotfiles | Sublime/Packages/SublimeCodeIntel/libs/codeintel2/database/catalog.py | 8 | 40980 | #!python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""The API catalogs-zone of the codeintel database.
See the database/database.py module docstring for an overview.
"""
import sys
import os
from os.path import (join, dirname, exists, expanduser, splitext, basename,
split, abspath, isabs, isdir, isfile, normpath,
normcase)
import pickle as pickle
import threading
import time
from hashlib import md5
import bisect
import fnmatch
from glob import glob
from pprint import pprint, pformat
import logging
from io import StringIO
import codecs
import copy
import weakref
import queue
import ciElementTree as ET
from codeintel2.common import *
from codeintel2.buffer import Buffer
from codeintel2.util import dedent, safe_lang_from_lang, banner, hotshotit
from codeintel2.tree import tree_from_cix_path
from codeintel2.database.util import filter_blobnames_for_prefix
from codeintel2.database.resource import AreaResource
#---- globals
log = logging.getLogger("codeintel.db")
# log.setLevel(logging.DEBUG)
#---- Database zone and lib implementations
class CatalogsZone(object):
    """Singleton zone managing the db/catalogs/... area.

    TODO: Locking: .cull_mem() and .save() will be called periodically
    on indexer thread. Anything they access needs to be guarded.
    """
    # Lazily loaded pickled indices; see the corresponding properties.
    _res_index = None
    _blob_index = None
    _toplevelname_index = None
    _toplevelprefix_index = None
    # Set True by the first .update() pass over the catalog dirs.
    _have_updated_at_least_once = False

    def __init__(self, mgr, catalog_dirs=None):
        self.mgr = mgr
        self.db = mgr.db
        if catalog_dirs is None:
            catalog_dirs = []
        assert isinstance(catalog_dirs, list)
        self.catalog_dirs = catalog_dirs
        self.base_dir = join(self.db.base_dir, "db", "catalogs")
        self._lib_cache = {}  # (lang, selection_res_ids) -> CatalogLib
        self._lock = threading.RLock()
        # {lang -> {blobname -> (blob, atime)}}; culled by cull_mem().
        self._blob_and_atime_from_blobname_from_lang_cache = {}
        # Queued (dbsubpath, lpaths) pairs flushed to disk by save().
        self._dbsubpaths_and_lpaths_to_save = []

    def __repr__(self):
        return "<catalog zone>"
def _selection_from_selector(self, selections):
    """Given a sequence of catalog selection strings (each is a
    catalog name or full path to a catalog .cix file) return a dict
    mapping:
        <normalized-selector> -> <selection-string>
    If "selections" is None, this returns None.
    """
    if selections is None:
        return None

    def _selector(sel):
        # Absolute paths normalize as paths; bare names lowercase.
        if isabs(sel):
            return normpath(normcase(sel))
        return sel.lower()

    return dict((_selector(sel), sel) for sel in selections)
# Lazily built {selector -> [res_id, ...]} map; invalidated by update().
_res_ids_from_selector_cache = None

def _res_ids_from_selections(self, selections):
    """Returns a tuple of the database resource ids for the given
    selections and a list of selections that didn't match any loaded
    resources.
    """
    if self._res_ids_from_selector_cache is None:
        # Build the cache: each resource is reachable both by its
        # normalized path and by its lowercased base name.
        cache = self._res_ids_from_selector_cache = {}
        for cix_area_path, res_data in list(self.res_index.items()):
            cix_path = AreaResource(cix_area_path).path
            res_id = res_data[0]
            cache[normpath(normcase(cix_path))] = [res_id]
            name = splitext(basename(cix_path))[0].lower()
            if name not in cache:
                cache[name] = []
            cache[name].append(res_id)
        log.debug("_res_ids_from_selector_cache: %r", cache)
    res_ids = []
    missing_selections = []
    for selector, selection \
            in list(self._selection_from_selector(selections).items()):
        try:
            res_ids += self._res_ids_from_selector_cache[selector]
        except KeyError as ex:
            missing_selections.append(selection)
    log.debug("_res_ids_from_selections: res_ids=%r", res_ids)
    return tuple(res_ids), missing_selections
@LazyClassAttribute
def _std_catalog_dir(cls):
    # The "catalogs" dir shipped alongside the codeintel2 package.
    return join(dirname(dirname(abspath(__file__))), "catalogs")

_catalog_dirs = None

@property
def catalog_dirs(self):
    return self._catalog_dirs

@catalog_dirs.setter
def catalog_dirs(self, value):
    assert not isinstance(value, str), \
        "catalog_dirs must be an iterable, not a string"
    catalog_dirs = list(value)
    # The standard catalog dir is always searched.
    if self._std_catalog_dir not in catalog_dirs:
        catalog_dirs.append(self._std_catalog_dir)
    self._catalog_dirs = catalog_dirs
    # cause a rescan next time we try to get a catalog lib
    self._have_updated_at_least_once = False
def get_lib(self, lang, selections=None):
    """Return a CatalogLib for the given lang and selections."""
    assert not isinstance(selections, str),\
        "catalog lib 'selections' must be None or a sequence, not %r: %r"\
        % (type(selections), selections)
    # Make sure the catalog dirs have been scanned at least once.
    if not self._have_updated_at_least_once:
        self.update(selections)
    if selections is not None:
        selection_res_ids, missing_selections \
            = self._res_ids_from_selections(selections)
        if missing_selections:
            # Retry once after updating just the missing selections.
            self.update(missing_selections)
            selection_res_ids, missing_selections \
                = self._res_ids_from_selections(selections)
        if missing_selections:
            log.warn("the following catalog selections didn't match "
                     "any loaded API catalog: '%s'",
                     "', '".join(missing_selections))
    else:
        selection_res_ids = None
    # Cache libs per (lang, selected resource ids).
    key = (lang, selection_res_ids)
    if key not in self._lib_cache:
        self._lib_cache[key] = CatalogLib(self, lang,
                                          selections, selection_res_ids)
    return self._lib_cache[key]
@property
def res_index(self):
    """Load and return the resource index (res_index)."""
    # {cix-area-path -> (res_id, last_updated, name, res_data)}
    if self._res_index is None:
        idxpath = join(self.base_dir, "res_index")
        self._res_index = self.db.load_pickle(idxpath, {})
    return self._res_index

@property
def blob_index(self):
    """Load and return the blob index (blob_index)."""
    # {lang -> blobname -> (dbfile, res_id)}
    if self._blob_index is None:
        idxpath = join(self.base_dir, "blob_index")
        self._blob_index = self.db.load_pickle(idxpath, {})
    return self._blob_index

@property
def toplevelname_index(self):
    """Load and return the top-level name index (toplevelname_index)."""
    # {lang -> ilk -> toplevelname -> res_id -> blobnames}
    if self._toplevelname_index is None:
        idxpath = join(self.base_dir, "toplevelname_index")
        self._toplevelname_index = self.db.load_pickle(idxpath, {})
    return self._toplevelname_index

@property
def toplevelprefix_index(self):
    """Load and return the top-level prefix index (toplevelprefix_index)."""
    # {lang -> ilk -> prefix -> res_id -> toplevelnames}
    if self._toplevelprefix_index is None:
        idxpath = join(self.base_dir, "toplevelprefix_index")
        self._toplevelprefix_index = self.db.load_pickle(idxpath, {})
    return self._toplevelprefix_index
def save(self):
    """Flush queued blob "lpaths" cache entries to disk.

    Entries are queued by lpaths_from_lang_and_blobname(); this is
    called periodically from the indexer thread.
    """
    with self._lock:
        for dbsubpath, lpaths in self._dbsubpaths_and_lpaths_to_save:
            self.db.save_pickle(join(self.base_dir, dbsubpath), lpaths)
        self._dbsubpaths_and_lpaths_to_save = []
def cull_mem(self):
    """Drop blobs from cache that have not been accessed in over 5
    minutes.

    To attempt to keep memory consumption under control we want to
    ensure we don't keep everything cached from the db in memory
    until process completion.
    """
    # TOTEST: Does Python/Komodo actually release this memory or
    # are we kidding ourselves?
    self._lock.acquire()
    try:
        N = 10
        if len(self._blob_and_atime_from_blobname_from_lang_cache) < N:
            # Too few blobs in memory to bother culling.
            return
        log.info("catalog: culling memory")
        now = time.time()
        for lang, blob_and_atime_from_blobname \
                in list(self._blob_and_atime_from_blobname_from_lang_cache.items()):
            for blobname, (blob, atime) in list(blob_and_atime_from_blobname.items()):
                if now - atime > 300.0: # >5 minutes since last access
                    del blob_and_atime_from_blobname[blobname]
    finally:
        self._lock.release()
def reportMemory(self):
    """
    Report on memory usage from this CatalogsZone.
    @returns {dict} memory usage; keys are the paths, values are a dict of
        "amount" -> number
        "units" -> "bytes" | "count"
        "desc" -> str description
    """
    log.debug("CatalogsZone: reporting memory")
    import memutils
    total_mem_usage = 0
    result = {}
    # One entry per cached catalog blob, keyed by lang and blobname.
    for lang, blob_and_atime_from_blobname in list(self._blob_and_atime_from_blobname_from_lang_cache.items()):
        for blobname, [blob, atime] in list(blob_and_atime_from_blobname.items()):
            result["explicit/python/codeintel/%s/catalog/%s" % (lang, blobname)] = {
                "amount": memutils.memusage(blob),
                "units": "bytes",
                "desc": "The number of bytes of %s codeintel %s catalog blobs." % (lang, blobname),
            }
    return result
def avail_catalogs(self, selections=None):
    """Generate a list of available catalogs.

        "selections" (optional) is a list of string of the same form
            as to `.get_lib()'. It is used to determine the boolean
            value of <selected> in the yielded tuples.

    Generated dicts as follows:
        {"name": <catalog-name>,    # 'name' attr of <codeintel> tag
                                    # or file basename
         "lang": <lang>,            # 'lang' attribute of first <file> tag
         "description": <desc>,     # 'description' attr of <codeintel>
         "cix_path": <cix-path>,
         "selected": <selected>,
         "selection": <selection>,
        }
    where <selected> is boolean indicating if this catalog is
    selected according to "selections" and <selection> is the string
    in "selections" that resulted in this.
    """
    selection_from_selector = self._selection_from_selector(selections)
    for cix_path in (cix_path for d in self.catalog_dirs if exists(d)
                     for cix_path in glob(join(d, "*.cix"))):
        name = lang = description = None
        try:
            # Stream-parse just far enough to get the header attrs;
            # stop at the first <file> tag.
            for event, elem in ET.iterparse(cix_path, events=("start",)):
                if elem.tag == "codeintel":
                    name = elem.get("name")
                    description = elem.get("description")
                elif elem.tag == "file":
                    lang = elem.get("lang")
                    break
        except ET.XMLParserError as ex:
            log.warn("%s: error reading catalog, skipping it (%s)",
                     cix_path, ex)
            continue
        if lang is None:
            log.warn("%s: no 'lang' attribute on catalog <file> tag, "
                     "skipping it", cix_path)
            continue
        if name is None:
            name = splitext(basename(cix_path))[0]
        norm_name = name.lower()
        norm_cix_path = normpath(normcase(cix_path))
        if selection_from_selector is None:
            selected = True
            selection = None
        else:
            # A catalog can be selected by name or by path.
            selection = (selection_from_selector.get(norm_name)
                         or selection_from_selector.get(norm_cix_path))
            selected = selection is not None
        yield {"name": name,
               "lang": lang,
               "description": description,
               "cix_path": cix_path,
               "selected": selected,
               "selection": selection}
def update(self, selections=None, progress_cb=None):
    """Update the catalog as necessary.

        "selections" (optional) is a list of string of the same form
            as to `.get_lib()' -- used here to filter the catalogs
            that we consider for updating.
        "progress_cb" (optional) is a callable that is called as
            follows to show the progress of the update:
                progress_cb(<desc>, <value>)
            where <desc> is a short string describing the current step
            and <value> is an integer between 0 and 100 indicating the
            level of completeness.

    Improvement over the previous revision: the unused `before`
    snapshot of `todos` (a debugging leftover) has been removed.
    """
    self._lock.acquire()
    try:
        self._have_updated_at_least_once = True

        # Figure out what updates need to be done...
        if progress_cb:
            try:
                progress_cb("Determining necessary catalog updates...", 5)
            except:
                log.exception("error in progress_cb (ignoring)")
        res_name_from_res_path = dict(  # this is our checklist
            (p, v[2]) for p, v in list(self.res_index.items()))
        todos = []
        log.info("updating %s: %d catalog dir(s)", self,
                 len(self.catalog_dirs))
        for catalog_info in self.avail_catalogs(selections):
            cix_path = catalog_info["cix_path"]
            res = AreaResource(cix_path)
            # check that the update-time is the mtime (i.e. up-to-date)
            try:
                res_id, last_updated, name, res_data \
                    = self.res_index[res.area_path]
            except KeyError:
                # add this new CIX file
                todos.append(("add", res, catalog_info["name"]))
            else:
                mtime = os.stat(cix_path).st_mtime
                if last_updated != mtime:  # epsilon? '>=' instead of '!='?
                    # update with newer version
                    todos.append(("update", res, catalog_info["name"]))
                # else:
                #    log.debug("not updating '%s' catalog: mtime is unchanged",
                #              catalog_info["name"])
                del res_name_from_res_path[res.area_path]  # tick it off
        for res_area_path, res_name in list(res_name_from_res_path.items()):
            # remove this obsolete CIX file
            try:
                todos.append(("remove", AreaResource(
                    res_area_path), res_name))
            except ValueError as ex:
                # Skip resources in unknown areas. This is primarily to
                # allow debugging/testing (when the set of registered
                # path_areas may not include the set when running in
                # Komodo.)
                pass

        # Filter todos on selections, if any.
        if selections is not None:
            selection_from_selector = self._selection_from_selector(
                selections)
            todos = [todo for todo in todos
                     if todo[2].lower() in selection_from_selector
                     or normpath(normcase(todo[1].path)) in selection_from_selector
                     ]

        # ... and then do them.
        if not todos:
            return
        for i, (action, res, name) in enumerate(todos):
            log.debug("%s `%s' catalog (%s)", action, name, res)
            try:
                if action == "add":
                    desc = "Adding '%s' API catalog" % basename(
                        res.subpath)
                    if progress_cb:
                        try:
                            progress_cb(desc, (5 + 95/len(todos)*i))
                        except:
                            log.exception(
                                "error in progress_cb (ignoring)")
                    else:
                        self.db.report_event(desc)
                    self._add_res(res)
                elif action == "remove":
                    desc = "Removing '%s' API catalog" % basename(
                        res.subpath)
                    if progress_cb:
                        try:
                            progress_cb(desc, (5 + 95/len(todos)*i))
                        except:
                            log.exception(
                                "error in progress_cb (ignoring)")
                    else:
                        self.db.report_event(desc)
                    self._remove_res(res)
                elif action == "update":
                    desc = "Updating '%s' API catalog" % basename(
                        res.subpath)
                    if progress_cb:
                        try:
                            progress_cb(desc, (5 + 95/len(todos)*i))
                        except:
                            log.exception(
                                "error in progress_cb (ignoring)")
                    else:
                        self.db.report_event(desc)
                    # XXX Bad for filesystem. Change this to do it
                    #     more intelligently if possible.
                    self._remove_res(res)
                    self._add_res(res)
            except DatabaseError as ex:
                log.warn("%s (skipping)" % ex)

        if progress_cb:
            try:
                progress_cb("Saving catalog indices...", 95)
            except:
                log.exception("error in progress_cb (ignoring)")
        self._res_ids_from_selector_cache = None  # invalidate this cache
        if self._res_index is not None:
            self.db.save_pickle(
                join(self.base_dir, "res_index"),
                self._res_index)
        if self._blob_index is not None:
            self.db.save_pickle(
                join(self.base_dir, "blob_index"),
                self._blob_index)
        if self._toplevelname_index is not None:
            self.db.save_pickle(
                join(self.base_dir, "toplevelname_index"),
                self._toplevelname_index)
        if self._toplevelprefix_index is not None:
            self.db.save_pickle(
                join(self.base_dir, "toplevelprefix_index"),
                self._toplevelprefix_index)
    finally:
        self._lock.release()
# Cache of res_ids already in use, plus a monotonically advancing
# counter used to find the next free id.
_existing_res_ids_cache = None
_new_res_id_counter = 0

def _new_res_id(self):
    """Allocate and reserve a res_id not used by any loaded resource."""
    if self._existing_res_ids_cache is None:
        # Seed the cache with every res_id found in the res_index.
        self._existing_res_ids_cache = dict(
            (data[0], True) for data in list(self.res_index.values()))
    candidate = self._new_res_id_counter
    while candidate in self._existing_res_ids_cache:
        candidate += 1
    self._new_res_id_counter = candidate + 1
    self._existing_res_ids_cache[candidate] = True
    return candidate
def _remove_res(self, res):
    """Remove catalog resource `res` from all indices and delete its
    on-disk blob files.

    Reverses _add_res(): updates blob_index, toplevelname_index and
    toplevelprefix_index for every blob that came from this resource,
    removes the ".blob" files (and associated caches), and finally
    drops the res_index entry.
    """
    LEN_PREFIX = self.db.LEN_PREFIX
    res_id, last_updated, name, res_data = self.res_index[res.area_path]
    # res_data: {lang -> blobname -> ilk -> toplevelnames}
    for lang, tfifb in list(res_data.items()):
        dbfile_and_res_id_from_blobname = self.blob_index[lang]
        for blobname, toplevelnames_from_ilk in list(tfifb.items()):
            # Update 'blob_index' for $lang.
            dbfile, res_id = dbfile_and_res_id_from_blobname[blobname]
            del dbfile_and_res_id_from_blobname[blobname]

            # Remove ".blob" file (and associated caches).
            pattern = join(self.base_dir, safe_lang_from_lang(lang),
                           dbfile+".*")
            try:
                for path in glob(pattern):
                    log.debug("fs-write: remove catalog %s blob file '%s'",
                              lang, basename(path))
                    os.remove(path)
            except EnvironmentError as ex:
                # XXX If get lots of these, then try harder. Perhaps
                #     creating a zombies area, or creating a list of
                #     them: self.db.add_zombie(dbpath).
                # XXX This isn't a correct analysis: the dbfile may just
                #     not have been there.
                # Bug fix: this warning used to interpolate an undefined
                # `dbpath` name, raising NameError whenever the handler
                # fired; report the glob pattern instead.
                log.warn("could not remove dbfile '%s' (%s '%s'): "
                         "leaving zombie", pattern, lang, blobname)

            # Update 'toplevel*_index' for $lang.
            # toplevelname_index: {lang -> ilk -> toplevelname -> res_id -> blobnames}
            # toplevelprefix_index: {lang -> ilk -> prefix -> res_id -> toplevelnames}
            for ilk, toplevelnames in toplevelnames_from_ilk.items():
                try:
                    bfrft = self.toplevelname_index[lang][ilk]
                    for toplevelname in toplevelnames:
                        del bfrft[toplevelname][res_id]
                        if not bfrft[toplevelname]:
                            del bfrft[toplevelname]
                except KeyError as ex:
                    self.db.corruption("CatalogsZone._remove_res",
                        "error removing top-level names of ilk '%s' for "
                        "'%s' resource from toplevelname_index: %s"
                        % (ilk, basename(res.path), ex),
                        "ignore")
                try:
                    tfrfp = self.toplevelprefix_index[lang][ilk]
                    for toplevelname in toplevelnames:
                        prefix = toplevelname[:LEN_PREFIX]
                        del tfrfp[prefix][res_id]
                        if not tfrfp[prefix]:
                            del tfrfp[prefix]
                except KeyError as ex:
                    self.db.corruption("CatalogsZone._remove_res",
                        "error removing top-level name of ilk '%s' for "
                        "'%s' resource from toplevelprefix_index: %s"
                        % (ilk, basename(res.path), ex),
                        "ignore")
    del self.res_index[res.area_path]
def _add_res(self, res):
    """Load catalog resource `res` (a .cix file) into the zone.

    Parses the CIX, assigns a new res_id, writes one ".blob" file per
    <scope> blob, and updates blob_index, toplevelname_index,
    toplevelprefix_index and res_index accordingly.
    """
    cix_path = res.path
    try:
        tree = tree_from_cix_path(cix_path)
    except ET.XMLParserError as ex:
        log.warn("could not load `%s' into catalog (skipping): %s",
                 cix_path, ex)
        return

    LEN_PREFIX = self.db.LEN_PREFIX
    res_id = self._new_res_id()
    res_data = {}  # {lang -> blobname -> ilk -> toplevelnames}
    name = tree.get("name") or splitext(basename(cix_path))[0]
    for blob in tree.findall("file/scope"):
        lang, blobname = blob.get("lang"), blob.get("name")
        if not lang:
            raise DatabaseError("add `%s': no 'lang' attr on %r"
                                % (res, blob))

        # Create 'res_data'.
        tfifb = res_data.setdefault(lang, {})
        toplevelnames_from_ilk = tfifb.setdefault(blobname, {})
        if lang in self.db.import_everything_langs:
            # Record every top-level symbol, grouped by its ilk.
            for toplevelname, elem in blob.names.items():
                ilk = elem.get("ilk") or elem.tag
                if ilk not in toplevelnames_from_ilk:
                    toplevelnames_from_ilk[ilk] = set([toplevelname])
                else:
                    toplevelnames_from_ilk[ilk].add(toplevelname)

        # Update 'toplevel*_index'.
        # toplevelname_index: {lang -> ilk -> toplevelname -> res_id -> blobnames}
        # toplevelprefix_index: {lang -> ilk -> prefix -> res_id -> toplevelnames}
        bfrftfi = self.toplevelname_index.setdefault(lang, {})
        tfrfpfi = self.toplevelprefix_index.setdefault(lang, {})
        for ilk, toplevelnames in toplevelnames_from_ilk.items():
            bfrft = bfrftfi.setdefault(ilk, {})
            tfrfp = tfrfpfi.setdefault(ilk, {})
            for toplevelname in toplevelnames:
                bfr = bfrft.setdefault(toplevelname, {})
                if res_id not in bfr:
                    bfr[res_id] = set([blobname])
                else:
                    bfr[res_id].add(blobname)
                prefix = toplevelname[:LEN_PREFIX]
                tfr = tfrfp.setdefault(prefix, {})
                if res_id not in tfr:
                    tfr[res_id] = set([toplevelname])
                else:
                    tfr[res_id].add(toplevelname)

        # Update 'blob_index'.
        dbfile_and_res_id_from_blobname \
            = self.blob_index.setdefault(lang, {})
        assert blobname not in dbfile_and_res_id_from_blobname, \
            ("codeintel: %s %r blob in `%s' collides "
             "with existing %s %r blob (from res_id %r) in catalog: "
             "(XXX haven't decided how to deal with that yet)"
             % (lang, blobname, cix_path, lang, blobname,
                dbfile_and_res_id_from_blobname[blobname][1]))
        dbfile = self.db.bhash_from_blob_info(cix_path, lang, blobname)
        dbfile_and_res_id_from_blobname[blobname] = (dbfile, res_id)

        # Write out '.blob' file.
        dbdir = join(self.base_dir, safe_lang_from_lang(lang))
        if not exists(dbdir):
            log.debug("fs-write: mkdir '%s'", dbdir)
            os.makedirs(dbdir)
        log.debug("fs-write: catalog %s blob '%s'", lang, dbfile)
        ET.ElementTree(blob).write(join(dbdir, dbfile+".blob"))

    # Update 'res_index'.
    last_updated = os.stat(cix_path).st_mtime
    self.res_index[res.area_path] \
        = (res_id, last_updated, name, res_data)
def res_id_from_lang_and_blobname(self, lang, blobname):
    """Return the res_id owning blob `blobname` for `lang`, or None."""
    blobs_for_lang = self.blob_index.get(lang)
    if blobs_for_lang is None or blobname not in blobs_for_lang:
        return None
    # blob_index values are (dbfile, res_id) pairs.
    return blobs_for_lang[blobname][1]
def get_blob(self, lang, blobname, look_in_cache_only=False):
    """Return the blob element for (lang, blobname), or None.

    Serves from the in-memory cache when possible (updating the
    blob's access time); otherwise loads it from disk unless
    `look_in_cache_only` is true.
    """
    try:
        dbfile, res_id = self.blob_index[lang][blobname]
    except KeyError:
        return None

    # If index path is in the cache: return it, update its atime.
    now = time.time()
    blob_and_atime_from_blobname \
        = self._blob_and_atime_from_blobname_from_lang_cache.setdefault(lang, {})
    if blobname in blob_and_atime_from_blobname:
        log.debug("cache-read: load %s blob `%s'", lang, blobname)
        blob, atime = blob_and_atime_from_blobname[blobname]
        blob_and_atime_from_blobname[blobname] = (blob, now)
        return blob

    # Need to load and cache it.
    if look_in_cache_only:
        return None
    dbsubpath = join(self.base_dir, safe_lang_from_lang(lang), dbfile)
    blob = self.db.load_blob(dbsubpath)
    blob_and_atime_from_blobname[blobname] = (blob, now)
    return blob
def lpaths_from_lang_and_blobname(self, lang, blobname):
    """Get lpaths for the named blob.

    We get it from the blob's "lpaths" cache key (calculating that
    if necessary).

    Raises NotFoundInDatabase if the blob is not in any catalog.
    """
    # Try a cheap cache-only lookup first, then a full load.
    blob = self.get_blob(lang, blobname, look_in_cache_only=True)
    if blob is not None:
        if "lpaths" in blob.cache:
            return blob.cache["lpaths"]
    else:
        blob = self.get_blob(lang, blobname)
        if blob is None:
            raise NotFoundInDatabase("%s '%s' blob not found in catalogs"
                                     % (lang, blobname))
        if "lpaths" in blob.cache:
            return blob.cache["lpaths"]

    # Need to calculate lpaths from 'blob'.
    log.debug("calc symbol info for %s '%s' catalog blob", lang, blobname)
    langintel = self.mgr.langintel_from_lang(lang)
    lpaths = langintel.lpaths_from_blob(blob)

    # Update cache and queue this up to be saved to disk (by .save()).
    blob.cache["lpaths"] = lpaths
    dbfile, res_id = self.blob_index[lang][blobname]
    self._lock.acquire()
    try:
        self._dbsubpaths_and_lpaths_to_save.append(
            (join(safe_lang_from_lang(lang), dbfile+".lpaths"), lpaths)
        )
    finally:
        self._lock.release()
    return lpaths
class CatalogLib(object):
    """A light lang-specific and selection-filtered view on the whole
    CatalogsZone.
    """
    name = "cataloglib"

    def __init__(self, catalogs_zone, lang,
                 selections=None, selection_res_ids=None):
        self.catalogs_zone = catalogs_zone
        self.lang = lang
        self.selections = selections
        # None means "all catalogs"; otherwise only blobs from these
        # resource ids are visible through this lib.
        if selection_res_ids is None:
            self.selection_res_id_set = None
        else:
            self.selection_res_id_set = set(selection_res_ids)
        self._import_handler = None
        self._blob_imports_from_prefix_cache = {}
# Cached repr string (computed on first __repr__() call).
_repr_cache = None

def __repr__(self):
    if self._repr_cache is None:
        # Include the base names of the selected resources in the name.
        if self.selection_res_id_set is None:
            selection_names = ['(all)']
        else:
            selection_names = []
            for s in self.selections:
                if isabs(s):
                    selection_names.append(splitext(basename(s))[0])
                else:
                    selection_names.append(s)
        self._repr_cache = "<%s cataloglib: %s>"\
                           % (self.lang, ', '.join(selection_names))
    return self._repr_cache
@property
def import_handler(self):
    # Lazily fetch the lang-specific import handler from the citadel.
    if self._import_handler is None:
        self._import_handler \
            = self.catalogs_zone.mgr.citadel.import_handler_from_lang(self.lang)
    return self._import_handler
def has_blob(self, blobname):
    """True iff `blobname` exists in this lib's selected catalogs."""
    zone = self.catalogs_zone
    res_id = zone.res_id_from_lang_and_blobname(self.lang, blobname)
    if res_id is None:
        return False
    selected = self.selection_res_id_set
    # No selection set means every catalog is visible.
    return True if selected is None else res_id in selected
def get_blob(self, blobname):
    """Return the blob for `blobname`, or None if not selected here."""
    # has_blob() already applies this lib's selection filter.
    if self.has_blob(blobname):
        return self.catalogs_zone.get_blob(self.lang, blobname)
    return None
def get_blob_imports(self, prefix):
    """Return the set of imports under the given prefix.

        "prefix" is a tuple of import name parts. E.g. ("xml", "sax")
            for "import xml.sax." in Python. Or ("XML", "Parser") for
            "use XML::Parser::" in Perl.

    See description in database.py docstring for details.
    """
    # This code works fine if prefix is the empty tuple.
    if prefix not in self._blob_imports_from_prefix_cache:
        try:
            dbfile_and_res_id_from_blobname \
                = self.catalogs_zone.blob_index[self.lang]
        except KeyError:
            return set()
        if self.selection_res_id_set is None:
            matches = filter_blobnames_for_prefix(
                dbfile_and_res_id_from_blobname,
                prefix,
                self.import_handler.sep)
        else:
            # Only consider blobs from the selected resources.
            matches = filter_blobnames_for_prefix(
                (bn
                 for bn, (f, res_id) in list(dbfile_and_res_id_from_blobname.items())
                 if res_id in self.selection_res_id_set),
                prefix,
                self.import_handler.sep)
        self._blob_imports_from_prefix_cache[prefix] = matches
    return self._blob_imports_from_prefix_cache[prefix]
def _blobnames_from_toplevelname(self, toplevelname, ilk=None):
    """Yield all blobnames in the currently selected catalogs
    with the given toplevelname.

    If "ilk" is given then only symbols of that ilk will be considered.
    """
    # toplevelname_index structure:
    #   {lang -> ilk -> toplevelname -> res_id -> blobnames}
    if self.lang in self.catalogs_zone.toplevelname_index:
        for i, potential_bfrft \
                in self.catalogs_zone.toplevelname_index[self.lang].items():
            if ilk is not None and i != ilk:
                continue  # filter on the requested symbol ilk
            if toplevelname not in potential_bfrft:
                continue
            potential_bfr = potential_bfrft[toplevelname]
            if self.selection_res_id_set is None:
                # No selection: blobnames from every resource qualify.
                for blobnames in potential_bfr.values():
                    for blobname in blobnames:
                        yield blobname
            else:
                # Only yield blobnames from the selected resources.
                for res_id, blobnames in potential_bfr.items():
                    if res_id not in self.selection_res_id_set:
                        continue
                    for blobname in blobnames:
                        yield blobname
def hits_from_lpath(self, lpath, ctlr=None, curr_buf=None):
    """Return hits for lookup-path `lpath` across the selected catalogs.

    Each hit is a 2-tuple: (<elem>, (<blob>, <parent scope lpath>)).
    `ctlr` and `curr_buf` are accepted for API compatibility with other
    lib implementations but are not used here.
    """
    assert isinstance(lpath, tuple) # common mistake to pass in a string
    hits = []
    for blobname in self._blobnames_from_toplevelname(lpath[0]):
        lpaths = self.catalogs_zone.lpaths_from_lang_and_blobname(
            self.lang, blobname)
        if lpath not in lpaths:
            continue  # this blob doesn't define the full path
        blob = self.catalogs_zone.get_blob(self.lang, blobname)
        # TODO: Convert lpath's in tree-evalrs to tuples instead of lists.
        elem = _elem_from_scoperef((blob, list(lpath)))
        hits.append((elem, (blob, list(lpath[:-1]))))
    return hits
def toplevel_cplns(self, prefix=None, ilk=None, ctlr=None):
    """Return completion info for all top-level names matching the
    given prefix and ilk in all selected blobs in this lib.

        "prefix" is a 3-character prefix with which to filter top-level
            names. If None (or not specified), results are not filtered
            based on the prefix.
        "ilk" is a symbol type (e.g. "class", "variable", "function")
            with which to filter results. If None (or not specified),
            results of any ilk are returned.
        "ctlr" (optional) is an EvalController instance. If
            specified it should be used in the normal way (logging,
            checking .is_aborted()).

    Returns a list of 2-tuples: (<ilk>, <name>).

    Note: the list is not sorted, because often some special sorting
    is required for the different completion evaluators that might use
    this API.
    """
    cplns = []
    if prefix is None:
        # Use 'toplevelname_index':
        #   {lang -> ilk -> toplevelname -> res_id -> blobnames}
        toplevelname_index = self.catalogs_zone.toplevelname_index
        if self.lang in toplevelname_index:
            if ilk is not None:
                # A single ilk was requested.
                try:
                    bfrft = toplevelname_index[self.lang][ilk]
                except KeyError:
                    pass  # no symbols of that ilk: return what we have
                else:
                    if self.selection_res_id_set is None:
                        cplns += [(ilk, t) for t in bfrft]
                    else:
                        # Keep names appearing in at least one selected resource.
                        cplns += [(ilk, t) for t, bfr in bfrft.items()
                                  if self.selection_res_id_set.intersection(bfr)]
            elif self.selection_res_id_set is None:
                # All ilks, no selection filter.
                for i, bfrft in toplevelname_index[self.lang].items():
                    cplns += [(i, t) for t in bfrft]
            else: # ilk=None, have a selection set
                for i, bfrft in toplevelname_index[self.lang].items():
                    cplns += [(i, t) for t, bfr in bfrft.items()
                              if self.selection_res_id_set.intersection(bfr)]
    else:
        # Use 'toplevelprefix_index':
        #   {lang -> ilk -> prefix -> res_id -> toplevelnames}
        toplevelprefix_index = self.catalogs_zone.toplevelprefix_index
        if self.lang in toplevelprefix_index:
            if ilk is not None:
                try:
                    tfr = toplevelprefix_index[self.lang][ilk][prefix]
                except KeyError:
                    pass  # no names of that ilk with this prefix
                else:
                    if self.selection_res_id_set is None:
                        cplns += [(ilk, t)
                                  for toplevelnames in tfr.values()
                                  for t in toplevelnames]
                    else:
                        cplns += [(ilk, t)
                                  for r in self.selection_res_id_set.intersection(tfr)
                                  for t in tfr[r]]
            elif self.selection_res_id_set is None:
                for i, tfrfp in toplevelprefix_index[self.lang].items():
                    if prefix not in tfrfp:
                        continue
                    cplns += [(i, t)
                              for toplevelnames in tfrfp[prefix].values()
                              for t in toplevelnames]
            else: # ilk=None, have a selection set
                for i, tfrfp in toplevelprefix_index[self.lang].items():
                    if prefix not in tfrfp:
                        continue
                    tfr = tfrfp[prefix]
                    cplns += [(i, t)
                              for r in self.selection_res_id_set.intersection(tfr)
                              for t in tfr[r]]
    return cplns
#---- internal support routines

def _elem_from_scoperef(scoperef):
    """Resolve a scoperef to the element it refers to.

    A scoperef is (<blob>, <lpath>).  Walk the lookup path down the
    blob's `names` mappings in the ciElementTree and return the element
    reached, or None when any path component is missing.
    """
    blob, lpath = scoperef
    elem = blob
    for lname in lpath:
        names = elem.names
        if lname not in names:
            return None
        elem = names[lname]
    return elem
| bsd-2-clause |
CredoReference/edx-platform | cms/djangoapps/contentstore/features/grading.py | 20 | 7162 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import step, world
from nose.tools import assert_equal, assert_in, assert_not_equal
from selenium.common.exceptions import InvalidElementStateException
from common import *
from contentstore.utils import reverse_course_url
from terrain.steps import reload_the_page
@step(u'I am viewing the grading settings')
def view_grading_settings(step):
    """Open the course's Grading settings page via the settings menu."""
    world.click_course_settings()
    link_css = 'li.nav-course-settings-grading a'
    world.css_click(link_css)


@step(u'I add "([^"]*)" new grade')
def add_grade(step, many):
    """Click the "new grade" button `many` times."""
    grade_css = '.new-grade-button'
    for __ in range(int(many)):
        world.css_click(grade_css)


@step(u'I delete a grade')
def delete_grade(step):
    """Remove the first grade bar via direct JS (the hover+click path is kept
    commented out below; presumably it was flaky -- confirm before reviving)."""
    #grade_css = 'li.grade-specific-bar > a.remove-button'
    #range_css = '.grade-specific-bar'
    #world.css_find(range_css)[1].mouseover()
    #world.css_click(grade_css)
    world.browser.execute_script('document.getElementsByClassName("remove-button")[0].click()')
@step(u'Grade list has "([^"]*)" grades$')
def check_grade_values(step, grade_list): # pylint: disable=unused-argument
    """Assert the concatenated letter grades on screen equal `grade_list`."""
    visible_list = ''.join(
        [grade.text for grade in world.css_find('.letter-grade')]
    )
    assert_equal(visible_list, grade_list, 'Grade lists should be equal')


@step(u'I see I now have "([^"]*)" grades$')
def view_grade_slider(step, how_many):
    """Assert the number of grade-range bar segments equals `how_many`."""
    grade_slider_css = '.grade-specific-bar'
    all_grades = world.css_find(grade_slider_css)
    assert_equal(len(all_grades), int(how_many))


@step(u'I move a grading section')
def move_grade_slider(step):
    """Drag the first grade-range resize handle 100px to the right."""
    moveable_css = '.ui-resizable-e'
    f = world.css_find(moveable_css).first
    f.action_chains.drag_and_drop_by_offset(f._element, 100, 0).perform()


@step(u'I see that the grade range has changed')
def confirm_change(step):
    """Assert that no grade range still shows the default "0-50" span."""
    range_css = '.range'
    all_ranges = world.css_find(range_css)
    for i in range(len(all_ranges)):
        assert_not_equal(world.css_html(range_css, index=i), '0-50')
@step(u'I change assignment type "([^"]*)" to "([^"]*)"$')
def change_assignment_name(step, old_name, new_name):
    """Rename assignment type `old_name` to `new_name` via keyboard edits."""
    name_id = '#course-grading-assignment-name'
    index = get_type_index(old_name)
    f = world.css_find(name_id)[index]
    # NOTE(review): `index` is used on the line above before this assertion;
    # a missing name (-1) would silently edit the *last* field instead of
    # failing here -- consider asserting before indexing.
    assert_not_equal(index, -1)
    # Backspace out the old name one character at a time, then type the new one.
    for __ in xrange(len(old_name)):
        f._element.send_keys(Keys.END, Keys.BACK_SPACE)
    f._element.send_keys(new_name)


@step(u'I go back to the main course page')
def main_course_page(step):
    """Navigate to the course outline page and verify its header."""
    main_page_link = reverse_course_url('course_handler', world.scenario_dict['COURSE'].id)
    world.visit(main_page_link)
    assert_in('Course Outline', world.css_text('h1.page-header'))


@step(u'I do( not)? see the assignment name "([^"]*)"$')
def see_assignment_name(step, do_not, name):
    """Placeholder: assignment-name checks are disabled pending outline rework."""
    # TODO: rewrite this once grading has been added back to the course outline
    pass
    # assignment_menu_css = 'ul.menu > li > a'
    # # First assert that it is there, make take a bit to redraw
    # assert_true(
    #     world.css_find(assignment_menu_css),
    #     msg="Could not find assignment menu"
    # )
    #
    # assignment_menu = world.css_find(assignment_menu_css)
    # allnames = [item.html for item in assignment_menu]
    # if do_not:
    #     assert_not_in(name, allnames)
    # else:
    #     assert_in(name, allnames)
@step(u'I delete the assignment type "([^"]*)"$')
def delete_assignment_type(step, to_delete):
    """Click the remove link for the assignment type named `to_delete`."""
    delete_css = '.remove-grading-data'
    world.css_click(delete_css, index=get_type_index(to_delete))


@step(u'I add a new assignment type "([^"]*)"$')
def add_assignment_type(step, new_name):
    """Add an assignment-type row and type `new_name` into its name field."""
    add_button_css = '.add-grading-data'
    world.css_click(add_button_css)
    name_id = '#course-grading-assignment-name'
    new_assignment = world.css_find(name_id)[-1]
    new_assignment._element.send_keys(new_name)


@step(u'I set the assignment weight to "([^"]*)"$')
def set_weight(step, weight):
    """Replace the last assignment's weight value with `weight`."""
    weight_id = '#course-grading-assignment-gradeweight'
    weight_field = world.css_find(weight_id)[-1]
    old_weight = world.css_value(weight_id, -1)
    # Backspace out the existing value before typing the new one.
    for __ in range(len(old_weight)):
        weight_field._element.send_keys(Keys.END, Keys.BACK_SPACE)
    weight_field._element.send_keys(weight)


@step(u'the assignment weight is displayed as "([^"]*)"$')
def verify_weight(step, weight):
    """Assert the last assignment's weight field shows `weight`."""
    weight_id = '#course-grading-assignment-gradeweight'
    assert_equal(world.css_value(weight_id, -1), weight)


@step(u'I do not see the changes persisted on refresh$')
def changes_not_persisted(step):
    """Reload and assert the first assignment name reverted to "Homework"."""
    reload_the_page(step)
    name_id = '#course-grading-assignment-name'
    assert_equal(world.css_value(name_id), 'Homework')
@step(u'I see the assignment type "(.*)"$')
def i_see_the_assignment_type(_step, name):
    """Assert `name` appears among the assignment-type name inputs."""
    assignment_css = '#course-grading-assignment-name'
    assignments = world.css_find(assignment_css)
    types = [ele['value'] for ele in assignments]
    assert_in(name, types)


@step(u'I change the highest grade range to "(.*)"$')
def change_grade_range(_step, range_name):
    """Type `range_name` into the first (highest) letter-grade field."""
    range_css = 'span.letter-grade'
    grade = world.css_find(range_css).first
    grade.value = range_name


@step(u'I see the highest grade range is "(.*)"$')
def i_see_highest_grade_range(_step, range_name):
    """Assert the first (highest) letter-grade field shows `range_name`."""
    range_css = 'span.letter-grade'
    grade = world.css_find(range_css).first
    assert_equal(grade.value, range_name)


@step(u'I cannot edit the "Fail" grade range$')
def cannot_edit_fail(_step):
    """Verify the bottom ("Fail") grade range rejects edits."""
    range_css = 'span.letter-grade'
    ranges = world.css_find(range_css)
    assert_equal(len(ranges), 2)
    assert_not_equal(ranges.last.value, 'Failure')
    # try to change the grade range -- this should throw an exception
    try:
        ranges.last.value = 'Failure'
    except InvalidElementStateException:
        pass # We should get this exception on failing to edit the element
    # check to be sure that nothing has changed
    ranges = world.css_find(range_css)
    assert_equal(len(ranges), 2)
    assert_not_equal(ranges.last.value, 'Failure')
@step(u'I change the grace period to "(.*)"$')
def i_change_grace_period(_step, grace_period):
    """Set the grace-period field to `grace_period` once it is populated."""
    grace_period_css = '#course-grading-graceperiod'
    ele = world.css_find(grace_period_css).first
    # Sometimes it takes a moment for the JavaScript
    # to populate the field. If we don't wait for
    # this to happen, then we can end up with
    # an invalid value (e.g. "00:0048:00")
    # which prevents us from saving.
    assert_true(world.css_has_value(grace_period_css, "00:00"))
    # Set the new grace period
    ele.value = grace_period


@step(u'I see the grace period is "(.*)"$')
def the_grace_period_is(_step, grace_period):
    """Wait until the grace-period field shows `grace_period`."""
    grace_period_css = '#course-grading-graceperiod'
    # The default value is 00:00
    # so we need to wait for it to change
    world.wait_for(
        lambda _: world.css_has_value(grace_period_css, grace_period)
    )


def get_type_index(name):
    """Return the index of the assignment-type input whose value is `name`, or -1."""
    name_id = '#course-grading-assignment-name'
    all_types = world.css_find(name_id)
    for index in range(len(all_types)):
        if world.css_value(name_id, index=index) == name:
            return index
    return -1
| agpl-3.0 |
zhaofengli/shadowsocks | shadowsocks/daemon.py | 694 | 5602 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import logging
import signal
import time
from shadowsocks import common, shell
# this module is ported from ShadowVPN daemon.c
def daemon_exec(config):
    """Handle the optional 'daemon' command in `config` (start/stop/restart).

    No-op when 'daemon' is absent.  Raises on non-POSIX platforms or an
    unknown command.  'stop' always exits the process afterwards; a
    failed start also exits (inside daemon_start).
    """
    if 'daemon' in config:
        if os.name != 'posix':
            raise Exception('daemon mode is only supported on Unix')
        command = config['daemon']
        if not command:
            # Empty value defaults to starting the daemon.
            command = 'start'
        pid_file = config['pid-file']
        log_file = config['log-file']
        if command == 'start':
            daemon_start(pid_file, log_file)
        elif command == 'stop':
            daemon_stop(pid_file)
            # always exit after daemon_stop
            sys.exit(0)
        elif command == 'restart':
            daemon_stop(pid_file)
            daemon_start(pid_file, log_file)
        else:
            raise Exception('unsupported daemon command %s' % command)
def write_pid_file(pid_file, pid):
    """Record `pid` in `pid_file` under an exclusive, non-blocking lock.

    Returns 0 on success, -1 when the file can't be opened or another
    running instance already holds the lock.  The fd is deliberately
    kept open for the life of the process so the lock persists.
    """
    import fcntl
    import stat

    try:
        fd = os.open(pid_file, os.O_RDWR | os.O_CREAT,
                     stat.S_IRUSR | stat.S_IWUSR)
    except OSError as e:
        shell.print_exception(e)
        return -1
    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
    assert flags != -1
    # Don't leak the pid-file fd into child processes.
    flags |= fcntl.FD_CLOEXEC
    r = fcntl.fcntl(fd, fcntl.F_SETFD, flags)
    assert r != -1
    # There is no platform independent way to implement fcntl(fd, F_SETLK, &fl)
    # via fcntl.fcntl. So use lockf instead
    try:
        fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET)
    except IOError:
        # Lock is held by a running daemon; report its pid if readable.
        r = os.read(fd, 32)
        if r:
            logging.error('already started at pid %s' % common.to_str(r))
        else:
            logging.error('already started')
        os.close(fd)
        return -1
    os.ftruncate(fd, 0)
    os.write(fd, common.to_bytes(str(pid)))
    return 0
def freopen(f, mode, stream):
    """Redirect `stream` (e.g. sys.stdout) to file `f`, like C's freopen().

    Opens `f`, closes the stream's OS-level fd, and duplicates the new
    file's fd onto it, so all existing references to the stream now
    write to `f`.
    """
    oldf = open(f, mode)
    oldfd = oldf.fileno()
    newfd = stream.fileno()
    os.close(newfd)
    os.dup2(oldfd, newfd)
def daemon_start(pid_file, log_file):
    """Fork into the background, write the pid file, redirect output to `log_file`.

    The parent installs signal handlers and blocks (up to 5s) until the
    child signals success (SIGTERM -> exit 0) or failure (SIGINT -> exit 1).
    """
    def handle_exit(signum, _):
        # SIGTERM from the child means "started OK"; anything else is failure.
        if signum == signal.SIGTERM:
            sys.exit(0)
        sys.exit(1)

    signal.signal(signal.SIGINT, handle_exit)
    signal.signal(signal.SIGTERM, handle_exit)

    # fork only once because we are sure parent will exit
    pid = os.fork()
    assert pid != -1

    if pid > 0:
        # parent waits for its child
        time.sleep(5)
        sys.exit(0)

    # child signals its parent to exit
    ppid = os.getppid()
    pid = os.getpid()
    if write_pid_file(pid_file, pid) != 0:
        os.kill(ppid, signal.SIGINT)
        sys.exit(1)

    os.setsid()
    signal.signal(signal.SIGHUP, signal.SIG_IGN)

    print('started')
    os.kill(ppid, signal.SIGTERM)

    sys.stdin.close()
    try:
        freopen(log_file, 'a', sys.stdout)
        freopen(log_file, 'a', sys.stderr)
    except IOError as e:
        shell.print_exception(e)
        sys.exit(1)
def daemon_stop(pid_file):
    """Stop the daemon recorded in `pid_file` and remove the file.

    Reads the pid, sends SIGTERM, then polls for up to 10 seconds until
    the process is gone.  Returns normally when the daemon is not
    running; exits the process with status 1 on failure.
    """
    import errno
    try:
        with open(pid_file) as f:
            buf = f.read()
            pid = common.to_str(buf)
            if not buf:
                logging.error('not running')
                # Bug fix: the pid file exists but is empty.  Previously
                # execution fell through and crashed on int('') below; an
                # empty file means the daemon is not running, so return
                # (mirroring the ENOENT branch).
                return
    except IOError as e:
        shell.print_exception(e)
        if e.errno == errno.ENOENT:
            # always exit 0 if we are sure daemon is not running
            logging.error('not running')
            return
        sys.exit(1)
    pid = int(pid)
    if pid > 0:
        try:
            os.kill(pid, signal.SIGTERM)
        except OSError as e:
            if e.errno == errno.ESRCH:
                logging.error('not running')
                # always exit 0 if we are sure daemon is not running
                return
            shell.print_exception(e)
            sys.exit(1)
    else:
        logging.error('pid is not positive: %d', pid)

    # sleep for maximum 10s
    for i in range(0, 200):
        try:
            # query for the pid
            os.kill(pid, 0)
        except OSError as e:
            if e.errno == errno.ESRCH:
                break
        time.sleep(0.05)
    else:
        logging.error('timed out when stopping pid %d', pid)
        sys.exit(1)
    print('stopped')
    os.unlink(pid_file)
def set_user(username):
    """Drop privileges to `username` (uid, gid and supplementary groups).

    No-op when `username` is None or already the current user.  Raises
    KeyError for an unknown user; a non-root caller logs an error and
    then fails on the setgid/setuid calls below.
    """
    if username is None:
        return
    import pwd
    import grp

    try:
        pwrec = pwd.getpwnam(username)
    except KeyError:
        logging.error('user not found: %s' % username)
        raise
    user = pwrec[0]
    uid = pwrec[2]
    gid = pwrec[3]

    cur_uid = os.getuid()
    if uid == cur_uid:
        return
    if cur_uid != 0:
        logging.error('can not set user as nonroot user')
        # will raise later

    # inspired by supervisor
    if hasattr(os, 'setgroups'):
        # Join every group that lists this user as a member, plus the
        # user's primary group first.
        groups = [grprec[2] for grprec in grp.getgrall() if user in grprec[3]]
        groups.insert(0, gid)
        os.setgroups(groups)
    os.setgid(gid)
    os.setuid(uid)
| apache-2.0 |
centaurialpha/ninja-ide | ninja_ide/intellisensei/analyzer/analyzer_daemon.py | 1 | 13881 | # -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
import os
import time
from threading import Thread, Lock
from multiprocessing import Process, Queue
from ninja_ide.intellisensei.analyzer import model
from ninja_ide.intellisensei.analyzer import analyzer
try:
unicode
except NameError:
# Python 3
basestring = unicode = str # lint:ok
# Module-level singleton; created lazily by CompletionDaemon().
__completion_daemon_instance = None
# Seconds the worker thread sleeps before it starts consuming results.
WAITING_BEFORE_START = 5
# Maps project path -> bool flag (False until/unless marked processed).
PROJECTS = {}


def CompletionDaemon():
    """Return the singleton __CompletionDaemon, creating and starting it on first use."""
    global __completion_daemon_instance
    if __completion_daemon_instance is None:
        __completion_daemon_instance = __CompletionDaemon()
        __completion_daemon_instance.start()
    return __completion_daemon_instance
class __CompletionDaemon(Thread):
    """Coordinator thread between the IDE and the analysis worker process.

    Jobs of the form (path_id, module, recursive, resolution) are sent
    to a _DaemonProcess over `queue_send`; results come back on
    `queue_receive` and are cached in `self.modules` (guarded by
    `self.lock`).
    """

    def __init__(self):
        Thread.__init__(self)
        self.analyzer = analyzer.Analyzer()
        # {path_id -> analyzed module model}
        self.modules = {}
        # {package name -> folder} for packages found in open projects.
        self.projects_modules = {}
        # {path_id -> [filenames this module was resolved against]}
        self._relations = {}
        self.reference_counter = 0
        self.keep_alive = True
        # Guards self.modules, touched from this thread and from callers.
        self.lock = Lock()
        self.queue_receive = Queue()
        self.queue_send = Queue()
        self.daemon = _DaemonProcess(self.queue_send, self.queue_receive)
        self.daemon.start()

    def run(self):
        """Result loop: cache modules and, when asked, resolve cross-module refs."""
        global WAITING_BEFORE_START
        # Give the IDE a moment to finish starting before doing work.
        time.sleep(WAITING_BEFORE_START)
        while self.keep_alive:
            path_id, module, resolve = self.queue_receive.get()
            if path_id is None:
                # Sentinel used to unblock this loop during shutdown.
                continue
            self.lock.acquire()
            self.modules[path_id] = module
            self.lock.release()
            if resolve:
                resolution = self._resolve_with_other_modules(resolve)
                self._relations[path_id] = []
                for package in resolution:
                    self._relations[path_id].append(resolution[package])
                # Hand the module back for a second pass with the
                # resolution info (recursive=False to avoid looping).
                self.queue_send.put((path_id, module, False, resolution))

    def _resolve_with_other_modules(self, packages):
        """Map each resolvable package name to the project file that defines it."""
        resolution = {}
        for package in packages:
            if package.find('(') != -1:
                # Strip a trailing call, e.g. "pkg.Class()" -> "pkg.Class".
                package = package[:package.index('(')]
            if self.projects_modules.get(package, False):
                # Exact package match: its __init__.py defines it.
                folder = self.projects_modules[package]
                filename = os.path.join(folder, '__init__.py')
                if self._analyze_file(filename):
                    resolution[package] = filename
            elif self.projects_modules.get(package.rsplit('.', 1)[0], False):
                # "pkg.module": look for module.py inside the package folder.
                name = package.rsplit('.', 1)
                folder = self.projects_modules[name[0]]
                filename = "%s.py" % os.path.join(folder, name[1])
                if os.path.isfile(filename):
                    if self._analyze_file(filename):
                        resolution[package] = filename
            elif self.projects_modules.get(package.rsplit('.', 2)[0], False):
                # "pkg.module.symbol": resolve the containing module.
                name = package.rsplit('.', 2)
                folder = self.projects_modules[name[0]]
                filename = "%s.py" % os.path.join(folder, name[1])
                if os.path.isfile(filename):
                    if self._analyze_file(filename):
                        resolution[package.rsplit('.', 1)[0]] = filename
        return resolution

    def _analyze_file(self, filename):
        """Analyze `filename` (if not already cached); True on success."""
        try:
            if filename not in self.modules:
                source = ''
                with open(filename) as f:
                    source = f.read()
                module = self.analyzer.analyze(source)
                self.inspect_module(filename, module, False)
            return True
        except Exception as reason:
            print(reason)
        return False

    def unload_module(self, path_id):
        """Drop `path_id` and any related modules no other module still uses."""
        relations = self._relations.pop(path_id, None)
        if relations is not None:
            relations.append(path_id)
            for module in relations:
                valid = False
                for rel in self._relations:
                    other_modules = self._relations[rel]
                    if module in other_modules:
                        valid = True
                if not valid:
                    self.modules.pop(module, None)

    def process_path(self):
        """Scan unprocessed PROJECTS and index their packages by dotted name."""
        for project in PROJECTS:
            if PROJECTS[project]:
                continue  # already processed
            project = os.path.abspath(project)
            package = os.path.basename(project)
            self.projects_modules[package] = project
            for root, dirs, files in os.walk(project, followlinks=True):
                if '__init__.py' in files:
                    # Folder is a package: key it by its dotted path.
                    package = root[len(project) + 1:].replace(
                        os.path.sep, '.')
                    self.projects_modules[package] = root

    def inspect_module(self, path_id, module, recursive=True):
        """Cache `module` and queue it for resolution in the worker process."""
        self.lock.acquire()
        self.modules[path_id] = module
        self.lock.release()
        self.queue_send.put((path_id, module, recursive, None))

    def get_module(self, path_id):
        """Return the cached module model for `path_id`, or None."""
        return self.modules.get(path_id, None)

    def _shutdown_process(self):
        # Sentinels unblock both queue consumers; terminate the worker.
        self.queue_send.put((None, None, None, None))
        self.daemon.terminate()
        self.queue_receive.put((None, None, None))

    def force_stop(self):
        """Stop the worker process and this thread, resetting PROJECT flags."""
        self.keep_alive = False
        self._shutdown_process()
        for project in PROJECTS:
            PROJECTS[project] = False
        if self.is_alive():
            self.join()
class _DaemonProcess(Process):
    """Worker process that resolves type information for analyzed modules.

    Receives (path_id, module, recursive, resolution) jobs on
    `queue_receive`, runs up to three resolution passes over the module
    model, and sends (path_id, module, packages) back on `queue_send`.
    """

    def __init__(self, queue_receive, queue_send):
        super(_DaemonProcess, self).__init__()
        self.queue_receive = queue_receive
        self.queue_send = queue_send
        # Resolution pass: 0 = imports/local class names, 1 = local vars,
        # 2 = linked modules (uses resolution info from the coordinator).
        self.iteration = 0
        # Packages discovered while resolving the current module.
        self.packages = []

    def run(self):
        """Job loop; exits on the (None, None, ...) sentinel."""
        while True:
            self.iteration = 0
            path_id, module, recursive, resolution = self.queue_receive.get()
            if path_id is None and module is None:
                break
            try:
                if resolution is not None:
                    # Second round-trip: resolve against linked modules.
                    self.packages = resolution
                    self.iteration = 2
                    self._resolve_module(module)
                elif module.need_resolution():
                    # First round-trip: two passes over the module.
                    self._resolve_module(module)
                    self.iteration = 1
                    self._resolve_module(module)
                else:
                    continue
                if self.packages and recursive:
                    self.queue_send.put((path_id, module, self.packages))
                else:
                    self.queue_send.put((path_id, module, []))
            except Exception as reason:
                # Try to not die whatever happend
                # NOTE(review): the line below builds a *tuple*, not a
                # formatted string (probably meant '... %r' % reason), and
                # the `raise` contradicts the comment above -- confirm intent.
                message = 'Daemon Fail with: %r', reason
                print(message)
                raise
            finally:
                self.packages = []

    def _resolve_module(self, module):
        """Run the current resolution pass over a module and its classes."""
        self._resolve_attributes(module, module)
        self._resolve_functions(module, module)
        for cla in module.classes:
            clazz = module.classes[cla]
            self._resolve_inheritance(clazz, module)
            self._resolve_attributes(clazz, module)
            self._resolve_functions(clazz, module)

    def _resolve_inheritance(self, clazz, module):
        """Resolve each base class of `clazz` to a type or completion data."""
        for base in clazz.bases:
            name = base.split('.', 1)
            main_attr = name[0]
            child_attrs = ''
            if len(name) == 2:
                child_attrs = name[1]
            result = module.get_type(main_attr, child_attrs)
            data = model.late_resolution
            if result.get('found', True):
                # Base comes from an import: build a completion lookup name.
                data_type = module.imports[main_attr].get_data_type()
                if child_attrs:
                    child_attrs = '.%s' % child_attrs
                name = '%s%s().' % (data_type, child_attrs)
                imports = module.get_imports()
                imports = [imp.split('.')[0] for imp in imports]
                # NOTE(review): `completer` is not imported in this module's
                # visible import block; this branch would raise NameError if
                # reached -- confirm the intended import.
                data = completer.get_all_completions(name, imports)
                data = (name, data)
            elif result.get('object', False).__class__ is model.Clazz:
                data = result['object']
            clazz.bases[base] = data
        clazz.update_with_parent_data()

    def _resolve_functions(self, structure, module):
        """Recursively resolve attributes and return types of functions."""
        if structure.__class__ is model.Assign:
            return
        for func in structure.functions:
            function = structure.functions[func]
            self._resolve_attributes(function, module)
            self._resolve_functions(function, module)
            self._resolve_returns(function, module)

    def _resolve_returns(self, structure, module):
        """Resolve a function's return types (split on 'return')."""
        if structure.__class__ is model.Assign:
            return
        self._resolve_types(structure.return_type, module, structure, 'return')

    def _resolve_attributes(self, structure, module):
        """Resolve the types of a structure's attribute assignments."""
        if structure.__class__ is model.Assign:
            return
        for attr in structure.attributes:
            assign = structure.attributes[attr]
            self._resolve_types(assign.data, module, assign)

    def _resolve_types(self, types, module, structure=None, split_by='='):
        """Dispatch to the resolver matching the current pass (see __init__)."""
        if self.iteration == 0:
            self._resolve_with_imports(types, module, split_by)
            self._resolve_with_local_names(types, module, split_by)
        elif self.iteration == 1:
            self._resolve_with_local_vars(types, module, split_by, structure)
        else:
            self._resolve_with_linked_modules(types, module, structure)

    def _resolve_with_linked_modules(self, types, module, structure):
        """Replace package-prefixed type names with LinkedModule references."""
        for data in types:
            name = data.data_type
            if not isinstance(name, basestring):
                continue
            for package in self.packages:
                if name.startswith(package):
                    to_resolve = name[len(package):]
                    if to_resolve and to_resolve[0] == '.':
                        to_resolve = to_resolve[1:]
                    path = self.packages[package]
                    linked = model.LinkedModule(path, to_resolve)
                    data.data_type = linked
                    break

    def _resolve_with_imports(self, types, module, splitby):
        """Resolve late types whose assigned value names an imported symbol."""
        for data in types:
            if data.data_type != model.late_resolution:
                continue
            line = data.line_content
            value = line.split(splitby)[1].strip().split('.')
            name = value[0]
            extra = ''
            if name.find('(') != -1:
                # Keep the call suffix, e.g. "(arg)"; match on the bare name.
                extra = name[name.index('('):]
                name = name[:name.index('(')]
            if name in module.imports:
                value[0] = module.imports[name].data_type
                package = '.'.join(value)
                resolve = "%s%s" % (package, extra)
                data.data_type = resolve
                self.packages.append(package)

    def _resolve_with_local_names(self, types, module, splitby):
        #TODO: resolve with functions returns
        """Resolve late types whose assigned value names a class in this module."""
        for data in types:
            if data.data_type != model.late_resolution:
                continue
            line = data.line_content
            value = line.split(splitby)[1].split('(')[0].strip()
            if value in module.classes:
                clazz = module.classes[value]
                data.data_type = clazz

    def _resolve_with_local_vars(self, types, module, splitby, structure=None):
        """Resolve late types by querying the module with the enclosing scope."""
        for data in types:
            if data.data_type != model.late_resolution:
                continue
            line = data.line_content
            value = line.split(splitby)[1].split('(')[0].strip()
            sym = value.split('.')
            if len(sym) != 0:
                main_attr = sym[0]
                if len(sym) > 2:
                    child_attr = '.'.join(sym[1:])
                elif len(sym) == 2:
                    child_attr = sym[1]
                else:
                    child_attr = ''
                # Build the lookup scope from the enclosing structures.
                scope = []
                self._get_scope(structure, scope)
                if structure.__class__ is model.Assign:
                    scope.pop(0)
                scope.reverse()
                result = module.get_type(main_attr, child_attr, scope)
                data_type = model.late_resolution
                if isinstance(result['type'], basestring) and len(result) < 3:
                    if child_attr and \
                            structure.__class__ is not model.Function:
                        data_type = "%s.%s" % (result['type'], child_attr)
                    else:
                        data_type = result['type']
                elif result.get('object', False):
                    data_type = result['object']
                if data is not None:
                    data.data_type = data_type

    def _get_scope(self, structure, scope):
        """Collect enclosing structure names into `scope` (innermost first)."""
        if structure.__class__ not in (None, model.Module):
            scope.append(structure.name)
            self._get_scope(structure.parent, scope)
def shutdown_daemon():
    """Stop the singleton daemon (thread + worker process) and clear the singleton."""
    daemon = CompletionDaemon()
    daemon.force_stop()
    global __completion_daemon_instance
    __completion_daemon_instance = None


def add_project_folder(project_path):
    """Register a project folder (unprocessed) and rescan projects for packages."""
    global PROJECTS
    if project_path not in PROJECTS:
        PROJECTS[project_path] = False
    daemon = CompletionDaemon()
    daemon.process_path()
| gpl-3.0 |
SSG-DRD-IOT/commercial-iot-security-system | opencv/tutorials/featureDetection/harris_corner/subpixel.py | 1 | 1331 | """
Corner with Subpixel Accuracy
may need to find corners with max accuracy
function: cv2.cornerSubPix()
refines corners detected with sub-pixel accuracy
find Harris corners first
then, pass centroids of corners to refine them
"""
# ex
# harris corners marked with red pixels
# refined corners marked in green pixels
# have to define criteria when to stop iteration
# stop after specified # of iteration or a certain accuracy achieved, whichever occurs first
# define size of nbhd it should search for corners
import cv2
import numpy as np
filename = 'chessboard2.jpg'
img = cv2.imread(filename)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# find Harris corners
gray = np.float32(gray)
dst = cv2.cornerHarris(gray, 2, 3, 0.04)
dst = cv2.dilate(dst, None)
ret, dst = cv2.threshold(dst, 0.01*dst.max(), 255, 0)
dst = np.uint8(dst)
# find centroids
ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)
# define the criteria to stop and refine the corners
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
corners = cv2.cornerSubPix(gray, np.float32(centroids), (5,5), (-1, -1), criteria)
# now draw them
res = np.hstack((centroids, corners))
res = np.int0(res)
img[res[:,1], res[:, 0]] = [0, 0, 255]
img[res[:,3], res[:, 2]] = [0, 255, 0]
cv2.imwrite('subpixel5.png', img)
| mit |
mitchelljkotler/django | django/contrib/gis/gdal/driver.py | 526 | 3260 | from ctypes import c_void_p
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.gdal.prototypes import ds as vcapi, raster as rcapi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class Driver(GDALBase):
    """
    Wraps a GDAL/OGR Data Source Driver.
    For more information, see the C API source code:
    http://www.gdal.org/gdal_8h.html - http://www.gdal.org/ogr__api_8h.html
    """

    # Case-insensitive aliases for some GDAL/OGR Drivers.
    # For a complete list of original driver names see
    # http://www.gdal.org/ogr_formats.html (vector)
    # http://www.gdal.org/formats_list.html (raster)
    _alias = {
        # vector
        'esri': 'ESRI Shapefile',
        'shp': 'ESRI Shapefile',
        'shape': 'ESRI Shapefile',
        'tiger': 'TIGER',
        'tiger/line': 'TIGER',
        # raster
        'tiff': 'GTiff',
        'tif': 'GTiff',
        'jpeg': 'JPEG',
        'jpg': 'JPEG',
    }

    def __init__(self, dr_input):
        """
        Initializes an GDAL/OGR driver on either a string or integer input.

        Accepts a driver name (optionally an alias from _alias), a driver
        index, or an existing driver pointer (c_void_p).  Raises
        GDALException for unrecognized input or an unknown driver.
        """
        if isinstance(dr_input, six.string_types):
            # If a string name of the driver was passed in
            self.ensure_registered()

            # Checking the alias dictionary (case-insensitive) to see if an
            # alias exists for the given driver.
            if dr_input.lower() in self._alias:
                name = self._alias[dr_input.lower()]
            else:
                name = dr_input

            # Attempting to get the GDAL/OGR driver by the string name.
            # Try the vector API first, then the raster API.
            for iface in (vcapi, rcapi):
                driver = iface.get_driver_by_name(force_bytes(name))
                if driver:
                    break
        elif isinstance(dr_input, int):
            self.ensure_registered()
            for iface in (vcapi, rcapi):
                driver = iface.get_driver(dr_input)
                if driver:
                    break
        elif isinstance(dr_input, c_void_p):
            driver = dr_input
        else:
            raise GDALException('Unrecognized input type for GDAL/OGR Driver: %s' % str(type(dr_input)))

        # Making sure we get a valid pointer to the OGR Driver
        if not driver:
            raise GDALException('Could not initialize GDAL/OGR Driver on input: %s' % str(dr_input))
        self.ptr = driver

    def __str__(self):
        return self.name

    @classmethod
    def ensure_registered(cls):
        """
        Attempts to register all the data source drivers.
        """
        # Only register all if the driver count is 0 (or else all drivers
        # will be registered over and over again)
        if not cls.driver_count():
            vcapi.register_all()
            rcapi.register_all()

    @classmethod
    def driver_count(cls):
        """
        Returns the number of GDAL/OGR data source drivers registered.
        """
        return vcapi.get_driver_count() + rcapi.get_driver_count()

    @property
    def name(self):
        """
        Returns description/name string for this driver.
        """
        return force_text(rcapi.get_driver_description(self.ptr))
| bsd-3-clause |
MinimalOS/external_chromium_org_third_party_skia | experimental/benchtools/rebase.py | 18 | 11503 | #!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""rebase.py: standalone script to batch update bench expectations.
Requires gsutil to access gs://chromium-skia-gm and Rietveld credentials.
Usage:
Copy script to a separate dir outside Skia repo. The script will create a
skia dir on the first run to host the repo, and will create/delete
temp dirs as needed.
./rebase.py --githash <githash prefix to use for getting bench data>
"""
import argparse
import filecmp
import os
import re
import shutil
import subprocess
import time
import urllib2
# googlesource url that has most recent Skia git hash info.
SKIA_GIT_HEAD_URL = 'https://skia.googlesource.com/skia/+log/HEAD'
# Google Storage bench file prefix.
GS_PREFIX = 'gs://chromium-skia-gm/perfdata'
# Regular expression for matching githash data.
HA_RE = '<a href="/skia/\+/([0-9a-f]+)">'
HA_RE_COMPILED = re.compile(HA_RE)
def get_git_hashes():
    """Return recent Skia git hashes scraped from the googlesource HEAD log page."""
    print 'Getting recent git hashes...'
    hashes = HA_RE_COMPILED.findall(
        urllib2.urlopen(SKIA_GIT_HEAD_URL).read())
    return hashes
def filter_file(f):
    """Return True for bench files we skip: msaa or record config variants.

    Bug fix: the original used `f.find(...) > 0`, which treats a match at
    index 0 as "not found"; substring membership is the intended check
    and is also clearer.
    """
    return '_msaa' in f or '_record' in f
def clean_dir(d):
    """Reset `d` to an empty directory, discarding any previous contents."""
    if os.path.exists(d):
        # Wipe whatever was there before recreating it.
        shutil.rmtree(d)
    os.makedirs(d)
def get_gs_filelist(p, h):
    """List GS bench files for platform `p` and githash `h`, minus filtered ones.

    Shells out to `gsutil ls`; returns [] when gsutil errors or finds
    nothing.
    """
    print 'Looking up for the closest bench files in Google Storage...'
    proc = subprocess.Popen(['gsutil', 'ls',
        '/'.join([GS_PREFIX, p, 'bench_' + h + '_data_skp_*'])],
        stdout=subprocess.PIPE)
    out, err = proc.communicate()
    if err or not out:
        return []
    # Drop msaa/record variants (see filter_file).
    return [i for i in out.strip().split('\n') if not filter_file(i)]
def download_gs_files(p, h, gs_dir):
print 'Downloading raw bench files from Google Storage...'
proc = subprocess.Popen(['gsutil', 'cp',
'/'.join([GS_PREFIX, p, 'bench_' + h + '_data_skp_*']),
'%s/%s' % (gs_dir, p)],
stdout=subprocess.PIPE)
out, err = proc.communicate()
if err:
clean_dir(gs_dir)
return False
files = 0
for f in os.listdir(os.path.join(gs_dir, p)):
if filter_file(f):
os.remove(os.path.join(gs_dir, p, f))
else:
files += 1
if files:
return True
return False
def get_expectations_dict(f):
  """Given an expectations file f, returns a dictionary of data.

  Each data line has the form
    <bench>,<config>,<expected>,<lower_bound>,<upper_bound>
  The returned dict maps '<bench>,<config>' to the float tuple
  (expected, lower_bound, upper_bound). Comment lines (starting with '#')
  and lines without exactly 5 fields are skipped.
  """
  # maps row_key to (expected, lower_bound, upper_bound) float tuple.
  dic = {}
  # 'with' closes the file handle deterministically (the previous
  # open(f).readlines() leaked it), and iterating the file object avoids
  # materializing all lines at once.
  with open(f) as fh:
    for l in fh:
      line_parts = l.strip().split(',')
      if line_parts[0].startswith('#') or len(line_parts) != 5:
        continue
      dic[','.join(line_parts[:2])] = (
          float(line_parts[2]), float(line_parts[3]), float(line_parts[4]))
  return dic
def calc_expectations(p, h, gs_dir, exp_dir, repo_dir, extra_dir, extra_hash):
  """Generates bench expectations for platform p from downloaded bench data.

  Runs skia/bench/gen_bench_expectations.py on the raw bench files in
  gs_dir/p (for githash h) and writes bench_expectations_<p>.txt into
  exp_dir. When extra_dir/extra_hash are provided, a second expectations
  file is generated from the extra data and each overlapping row is
  re-centered on the average of the two expected values, with bounds
  widened to cover both.

  Returns True when a changed expectations file was produced; False on a
  generation error or when the result is identical to the file already
  checked in under repo_dir.
  """
  exp_filename = 'bench_expectations_%s.txt' % p
  exp_fullname = os.path.join(exp_dir, exp_filename)
  proc = subprocess.Popen(['python', 'skia/bench/gen_bench_expectations.py',
      '-r', h, '-b', p, '-d', os.path.join(gs_dir, p), '-o', exp_fullname],
      stdout=subprocess.PIPE)
  out, err = proc.communicate()
  if err:
    print 'ERR_CALCULATING_EXPECTATIONS: ' + err
    return False
  print 'CALCULATED_EXPECTATIONS: ' + out
  if extra_dir:  # Adjust data with the ones in extra_dir
    print 'USE_EXTRA_DATA_FOR_ADJUSTMENT.'
    # Generate a second expectations file from the extra-hash bench data.
    proc = subprocess.Popen(['python', 'skia/bench/gen_bench_expectations.py',
        '-r', extra_hash, '-b', p, '-d', os.path.join(extra_dir, p), '-o',
        os.path.join(extra_dir, exp_filename)],
        stdout=subprocess.PIPE)
    out, err = proc.communicate()
    if err:
      print 'ERR_CALCULATING_EXTRA_EXPECTATIONS: ' + err
      return False
    extra_dic = get_expectations_dict(os.path.join(extra_dir, exp_filename))
    output_lines = []
    for l in open(exp_fullname).readlines():
      parts = l.strip().split(',')
      # Comment / malformed lines pass through unchanged.
      if parts[0].startswith('#') or len(parts) != 5:
        output_lines.append(l.strip())
        continue
      key = ','.join(parts[:2])
      if key in extra_dic:
        exp, lb, ub = (float(parts[2]), float(parts[3]), float(parts[4]))
        alt, _, _ = extra_dic[key]
        # Re-center the expectation on the average of the two actual values.
        avg = (exp + alt) / 2
        # Keeps the extra range in lower/upper bounds from two actual values.
        new_lb = min(exp, alt) - (exp - lb)
        new_ub = max(exp, alt) + (ub - exp)
        output_lines.append('%s,%.2f,%.2f,%.2f' % (key, avg, new_lb, new_ub))
      else:
        output_lines.append(l.strip())
    with open(exp_fullname, 'w') as f:
      f.write('\n'.join(output_lines))
  repo_file = os.path.join(repo_dir, 'expectations', 'bench', exp_filename)
  # Skip the update when nothing changed relative to the checked-in file.
  if (os.path.isfile(repo_file) and
      filecmp.cmp(repo_file, os.path.join(exp_dir, exp_filename))):
    print 'NO CHANGE ON %s' % repo_file
    return False
  return True
def checkout_or_update_skia(repo_dir):
status = True
old_cwd = os.getcwd()
os.chdir(repo_dir)
print 'CHECK SKIA REPO...'
if subprocess.call(['git', 'pull'],
stderr=subprocess.PIPE):
print 'Checking out Skia from git, please be patient...'
os.chdir(old_cwd)
clean_dir(repo_dir)
os.chdir(repo_dir)
if subprocess.call(['git', 'clone', '-q', '--depth=50', '--single-branch',
'https://skia.googlesource.com/skia.git', '.']):
status = False
subprocess.call(['git', 'checkout', 'master'])
subprocess.call(['git', 'pull'])
os.chdir(old_cwd)
return status
def git_commit_expectations(repo_dir, exp_dir, update_li, h, commit,
extra_hash):
if extra_hash:
extra_hash = ', adjusted with ' + extra_hash
commit_msg = """manual bench rebase after %s%s
TBR=robertphillips@google.com
Bypassing trybots:
NOTRY=true""" % (h, extra_hash)
old_cwd = os.getcwd()
os.chdir(repo_dir)
upload = ['git', 'cl', 'upload', '-f', '--bypass-hooks',
'--bypass-watchlists', '-m', commit_msg]
branch = exp_dir.split('/')[-1]
if commit:
upload.append('--use-commit-queue')
cmds = ([['git', 'checkout', 'master'],
['git', 'pull'],
['git', 'checkout', '-b', branch, '-t', 'origin/master']] +
[['cp', '%s/%s' % (exp_dir, f), 'expectations/bench'] for f in
update_li] +
[['git', 'add'] + ['expectations/bench/%s' % i for i in update_li],
['git', 'commit', '-m', commit_msg],
upload,
['git', 'checkout', 'master'],
['git', 'branch', '-D', branch],
])
status = True
for cmd in cmds:
print 'Running ' + ' '.join(cmd)
if subprocess.call(cmd):
print 'FAILED. Please check if skia git repo is present.'
subprocess.call(['git', 'checkout', 'master'])
status = False
break
os.chdir(old_cwd)
return status
def delete_dirs(li):
for d in li:
print 'Deleting directory %s' % d
shutil.rmtree(d)
def main():
  """Entry point: rebaseline bench expectations and upload a CL.

  Workflow:
    1. Refuse to run from inside a git repo (the script manages its own
       'skia' checkout in a sibling dir) and sync/update that checkout.
    2. Self-update this script from the repo copy if it differs.
    3. Determine the platforms (bots) to process from --bots, defaulting
       to every platform that has a checked-in expectations file.
    4. For each platform, find the closest bench data at/after the given
       --githash in Google Storage, download it into timestamped scratch
       dirs, and regenerate expectations (optionally averaged with data
       from a second githash).
    5. Upload the changed expectation files as a CL, then delete the
       scratch dirs.
  """
  d = os.path.dirname(os.path.abspath(__file__))
  os.chdir(d)
  # 'git rev-parse' exits 0 inside a git repo; the script must live outside
  # any repo since it creates and deletes its own checkout.
  if not subprocess.call(['git', 'rev-parse'], stderr=subprocess.PIPE):
    print 'Please copy script to a separate dir outside git repos to use.'
    return
  parser = argparse.ArgumentParser()
  parser.add_argument('--githash',
                      help=('Githash prefix (7+ chars) to rebaseline to. If '
                            'a second one is supplied after comma, and it has '
                            'corresponding bench data, will shift the range '
                            'center to the average of two expected values.'))
  parser.add_argument('--bots',
                      help=('Comma-separated list of bots to work on. If no '
                            'matching bots are found in the list, will default '
                            'to processing all bots.'))
  parser.add_argument('--commit', action='store_true',
                      help='Whether to commit changes automatically.')
  args = parser.parse_args()
  repo_dir = os.path.join(d, 'skia')
  if not os.path.exists(repo_dir):
    os.makedirs(repo_dir)
  if not checkout_or_update_skia(repo_dir):
    print 'ERROR setting up Skia repo at %s' % repo_dir
    return 1
  # Self-update: if the repo holds a newer copy of this script, install it
  # over __file__ and ask the user to rerun.
  file_in_repo = os.path.join(d, 'skia/experimental/benchtools/rebase.py')
  if not filecmp.cmp(__file__, file_in_repo):
    shutil.copy(file_in_repo, __file__)
    print 'Updated this script from repo; please run again.'
    return
  all_platforms = []  # Find existing list of platforms with expectations.
  for item in os.listdir(os.path.join(d, 'skia/expectations/bench')):
    all_platforms.append(
        item.replace('bench_expectations_', '').replace('.txt', ''))
  platforms = []
  # If at least one given bot is in all_platforms, use list of valid args.bots.
  if args.bots:
    bots = args.bots.strip().split(',')
    for bot in bots:
      if bot in all_platforms:  # Filters platforms with given bot list.
        platforms.append(bot)
  if not platforms:  # Include all existing platforms with expectations.
    platforms = all_platforms
  if not args.githash or len(args.githash) < 7:
    raise Exception('Please provide --githash with a longer prefix (7+).')
  githashes = args.githash.strip().split(',')
  if len(githashes[0]) < 7:
    raise Exception('Please provide --githash with longer prefixes (7+).')
  commit = False
  if args.commit:
    commit = True
  # Only 7-char prefixes are compared against the recent-hash list below.
  rebase_hash = githashes[0][:7]
  extra_hash = ''
  if len(githashes) == 2:
    extra_hash = githashes[1][:7]
  hashes = get_git_hashes()
  short_hashes = [h[:7] for h in hashes]
  if (rebase_hash not in short_hashes or
      (extra_hash and extra_hash not in short_hashes) or
      rebase_hash == extra_hash):
    raise Exception('Provided --githashes not found, or identical!')
  if extra_hash:
    # Expand the extra prefix back to the full hash for data lookups.
    extra_hash = hashes[short_hashes.index(extra_hash)]
  # Keep only the rebase target and newer hashes (list is newest-first).
  hashes = hashes[:short_hashes.index(rebase_hash) + 1]
  update_li = []
  # Timestamped scratch dirs so runs don't collide with stale leftovers.
  ts_str = '%s' % time.time()
  gs_dir = os.path.join(d, 'gs' + ts_str)
  exp_dir = os.path.join(d, 'exp' + ts_str)
  extra_dir = os.path.join(d, 'extra' + ts_str)
  clean_dir(gs_dir)
  clean_dir(exp_dir)
  clean_dir(extra_dir)
  for p in platforms:
    clean_dir(os.path.join(gs_dir, p))
    clean_dir(os.path.join(extra_dir, p))
    hash_to_use = ''
    # Walk from the rebase target toward newer hashes until bench data is
    # found; stop entirely if a download fails.
    for h in reversed(hashes):
      li = get_gs_filelist(p, h)
      if not len(li):  # no data
        continue
      if download_gs_files(p, h, gs_dir):
        print 'Copied %s/%s' % (p, h)
        hash_to_use = h
        break
      else:
        print 'DOWNLOAD BENCH FAILED %s/%s' % (p, h)
        break
    if hash_to_use:
      # Prefer expectations adjusted with the extra-hash data when available.
      if extra_hash and download_gs_files(p, extra_hash, extra_dir):
        print 'Copied extra data %s/%s' % (p, extra_hash)
        if calc_expectations(p, h, gs_dir, exp_dir, repo_dir, extra_dir,
                             extra_hash):
          update_li.append('bench_expectations_%s.txt' % p)
      elif calc_expectations(p, h, gs_dir, exp_dir, repo_dir, '', ''):
        update_li.append('bench_expectations_%s.txt' % p)
  if not update_li:
    print 'No bench data to update after %s!' % args.githash
  elif not git_commit_expectations(
      repo_dir, exp_dir, update_li, rebase_hash, commit, extra_hash):
    print 'ERROR uploading expectations using git.'
  elif not commit:
    print 'CL created. Please take a look at the link above.'
  else:
    print 'New bench baselines should be in CQ now.'
  # Always clean up the scratch dirs, whether or not anything was uploaded.
  delete_dirs([gs_dir, exp_dir, extra_dir])
if __name__ == "__main__":
  # Propagate main()'s return value (1 on repo-setup failure, None/0
  # otherwise) as the process exit status; previously the error code was
  # silently discarded and the script always exited 0.
  sys.exit(main())
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.