#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""Python support for CodeIntel"""
import os
from os.path import (isfile, isdir, exists, dirname, splitext,
join, basename, normcase)
import sys
import logging
import random
import parser
from glob import glob
import weakref
import re
import imp
from pprint import pprint, pformat
import itertools
import SilverCity
from SilverCity.Lexer import Lexer
from SilverCity import ScintillaConstants
from SilverCity.Keywords import python_keywords
from codeintel2.common import *
from codeintel2.citadel import (CitadelBuffer, CitadelEvaluator, ImportHandler,
CitadelLangIntel)
from codeintel2.indexer import PreloadLibRequest
from codeintel2 import pythoncile
from codeintel2.util import (banner, indent, markup_text, isident, isdigit,
makePerformantLogger)
from codeintel2 import tree
from codeintel2.tree_python import PythonTreeEvaluator, PythonImportLibGenerator
from codeintel2.langintel import (ParenStyleCalltipIntelMixin,
ProgLangTriggerIntelMixin,
PythonCITDLExtractorMixin)
from codeintel2.tree import tree_from_cix
if _xpcom_:
from xpcom.server import UnwrapObject
#---- globals
_SCAN_BINARY_FILES = False
lang = "Python"
log = logging.getLogger("codeintel.python")
# log.setLevel(logging.DEBUG)
makePerformantLogger(log)
CACHING = True # DEPRECATED: kill it
# See http://effbot.org/zone/pythondoc.htm
_g_pythondoc_tags = list(sorted("param keyparam return exception def "
"defreturn see link linkplain".split()))
_g_python_magic_method_names = sorted([
'__init__',
'__new__',
'__del__',
'__repr__',
'__str__',
'__lt__',
'__le__',
'__eq__',
'__ne__',
'__gt__',
'__ge__',
'__cmp__',
'__rcmp__',
'__hash__',
'__nonzero__',
'__unicode__',
# Attribute access
'__getattr__',
'__setattr__',
'__delattr__',
# New style classes
'__getattribute__',
'__call__',
# Sequence classes
'__len__',
'__getitem__',
'__setitem__',
'__delitem__',
'__iter__',
'__reversed__',
'__contains__',
'__getslice__',
'__setslice__',
'__delslice__',
# Integer like operators
'__add__',
'__sub__',
'__mul__',
'__floordiv__',
'__mod__',
'__divmod__',
'__pow__',
'__lshift__',
'__rshift__',
'__and__',
'__xor__',
'__or__',
'__div__',
'__truediv__',
'__radd__',
'__rsub__',
'__rmul__',
'__rdiv__',
'__rtruediv__',
'__rfloordiv__',
'__rmod__',
'__rdivmod__',
'__rpow__',
'__rlshift__',
'__rrshift__',
'__rand__',
'__rxor__',
'__ror__',
'__iadd__',
'__isub__',
'__imul__',
'__idiv__',
'__itruediv__',
'__ifloordiv__',
'__imod__',
'__ipow__',
'__ilshift__',
'__irshift__',
'__iand__',
'__ixor__',
'__ior__',
'__neg__',
'__pos__',
'__abs__',
'__invert__',
'__complex__',
'__int__',
'__long__',
'__float__',
'__oct__',
'__hex__',
'__index__',
'__coerce__',
# Context managers
'__enter__',
'__exit__',
])
#---- language support
class PythonLexer(Lexer):
lang = lang
def __init__(self):
self._properties = SilverCity.PropertySet()
self._lexer = SilverCity.find_lexer_module_by_id(
ScintillaConstants.SCLEX_PYTHON)
self._keyword_lists = [
SilverCity.WordList(python_keywords),
SilverCity.WordList(""), # highlighted identifiers
]
class PythonImportsEvaluator(Evaluator):
lang = lang
def __str__(self):
return "Python imports"
def eval(self, mgr):
try:
imp_prefix = tuple(self.trg.extra["imp_prefix"])
if imp_prefix:
libs = self.buf.libs
if not imp_prefix[0]:
if not imp_prefix[-1]:
# Deal with last item being empty, i.e. "from ."
imp_prefix = imp_prefix[:-1]
lookuppath = self.buf.path
while imp_prefix and not imp_prefix[0]:
lookuppath = dirname(lookuppath)
imp_prefix = imp_prefix[1:]
libs = [mgr.db.get_lang_lib(self.lang, "curdirlib",
[lookuppath])]
else:
# We use a special lib generator - that will lazily load
# additional directory libs when there are no matches found.
# This is a smart import facility - to detect imports from
# a parent directory when they are not explicitly on the
# included path list, quite common for Django and other
# Python frameworks that mangle the sys.path at runtime.
libs = PythonImportLibGenerator(mgr, self.lang,
self.buf.path, imp_prefix,
libs)
self.ctlr.set_desc("subimports of '%s'" % '.'.join(imp_prefix))
cplns = []
for lib in libs:
imports = lib.get_blob_imports(imp_prefix)
if imports:
cplns.extend(
((is_dir_import and "directory" or "module"), name)
for name, is_dir_import in imports
)
if self.trg.type == "module-members":
# Also add top-level members of the specified module.
dotted_prefix = '.'.join(imp_prefix)
if lib.has_blob(dotted_prefix):
blob = lib.get_blob(dotted_prefix)
for name in blob.names:
elem = blob.names[name]
cplns.append((elem.get(
"ilk") or elem.tag, name))
# TODO: Consider using the value of __all__
# if defined.
for e in blob:
attrs = e.get("attributes", "").split()
if "__hidden__" not in attrs:
try:
cplns += self._members_from_elem(
e, mgr)
except CodeIntelError as ex:
log.warn(
"%s (skipping members for %s)",
ex, e)
if cplns:
break
if cplns:
cplns = list(set(cplns)) # remove duplicates
else:
self.ctlr.set_desc("available imports")
all_imports = set()
for lib in self.buf.libs:
all_imports.update(lib.get_blob_imports(imp_prefix))
cplns = [((is_dir_import and "directory" or "module"), name)
for name, is_dir_import in all_imports]
if cplns:
cplns.sort(key=lambda i: i[1].upper())
self.ctlr.set_cplns(cplns)
finally:
self.ctlr.done("success")
# XXX: This function is shamelessly copy/pasted from
# tree_python.py:PythonTreeEvaluator because there was no clear
# way to reuse this shared functionality. See another XXX below, though.
def _members_from_elem(self, elem, mgr):
"""Return the appropriate set of autocomplete completions for
the given element. Typically this is just one, but can be more for
'*'-imports
"""
members = set()
if elem.tag == "import":
alias = elem.get("alias")
symbol_name = elem.get("symbol")
module_name = elem.get("module")
if symbol_name:
import_handler = mgr.citadel.import_handler_from_lang(
self.trg.lang)
try:
blob = import_handler.import_blob_name(
module_name, self.buf.libs, self.ctlr)
except:
log.warn(
"limitation in handling imports in imported modules")
raise
if symbol_name == "*": # can it be so?
for m_name, m_elem in list(blob.names.items()):
m_type = m_elem.get("ilk") or m_elem.tag
members.add((m_type, m_name))
elif symbol_name in blob.names:
symbol = blob.names[symbol_name]
member_type = (symbol.get("ilk") or symbol.tag)
members.add((member_type, alias or symbol_name))
else:
# To correctly determine the type, we'd need to
# examine all the imports of this blob, and then see
# if any of those imports match the name... which is
# better left to the tree evaluator (tree_python).
#
# For now, we just add it as an unknown type.
members.add(('unknown', alias or symbol_name))
log.info(
"could not resolve symbol %r on %r, added as 'unknown'",
symbol_name, module_name)
else:
cpln_name = alias or module_name.split('.', 1)[0]
members.add(("module", cpln_name))
else:
members.add((elem.get("ilk") or elem.tag, elem.get("name")))
return members
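# Illustrative sketch (hypothetical CIX elements, not from the original
# source): a plain
#   <import module="os.path"/>
# yields {("module", "os")}; a non-import element such as a function named
# "foo" yields {("function", "foo")}; only 'from X import *' expands to
# every top-level name in the imported blob.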
class PythonLangIntel(CitadelLangIntel, ParenStyleCalltipIntelMixin,
ProgLangTriggerIntelMixin,
PythonCITDLExtractorMixin):
lang = lang
interpreterPrefName = "python"
extraPathsPrefName = "pythonExtraPaths"
# Used by ProgLangTriggerIntelMixin.preceding_trg_from_pos().
trg_chars = tuple(" (.")
citdl_from_literal_type = {"string": "str"}
@LazyClassAttribute
def keywords(self):
from SilverCity.Keywords import python_keywords
return python_keywords.split(" ")
def async_eval_at_trg(self, buf, trg, ctlr):
if _xpcom_:
trg = UnwrapObject(trg)
ctlr = UnwrapObject(ctlr)
ctlr.start(buf, trg)
if trg.type in ("object-members", "call-signature",
"literal-members") or \
trg.form == TRG_FORM_DEFN:
line = buf.accessor.line_from_pos(trg.pos)
if trg.type == "literal-members":
# We could leave this to citdl_expr_from_trg, but this is a
# little bit faster, since we already know the citdl expr.
citdl_expr = trg.extra.get("citdl_expr")
else:
try:
citdl_expr = self.citdl_expr_from_trg(buf, trg)
except CodeIntelError as ex:
ctlr.error(str(ex))
ctlr.done("error")
return
evalr = PythonTreeEvaluator(ctlr, buf, trg, citdl_expr, line)
buf.mgr.request_eval(evalr)
elif trg.id == (self.lang, TRG_FORM_CPLN, "local-symbols"):
line = buf.accessor.line_from_pos(trg.pos)
citdl_expr = trg.extra.get("citdl_expr")
evalr = PythonTreeEvaluator(ctlr, buf, trg, citdl_expr, line)
buf.mgr.request_eval(evalr)
elif trg.id == (self.lang, TRG_FORM_CPLN, "magic-symbols"):
symbolstype = trg.extra.get("symbolstype")
cplns = []
if symbolstype == "string":
cplns = [("variable", "__main__")]
elif symbolstype == "def":
posttext = trg.extra.get("posttext", "")
posttext = posttext.split("\n", 1)[0]
if posttext and "(" in posttext:
cplns = [(
"function", t) for t in _g_python_magic_method_names]
else:
cplns = [(
"function", t + "(self") for t in _g_python_magic_method_names]
elif symbolstype == "global":
text = trg.extra.get("text")
if text.endswith("if"):
# Add the extended name version.
cplns = [("variable", t) for t in (
"__file__", "__loader__", "__name__ == '__main__':", "__package__")]
else:
cplns = [("variable", t) for t in (
"__file__", "__loader__", "__name__", "__package__")]
ctlr.set_cplns(cplns)
ctlr.done("success")
elif trg.id == (self.lang, TRG_FORM_CPLN, "pythondoc-tags"):
# TODO: Would like a "tag" completion image name.
cplns = [("variable", t) for t in _g_pythondoc_tags]
ctlr.set_cplns(cplns)
ctlr.done("success")
elif trg.type == "available-exceptions":
evalr = PythonTreeEvaluator(ctlr, buf, trg, None, -1)
buf.mgr.request_eval(evalr)
elif trg.type in ("available-imports", "module-members"):
evalr = PythonImportsEvaluator(ctlr, buf, trg)
buf.mgr.request_eval(evalr)
else:
raise NotImplementedError("not yet implemented: completion for "
"Python '%s' trigger" % trg.name)
# Note: Python 1.5.2 does not support sys.version_info.
info_cmd = (
r"import sys;"
r"sys.stdout.write('.'.join(map(str, sys.version_info))+'\n');"
r"sys.stdout.write(sys.prefix+'\n');"
r"sys.stdout.write('\n'.join(sys.path));")
def _python_info_from_python(self, python, env):
"""Call the given Python and return:
(<version>, <sys.prefix>, <lib-dir>, <site-lib-dir>, <sys.path>)
TODO: Unicode path issues?
"""
import process
argv = [python, "-c", self.info_cmd]
log.debug("run `%s -c ...'", python)
p = process.ProcessOpen(argv, env=env.get_all_envvars(), stdin=None)
stdout, stderr = p.communicate()
stdout_lines = stdout.splitlines(0)
retval = p.returncode
if retval:
log.warn("failed to determine Python info:\n"
" path: %s\n"
" retval: %s\n"
" stdout:\n%s\n"
" stderr:\n%s\n",
python, retval, indent('\n'.join(stdout_lines)),
indent(stderr))
# We can only rely on the first two digits being in the form x.y.
ver_match = re.search(r"([0-9]+\.[0-9]+)", stdout_lines[0])
if ver_match:
ver = ver_match.group(1)
else:
ver = None
prefix = stdout_lines[1]
if sys.platform == "win32":
libdir = join(prefix, "Lib")
else:
libdir = join(prefix, "lib", "python"+ver)
sitelibdir = join(libdir, "site-packages")
sys_path = stdout_lines[2:]
return ver, prefix, libdir, sitelibdir, sys_path
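# Illustrative sketch (hypothetical interpreter): info_cmd prints something
# like
#   2.7.18.final.0
#   /usr
#   /usr/lib/python2.7
#   /usr/lib/python2.7/site-packages
# and the method above reduces that to
#   ("2.7", "/usr", "/usr/lib/python2.7",
#    "/usr/lib/python2.7/site-packages", [... the sys.path entries ...])
# with the version clipped to "x.y" by the regex.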
def _expand_extra_dirs(self, env, extra_dirs):
return self._gen_python_import_paths_from_dirs(extra_dirs)
def _gen_python_import_paths_from_dirs(self, extra_dirs):
"""Generate all Python import paths from a given list of dirs.
This involves handling .pth files on the given dirs. It generates
import "paths" rather than "dirs" because Python .egg files can be
returned.
Dev Notes:
- Python's .pth files can have *executable* Python code. This
currently is not handled (those kinds of lines are skipped).
"""
for dir in extra_dirs:
if not exists(dir):
continue
yield dir
try:
for pth_path in glob(join(dir, "*.pth")):
for p in self._gen_python_import_paths_from_pth_path(pth_path):
yield p
except EnvironmentError as ex:
log.warn("error analyzing .pth files in '%s': %s", dir, ex)
def _gen_python_import_paths_from_pth_path(self, pth_path):
pth_dir = dirname(pth_path)
for line in open(pth_path, 'r'):
line = line.strip()
if line.startswith("#"): # comment line
continue
path = join(pth_dir, line)
if exists(path):
yield path
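# Illustrative sketch (hypothetical layout): given /opt/site/extras.pth
# containing
#   # a comment
#   widgets-1.0.egg
# _gen_python_import_paths_from_dirs(["/opt/site"]) yields "/opt/site"
# itself and then "/opt/site/widgets-1.0.egg" (only if that path exists),
# since each non-comment .pth line is joined onto the .pth file's own dir.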
#def _extra_dirs_from_env(self, env):
# extra_dirs = set()
# for pref in env.get_all_prefs(self.extraPathsPrefName):
# if not pref:
# continue
# for path in pref:
# extra_dirs.update(d.strip() for d in path.split(os.pathsep) if exists(d.strip()))
# if extra_dirs:
# extra_dirs = set(
# self._gen_python_import_paths_from_dirs(extra_dirs)
# )
# log.debug("Python extra lib dirs: %r", extra_dirs)
# return tuple(extra_dirs)
def interpreter_from_env(self, env):
"""Returns:
- absolute path to either the preferred or
default system interpreter
- None if none of the above exists
"""
# Gather information about the current python.
python = None
if env.has_pref(self.interpreterPrefName):
python = env.get_pref(self.interpreterPrefName).strip() or None
if not python or not exists(python):
import which
syspath = env.get_envvar("PATH", "")
path = [d.strip() for d in syspath.split(os.pathsep)
if d.strip()]
try:
python = which.which("python", path=path)
except which.WhichError:
pass # intentionally suppressed
if python:
python = os.path.abspath(python)
return python
def python_info_from_env(self, env):
cache_key = self.lang + "-info"
info = env.cache.get(cache_key)
if info is None:
python = self.interpreter_from_env(env)
if not python:
log.warn("no Python was found from which to determine the "
"codeintel information")
info = None, None, None, None, []
else:
info = self._python_info_from_python(python, env)
env.cache[cache_key] = info
return info
def _buf_indep_libs_from_env(self, env):
"""Create the buffer-independent list of libs."""
cache_key = self.lang + "-libs"
libs = env.cache.get(cache_key)
if libs is None:
env.add_pref_observer(
self.interpreterPrefName, self._invalidate_cache)
env.add_pref_observer(self.extraPathsPrefName,
self._invalidate_cache_and_rescan_extra_dirs)
env.add_pref_observer("codeintel_selected_catalogs",
self._invalidate_cache)
db = self.mgr.db
ver, prefix, libdir, sitelibdir, sys_path \
= self.python_info_from_env(env)
libs = []
# - extradirslib
extra_dirs = self._extra_dirs_from_env(env)
if extra_dirs:
libs.append(db.get_lang_lib(self.lang, "extradirslib",
extra_dirs))
# Figure out which sys.path dirs belong to which lib.
paths_from_libname = {"sitelib": [], "envlib": [], "stdlib": []}
canon_sitelibdir = sitelibdir and normcase(sitelibdir) or None
canon_prefix = prefix and normcase(prefix) or None
canon_libdir = libdir and normcase(libdir) or None
canon_libdir_plat_prefix = libdir and normcase(
join(libdir, "plat-")) or None
canon_libdir_lib_prefix = libdir and normcase(
join(libdir, "lib-")) or None
for dir in sys_path:
STATE = "envlib"
canon_dir = normcase(dir)
if dir == "": # -> curdirlib (already handled)
continue
elif canon_dir.endswith(".zip") and isfile(dir):
log.warn("`%s': not handling .zip file on Python sys.path",
dir)
continue
elif canon_dir.endswith(".egg") and isfile(dir):
# log.warn("`%s': not handling .egg file on Python sys.path",
# dir)
continue
elif canon_dir.startswith(canon_sitelibdir):
STATE = "sitelib"
# Check against the known list of standard library locations.
elif canon_dir == canon_libdir or \
canon_dir.startswith(canon_libdir_plat_prefix) or \
canon_dir.startswith(canon_libdir_lib_prefix):
STATE = "stdlib"
if not exists(dir):
continue
paths_from_libname[STATE].append(dir)
log.debug("Python %s paths for each lib:\n%s",
ver, indent(pformat(paths_from_libname)))
# - envlib, sitelib, cataloglib, stdlib
if paths_from_libname["envlib"]:
libs.append(db.get_lang_lib(self.lang, "envlib",
paths_from_libname["envlib"]))
if paths_from_libname["sitelib"]:
libs.append(db.get_lang_lib(self.lang, "sitelib",
paths_from_libname["sitelib"]))
catalog_selections = env.get_pref("codeintel_selected_catalogs")
libs += [
db.get_catalog_lib(self.lang, catalog_selections),
db.get_stdlib(self.lang, ver)
]
env.cache[cache_key] = libs
return libs
def libs_from_buf(self, buf):
env = buf.env
# A buffer's libs depend on its env and the buf itself so
# we cache it on the env and key off the buffer.
cache_key = self.lang + "-buf-libs"
cache = env.cache.get(cache_key) # <buf-weak-ref> -> <libs>
if cache is None:
cache = weakref.WeakKeyDictionary()
env.cache[cache_key] = cache
if buf not in cache:
# - curdirlib
# Using the dirname of this buffer isn't always right, but
# hopefully is a good first approximation.
libs = []
if buf.path:
cwd = dirname(buf.path)
if cwd != "<Unsaved>":
libs = [self.mgr.db.get_lang_lib(
self.lang, "curdirlib", [cwd])]
libs += self._buf_indep_libs_from_env(env)
cache[buf] = libs
return cache[buf]
def _invalidate_cache(self, env, pref_name):
for key in (self.lang + "-buf-libs", self.lang + "-libs"):
if key in env.cache:
log.debug("invalidate '%s' cache on %r", key, env)
del env.cache[key]
def _invalidate_cache_and_rescan_extra_dirs(self, env, pref_name):
self._invalidate_cache(env, pref_name)
extra_dirs = self._extra_dirs_from_env(env)
if extra_dirs:
extradirslib = self.mgr.db.get_lang_lib(
self.lang, "extradirslib", extra_dirs)
request = PreloadLibRequest(extradirslib)
self.mgr.idxr.stage_request(request, 1.0)
# class PythonCitadelEvaluator(CitadelEvaluator):
# def post_process_cplns(self, cplns):
# """Drop special __FOO__ methods.
#
# Note: Eventually for some Python completions we might want to leave
# these in. For example:
#
# class Bar(Foo):
# def __init__(self):
# Foo.<|> # completions should include "__init__" here
# """
# for i in range(len(cplns)-1, -1, -1):
# value = cplns[i][1]
# if value.startswith("__") and value.endswith("__"):
# del cplns[i]
# return CitadelEvaluator.post_process_cplns(self, cplns)
# "from", "from .", "from .."
_dotted_from_rx = re.compile(r'from($|\s+\.+)')
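# Doctest-style illustration (not part of the original source) of what the
# regex accepts:
#   >>> bool(_dotted_from_rx.match("from"))
#   True
#   >>> bool(_dotted_from_rx.match("from .."))
#   True
#   >>> bool(_dotted_from_rx.match("from foo"))
#   False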
class PythonBuffer(CitadelBuffer):
lang = lang
# Fillup chars for Python: basically, any non-identifier char.
# - remove '*' from fillup chars because: "from foo import <|>*"
cpln_fillup_chars = "~`!@#$%^&()-=+{}[]|\\;:'\",.<>?/ "
cpln_stop_chars = "~`!@#$%^&*()-=+{}[]|\\;:'\",.<>?/ "
sce_prefixes = ["SCE_P_"]
cb_show_if_empty = True
keyword_style = ScintillaConstants.SCE_P_WORD
identifier_style = ScintillaConstants.SCE_P_IDENTIFIER
@property
def libs(self):
return self.langintel.libs_from_buf(self)
def trg_from_pos(self, pos, implicit=True):
"""Python trigger types:
python-complete-object-members
python-calltip-call-signature
python-complete-pythondoc-tags
complete-available-imports
complete-module-members
Not yet implemented:
complete-available-classes
calltip-base-signature
"""
DEBUG = False # not using 'logging' system, because want to be fast
if DEBUG:
print("\n----- Python trg_from_pos(pos=%r, implicit=%r) -----"\
% (pos, implicit))
if pos == 0:
return None
accessor = self.accessor
last_pos = pos - 1
last_char = accessor.char_at_pos(last_pos)
if DEBUG:
print(" last_pos: %s" % last_pos)
print(" last_char: %r" % last_char)
# Quick out if the preceding char isn't a trigger char.
# Note: Cannot use this now that we have a 2-char locals trigger.
# if last_char not in " .(@_,":
# if DEBUG:
# print "trg_from_pos: no: %r is not in ' .(@'_" % last_char
# return None
style = accessor.style_at_pos(last_pos)
if DEBUG:
style_names = self.style_names_from_style_num(style)
print(" style: %s (%s)" % (style, ", ".join(style_names)))
if last_char == "@":
# Possibly python-complete-pythondoc-tags (the only trigger
# on '@').
#
# Notes:
# - PythonDoc 2.1b6 started allowing pythondoc tags in doc
# strings, which we are not yet supporting here.
# - Trigger in comments should only happen if the comment
# begins with the "##" pythondoc signifier. We don't
# bother checking that (PERF).
if style in self.comment_styles():
# Only trigger at start of comment line.
WHITESPACE = tuple(" \t")
SENTINEL = 20
i = last_pos-1
while i >= max(0, last_pos-SENTINEL):
ch = accessor.char_at_pos(i)
if ch == "#":
return Trigger(self.lang, TRG_FORM_CPLN,
"pythondoc-tags", pos, implicit)
elif ch in WHITESPACE:
pass
else:
return None
i -= 1
return None
# Remaining triggers should never trigger in some styles.
if (implicit and style in self.implicit_completion_skip_styles and last_char != '_'
or style in self.completion_skip_styles):
if DEBUG:
print("trg_from_pos: no: completion is suppressed "\
"in style at %s: %s (%s)"\
% (last_pos, style, ", ".join(style_names)))
return None
if last_char == " ":
# used for:
# * complete-available-imports
# * complete-module-members
# * complete-available-exceptions
# Triggering examples ('_' means a space here):
# import_ from_
# Non-triggering examples:
# from FOO import_ Ximport_
# Not bothering to support:
# if FOO:import_ FOO;import_
# Typing a space is very common so let's have a quick out before
# doing the more correct processing:
if last_pos-1 < 0 or accessor.char_at_pos(last_pos-1) not in "etm,":
return None
working_text = accessor.text_range(max(0, last_pos-200),
last_pos)
line = self._last_logical_line(working_text).strip()
if not line:
return None
ch = line[-1]
line = line.replace('\t', ' ')
# from <|>
# import <|>
if line == "from" or line == "import":
return Trigger(self.lang, TRG_FORM_CPLN,
"available-imports", pos, implicit,
imp_prefix=())
# is it "from FOO import <|>" ?
if line.endswith(" import"):
if line.startswith('from '):
imp_prefix = tuple(line[len('from '):-len(
' import')].strip().split('.'))
return Trigger(self.lang, TRG_FORM_CPLN,
"module-members", pos, implicit,
imp_prefix=imp_prefix)
if line == "except" or line == "raise" or line.endswith((" except", " raise")):
return Trigger(self.lang, TRG_FORM_CPLN,
"available-exceptions", pos, implicit)
if ch == ',':
# is it "from FOO import BAR, <|>" ?
if line.startswith('from ') and ' import ' in line:
imp_prefix = tuple(line[len('from '):line.index(
' import')].strip().split('.'))
# Need better checks
return Trigger(self.lang, TRG_FORM_CPLN,
"module-members", pos, implicit,
imp_prefix=imp_prefix)
elif last_char == '.': # must be "complete-object-members" or None
# If the first non-whitespace character preceding the '.' in the
# same statement is an identifier character then trigger, if it
# is a ')', then _maybe_ we should trigger (yes if this is
# function call paren).
#
# Triggering examples:
# FOO. FOO . FOO; BAR.
# FOO(). FOO.BAR. FOO(BAR, BAZ.
# FOO().BAR. FOO("blah();", "blam"). FOO = {BAR.
# FOO(BAR. FOO[BAR.
# ...more cases showing possible delineation of expression
# Non-triggering examples:
# FOO..
# FOO[1]. too hard to determine sequence element types
# from FOO import (BAR.
# Not sure if want to support:
# "foo". do we want to support literals? what about
# lists? tuples? dicts?
working_text = accessor.text_range(max(0, last_pos-200),
last_pos)
line = self._last_logical_line(working_text).strip()
if line:
ch = line[-1]
if (isident(ch) or isdigit(ch) or ch in '.)'):
line = line.replace('\t', ' ')
m = _dotted_from_rx.match(line)
if m:
dots = len(m.group(1).strip())
# magic value for imp_prefix, means "from .<|>"
imp_prefix = tuple('' for i in range(dots+2))
return Trigger(self.lang, TRG_FORM_CPLN,
"available-imports", pos, implicit,
imp_prefix=imp_prefix)
elif line.startswith('from '):
if ' import ' in line:
# we're in "from FOO import BAR." territory,
# which is not a trigger
return None
# from FOO.
imp_prefix = tuple(line[len(
'from '):].strip().split('.'))
return Trigger(self.lang, TRG_FORM_CPLN,
"available-imports", pos, implicit,
imp_prefix=imp_prefix)
elif line.startswith('import '):
# import FOO.
# figure out the dotted parts of "FOO" above
imp_prefix = tuple(line[len(
'import '):].strip().split('.'))
return Trigger(self.lang, TRG_FORM_CPLN,
"available-imports", pos, implicit,
imp_prefix=imp_prefix)
else:
return Trigger(self.lang, TRG_FORM_CPLN,
"object-members", pos, implicit)
elif ch in ("\"'"):
return Trigger(self.lang, TRG_FORM_CPLN,
"literal-members", pos, implicit,
citdl_expr="str")
else:
ch = None
if DEBUG:
print("trg_from_pos: no: non-ws char preceding '.' is not "\
"an identifier char or ')': %r" % ch)
return None
elif last_char == "_":
# used for:
# * complete-magic-symbols
# Triggering examples:
# def __<|>init__
# if __<|>name__ == '__main__':
# __<|>file__
# Ensure double "__".
if last_pos-1 < 0 or accessor.char_at_pos(last_pos-1) != "_":
return None
beforeChar = None
beforeStyle = None
if last_pos-2 >= 0:
beforeChar = accessor.char_at_pos(last_pos-2)
beforeStyle = accessor.style_at_pos(last_pos-2)
if DEBUG:
print("trg_from_pos:: checking magic symbol, beforeChar: %r" % (beforeChar))
if beforeChar and beforeChar in "\"'" and beforeStyle in self.string_styles():
if DEBUG:
print("trg_from_pos:: magic-symbols - string")
return Trigger(self.lang, TRG_FORM_CPLN,
"magic-symbols", last_pos-1, implicit,
symbolstype="string")
elif beforeChar == "." and beforeStyle != style:
# Turned this off, as it interferes with regular "xxx." object
# completions.
return None
if beforeStyle == style:
# No change in styles between the characters -- abort.
return None
text = accessor.text_range(max(0, last_pos-20), last_pos-1).strip()
if beforeChar and beforeChar in " \t":
if text.endswith("def"):
posttext = accessor.text_range(pos,
min(accessor.length, pos+20)
).replace(" ", "")
if DEBUG:
print("trg_from_pos:: magic-symbols - def")
return Trigger(self.lang, TRG_FORM_CPLN,
"magic-symbols", last_pos-1, implicit,
symbolstype="def",
posttext=posttext)
if DEBUG:
print("trg_from_pos:: magic-symbols - global")
return Trigger(self.lang, TRG_FORM_CPLN,
"magic-symbols", last_pos-1, implicit,
symbolstype="global", text=text)
elif last_char == '(':
# If the first non-whitespace character preceding the '(' in the
# same statement is an identifier character then trigger calltip,
#
# Triggering examples:
# FOO. FOO ( FOO; BAR(
# FOO.BAR( FOO(BAR, BAZ( FOO = {BAR(
# FOO(BAR( FOO[BAR(
# Non-triggering examples:
# FOO()( a function call returning a callable that is
# immediately called again is too rare to bother
# with
# def foo( might be a "calltip-base-signature", but this
# trigger is not yet implemented
# import ( will be handled by complete_members
# class Foo( is a "complete-available-classes" trigger,
# but this is not yet implemented
working_text = accessor.text_range(max(0, last_pos-200), last_pos)
line = self._last_logical_line(working_text).rstrip()
if line:
ch = line[-1]
if isident(ch) or isdigit(ch):
# If this is:
# def foo(
# then this might be the (as yet unimplemented)
# "calltip-base-signature" trigger or it should not be a
# trigger point.
#
# If this is:
# class Foo(
# then this should be the (as yet unimplemented)
# "complete-available-classes" trigger.
line = line.replace('\t', ' ')
lstripped = line.lstrip()
if lstripped.startswith("def"):
if DEBUG:
print("trg_from_pos: no: point is function declaration")
elif lstripped.startswith("class") and '(' not in lstripped:
# Second test is necessary to not exclude:
# class Foo(bar(<|>
if DEBUG:
print("trg_from_pos: no: point is class declaration")
elif lstripped.startswith('from ') and ' import' in lstripped:
# Need better checks
# is it "from FOO import (<|>" ?
imp_prefix = tuple(lstripped[len(
'from '):lstripped.index(' import')].split('.'))
if DEBUG:
print("trg_from_pos: from FOO import (")
return Trigger(self.lang, TRG_FORM_CPLN,
"module-members", pos, implicit,
imp_prefix=imp_prefix)
else:
return Trigger(self.lang, TRG_FORM_CALLTIP,
"call-signature", pos, implicit)
else:
if DEBUG:
print("trg_from_pos: no: non-ws char preceding "\
"'(' is not an identifier char: %r" % ch)
else:
if DEBUG:
print("trg_from_pos: no: no chars preceding '('")
return None
elif last_char == ',':
working_text = accessor.text_range(max(0, last_pos - 200), last_pos)
line = self._last_logical_line(working_text)
if line:
last_bracket = line.rfind("(")
if last_bracket >= 0:
pos = (pos - (len(line) - last_bracket))
return Trigger(self.lang, TRG_FORM_CALLTIP,
"call-signature", pos, implicit)
return None
else:
return None
elif pos >= 2 and style in (self.identifier_style, self.keyword_style):
# 2 character trigger for local symbols
if DEBUG:
if style == self.identifier_style:
print("Identifier style")
else:
print("Identifier keyword style")
# Previous char also need to be an identifier/word, then the one
# before that needs to be something different (operator/space).
if (accessor.style_at_pos(last_pos-1) != style or
(pos > 2 and accessor.style_at_pos(last_pos-2) == style)):
if DEBUG:
print("Not a block of two ident/word chars")
return None
if pos > 2 and accessor.char_at_pos(last_pos-2) == ".":
if DEBUG:
print(" preceeded by '.' operator - not a trigger")
return None
# Check if it makes sense to show the completions here. If defining
# a class name, or function name, you don't want to see completions.
# Also, do not override another completion type (e.g. imports).
start = accessor.line_start_pos_from_pos(pos)
preceeding_text = accessor.text_range(start, last_pos-2).strip()
if preceeding_text:
first_word = preceeding_text.split(" ")[0]
if first_word in ("class", "def", "import", "from", "except"):
if DEBUG:
print(" no trigger, as starts with %r" % (first_word, ))
# Don't trigger over the top of another trigger, i.e.
# complete-available-imports
# complete-module-members
# complete-available-exceptions
return None
citdl_expr = accessor.text_range(last_pos-1, last_pos+1)
if DEBUG:
print(" triggered 2 char symbol trigger: %r" % (citdl_expr, ))
return Trigger(self.lang, TRG_FORM_CPLN, "local-symbols",
last_pos-1, implicit,
citdl_expr=citdl_expr,
preceeding_text=preceeding_text)
def _last_logical_line(self, text):
lines = text.splitlines(0) or ['']
logicalline = lines.pop()
while lines and lines[-1].endswith('\\'):
logicalline = lines.pop()[:-1] + ' ' + logicalline
return logicalline
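# Illustrative sketch: for the text "x = 1\ny = (2 +\\\n 3)" the last
# physical line " 3)" is popped first; because the line before it ends with
# a backslash, the two are rejoined with a space, so the single logical
# line "y = (2 +  3)" comes back.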
class PythonImportHandler(ImportHandler):
lang = lang # XXX do this for other langs as well
PATH_ENV_VAR = "PYTHONPATH"
sep = '.'
def __init__(self, mgr):
ImportHandler.__init__(self, mgr)
self.__stdCIXScanId = None
# TODO: may not be used. If so, drop it.
def _shellOutForPath(self, compiler):
import process
argv = [compiler, "-c", "import sys; print('\\n'.join(sys.path))"]
# Can't use -E to ignore PYTHONPATH because older versions of
# Python don't have it (e.g. v1.5.2).
env = dict(os.environ)
if "PYTHONPATH" in env:
del env["PYTHONPATH"]
if "PYTHONHOME" in env:
del env["PYTHONHOME"]
if "PYTHONSTARTUP" in env:
del env["PYTHONSTARTUP"]
p = process.ProcessOpen(argv, env=env, stdin=None)
stdout, stderr = p.communicate()
retval = p.returncode
path = [line for line in stdout.splitlines(0)]
if path and (path[0] == "" or path[0] == os.getcwd()):
del path[0] # cwd handled separately
return path
def setCorePath(self, compiler=None, extra=None):
if compiler is None:
import which
compiler = which.which("python")
self.corePath = self._shellOutForPath(compiler)
def _findScannableFiles(self, xxx_todo_changeme,
dirname, names):
(files, searchedDirs, skipRareImports,
importableOnly) = xxx_todo_changeme
if sys.platform.startswith("win"):
cpath = dirname.lower()
else:
cpath = dirname
if cpath in searchedDirs:
while names:
del names[0]
return
else:
searchedDirs[cpath] = 1
if skipRareImports:
if (basename(dirname) == "encodings"
and "undefined.py" in names):
# Skip most of the specific encoding definitions (saves
# about 50 files).
names = [n for n in names if n == "__init__.py"
or os.path.splitext(n)[0].endswith("_codec")]
for i in range(len(names)-1, -1, -1): # backward so can del from list
path = os.path.join(dirname, names[i])
if os.path.isdir(path):
if skipRareImports:
# Skip Python's test package (saves over 200 files)
# and other likely test dirs.
if names[i] in ("test", "tests"):
del names[i]
continue
if importableOnly:
possibles = [os.path.join(path, "__init__.py"),
os.path.join(path, "__init__.pyc"),
os.path.join(path, "__init__.pyo")]
for possible in possibles:
if os.path.isfile(possible):
break
else:
del names[i] # don't traverse non-package dirs
continue
if path.endswith(os.path.join("win32com", "gen_py")):
del names[i]
continue
elif os.path.splitext(names[i])[1] in self._gen_suffixes():
# XXX The list of Python extensions should be settable on
# the ImportHandler and Komodo should set whatever is
# set in prefs.
# XXX This check for "Python" files should probably include
# python scripts, which might likely not have the
# extension: need to grow filetype-from-content smarts.
files.append(path)
def _gen_suffixes(self):
"""Generate a sequence of scannable file suffixes in the
preferred order of scanning.
"""
yield ".py"
yield ".pyw"
if _SCAN_BINARY_FILES:
yield ".pyc"
yield ".pyo"
for suffix, mode, mod_type in imp.get_suffixes():
if suffix[0] == '.' and mod_type == imp.C_EXTENSION:
yield suffix
def find_importables_in_dir(self, imp_dir):
"""See citadel.py::ImportHandler.find_importables_in_dir() for
details.
Importables for Python look like this:
{"foo": ("foo.py", None, False),
"foolib": ("foolib/__init__.py", "__init__", False),
"bar": ("bar.pyc", None, False),
"baz": ("baz.pyo", None, False),
"qoox": ("qoox.pyd", None, False),
"qooz": ("qooz.so", None, False),
Note: .pyd are .so handling depends on the platform.
If several files happen to have the same name but different
suffixes, the one with the preferred suffix wins. The suffix preference
is defined by the order of elements in the sequence generated
by _gen_suffixes().
This particularly means that sources always win over binaries.
"""
if imp_dir == "<Unsaved>":
# TODO: stop these getting in here.
return {}
importables = {}
if os.path.isdir(imp_dir):
suffixes = dict((s, i) for i, s
in enumerate(self._gen_suffixes(), 1))
modules = []
for name in os.listdir(imp_dir):
mod, suffix = os.path.splitext(name)
if mod != '__init__':
init = os.path.join(name, '__init__.py')
if os.path.exists(os.path.join(imp_dir, init)):
modules.append((0, name, (
init, '__init__', False)))
else:
if suffix in suffixes:
modules.append((suffixes[suffix], mod,
(name, None, False)))
modules.sort(key=lambda mod: mod[0])
for _, mod, importable in modules:
if mod not in importables:
importables[mod] = importable
return importables
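# Illustrative sketch (hypothetical directory contents): for a dir holding
#   foo.py, foo.pyc, foolib/__init__.py
# this would return
#   {"foo": ("foo.py", None, False),
#    "foolib": ("foolib/__init__.py", "__init__", False)}
# With _SCAN_BINARY_FILES left False, ".pyc" is not in _gen_suffixes(), so
# foo.pyc is ignored entirely rather than merely losing to foo.py.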
class PythonCILEDriver(CILEDriver):
lang = lang
def scan_purelang(self, buf):
log.info("scan_purelang: path: %r lang: %s", buf.path, buf.lang)
# log.warn("TODO: python cile that uses elementtree")
content = buf.accessor.text
el = pythoncile.scan_et(content, buf.path, lang=self.lang)
return el
def scan_binary(self, buf):
log.info("scan_binary: path: %r lang: %s", buf.path, buf.lang)
from codeintel2 import pybinary
python = buf.langintel.interpreter_from_env(buf.env)
if not python:
raise CodeIntelError("cannot find a usable Python interpreter")
cix = pybinary.safe_scan(buf.path, python)
return tree_from_cix(cix)
#---- internal support stuff
#---- registration
def register(mgr):
"""Register language support with the Manager."""
mgr.set_lang_info(lang,
silvercity_lexer=PythonLexer(),
buf_class=PythonBuffer,
langintel_class=PythonLangIntel,
import_handler_class=PythonImportHandler,
cile_driver_class=PythonCILEDriver,
is_cpln_lang=True)
#
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Provides wrapper for eAPI calls
This module provides a connection to eAPI by wrapping eAPI calls in an
instance of Connection. The connection module provides an easy implementation
for sending and receiving calls over eAPI using an HTTP/S transport.
"""
import sys
import json
import socket
import base64
import logging
import ssl
import re
try:
# Try Python 3.x import first
from http.client import HTTPConnection, HTTPSConnection
except ImportError:
# Use Python 2.7 import as a fallback
from httplib import HTTPConnection, HTTPSConnection
from pyeapi.utils import make_iterable
_LOGGER = logging.getLogger(__name__)
DEFAULT_HTTP_PORT = 80
DEFAULT_HTTPS_PORT = 443
DEFAULT_HTTP_LOCAL_PORT = 8080
DEFAULT_HTTPS_LOCAL_PORT = 8443
DEFAULT_HTTP_PATH = '/command-api'
DEFAULT_UNIX_SOCKET = '/var/run/command-api.sock'
def https_connection_factory(path, host, port, context=None, timeout=60):
# ignore ssl context for python versions before 2.7.9
if sys.hexversion < 34015728:
return HttpsConnection(path, host, port, timeout=timeout)
return HttpsConnection(path, host, port, context=context, timeout=timeout)
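# Note (illustrative): 34015728 == 0x020709F0, the sys.hexversion of
# Python 2.7.9 final -- the first 2.x release whose ssl module supports
# the context argument used above:
#   >>> hex(34015728)
#   '0x20709f0'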
class EapiError(Exception):
"""Base exception class for all exceptions generated by eapilib
This is the base exception class for all exceptions generated by
eapilib. It is provided as a catch-all for exceptions and should
not be directly raised by any methods or functions.
Args:
commands (array): The list of commands that were sent to the
node when the exception was raised
message (string): The exception error message
"""
def __init__(self, message, commands=None):
self.message = message
self.commands = commands
super(EapiError, self).__init__(message)
class CommandError(EapiError):
"""Base exception raised for command errors
The CommandError instance provides a custom exception that can be used
if the eAPI command(s) fail. It provides some additional information
that can be used to understand what caused the exception.
Args:
error_code (int): The error code returned from the eAPI call.
error_text (string): The error text message that coincides with the
error_code
commands (array): The list of commands that were sent to the node
that generated the error
message (string): The exception error message which is a concatenation
of the error_code and error_text
"""
def __init__(self, code, message, **kwargs):
cmd_err = kwargs.get('command_error')
if int(code) in [1000, 1002, 1004]:
msg_fmt = 'Error [{}]: {} [{}]'.format(code, message, cmd_err)
else:
msg_fmt = 'Error [{}]: {}'.format(code, message)
super(CommandError, self).__init__(msg_fmt)
self.error_code = code
self.error_text = message
self.command_error = cmd_err
self.commands = kwargs.get('commands')
self.output = kwargs.get('output')
self.message = msg_fmt
@property
def trace(self):
return self.get_trace()
def get_trace(self):
trace = list()
index = None
for index, out in enumerate(self.output):
_entry = {'command': self.commands[index], 'output': out}
trace.append(_entry)
if index:
index += 1
for cmd in self.commands[index:]:
_entry = {'command': cmd, 'output': None}
trace.append(_entry)
return trace
class ConnectionError(EapiError):
"""Base exception raised for connection errors
Connection errors are raised when a connection object is unable to
connect to the node. Typically these errors can result from using
the wrong transport type or not providing valid credentials.
Args:
commands (array): The list of commands that were sent to the
node when the exception was raised
connection_type (string): The string identifier for the connection
object that generated the error
message (string): The exception error message
response (string): The message generated from the response packet
"""
def __init__(self, connection_type, message, commands=None):
self.message = message
self.connection_type = connection_type
self.commands = commands
super(ConnectionError, self).__init__(message)
class SocketConnection(HTTPConnection):
def __init__(self, path, timeout=60):
HTTPConnection.__init__(self, 'localhost')
self.path = path
self.timeout = timeout
def __str__(self):
return 'unix:%s' % self.path
def __repr__(self):
return 'unix:%s' % self.path
def connect(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.settimeout(self.timeout)
self.sock.connect(self.path)
class HttpConnection(HTTPConnection):
def __init__(self, path, *args, **kwargs):
HTTPConnection.__init__(self, *args, **kwargs)
self.path = path
def __str__(self):
return 'http://%s:%s/%s' % (self.host, self.port, self.path)
def __repr__(self):
return 'http://%s:%s/%s' % (self.host, self.port, self.path)
class HttpsConnection(HTTPSConnection):
def __init__(self, path, *args, **kwargs):
HTTPSConnection.__init__(self, *args, **kwargs)
self.path = path
def __str__(self):
return 'https://%s:%s/%s' % (self.host, self.port, self.path)
def __repr__(self):
return 'https://%s:%s/%s' % (self.host, self.port, self.path)
class HTTPSCertConnection(HTTPSConnection):
""" Class to make a HTTPS connection, with support
for full client-based SSL Authentication.
"""
def __init__(self, path, host, port, key_file, cert_file, ca_file,
timeout=None):
HTTPSConnection.__init__(self, host, key_file=key_file,
cert_file=cert_file)
self.key_file = key_file
self.cert_file = cert_file
self.ca_file = ca_file
self.timeout = timeout
self.path = path
self.port = port
def __str__(self):
return 'https://%s:%s/%s - %s,%s' % (self.host, self.port, self.path,
self.key_file, self.cert_file)
def __repr__(self):
return 'https://%s:%s/%s - %s,%s' % (self.host, self.port, self.path,
self.key_file, self.cert_file)
def connect(self):
""" Connect to a host on a given (SSL) port.
If ca_file is pointing somewhere, use it
to check Server Certificate.
Redefined/copied and extended from httplib.py:1105 (Python 2.6.x).
This is needed to pass cert_reqs=ssl.CERT_REQUIRED as parameter
to ssl.wrap_socket(), which forces SSL to check server certificate
against our client certificate.
"""
sock = socket.create_connection((self.host, self.port), self.timeout)
if self._tunnel_host:
self.sock = sock
self._tunnel()
# If there's no CA File, don't force Server Certificate Check
if self.ca_file:
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
ca_certs=self.ca_file,
cert_reqs=ssl.CERT_REQUIRED)
else:
self.sock = ssl.wrap_socket(sock, self.key_file,
self.cert_file,
cert_reqs=ssl.CERT_NONE)
class EapiConnection(object):
"""Creates a connection to eAPI for sending and receiving eAPI requests
The EapiConnection object provides an implementation for sending and
receiving eAPI requests and responses. This class should not need to
be instantiated directly.
"""
def __init__(self):
self.transport = None
self.error = None
self.socket_error = None
self._auth = None
def __str__(self):
return 'EapiConnection(transport=%s)' % str(self.transport)
def __repr__(self):
return 'EapiConnection(transport=%s)' % repr(self.transport)
def authentication(self, username, password):
"""Configures the user authentication for eAPI
This method configures the username and password combination to use
for authenticating to eAPI.
Args:
username (str): The username to use to authenticate the eAPI
connection with
password (str): The password in clear text to use to authenticate
the eAPI connection with
"""
_auth_text = '{}:{}'.format(username, password)
# Work around for Python 2.7/3.x compatibility
if int(sys.version[0]) > 2:
# For Python 3.x
_auth_bin = base64.encodebytes(_auth_text.encode())
_auth = _auth_bin.decode()
_auth = _auth.replace('\n', '')
self._auth = _auth
else:
# For Python 2.7
_auth = base64.encodestring(_auth_text)
self._auth = str(_auth).replace('\n', '')
_LOGGER.debug('Authentication string is: {}:***'.format(username))
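# Illustrative sketch: for username "admin" and password "secret" the
# stored credential is standard HTTP Basic material, e.g.
#   >>> import base64
#   >>> base64.b64encode(b'admin:secret').decode()
#   'YWRtaW46c2VjcmV0'
# which send() later emits as "Authorization: Basic YWRtaW46c2VjcmV0".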
def request(self, commands, encoding=None, reqid=None, **kwargs):
"""Generates an eAPI request object
This method will take a list of EOS commands and generate a valid
eAPI request object from them. The eAPI request object is then
JSON encoded and returned to the caller.
eAPI Request Object
.. code-block:: json
{
"jsonrpc": "2.0",
"method": "runCmds",
"params": {
"version": 1,
"cmds": [
<commands>
],
"format": [json, text],
}
"id": <reqid>
}
Args:
commands (list): A list of commands to include in the eAPI
request object
encoding (string): The encoding method passed as the `format`
parameter in the eAPI request
reqid (string): A custom value to assign to the request ID
field. This value is automatically generated if not passed
**kwargs: Additional keyword arguments for expanded eAPI
functionality. Only supported eAPI params are used in building
the request
Returns:
A JSON encoded request structure that can be sent over eAPI
"""
commands = make_iterable(commands)
reqid = id(self) if reqid is None else reqid
params = {'version': 1, 'cmds': commands, 'format': encoding}
streaming = False
if 'autoComplete' in kwargs:
params['autoComplete'] = kwargs['autoComplete']
if 'expandAliases' in kwargs:
params['expandAliases'] = kwargs['expandAliases']
if 'streaming' in kwargs:
streaming = kwargs['streaming']
return json.dumps({'jsonrpc': '2.0', 'method': 'runCmds',
'params': params, 'id': str(reqid),
'streaming': streaming})
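# Illustrative usage (hypothetical request id): a single command serializes
# to something like
#   conn.request(['show version'], encoding='json', reqid=42)
#   -> '{"jsonrpc": "2.0", "method": "runCmds", "params": {"version": 1,
#       "cmds": ["show version"], "format": "json"}, "id": "42",
#       "streaming": false}'
# "id" defaults to id(self) when reqid is not supplied.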
def send(self, data):
"""Sends the eAPI request to the destination node
This method is responsible for sending an eAPI request to the
destination node and returning a response based on the eAPI response
object. eAPI responds to request messages with either a success
message or failure message.
eAPI Response - success
.. code-block:: json
{
"jsonrpc": "2.0",
"result": [
{},
{}
{
"warnings": [
<message>
]
},
],
"id": <reqid>
}
eAPI Response - failure
.. code-block:: json
{
"jsonrpc": "2.0",
"error": {
"code": <int>,
"message": <string>
"data": [
{},
{},
{
"errors": [
<message>
]
}
]
}
"id": <reqid>
}
Args:
data (string): The data to be included in the body of the eAPI
request object
Returns:
A decoded response. The response object is deserialized from
JSON and returned as a standard Python dictionary object
Raises:
CommandError if an eAPI failure response object is returned from
the node. The CommandError exception includes the error
code and error message from the eAPI response.
"""
try:
_LOGGER.debug('Request content: {}'.format(data))
# debug('eapi_request: %s' % data)
self.transport.putrequest('POST', '/command-api')
self.transport.putheader('Content-type', 'application/json-rpc')
self.transport.putheader('Content-length', '%d' % len(data))
if self._auth:
self.transport.putheader('Authorization',
'Basic %s' % self._auth)
if int(sys.version[0]) > 2:
# For Python 3.x compatibility
data = data.encode()
self.transport.endheaders(message_body=data)
try: # Python 2.7: use buffering of HTTP responses
response = self.transport.getresponse(buffering=True)
except TypeError: # Python 2.6 and older, and 3.x, lack the buffering kwarg
response = self.transport.getresponse()
response_content = response.read()
_LOGGER.debug('Response: status:{status}, reason:{reason}'.format(
status=response.status,
reason=response.reason))
_LOGGER.debug('Response content: {}'.format(response_content))
if response.status == 401:
raise ConnectionError(str(self), '%s. %s' % (response.reason,
response_content))
# Work around for Python 2.7/3.x compatibility
if not type(response_content) == str:
# For Python 3.x - decode bytes into string
response_content = response_content.decode()
decoded = json.loads(response_content)
_LOGGER.debug('eapi_response: %s' % decoded)
if 'error' in decoded:
(code, msg, err, out) = self._parse_error_message(decoded)
pattern = "unexpected keyword argument '(.*)'"
match = re.search(pattern, msg)
if match:
auto_msg = ('%s parameter is not supported in this'
' version of EOS.' % match.group(1))
_LOGGER.error(auto_msg)
msg = msg + '. ' + auto_msg
raise CommandError(code, msg, command_error=err, output=out)
return decoded
# socket.error is deprecated in python 3 and replaced with OSError.
except (socket.error, OSError) as exc:
_LOGGER.exception(exc)
self.socket_error = exc
self.error = exc
error_msg = 'Socket error during eAPI connection: %s' % str(exc)
raise ConnectionError(str(self), error_msg)
except ValueError as exc:
_LOGGER.exception(exc)
self.socket_error = None
self.error = exc
raise ConnectionError(str(self), 'unable to connect to eAPI')
finally:
self.transport.close()
def _parse_error_message(self, message):
"""Parses the eAPI failure response message
This method accepts an eAPI failure message and parses the necessary
parts in order to generate a CommandError.
Args:
message (str): The error message to parse
Returns:
tuple: A tuple that consists of the following:
* code: The error code specified in the failure message
* message: The error text specified in the failure message
* error: The error text from the command that generated the
error (the last command that ran)
* output: A list of all output from all commands
"""
msg = message['error']['message']
code = message['error']['code']
err = None
out = None
if 'data' in message['error']:
err = ' '.join(message['error']['data'][-1]['errors'])
out = message['error']['data']
return code, msg, err, out
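# Illustrative sketch (hypothetical failure payload) of the tuple returned
# above:
#   message = {'error': {'code': 1002, 'message': 'CLI command 2 of 2 failed',
#                        'data': [{}, {'errors': ['Invalid input']}]}}
#   self._parse_error_message(message)
#   -> (1002, 'CLI command 2 of 2 failed', 'Invalid input',
#       [{}, {'errors': ['Invalid input']}])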
def execute(self, commands, encoding='json', **kwargs):
"""Executes the list of commands on the destination node
This method takes a list of commands and sends them to the
destination node, returning the results. The execute method handles
putting the destination node in enable mode and will pass the
enable password, if required.
Args:
commands (list): A list of commands to execute on the remote node
encoding (string): The encoding to send along with the request
message to the destination node. Valid values include 'json'
or 'text'. This argument will influence the response object
encoding
**kwargs: Arbitrary keyword arguments
Returns:
A decoded response message as a native Python dictionary object
that has been deserialized from JSON.
Raises:
CommandError: A CommandError is raised that includes the error
code, error message along with the list of commands that were
sent to the node. The exception instance is also stored in
the error property and is available until the next request is
sent
"""
if encoding not in ('json', 'text'):
raise TypeError('encoding must be one of [json, text]')
try:
self.error = None
request = self.request(commands, encoding=encoding, **kwargs)
response = self.send(request)
return response
except (ConnectionError, CommandError, TypeError) as exc:
exc.commands = commands
self.error = exc
raise
class SocketEapiConnection(EapiConnection):
def __init__(self, path=None, timeout=60, **kwargs):
super(SocketEapiConnection, self).__init__()
path = path or DEFAULT_UNIX_SOCKET
self.transport = SocketConnection(path, timeout)
class HttpLocalEapiConnection(EapiConnection):
def __init__(self, port=None, path=None, timeout=60, **kwargs):
super(HttpLocalEapiConnection, self).__init__()
port = port or DEFAULT_HTTP_LOCAL_PORT
path = path or DEFAULT_HTTP_PATH
self.transport = HttpConnection(path, 'localhost', int(port),
timeout=timeout)
class HttpEapiConnection(EapiConnection):
def __init__(self, host, port=None, path=None, username=None,
password=None, timeout=60, **kwargs):
super(HttpEapiConnection, self).__init__()
port = port or DEFAULT_HTTP_PORT
path = path or DEFAULT_HTTP_PATH
self.transport = HttpConnection(path, host, int(port), timeout=timeout)
self.authentication(username, password)
class HttpsEapiConnection(EapiConnection):
def __init__(self, host, port=None, path=None, username=None,
password=None, context=None, timeout=60, **kwargs):
super(HttpsEapiConnection, self).__init__()
port = port or DEFAULT_HTTPS_PORT
path = path or DEFAULT_HTTP_PATH
enforce_verification = kwargs.get('enforce_verification')
if context is None and not enforce_verification:
context = self.disable_certificate_verification()
self.transport = https_connection_factory(path, host, int(port),
context, timeout)
self.authentication(username, password)
def disable_certificate_verification(self):
# SSL/TLS certificate verification is enabled by default in latest
# Python releases and causes self-signed certificates generated
# on EOS to fail validation (unless explicitly imported).
# Disable the SSL/TLS certificate verification for now.
# Use the approach in PEP476 to disable certificate validation.
# TODO:
# ************************** WARNING *****************************
# This behaviour is considered a *security risk*, so use it
# temporarily, until a proper fix is implemented.
if hasattr(ssl, '_create_unverified_context'):
return ssl._create_unverified_context()
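# A minimal alternative sketch (hypothetical CA bundle path): callers that
# *can* verify the switch certificate may pass their own context instead of
# relying on the unverified default above:
#   ctx = ssl.create_default_context(cafile='/etc/pki/eos-ca.pem')
#   conn = HttpsEapiConnection('veos01', username='admin', password='admin',
#                              context=ctx)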
class HttpsEapiCertConnection(EapiConnection):
def __init__(self, host, port=None, path=None, key_file=None,
cert_file=None, ca_file=None, timeout=60, **kwargs):
if key_file is None or cert_file is None:
raise ValueError("For https_cert connections both a key_file and "
"cert_file are required. A ca_file is also "
"recommended")
super(HttpsEapiCertConnection, self).__init__()
port = port or DEFAULT_HTTPS_PORT
path = path or DEFAULT_HTTP_PATH
self.transport = HTTPSCertConnection(path, host, int(port),
key_file=key_file,
cert_file=cert_file,
ca_file=ca_file, timeout=timeout)
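# Illustrative end-to-end usage (hypothetical host and credentials) of the
# connection classes above:
#   conn = HttpEapiConnection('192.0.2.1', username='admin', password='admin')
#   resp = conn.execute(['show version'], encoding='json')
#   print(resp['result'][0])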
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import six
import IECore
import Gaffer
import GafferUI
from Qt import QtGui
from Qt import QtWidgets
from Qt import QtCore
class MultiLineTextWidget( GafferUI.Widget ) :
WrapMode = IECore.Enum.create( "None_", "Word", "Character", "WordOrCharacter" )
Role = IECore.Enum.create( "Text", "Code" )
def __init__( self, text="", editable=True, wrapMode=WrapMode.WordOrCharacter, fixedLineHeight=None, role=Role.Text, **kw ) :
GafferUI.Widget.__init__( self, _PlainTextEdit(), **kw )
## \todo This should come from the Style when we get Styles applied to Widgets
# (and not just Gadgets as we have currently).
self._qtWidget().document().setDefaultStyleSheet(
"""
h1 { font-weight : bold; font-size : large; }
h1[class="ERROR"] { color : #ff5555 }
h1[class="WARNING"] { color : #ffb655 }
h1[class="INFO"] { color : #80b3ff }
h1[class="DEBUG"] { color : #aaffcc }
body { color : red }
pre[class="message"] { color : #999999 }
"""
)
self.setText( text )
self.setEditable( editable )
self.setWrapMode( wrapMode )
self.setFixedLineHeight( fixedLineHeight )
self.setRole( role )
self.dragEnterSignal().connect( Gaffer.WeakMethod( self.__dragEnter ), scoped = False )
self.dragMoveSignal().connect( Gaffer.WeakMethod( self.__dragMove ), scoped = False )
self.dragLeaveSignal().connect( Gaffer.WeakMethod( self.__dragLeave ), scoped = False )
self.dropSignal().connect( Gaffer.WeakMethod( self.__drop ), scoped = False )
self._qtWidget().setTabStopWidth( 20 ) # pixels
def getText( self ) :
if six.PY3 :
return self._qtWidget().toPlainText()
else :
# \todo We didn't return `unicode` here because
# we didn't want to break any client code. But perhaps
# now is the time, since everyone is transitioning to
# Python 3?
return self._qtWidget().toPlainText().encode( "utf-8" )
def setText( self, text ) :
if text == self.getText() :
return
return self._qtWidget().setPlainText( text )
## Inserts at the current cursor position.
def insertText( self, text ) :
self._qtWidget().insertPlainText( text )
def appendText( self, text ) :
self._qtWidget().appendPlainText( text )
## Appends HTML-formatted text - when links within
# this are clicked, the linkActivatedSignal will be
# triggered.
def appendHTML( self, html ) :
self._qtWidget().appendHtml( html )
def setEditable( self, editable ) :
self._qtWidget().setReadOnly( not editable )
self._repolish()
def getEditable( self ) :
return not self._qtWidget().isReadOnly()
def setWrapMode( self, wrapMode ) :
self._qtWidget().setWordWrapMode(
{
self.WrapMode.None_ : QtGui.QTextOption.NoWrap,
self.WrapMode.Word : QtGui.QTextOption.WordWrap,
self.WrapMode.Character : QtGui.QTextOption.WrapAnywhere,
self.WrapMode.WordOrCharacter : QtGui.QTextOption.WrapAtWordBoundaryOrAnywhere,
}[wrapMode]
)
def getWrapMode( self ) :
return {
QtGui.QTextOption.NoWrap : self.WrapMode.None_,
QtGui.QTextOption.WordWrap : self.WrapMode.Word,
QtGui.QTextOption.WrapAnywhere : self.WrapMode.Character,
QtGui.QTextOption.WrapAtWordBoundaryOrAnywhere : self.WrapMode.WordOrCharacter,
}[self._qtWidget().wordWrapMode()]
def setFixedLineHeight( self, fixedLineHeight ) :
self._qtWidget().setFixedLineHeight( fixedLineHeight )
def getFixedLineHeight( self ) :
return self._qtWidget().getFixedLineHeight()
def setErrored( self, errored ) :
if errored == self.getErrored() :
return
self._qtWidget().setProperty( "gafferError", GafferUI._Variant.toVariant( bool( errored ) ) )
self._repolish()
def getErrored( self ) :
return GafferUI._Variant.fromVariant( self._qtWidget().property( "gafferError" ) ) or False
def setCursorPosition( self, position ) :
cursor = self._qtWidget().textCursor()
cursor.setPosition( position )
self._qtWidget().setTextCursor( cursor )
def getCursorPosition( self ) :
return self._qtWidget().textCursor().position()
def cursorPositionAt( self, position ) :
return self._qtWidget().cursorForPosition(
QtCore.QPoint( position[0], position[1] )
).position()
def selectedText( self ) :
cursor = self._qtWidget().textCursor()
text = cursor.selection().toPlainText()
if six.PY3 :
return text
else :
return text.encode( "utf-8" )
def linkAt( self, position ) :
link = self._qtWidget().anchorAt( QtCore.QPoint( position[0], position[1] ) )
return str( link )
def textChangedSignal( self ) :
try :
return self.__textChangedSignal
except :
self.__textChangedSignal = GafferUI.WidgetSignal()
self._qtWidget().textChanged.connect( Gaffer.WeakMethod( self.__textChanged ) )
return self.__textChangedSignal
## \todo Should this be at the Widget level?
# QWidgets aren't focussable by default so it's
# up for debate. setFocussed( True ) could make
# them focussable, but then the question is should
# setFocussed( False ) make them unfocussable again?
# Or maybe the first connection to keyPressSignal() should
# make them focussable?
## \todo If we don't move this to Widget, then
# at least make TextWidget match this interface (it
# currently has grabFocus())
def setFocussed( self, focussed ) :
if focussed == self.getFocussed() :
return
if focussed :
self._qtWidget().setFocus()
else :
self._qtWidget().clearFocus()
def getFocussed( self ) :
return self._qtWidget().hasFocus()
def setRole( self, role ) :
if role == self.getRole() :
return
self._qtWidget().setProperty( "gafferRole", GafferUI._Variant.toVariant( str( role ) ) )
self._repolish()
def getRole( self ) :
role = GafferUI._Variant.fromVariant( self._qtWidget().property( "gafferRole" ) )
if role is None :
return self.Role.Text
return getattr( self.Role, role )
## A signal emitted when the widget loses focus.
def editingFinishedSignal( self ) :
try :
return self.__editingFinishedSignal
except :
self.__editingFinishedSignal = GafferUI.WidgetSignal()
self._qtWidget().installEventFilter( _focusOutEventFilter )
return self.__editingFinishedSignal
## A signal emitted when enter (or Ctrl-Return) is pressed.
def activatedSignal( self ) :
try :
return self.__activatedSignal
except :
self.__activatedSignal = GafferUI.WidgetSignal()
self.keyPressSignal().connect( Gaffer.WeakMethod( self.__keyPress ), scoped = False )
return self.__activatedSignal
def linkActivatedSignal( self ) :
try :
return self.__linkActivatedSignal
except :
self.__linkActivatedSignal = GafferUI.WidgetEventSignal()
self.mouseMoveSignal().connect( Gaffer.WeakMethod( self.__mouseMove ), scoped = False )
self.buttonPressSignal().connect( Gaffer.WeakMethod( self.__buttonPress ), scoped = False )
return self.__linkActivatedSignal
## A signal emitted when the widget wants to generate some text
# to be inserted from a drag/drop operation. Signature is
# ( widget, dragData ). By default, only StringData is accepted,
# but by connecting to this signal and returning an appropriate
# string value based on dragData, any other type can be
# accommodated.
def dropTextSignal( self ) :
try :
return self.__dropTextSignal
except :
self.__dropTextSignal = Gaffer.Signal2()
return self.__dropTextSignal
def __textChanged( self ) :
self.__textChangedSignal( self )
def __keyPress( self, widget, event ) :
assert( widget is self )
if event.key=="Enter" or ( event.key=="Return" and event.modifiers==event.Modifiers.Control ) :
self.__activatedSignal( self )
return True
return False
def __mouseMove( self, widget, event ) :
link = self.linkAt( event.line.p0 )
if link :
self._qtWidget().viewport().setCursor( QtGui.QCursor( QtCore.Qt.PointingHandCursor ) )
else :
self._qtWidget().viewport().setCursor( QtGui.QCursor( QtCore.Qt.IBeamCursor ) )
return False
def __buttonPress( self, widget, event ) :
if event.buttons & GafferUI.ButtonEvent.Buttons.Left :
link = self.linkAt( event.line.p0 )
if link :
return self.__linkActivatedSignal( self, link )
return False
def __dropText( self, dragData ) :
signal = None
with IECore.IgnoredExceptions( AttributeError ) :
signal = self.__dropTextSignal
text = None
if signal is not None :
text = signal( self, dragData )
if text is None and isinstance( dragData, IECore.StringData ) :
text = dragData.value
return text
def __dragEnter( self, widget, event ) :
if not self.getEditable() :
return False
if self.__dropText( event.data ) is not None :
self.setFocussed( True )
return True
return False
def __dragMove( self, widget, event ) :
cursorPosition = self.cursorPositionAt( event.line.p0 )
self.setCursorPosition( cursorPosition )
return True
def __dragLeave( self, widget, event ) :
self.setFocussed( False )
def __drop( self, widget, event ) :
self.insertText( self.__dropText( event.data ) )
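# Hedged sketch (added for illustration) of the dropTextSignal() protocol
# documented above: a handler that converts a non-StringData drag into text.
# `IECore.FloatData` is assumed here purely as an example payload type.
def _exampleDropText( widget, dragData ) :
	if isinstance( dragData, IECore.FloatData ) :
		return str( dragData.value )
	return None # fall through to the default StringData handling
# Connecting it requires a live widget, so this is shown as a comment :
# widget.dropTextSignal().connect( _exampleDropText, scoped = False )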
class _PlainTextEdit( QtWidgets.QPlainTextEdit ) :
def __init__( self, parent = None ) :
QtWidgets.QPlainTextEdit.__init__( self, parent )
self.__fixedLineHeight = None
self.__widgetFullyBuilt = False
def setFixedLineHeight( self, fixedLineHeight ) :
self.__fixedLineHeight = fixedLineHeight
self.setSizePolicy(
self.sizePolicy().horizontalPolicy(),
QtWidgets.QSizePolicy.Expanding if self.__fixedLineHeight is None else QtWidgets.QSizePolicy.Fixed
)
self.updateGeometry()
def getFixedLineHeight( self ) :
return self.__fixedLineHeight
def __computeHeight( self, size ) :
fixedLineHeight = self.getFixedLineHeight()
		# When the widget is configured to show a fixed number of lines,
if fixedLineHeight is not None :
			# derive the height from the font metrics and the line count.
height = self.fontMetrics().boundingRect( "M" ).height() * fixedLineHeight
			# We also add the widget margins so the fixed lines are framed nicely.
margin = self.contentsMargins().top() + self.contentsMargins().bottom() + self.document().documentMargin()
height += margin
size.setHeight(height)
return size
def sizeHint( self ) :
size = QtWidgets.QPlainTextEdit.sizeHint( self )
return self.__computeHeight( size )
def minimumSizeHint( self ) :
size = QtWidgets.QPlainTextEdit.minimumSizeHint( self )
return self.__computeHeight( size )
def event( self, event ) :
if event.type() == event.ShortcutOverride and event == QtGui.QKeySequence.Copy :
			# QPlainTextEdit doesn't accept this when it's
			# read-only, so we accept it ourselves, which is
			# enough to re-enable copying from a read-only
			# widget with Ctrl+C.
event.accept()
return True
return QtWidgets.QPlainTextEdit.event( self, event )
class _FocusOutEventFilter( QtCore.QObject ) :
def __init__( self ) :
QtCore.QObject.__init__( self )
def eventFilter( self, qObject, qEvent ) :
if qEvent.type()==QtCore.QEvent.FocusOut :
widget = GafferUI.Widget._owner( qObject )
if widget is not None :
widget.editingFinishedSignal()( widget )
return False
# this single instance is used by all MultiLineTextWidgets
_focusOutEventFilter = _FocusOutEventFilter()
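# Hedged usage sketch (added for illustration; constructing widgets needs a
# running GafferUI application, so this function is defined but never called) :
def _exampleLogWidget() :
	# A read-only, non-wrapping widget whose appended HTML is styled by the
	# default stylesheet installed in MultiLineTextWidget.__init__ above.
	widget = MultiLineTextWidget( editable = False, wrapMode = MultiLineTextWidget.WrapMode.None_, role = MultiLineTextWidget.Role.Code )
	widget.appendHTML( '<h1 class="ERROR">Error</h1><pre class="message">details follow</pre>' )
	return widget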
|
|
#!/usr/bin/env python
import logging
import re
import sys
from collections import defaultdict
from lxml.etree import tostring
from lxml.etree import tounicode
from lxml.html import document_fromstring
from lxml.html import fragment_fromstring
from cleaners import clean_attributes
from cleaners import html_cleaner
from htmls import build_doc
from htmls import get_body
from htmls import get_title
from htmls import shorten_title
logging.basicConfig(level=logging.INFO)
log = logging.getLogger()
REGEXES = {
'unlikelyCandidatesRe': re.compile('combx|comment|community|disqus|extra|foot|header|menu|remark|rss|shoutbox|sidebar|sponsor|ad-break|agegate|pagination|pager|popup|tweet|twitter', re.I),
'okMaybeItsACandidateRe': re.compile('and|article|body|column|main|shadow', re.I),
'positiveRe': re.compile('article|body|content|entry|hentry|main|page|pagination|post|text|blog|story', re.I),
'negativeRe': re.compile('combx|comment|com-|contact|foot|footer|footnote|masthead|media|meta|outbrain|promo|related|scroll|shoutbox|sidebar|sponsor|shopping|tags|tool|widget', re.I),
'divToPElementsRe': re.compile('<(a|blockquote|dl|div|img|ol|p|pre|table|ul)', re.I),
#'replaceBrsRe': re.compile('(<br[^>]*>[ \n\r\t]*){2,}',re.I),
#'replaceFontsRe': re.compile('<(\/?)font[^>]*>',re.I),
#'trimRe': re.compile('^\s+|\s+$/'),
#'normalizeRe': re.compile('\s{2,}/'),
#'killBreaksRe': re.compile('(<br\s*\/?>(\s| ?)*){1,}/'),
#'videoRe': re.compile('http:\/\/(www\.)?(youtube|vimeo)\.com', re.I),
#skipFootnoteLink: /^\s*(\[?[a-z0-9]{1,2}\]?|^|edit|citation needed)\s*$/i,
}
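# For illustration (not part of the original source): an element with
# class="sidebar" or id="comment-form" matches unlikelyCandidatesRe and,
# since neither value matches okMaybeItsACandidateRe, the element is dropped
# early by Document.remove_unlikely_candidates() below.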
class Unparseable(ValueError):
pass
def describe(node, depth=1):
if not hasattr(node, 'tag'):
return "[%s]" % type(node)
name = node.tag
if node.get('id', ''):
name += '#' + node.get('id')
if node.get('class', ''):
name += '.' + node.get('class').replace(' ', '.')
if name[:4] in ['div#', 'div.']:
name = name[3:]
if depth and node.getparent() is not None:
return name + ' - ' + describe(node.getparent(), depth - 1)
return name
def to_int(x):
if not x:
return None
x = x.strip()
if x.endswith('px'):
return int(x[:-2])
if x.endswith('em'):
return int(x[:-2]) * 12
return int(x)
def clean(text):
text = re.sub('\s*\n\s*', '\n', text)
text = re.sub('[ \t]{2,}', ' ', text)
return text.strip()
def text_length(i):
return len(clean(i.text_content() or ""))
class Document:
"""Class to build a etree document out of html."""
TEXT_LENGTH_THRESHOLD = 25
RETRY_LENGTH = 250
def __init__(self, input, **options):
"""Generate the document
:param input: string of the html content.
kwargs:
- attributes:
- debug: output debug messages
        - min_text_length: minimum paragraph text length to be scored
        - retry_length: minimum summary length accepted before retrying
          with more lenient parsing
- url: will allow adjusting links to be absolute
"""
self.input = input
self.options = options
self.html = None
def _html(self, force=False):
if force or self.html is None:
self.html = self._parse(self.input)
return self.html
def _parse(self, input):
doc = build_doc(input)
doc = html_cleaner.clean_html(doc)
base_href = self.options.get('url', None)
if base_href:
doc.make_links_absolute(base_href, resolve_base_href=True)
else:
doc.resolve_base_href()
return doc
def content(self):
return get_body(self._html(True))
def title(self):
return get_title(self._html(True))
def short_title(self):
return shorten_title(self._html(True))
def summary(self, html_partial=False):
"""Generate the summary of the html docuemnt
:param html_partial: return only the div of the document, don't wrap
in html and body tags.
"""
try:
ruthless = True
while True:
self._html(True)
for i in self.tags(self.html, 'script', 'style'):
i.drop_tree()
for i in self.tags(self.html, 'body'):
i.set('id', 'readabilityBody')
if ruthless:
self.remove_unlikely_candidates()
self.transform_misused_divs_into_paragraphs()
candidates = self.score_paragraphs()
best_candidate = self.select_best_candidate(candidates)
if best_candidate:
article = self.get_article(candidates, best_candidate,
html_partial=html_partial)
else:
if ruthless:
log.debug("ruthless removal did not work. ")
ruthless = False
self.debug(
("ended up stripping too much - "
"going for a safer _parse"))
# try again
continue
else:
log.debug(
("Ruthless and lenient parsing did not work. "
"Returning raw html"))
article = self.html.find('body')
if article is None:
article = self.html
cleaned_article = self.sanitize(article, candidates)
article_length = len(cleaned_article or '')
retry_length = self.options.get(
'retry_length',
self.RETRY_LENGTH)
of_acceptable_length = article_length >= retry_length
if ruthless and not of_acceptable_length:
ruthless = False
# Loop through and try again.
continue
else:
return cleaned_article
except StandardError, e:
log.exception('error getting summary: ')
raise Unparseable(str(e)), None, sys.exc_info()[2]
def get_article(self, candidates, best_candidate, html_partial=False):
# Now that we have the top candidate, look through its siblings for
# content that might also be related.
# Things like preambles, content split by ads that we removed, etc.
sibling_score_threshold = max([
10,
best_candidate['content_score'] * 0.2])
# create a new html document with a html->body->div
if html_partial:
output = fragment_fromstring('<div/>')
else:
output = document_fromstring('<div/>')
best_elem = best_candidate['elem']
for sibling in best_elem.getparent().getchildren():
            # in lxml there is no concept of simple text
# if isinstance(sibling, NavigableString): continue
append = False
if sibling is best_elem:
append = True
sibling_key = sibling # HashableElement(sibling)
if sibling_key in candidates and \
candidates[sibling_key]['content_score'] >= sibling_score_threshold:
append = True
if sibling.tag == "p":
link_density = self.get_link_density(sibling)
node_content = sibling.text or ""
node_length = len(node_content)
if node_length > 80 and link_density < 0.25:
append = True
elif node_length <= 80 \
and link_density == 0 \
and re.search('\.( |$)', node_content):
append = True
if append:
# We don't want to append directly to output, but the div
# in html->body->div
if html_partial:
output.append(sibling)
else:
output.getchildren()[0].getchildren()[0].append(sibling)
#if output is not None:
# output.append(best_elem)
return output
def select_best_candidate(self, candidates):
sorted_candidates = sorted(candidates.values(), key=lambda x: x['content_score'], reverse=True)
for candidate in sorted_candidates[:5]:
elem = candidate['elem']
self.debug("Top 5 : %6.3f %s" % (
candidate['content_score'],
describe(elem)))
if len(sorted_candidates) == 0:
return None
best_candidate = sorted_candidates[0]
return best_candidate
def get_link_density(self, elem):
link_length = 0
for i in elem.findall(".//a"):
link_length += text_length(i)
#if len(elem.findall(".//div") or elem.findall(".//p")):
# link_length = link_length
total_length = text_length(elem)
return float(link_length) / max(total_length, 1)
    def score_paragraphs(self):
MIN_LEN = self.options.get(
'min_text_length',
self.TEXT_LENGTH_THRESHOLD)
candidates = {}
ordered = []
for elem in self.tags(self._html(), "p", "pre", "td"):
parent_node = elem.getparent()
if parent_node is None:
continue
grand_parent_node = parent_node.getparent()
inner_text = clean(elem.text_content() or "")
inner_text_len = len(inner_text)
# If this paragraph is less than 25 characters
# don't even count it.
if inner_text_len < MIN_LEN:
continue
if parent_node not in candidates:
candidates[parent_node] = self.score_node(parent_node)
ordered.append(parent_node)
if grand_parent_node is not None and grand_parent_node not in candidates:
candidates[grand_parent_node] = self.score_node(
grand_parent_node)
ordered.append(grand_parent_node)
content_score = 1
content_score += len(inner_text.split(','))
content_score += min((inner_text_len / 100), 3)
#if elem not in candidates:
# candidates[elem] = self.score_node(elem)
#WTF? candidates[elem]['content_score'] += content_score
candidates[parent_node]['content_score'] += content_score
if grand_parent_node is not None:
candidates[grand_parent_node]['content_score'] += content_score / 2.0
# Scale the final candidates score based on link density. Good content
# should have a relatively small link density (5% or less) and be
# mostly unaffected by this operation.
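        # Worked example (added for illustration): a candidate scoring 20.0
        # with 25% of its text inside links keeps 20.0 * (1 - 0.25) = 15.0.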
for elem in ordered:
candidate = candidates[elem]
ld = self.get_link_density(elem)
score = candidate['content_score']
self.debug("Candid: %6.3f %s link density %.3f -> %6.3f" % (
score,
describe(elem),
ld,
score * (1 - ld)))
candidate['content_score'] *= (1 - ld)
return candidates
def class_weight(self, e):
weight = 0
if e.get('class', None):
if REGEXES['negativeRe'].search(e.get('class')):
weight -= 25
if REGEXES['positiveRe'].search(e.get('class')):
weight += 25
if e.get('id', None):
if REGEXES['negativeRe'].search(e.get('id')):
weight -= 25
if REGEXES['positiveRe'].search(e.get('id')):
weight += 25
return weight
def score_node(self, elem):
content_score = self.class_weight(elem)
name = elem.tag.lower()
if name == "div":
content_score += 5
elif name in ["pre", "td", "blockquote"]:
content_score += 3
elif name in ["address", "ol", "ul", "dl", "dd", "dt", "li", "form"]:
content_score -= 3
elif name in ["h1", "h2", "h3", "h4", "h5", "h6", "th"]:
content_score -= 5
return {
'content_score': content_score,
'elem': elem
}
def debug(self, *a):
if self.options.get('debug', False):
log.debug(*a)
def remove_unlikely_candidates(self):
for elem in self.html.iter():
s = "%s %s" % (elem.get('class', ''), elem.get('id', ''))
if len(s) < 2:
continue
#self.debug(s)
if REGEXES['unlikelyCandidatesRe'].search(s) and (not REGEXES['okMaybeItsACandidateRe'].search(s)) and elem.tag not in ['html', 'body']:
self.debug("Removing unlikely candidate - %s" % describe(elem))
elem.drop_tree()
def transform_misused_divs_into_paragraphs(self):
for elem in self.tags(self.html, 'div'):
# transform <div>s that do not contain other block elements into
# <p>s
#FIXME: The current implementation ignores all descendants that
# are not direct children of elem
# This results in incorrect results in case there is an <img>
# buried within an <a> for example
if not REGEXES['divToPElementsRe'].search(
unicode(''.join(map(tostring, list(elem))))):
#self.debug("Altering %s to p" % (describe(elem)))
elem.tag = "p"
#print "Fixed element "+describe(elem)
for elem in self.tags(self.html, 'div'):
if elem.text and elem.text.strip():
p = fragment_fromstring('<p/>')
p.text = elem.text
elem.text = None
elem.insert(0, p)
#print "Appended "+tounicode(p)+" to "+describe(elem)
for pos, child in reversed(list(enumerate(elem))):
if child.tail and child.tail.strip():
p = fragment_fromstring('<p/>')
p.text = child.tail
child.tail = None
elem.insert(pos + 1, p)
#print "Inserted "+tounicode(p)+" to "+describe(elem)
if child.tag == 'br':
#print 'Dropped <br> at '+describe(elem)
child.drop_tree()
def tags(self, node, *tag_names):
for tag_name in tag_names:
for e in node.findall('.//%s' % tag_name):
yield e
def reverse_tags(self, node, *tag_names):
for tag_name in tag_names:
for e in reversed(node.findall('.//%s' % tag_name)):
yield e
def sanitize(self, node, candidates):
MIN_LEN = self.options.get('min_text_length',
self.TEXT_LENGTH_THRESHOLD)
for header in self.tags(node, "h1", "h2", "h3", "h4", "h5", "h6"):
if self.class_weight(header) < 0 or self.get_link_density(header) > 0.33:
header.drop_tree()
for elem in self.tags(node, "form", "iframe", "textarea"):
elem.drop_tree()
allowed = {}
# Conditionally clean <table>s, <ul>s, and <div>s
for el in self.reverse_tags(node, "table", "ul", "div"):
if el in allowed:
continue
weight = self.class_weight(el)
if el in candidates:
content_score = candidates[el]['content_score']
#print '!',el, '-> %6.3f' % content_score
else:
content_score = 0
tag = el.tag
if weight + content_score < 0:
self.debug("Cleaned %s with score %6.3f and weight %-3s" %
(describe(el), content_score, weight, ))
el.drop_tree()
elif el.text_content().count(",") < 10:
counts = {}
for kind in ['p', 'img', 'li', 'a', 'embed', 'input']:
counts[kind] = len(el.findall('.//%s' % kind))
counts["li"] -= 100
# Count the text length excluding any surrounding whitespace
content_length = text_length(el)
link_density = self.get_link_density(el)
parent_node = el.getparent()
if parent_node is not None:
if parent_node in candidates:
content_score = candidates[parent_node]['content_score']
else:
content_score = 0
#if parent_node is not None:
#pweight = self.class_weight(parent_node) + content_score
#pname = describe(parent_node)
#else:
#pweight = 0
#pname = "no parent"
to_remove = False
reason = ""
#if el.tag == 'div' and counts["img"] >= 1:
# continue
if counts["p"] and counts["img"] > counts["p"]:
reason = "too many images (%s)" % counts["img"]
to_remove = True
elif counts["li"] > counts["p"] and tag != "ul" and tag != "ol":
reason = "more <li>s than <p>s"
to_remove = True
elif counts["input"] > (counts["p"] / 3):
reason = "less than 3x <p>s than <input>s"
to_remove = True
elif content_length < (MIN_LEN) and (counts["img"] == 0 or counts["img"] > 2):
reason = "too short content length %s without a single image" % content_length
to_remove = True
elif weight < 25 and link_density > 0.2:
reason = "too many links %.3f for its weight %s" % (
link_density, weight)
to_remove = True
elif weight >= 25 and link_density > 0.5:
reason = "too many links %.3f for its weight %s" % (
link_density, weight)
to_remove = True
elif (counts["embed"] == 1 and content_length < 75) or counts["embed"] > 1:
reason = "<embed>s with too short content length, or too many <embed>s"
to_remove = True
# if el.tag == 'div' and counts['img'] >= 1 and to_remove:
# imgs = el.findall('.//img')
# valid_img = False
# self.debug(tounicode(el))
# for img in imgs:
#
# height = img.get('height')
# text_length = img.get('text_length')
# self.debug ("height %s text_length %s" %(repr(height), repr(text_length)))
# if to_int(height) >= 100 or to_int(text_length) >= 100:
# valid_img = True
# self.debug("valid image" + tounicode(img))
# break
# if valid_img:
# to_remove = False
# self.debug("Allowing %s" %el.text_content())
# for desnode in self.tags(el, "table", "ul", "div"):
# allowed[desnode] = True
#find x non empty preceding and succeeding siblings
i, j = 0, 0
x = 1
siblings = []
for sib in el.itersiblings():
#self.debug(sib.text_content())
sib_content_length = text_length(sib)
if sib_content_length:
                        i += 1
siblings.append(sib_content_length)
if i == x:
break
for sib in el.itersiblings(preceding=True):
#self.debug(sib.text_content())
sib_content_length = text_length(sib)
if sib_content_length:
                        j += 1
siblings.append(sib_content_length)
if j == x:
break
#self.debug(str(siblings))
if siblings and sum(siblings) > 1000:
to_remove = False
self.debug("Allowing %s" % describe(el))
for desnode in self.tags(el, "table", "ul", "div"):
allowed[desnode] = True
if to_remove:
self.debug("Cleaned %6.3f %s with weight %s cause it has %s." %
(content_score, describe(el), weight, reason))
#print tounicode(el)
#self.debug("pname %s pweight %.3f" %(pname, pweight))
el.drop_tree()
for el in ([node] + [n for n in node.iter()]):
if not self.options.get('attributes', None):
#el.attrib = {} #FIXME:Checkout the effects of disabling this
pass
return clean_attributes(tounicode(node))
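# Hedged usage sketch (added for illustration; the HTML is a placeholder).
def _example_summary():
    html = ("<html><body><div><p>" +
            "A readable sentence with enough text to be scored. " * 10 +
            "</p></div></body></html>")
    doc = Document(html)
    return doc.summary(html_partial=True)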
class HashableElement():
def __init__(self, node):
self.node = node
self._path = None
def _get_path(self):
if self._path is None:
reverse_path = []
node = self.node
while node is not None:
node_id = (node.tag, tuple(node.attrib.items()), node.text)
reverse_path.append(node_id)
node = node.getparent()
self._path = tuple(reverse_path)
return self._path
path = property(_get_path)
def __hash__(self):
return hash(self.path)
def __eq__(self, other):
return self.path == other.path
def __getattr__(self, tag):
return getattr(self.node, tag)
def main():
from optparse import OptionParser
parser = OptionParser(usage="%prog: [options] [file]")
parser.add_option('-v', '--verbose', action='store_true')
parser.add_option('-u', '--url', default=None, help="use URL instead of a local file")
(options, args) = parser.parse_args()
if not (len(args) == 1 or options.url):
parser.print_help()
sys.exit(1)
file = None
if options.url:
import urllib
file = urllib.urlopen(options.url)
else:
file = open(args[0], 'rt')
enc = sys.__stdout__.encoding or 'utf-8'
try:
print Document(file.read(),
debug=options.verbose,
url=options.url).summary().encode(enc, 'replace')
finally:
file.close()
if __name__ == '__main__':
main()
|
|
# coding: utf-8
from __future__ import unicode_literals
import pytest
from django.core.paginator import Paginator as DjangoPaginator
from django.db import models
from django.test import TestCase
from rest_framework import (
exceptions, filters, generics, pagination, serializers, status
)
from rest_framework.pagination import PAGE_BREAK, PageLink
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
factory = APIRequestFactory()
class TestPaginationIntegration:
"""
Integration tests.
"""
def setup(self):
class PassThroughSerializer(serializers.BaseSerializer):
def to_representation(self, item):
return item
class EvenItemsOnly(filters.BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
return [item for item in queryset if item % 2 == 0]
class BasicPagination(pagination.PageNumberPagination):
page_size = 5
page_size_query_param = 'page_size'
max_page_size = 20
self.view = generics.ListAPIView.as_view(
serializer_class=PassThroughSerializer,
queryset=range(1, 101),
filter_backends=[EvenItemsOnly],
pagination_class=BasicPagination
)
def test_filtered_items_are_paginated(self):
request = factory.get('/', {'page': 2})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [12, 14, 16, 18, 20],
'previous': 'http://testserver/',
'next': 'http://testserver/?page=3',
'count': 50
}
def test_setting_page_size(self):
"""
        When 'page_size_query_param' is set, the client may choose a page size.
"""
request = factory.get('/', {'page_size': 10})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20],
'previous': None,
'next': 'http://testserver/?page=2&page_size=10',
'count': 50
}
def test_setting_page_size_over_maximum(self):
"""
        When the page_size parameter exceeds the maximum allowable,
        it should be capped to the maximum.
"""
request = factory.get('/', {'page_size': 1000})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [
2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
22, 24, 26, 28, 30, 32, 34, 36, 38, 40
],
'previous': None,
'next': 'http://testserver/?page=2&page_size=1000',
'count': 50
}
def test_setting_page_size_to_zero(self):
"""
        When the page_size parameter is invalid, it should fall back to the default.
"""
request = factory.get('/', {'page_size': 0})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [2, 4, 6, 8, 10],
'previous': None,
'next': 'http://testserver/?page=2&page_size=0',
'count': 50
}
def test_additional_query_params_are_preserved(self):
request = factory.get('/', {'page': 2, 'filter': 'even'})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [12, 14, 16, 18, 20],
'previous': 'http://testserver/?filter=even',
'next': 'http://testserver/?filter=even&page=3',
'count': 50
}
def test_empty_query_params_are_preserved(self):
request = factory.get('/', {'page': 2, 'filter': ''})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == {
'results': [12, 14, 16, 18, 20],
'previous': 'http://testserver/?filter=',
'next': 'http://testserver/?filter=&page=3',
'count': 50
}
def test_404_not_found_for_zero_page(self):
request = factory.get('/', {'page': '0'})
response = self.view(request)
assert response.status_code == status.HTTP_404_NOT_FOUND
assert response.data == {
'detail': 'Invalid page.'
}
def test_404_not_found_for_invalid_page(self):
request = factory.get('/', {'page': 'invalid'})
response = self.view(request)
assert response.status_code == status.HTTP_404_NOT_FOUND
assert response.data == {
'detail': 'Invalid page.'
}
class TestPaginationDisabledIntegration:
"""
Integration tests for disabled pagination.
"""
def setup(self):
class PassThroughSerializer(serializers.BaseSerializer):
def to_representation(self, item):
return item
self.view = generics.ListAPIView.as_view(
serializer_class=PassThroughSerializer,
queryset=range(1, 101),
pagination_class=None
)
def test_unpaginated_list(self):
request = factory.get('/', {'page': 2})
response = self.view(request)
assert response.status_code == status.HTTP_200_OK
assert response.data == list(range(1, 101))
class TestPageNumberPagination:
"""
Unit tests for `pagination.PageNumberPagination`.
"""
def setup(self):
class ExamplePagination(pagination.PageNumberPagination):
page_size = 5
self.pagination = ExamplePagination()
self.queryset = range(1, 101)
def paginate_queryset(self, request):
return list(self.pagination.paginate_queryset(self.queryset, request))
def get_paginated_content(self, queryset):
response = self.pagination.get_paginated_response(queryset)
return response.data
def get_html_context(self):
return self.pagination.get_html_context()
def test_no_page_number(self):
request = Request(factory.get('/'))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [1, 2, 3, 4, 5]
assert content == {
'results': [1, 2, 3, 4, 5],
'previous': None,
'next': 'http://testserver/?page=2',
'count': 100
}
assert context == {
'previous_url': None,
'next_url': 'http://testserver/?page=2',
'page_links': [
PageLink('http://testserver/', 1, True, False),
PageLink('http://testserver/?page=2', 2, False, False),
PageLink('http://testserver/?page=3', 3, False, False),
PAGE_BREAK,
PageLink('http://testserver/?page=20', 20, False, False),
]
}
assert self.pagination.display_page_controls
assert isinstance(self.pagination.to_html(), type(''))
def test_second_page(self):
request = Request(factory.get('/', {'page': 2}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [6, 7, 8, 9, 10]
assert content == {
'results': [6, 7, 8, 9, 10],
'previous': 'http://testserver/',
'next': 'http://testserver/?page=3',
'count': 100
}
assert context == {
'previous_url': 'http://testserver/',
'next_url': 'http://testserver/?page=3',
'page_links': [
PageLink('http://testserver/', 1, False, False),
PageLink('http://testserver/?page=2', 2, True, False),
PageLink('http://testserver/?page=3', 3, False, False),
PAGE_BREAK,
PageLink('http://testserver/?page=20', 20, False, False),
]
}
def test_last_page(self):
request = Request(factory.get('/', {'page': 'last'}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [96, 97, 98, 99, 100]
assert content == {
'results': [96, 97, 98, 99, 100],
'previous': 'http://testserver/?page=19',
'next': None,
'count': 100
}
assert context == {
'previous_url': 'http://testserver/?page=19',
'next_url': None,
'page_links': [
PageLink('http://testserver/', 1, False, False),
PAGE_BREAK,
PageLink('http://testserver/?page=18', 18, False, False),
PageLink('http://testserver/?page=19', 19, False, False),
PageLink('http://testserver/?page=20', 20, True, False),
]
}
def test_invalid_page(self):
request = Request(factory.get('/', {'page': 'invalid'}))
with pytest.raises(exceptions.NotFound):
self.paginate_queryset(request)
class TestPageNumberPaginationOverride:
"""
    Unit tests for `pagination.PageNumberPagination` when
    the Django `Paginator` class is overridden.
"""
def setup(self):
class OverriddenDjangoPaginator(DjangoPaginator):
# override the count in our overridden Django Paginator
# we will only return one page, with one item
count = 1
class ExamplePagination(pagination.PageNumberPagination):
django_paginator_class = OverriddenDjangoPaginator
page_size = 5
self.pagination = ExamplePagination()
self.queryset = range(1, 101)
def paginate_queryset(self, request):
return list(self.pagination.paginate_queryset(self.queryset, request))
def get_paginated_content(self, queryset):
response = self.pagination.get_paginated_response(queryset)
return response.data
def get_html_context(self):
return self.pagination.get_html_context()
def test_no_page_number(self):
request = Request(factory.get('/'))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [1]
assert content == {
'results': [1, ],
'previous': None,
'next': None,
'count': 1
}
assert context == {
'previous_url': None,
'next_url': None,
'page_links': [
PageLink('http://testserver/', 1, True, False),
]
}
assert not self.pagination.display_page_controls
assert isinstance(self.pagination.to_html(), type(''))
def test_invalid_page(self):
request = Request(factory.get('/', {'page': 'invalid'}))
with pytest.raises(exceptions.NotFound):
self.paginate_queryset(request)
class TestLimitOffset:
"""
Unit tests for `pagination.LimitOffsetPagination`.
"""
def setup(self):
class ExamplePagination(pagination.LimitOffsetPagination):
default_limit = 10
max_limit = 15
self.pagination = ExamplePagination()
self.queryset = range(1, 101)
def paginate_queryset(self, request):
return list(self.pagination.paginate_queryset(self.queryset, request))
def get_paginated_content(self, queryset):
response = self.pagination.get_paginated_response(queryset)
return response.data
def get_html_context(self):
return self.pagination.get_html_context()
def test_no_offset(self):
request = Request(factory.get('/', {'limit': 5}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [1, 2, 3, 4, 5]
assert content == {
'results': [1, 2, 3, 4, 5],
'previous': None,
'next': 'http://testserver/?limit=5&offset=5',
'count': 100
}
assert context == {
'previous_url': None,
'next_url': 'http://testserver/?limit=5&offset=5',
'page_links': [
PageLink('http://testserver/?limit=5', 1, True, False),
PageLink('http://testserver/?limit=5&offset=5', 2, False, False),
PageLink('http://testserver/?limit=5&offset=10', 3, False, False),
PAGE_BREAK,
PageLink('http://testserver/?limit=5&offset=95', 20, False, False),
]
}
assert self.pagination.display_page_controls
assert isinstance(self.pagination.to_html(), type(''))
def test_single_offset(self):
"""
When the offset is not a multiple of the limit we get some edge cases:
* The first page should still be offset zero.
* We may end up displaying an extra page in the pagination control.
"""
request = Request(factory.get('/', {'limit': 5, 'offset': 1}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [2, 3, 4, 5, 6]
assert content == {
'results': [2, 3, 4, 5, 6],
'previous': 'http://testserver/?limit=5',
'next': 'http://testserver/?limit=5&offset=6',
'count': 100
}
assert context == {
'previous_url': 'http://testserver/?limit=5',
'next_url': 'http://testserver/?limit=5&offset=6',
'page_links': [
PageLink('http://testserver/?limit=5', 1, False, False),
PageLink('http://testserver/?limit=5&offset=1', 2, True, False),
PageLink('http://testserver/?limit=5&offset=6', 3, False, False),
PAGE_BREAK,
PageLink('http://testserver/?limit=5&offset=96', 21, False, False),
]
}
def test_first_offset(self):
request = Request(factory.get('/', {'limit': 5, 'offset': 5}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [6, 7, 8, 9, 10]
assert content == {
'results': [6, 7, 8, 9, 10],
'previous': 'http://testserver/?limit=5',
'next': 'http://testserver/?limit=5&offset=10',
'count': 100
}
assert context == {
'previous_url': 'http://testserver/?limit=5',
'next_url': 'http://testserver/?limit=5&offset=10',
'page_links': [
PageLink('http://testserver/?limit=5', 1, False, False),
PageLink('http://testserver/?limit=5&offset=5', 2, True, False),
PageLink('http://testserver/?limit=5&offset=10', 3, False, False),
PAGE_BREAK,
PageLink('http://testserver/?limit=5&offset=95', 20, False, False),
]
}
def test_middle_offset(self):
request = Request(factory.get('/', {'limit': 5, 'offset': 10}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [11, 12, 13, 14, 15]
assert content == {
'results': [11, 12, 13, 14, 15],
'previous': 'http://testserver/?limit=5&offset=5',
'next': 'http://testserver/?limit=5&offset=15',
'count': 100
}
assert context == {
'previous_url': 'http://testserver/?limit=5&offset=5',
'next_url': 'http://testserver/?limit=5&offset=15',
'page_links': [
PageLink('http://testserver/?limit=5', 1, False, False),
PageLink('http://testserver/?limit=5&offset=5', 2, False, False),
PageLink('http://testserver/?limit=5&offset=10', 3, True, False),
PageLink('http://testserver/?limit=5&offset=15', 4, False, False),
PAGE_BREAK,
PageLink('http://testserver/?limit=5&offset=95', 20, False, False),
]
}
def test_ending_offset(self):
request = Request(factory.get('/', {'limit': 5, 'offset': 95}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
context = self.get_html_context()
assert queryset == [96, 97, 98, 99, 100]
assert content == {
'results': [96, 97, 98, 99, 100],
'previous': 'http://testserver/?limit=5&offset=90',
'next': None,
'count': 100
}
assert context == {
'previous_url': 'http://testserver/?limit=5&offset=90',
'next_url': None,
'page_links': [
PageLink('http://testserver/?limit=5', 1, False, False),
PAGE_BREAK,
PageLink('http://testserver/?limit=5&offset=85', 18, False, False),
PageLink('http://testserver/?limit=5&offset=90', 19, False, False),
PageLink('http://testserver/?limit=5&offset=95', 20, True, False),
]
}
    def test_erroneous_offset(self):
        """
        An out-of-range offset should not raise an error.
        """
request = Request(factory.get('/', {'limit': 5, 'offset': 1000}))
queryset = self.paginate_queryset(request)
self.get_paginated_content(queryset)
self.get_html_context()
def test_invalid_offset(self):
"""
An invalid offset query param should be treated as 0.
"""
request = Request(factory.get('/', {'limit': 5, 'offset': 'invalid'}))
queryset = self.paginate_queryset(request)
assert queryset == [1, 2, 3, 4, 5]
def test_invalid_limit(self):
"""
An invalid limit query param should be ignored in favor of the default.
"""
request = Request(factory.get('/', {'limit': 'invalid', 'offset': 0}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
next_limit = self.pagination.default_limit
next_offset = self.pagination.default_limit
next_url = 'http://testserver/?limit={0}&offset={1}'.format(next_limit, next_offset)
assert queryset == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert content.get('next') == next_url
def test_zero_limit(self):
"""
        A zero limit query param should be ignored in favor of the default.
"""
request = Request(factory.get('/', {'limit': 0, 'offset': 0}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
next_limit = self.pagination.default_limit
next_offset = self.pagination.default_limit
next_url = 'http://testserver/?limit={0}&offset={1}'.format(next_limit, next_offset)
assert queryset == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert content.get('next') == next_url
def test_max_limit(self):
"""
        The limit defaults to the max_limit when there is a max_limit and the
        requested limit is greater than the max_limit.
"""
offset = 50
request = Request(factory.get('/', {'limit': '11235', 'offset': offset}))
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
max_limit = self.pagination.max_limit
next_offset = offset + max_limit
prev_offset = offset - max_limit
base_url = 'http://testserver/?limit={0}'.format(max_limit)
next_url = base_url + '&offset={0}'.format(next_offset)
prev_url = base_url + '&offset={0}'.format(prev_offset)
assert queryset == list(range(51, 66))
assert content.get('next') == next_url
assert content.get('previous') == prev_url
class CursorPaginationTestsMixin:
def test_invalid_cursor(self):
request = Request(factory.get('/', {'cursor': '123'}))
with pytest.raises(exceptions.NotFound):
self.pagination.paginate_queryset(self.queryset, request)
def test_use_with_ordering_filter(self):
class MockView:
filter_backends = (filters.OrderingFilter,)
ordering_fields = ['username', 'created']
ordering = 'created'
request = Request(factory.get('/', {'ordering': 'username'}))
ordering = self.pagination.get_ordering(request, [], MockView())
assert ordering == ('username',)
request = Request(factory.get('/', {'ordering': '-username'}))
ordering = self.pagination.get_ordering(request, [], MockView())
assert ordering == ('-username',)
request = Request(factory.get('/', {'ordering': 'invalid'}))
ordering = self.pagination.get_ordering(request, [], MockView())
assert ordering == ('created',)
def test_cursor_pagination(self):
(previous, current, next, previous_url, next_url) = self.get_pages('/')
assert previous is None
assert current == [1, 1, 1, 1, 1]
assert next == [1, 2, 3, 4, 4]
(previous, current, next, previous_url, next_url) = self.get_pages(next_url)
assert previous == [1, 1, 1, 1, 1]
assert current == [1, 2, 3, 4, 4]
assert next == [4, 4, 5, 6, 7]
(previous, current, next, previous_url, next_url) = self.get_pages(next_url)
assert previous == [1, 2, 3, 4, 4]
assert current == [4, 4, 5, 6, 7]
assert next == [7, 7, 7, 7, 7]
(previous, current, next, previous_url, next_url) = self.get_pages(next_url)
assert previous == [4, 4, 4, 5, 6] # Paging artifact
assert current == [7, 7, 7, 7, 7]
assert next == [7, 7, 7, 8, 9]
(previous, current, next, previous_url, next_url) = self.get_pages(next_url)
assert previous == [7, 7, 7, 7, 7]
assert current == [7, 7, 7, 8, 9]
assert next == [9, 9, 9, 9, 9]
(previous, current, next, previous_url, next_url) = self.get_pages(next_url)
assert previous == [7, 7, 7, 8, 9]
assert current == [9, 9, 9, 9, 9]
assert next is None
(previous, current, next, previous_url, next_url) = self.get_pages(previous_url)
assert previous == [7, 7, 7, 7, 7]
assert current == [7, 7, 7, 8, 9]
assert next == [9, 9, 9, 9, 9]
(previous, current, next, previous_url, next_url) = self.get_pages(previous_url)
assert previous == [4, 4, 5, 6, 7]
assert current == [7, 7, 7, 7, 7]
assert next == [8, 9, 9, 9, 9] # Paging artifact
(previous, current, next, previous_url, next_url) = self.get_pages(previous_url)
assert previous == [1, 2, 3, 4, 4]
assert current == [4, 4, 5, 6, 7]
assert next == [7, 7, 7, 7, 7]
(previous, current, next, previous_url, next_url) = self.get_pages(previous_url)
assert previous == [1, 1, 1, 1, 1]
assert current == [1, 2, 3, 4, 4]
assert next == [4, 4, 5, 6, 7]
(previous, current, next, previous_url, next_url) = self.get_pages(previous_url)
assert previous is None
assert current == [1, 1, 1, 1, 1]
assert next == [1, 2, 3, 4, 4]
assert isinstance(self.pagination.to_html(), type(''))
class TestCursorPagination(CursorPaginationTestsMixin):
"""
Unit tests for `pagination.CursorPagination`.
"""
def setup(self):
class MockObject(object):
def __init__(self, idx):
self.created = idx
class MockQuerySet(object):
def __init__(self, items):
self.items = items
def filter(self, created__gt=None, created__lt=None):
if created__gt is not None:
return MockQuerySet([
item for item in self.items
if item.created > int(created__gt)
])
assert created__lt is not None
return MockQuerySet([
item for item in self.items
if item.created < int(created__lt)
])
def order_by(self, *ordering):
if ordering[0].startswith('-'):
return MockQuerySet(list(reversed(self.items)))
return self
def __getitem__(self, sliced):
return self.items[sliced]
class ExamplePagination(pagination.CursorPagination):
page_size = 5
ordering = 'created'
self.pagination = ExamplePagination()
self.queryset = MockQuerySet([
MockObject(idx) for idx in [
1, 1, 1, 1, 1,
1, 2, 3, 4, 4,
4, 4, 5, 6, 7,
7, 7, 7, 7, 7,
7, 7, 7, 8, 9,
9, 9, 9, 9, 9
]
])
def get_pages(self, url):
"""
Given a URL return a tuple of:
(previous page, current page, next page, previous url, next url)
"""
request = Request(factory.get(url))
queryset = self.pagination.paginate_queryset(self.queryset, request)
current = [item.created for item in queryset]
next_url = self.pagination.get_next_link()
previous_url = self.pagination.get_previous_link()
if next_url is not None:
request = Request(factory.get(next_url))
queryset = self.pagination.paginate_queryset(self.queryset, request)
next = [item.created for item in queryset]
else:
next = None
if previous_url is not None:
request = Request(factory.get(previous_url))
queryset = self.pagination.paginate_queryset(self.queryset, request)
previous = [item.created for item in queryset]
else:
previous = None
return (previous, current, next, previous_url, next_url)
class CursorPaginationModel(models.Model):
created = models.IntegerField()
class TestCursorPaginationWithValueQueryset(CursorPaginationTestsMixin, TestCase):
"""
Unit tests for `pagination.CursorPagination` for value querysets.
"""
def setUp(self):
class ExamplePagination(pagination.CursorPagination):
page_size = 5
ordering = 'created'
self.pagination = ExamplePagination()
data = [
1, 1, 1, 1, 1,
1, 2, 3, 4, 4,
4, 4, 5, 6, 7,
7, 7, 7, 7, 7,
7, 7, 7, 8, 9,
9, 9, 9, 9, 9
]
for idx in data:
CursorPaginationModel.objects.create(created=idx)
self.queryset = CursorPaginationModel.objects.values()
def get_pages(self, url):
"""
Given a URL return a tuple of:
(previous page, current page, next page, previous url, next url)
"""
request = Request(factory.get(url))
queryset = self.pagination.paginate_queryset(self.queryset, request)
current = [item['created'] for item in queryset]
next_url = self.pagination.get_next_link()
previous_url = self.pagination.get_previous_link()
if next_url is not None:
request = Request(factory.get(next_url))
queryset = self.pagination.paginate_queryset(self.queryset, request)
next = [item['created'] for item in queryset]
else:
next = None
if previous_url is not None:
request = Request(factory.get(previous_url))
queryset = self.pagination.paginate_queryset(self.queryset, request)
previous = [item['created'] for item in queryset]
else:
previous = None
return (previous, current, next, previous_url, next_url)
def test_get_displayed_page_numbers():
"""
Test our contextual page display function.
This determines which pages to display in a pagination control,
given the current page and the last page.
"""
displayed_page_numbers = pagination._get_displayed_page_numbers
# At five pages or less, all pages are displayed, always.
assert displayed_page_numbers(1, 5) == [1, 2, 3, 4, 5]
assert displayed_page_numbers(2, 5) == [1, 2, 3, 4, 5]
assert displayed_page_numbers(3, 5) == [1, 2, 3, 4, 5]
assert displayed_page_numbers(4, 5) == [1, 2, 3, 4, 5]
assert displayed_page_numbers(5, 5) == [1, 2, 3, 4, 5]
    # Between six and eight pages we may have a single page break.
assert displayed_page_numbers(1, 6) == [1, 2, 3, None, 6]
assert displayed_page_numbers(2, 6) == [1, 2, 3, None, 6]
assert displayed_page_numbers(3, 6) == [1, 2, 3, 4, 5, 6]
assert displayed_page_numbers(4, 6) == [1, 2, 3, 4, 5, 6]
assert displayed_page_numbers(5, 6) == [1, None, 4, 5, 6]
assert displayed_page_numbers(6, 6) == [1, None, 4, 5, 6]
assert displayed_page_numbers(1, 7) == [1, 2, 3, None, 7]
assert displayed_page_numbers(2, 7) == [1, 2, 3, None, 7]
assert displayed_page_numbers(3, 7) == [1, 2, 3, 4, None, 7]
assert displayed_page_numbers(4, 7) == [1, 2, 3, 4, 5, 6, 7]
assert displayed_page_numbers(5, 7) == [1, None, 4, 5, 6, 7]
assert displayed_page_numbers(6, 7) == [1, None, 5, 6, 7]
assert displayed_page_numbers(7, 7) == [1, None, 5, 6, 7]
assert displayed_page_numbers(1, 8) == [1, 2, 3, None, 8]
assert displayed_page_numbers(2, 8) == [1, 2, 3, None, 8]
assert displayed_page_numbers(3, 8) == [1, 2, 3, 4, None, 8]
assert displayed_page_numbers(4, 8) == [1, 2, 3, 4, 5, None, 8]
assert displayed_page_numbers(5, 8) == [1, None, 4, 5, 6, 7, 8]
assert displayed_page_numbers(6, 8) == [1, None, 5, 6, 7, 8]
assert displayed_page_numbers(7, 8) == [1, None, 6, 7, 8]
assert displayed_page_numbers(8, 8) == [1, None, 6, 7, 8]
# At nine or more pages we may have two page breaks, one on each side.
assert displayed_page_numbers(1, 9) == [1, 2, 3, None, 9]
assert displayed_page_numbers(2, 9) == [1, 2, 3, None, 9]
assert displayed_page_numbers(3, 9) == [1, 2, 3, 4, None, 9]
assert displayed_page_numbers(4, 9) == [1, 2, 3, 4, 5, None, 9]
assert displayed_page_numbers(5, 9) == [1, None, 4, 5, 6, None, 9]
assert displayed_page_numbers(6, 9) == [1, None, 5, 6, 7, 8, 9]
assert displayed_page_numbers(7, 9) == [1, None, 6, 7, 8, 9]
assert displayed_page_numbers(8, 9) == [1, None, 7, 8, 9]
assert displayed_page_numbers(9, 9) == [1, None, 7, 8, 9]
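# Hedged sketch (added for illustration; not necessarily DRF's own source):
# one windowing rule consistent with the assertions above. Keep page 1, the
# last page and current +/- 1, widen near the edges so a break never hides
# just a single page, and insert None wherever pages are elided.
def _displayed_page_numbers_sketch(current, final):
    if final <= 5:
        return list(range(1, final + 1))
    included = {1, current - 1, current, current + 1, final}
    if current <= 4:
        included |= {2, 3}
    if current >= final - 3:
        included |= {final - 2, final - 1}
    pages = [n for n in sorted(included) if 1 <= n <= final]
    if current > 4:
        pages.insert(1, None)  # break after the first page
    if current < final - 3:
        pages.insert(len(pages) - 1, None)  # break before the last page
    return pages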
|
|
#!/usr/bin/env python
"""A `Property Lists`_ is a data representation used in Apple's Mac OS X as
a convenient way to store standard object types, such as string, number,
boolean, and container object.
This file contains a class ``XmlPropertyListParser`` for parse
a property list file and get back a python native data structure.
:copyright: 2008 by Takanori Ishikawa <takanori.ishikawa@gmail.com>
:license: MIT (See LICENSE file for more details)
.. _Property Lists: http://developer.apple.com/documentation/Cocoa/Conceptual/PropertyLists/
"""
import re
import sys
if sys.version_info >= (3,):
    # Some forward compatibility
basestring = str
class PropertyListParseError(Exception):
"""Raised when parsing a property list is failed."""
pass
class XmlPropertyListParser(object):
"""The ``XmlPropertyListParser`` class provides methods that
convert `Property Lists`_ objects from xml format.
Property list objects include ``string``, ``unicode``,
``list``, ``dict``, ``datetime``, and ``int`` or ``float``.
:copyright: 2008 by Takanori Ishikawa <takanori.ishikawa@gmail.com>
:license: MIT License
.. _Property List: http://developer.apple.com/documentation/Cocoa/Conceptual/PropertyLists/
"""
def _assert(self, test, message):
if not test:
raise PropertyListParseError(message)
# ------------------------------------------------
# SAX2: ContentHandler
# ------------------------------------------------
def setDocumentLocator(self, locator):
pass
def startPrefixMapping(self, prefix, uri):
pass
def endPrefixMapping(self, prefix):
pass
def startElementNS(self, name, qname, attrs):
pass
def endElementNS(self, name, qname):
pass
def ignorableWhitespace(self, whitespace):
pass
def processingInstruction(self, target, data):
pass
def skippedEntity(self, name):
pass
def startDocument(self):
self.__stack = []
self.__plist = self.__key = self.__characters = None
        # To reduce runtime type checking, the parser caches
        # whether the current (innermost) container is a dict.
self.__in_dict = False
def endDocument(self):
self._assert(self.__plist is not None, "A top level element must be <plist>.")
self._assert(
            len(self.__stack) == 0,
"multiple objects at top level.")
def startElement(self, name, attributes):
if name in XmlPropertyListParser.START_CALLBACKS:
XmlPropertyListParser.START_CALLBACKS[name](self, name, attributes)
if name in XmlPropertyListParser.PARSE_CALLBACKS:
self.__characters = []
def endElement(self, name):
if name in XmlPropertyListParser.END_CALLBACKS:
XmlPropertyListParser.END_CALLBACKS[name](self, name)
if name in XmlPropertyListParser.PARSE_CALLBACKS:
# Creates character string from buffered characters.
content = ''.join(self.__characters)
# For compatibility with ``xml.etree`` and ``plistlib``,
# convert text string to ascii, if possible
try:
content = content.encode('ascii')
except (UnicodeError, AttributeError):
pass
XmlPropertyListParser.PARSE_CALLBACKS[name](self, name, content)
self.__characters = None
def characters(self, content):
if self.__characters is not None:
self.__characters.append(content)
# ------------------------------------------------
# XmlPropertyListParser private
# ------------------------------------------------
def _push_value(self, value):
if not self.__stack:
self._assert(self.__plist is None, "Multiple objects at top level")
self.__plist = value
else:
top = self.__stack[-1]
#assert isinstance(top, (dict, list))
if self.__in_dict:
k = self.__key
if k is None:
raise PropertyListParseError("Missing key for dictionary.")
top[k] = value
self.__key = None
else:
top.append(value)
def _push_stack(self, value):
self.__stack.append(value)
self.__in_dict = isinstance(value, dict)
def _pop_stack(self):
self.__stack.pop()
self.__in_dict = self.__stack and isinstance(self.__stack[-1], dict)
def _start_plist(self, name, attrs):
self._assert(not self.__stack and self.__plist is None, "<plist> more than once.")
        self._assert(attrs.get('version', '1.0') == '1.0',
                     "only version 1.0 is supported, but was '%s'." % attrs.get('version'))
def _start_array(self, name, attrs):
v = list()
self._push_value(v)
self._push_stack(v)
def _start_dict(self, name, attrs):
v = dict()
self._push_value(v)
self._push_stack(v)
def _end_array(self, name):
self._pop_stack()
def _end_dict(self, name):
if self.__key is not None:
raise PropertyListParseError("Missing value for key '%s'" % self.__key)
self._pop_stack()
def _start_true(self, name, attrs):
self._push_value(True)
def _start_false(self, name, attrs):
self._push_value(False)
def _parse_key(self, name, content):
if not self.__in_dict:
raise PropertyListParseError("<key> element must be in <dict> element.")
self.__key = content
def _parse_string(self, name, content):
self._push_value(content)
def _parse_data(self, name, content):
import base64
self._push_value(base64.b64decode(content))
# http://www.apple.com/DTDs/PropertyList-1.0.dtd says:
#
# Contents should conform to a subset of ISO 8601
# (in particular, YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'.
# Smaller units may be omitted with a loss of precision)
DATETIME_PATTERN = re.compile(r"(?P<year>\d\d\d\d)(?:-(?P<month>\d\d)(?:-(?P<day>\d\d)(?:T(?P<hour>\d\d)(?::(?P<minute>\d\d)(?::(?P<second>\d\d))?)?)?)?)?Z$")
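    # Worked examples (added for illustration): "2008Z" parses to
    # datetime(2008, 1, 1) after the padding in _parse_date below, while
    # "2008-05-01T12:30:00Z" parses to datetime(2008, 5, 1, 12, 30, 0).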
def _parse_date(self, name, content):
import datetime
units = ('year', 'month', 'day', 'hour', 'minute', 'second', )
pattern = XmlPropertyListParser.DATETIME_PATTERN
match = pattern.match(content)
if not match:
raise PropertyListParseError("Failed to parse datetime '%s'" % content)
groups, components = match.groupdict(), []
for key in units:
value = groups[key]
if value is None:
break
components.append(int(value))
while len(components) < 3:
components.append(1)
d = datetime.datetime(*components)
self._push_value(d)
def _parse_real(self, name, content):
self._push_value(float(content))
def _parse_integer(self, name, content):
self._push_value(int(content))
START_CALLBACKS = {
'plist': _start_plist,
'array': _start_array,
'dict': _start_dict,
'true': _start_true,
'false': _start_false,
}
END_CALLBACKS = {
'array': _end_array,
'dict': _end_dict,
}
PARSE_CALLBACKS = {
'key': _parse_key,
'string': _parse_string,
'data': _parse_data,
'date': _parse_date,
'real': _parse_real,
'integer': _parse_integer,
}
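    # Note: these dispatch tables hold the handlers defined above as plain
    # functions rather than bound methods, which is why every call site
    # invokes them as ``CALLBACKS[name](self, ...)`` with an explicit self.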
# ------------------------------------------------
# XmlPropertyListParser
# ------------------------------------------------
def _to_stream(self, io_or_string):
        if isinstance(io_or_string, basestring):
            # Creates a string stream for in-memory contents.
            # NOTE: StringIO.StringIO accepts both str and unicode under
            # Python 2, unlike io.StringIO which rejects byte strings.
            from StringIO import StringIO
            return StringIO(io_or_string)
elif hasattr(io_or_string, 'read') and callable(getattr(io_or_string, 'read')):
return io_or_string
else:
            raise TypeError('Can\'t convert %s to a file-like object' % type(io_or_string))
def _parse_using_etree(self, xml_input):
from xml.etree.cElementTree import iterparse
parser = iterparse(self._to_stream(xml_input), events=('start', 'end'))
self.startDocument()
try:
for action, element in parser:
name = element.tag
if action == 'start':
if name in XmlPropertyListParser.START_CALLBACKS:
XmlPropertyListParser.START_CALLBACKS[name](self, element.tag, element.attrib)
elif action == 'end':
if name in XmlPropertyListParser.END_CALLBACKS:
XmlPropertyListParser.END_CALLBACKS[name](self, name)
if name in XmlPropertyListParser.PARSE_CALLBACKS:
XmlPropertyListParser.PARSE_CALLBACKS[name](self, name, element.text or "")
element.clear()
except SyntaxError as e:
raise PropertyListParseError(e)
self.endDocument()
return self.__plist
def _parse_using_sax_parser(self, xml_input):
from xml.sax import make_parser, xmlreader, SAXParseException
source = xmlreader.InputSource()
source.setByteStream(self._to_stream(xml_input))
reader = make_parser()
reader.setContentHandler(self)
try:
reader.parse(source)
except SAXParseException as e:
raise PropertyListParseError(e)
return self.__plist
def parse(self, xml_input):
"""Parse the property list (`.plist`, `.xml, for example) ``xml_input``,
which can be either a string or a file-like object.
>>> parser = XmlPropertyListParser()
>>> parser.parse(r'<plist version="1.0">'
... r'<dict><key>Python</key><string>.py</string></dict>'
... r'</plist>')
{'Python': '.py'}
"""
try:
return self._parse_using_etree(xml_input)
except ImportError:
            # No xml.etree.cElementTree found.
return self._parse_using_sax_parser(xml_input)
def parse_string(io_or_string):
"""Parse a string (or a stream) and return the resulting object.
"""
return XmlPropertyListParser().parse(io_or_string)
def parse_file(file_path):
"""Parse the specified file and return the resulting object.
"""
with open(file_path) as f:
return XmlPropertyListParser().parse(f)
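
# --- Hedged usage sketch (not part of the original module; the plist text
# below is illustrative) ---
if __name__ == '__main__':
    EXAMPLE = ('<plist version="1.0">'
               '<dict>'
               '<key>name</key><string>demo</string>'
               '<key>count</key><integer>3</integer>'
               '<key>tags</key><array><string>a</string><string>b</string></array>'
               '</dict>'
               '</plist>')
    result = parse_string(EXAMPLE)
    assert result == {'name': 'demo', 'count': 3, 'tags': ['a', 'b']}
    print(result)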
|
|
import boto3, botocore
import os, uuid, json, sys
from urlparse import urlparse
from boto3.s3.transfer import S3Transfer
from pyntaws.services._session import AWSSession
import __builtin__
__builtin__.validated_templates = list()
class AWSCloudFormation(AWSSession):
def __init__(self, **kwargs):
        super(AWSCloudFormation, self).__init__(kwargs['profile_name'])
self.stack_name = kwargs['stack_name']
        if len(kwargs) == 2 and 'profile_name' in kwargs and 'stack_name' in kwargs:
            # Easy service, for lookups only: skip the template/S3 setup
            # below, which exists()/output() do not need.
            self.easy_service = True
            return
        self.easy_service = False
self.on_failure = kwargs.get('on_failure', 'ROLLBACK')
if 'template' in kwargs and type(kwargs['template']) == str:
self.template = kwargs['template']
else:
raise Exception('Missing or wrong parameter: template')
self.includes = list()
if 'includes' in kwargs:
if type(kwargs['includes']) == list:
self.includes = kwargs['includes']
else:
raise Exception('Wrong parameter type: includes = {}'.format(type(kwargs['includes'])))
self.resources = list()
if 'resources' in kwargs:
if type(kwargs['resources']) == list:
self.resources = kwargs['resources']
else:
raise Exception('Wrong parameter type: resources = {}'.format(type(kwargs['resources'])))
if 'parameters' in kwargs:
self.parameters = kwargs['parameters']
else:
self.parameters = None
self.template = os.path.abspath(os.path.join(os.getcwd(), './src/main/cloudformation/', self.template))
for idx, template in enumerate(self.includes):
if not os.path.isabs(template):
if template.startswith('./') or template.startswith('../'):
self.includes[idx] = os.path.abspath(os.path.join(os.getcwd(), template))
else:
self.includes[idx] = os.path.abspath(os.path.join(os.getcwd(), './src/main/cloudformation/', template))
if not os.path.isfile(self.includes[idx]):
raise Exception("Can't find template file '{}' ({})".format(template, self.includes[idx]))
for idx, file in enumerate(self.resources):
if not os.path.isabs(file):
if file.startswith('./') or file.startswith('../'):
self.resources[idx] = os.path.abspath(os.path.join(os.getcwd(), file))
else:
self.resources[idx] = os.path.abspath(os.path.join(os.getcwd(), './src/main/resources/', file))
if not os.path.isfile(self.resources[idx]):
raise Exception("Can't find resource file '{}' ({})".format(file, self.resources[idx]))
url = urlparse(kwargs['s3_uri'])
self.s3_bucket = url.netloc
self.s3_key = url.path
if self.s3_key.endswith('/'):
self.s3_key = "%s%s" % (self.s3_key, os.path.basename(self.template))
if self.s3_key.startswith('/'):
self.s3_key = self.s3_key[1:]
__builtin__.aws_cloudformation = self
# @property
# def s3_uri(self):
# return self._s3_uri
def exists(self):
cloudformation = self.session.client('cloudformation')
try:
stack = None
nextToken = None
while not stack:
resp = None
if nextToken:
resp = cloudformation.describe_stacks(StackName = self.stack_name, NextToken = nextToken)
else:
resp = cloudformation.describe_stacks(StackName = self.stack_name)
for stack in resp['Stacks']:
if stack['StackStatus'] in ['CREATE_COMPLETE', 'ROLLBACK_COMPLETE','UPDATE_COMPLETE','UPDATE_ROLLBACK_COMPLETE']:
return True
                if 'NextToken' in resp:
                    nextToken = resp['NextToken']
return False
        except botocore.exceptions.ClientError as err:
            err_msg = err.response['Error']['Message']
            err_code = err.response['Error']['Code']
            if err_msg == "Stack with id {} does not exist".format(self.stack_name) or err_code == 'ValidationError':
                return False
            raise
def outputs(self, output_key, **kwargs):
return self.output(output_key, **kwargs)
def output(self, output_key, **kwargs):
cloudformation = self.session.client('cloudformation')
no_fail = False
if kwargs:
no_fail = kwargs.get('no_fail', False)
try:
stack = None
nextToken = None
while not stack:
resp = None
if nextToken:
resp = cloudformation.describe_stacks(StackName = self.stack_name, NextToken = nextToken)
else:
resp = cloudformation.describe_stacks(StackName = self.stack_name)
for stack in resp['Stacks']:
if stack['StackStatus'] in ['CREATE_COMPLETE', 'ROLLBACK_COMPLETE','UPDATE_COMPLETE','UPDATE_ROLLBACK_COMPLETE']:
break
                if 'NextToken' in resp:
                    nextToken = resp['NextToken']
# output_value = None
if 'Outputs' in stack:
for output in stack['Outputs']:
if output['OutputKey'] == output_key:
return output['OutputValue']
except botocore.exceptions.ClientError as err:
err_msg = err.response['Error']['Message']
err_code = err.response['Error']['Code']
            if err_msg == "Stack with id {} does not exist".format(self.stack_name) or err_code == 'ValidationError':
                if no_fail:
                    print "Stack with id {} does not exist".format(self.stack_name)
                else:
                    raise Exception, "Stack with id {} does not exist".format(self.stack_name), sys.exc_info()[2]
            else:
                raise
print "Can't find output parameter %s in stack %s under %s profile" % (output_key, self.stack_name, self.profile_name)
return None
def validate(self, details = False):
s3 = self.session.client('s3')
for template in ([self.template] + self.includes):
if template in __builtin__.validated_templates:
print 'Template {} has been validated already'.format(template)
continue
else:
__builtin__.validated_templates.append(template)
temp_filename = "temp/%s-%s" % (uuid.uuid4(), os.path.basename(template))
print "Uploading %s to temporary location s3://%s/%s" % (template, self.s3_bucket, temp_filename)
S3Transfer(s3).upload_file(
template,
self.s3_bucket,
temp_filename,
extra_args={'ACL': 'bucket-owner-full-control'}
)
template_url = "https://s3.amazonaws.com/%s/%s" % (self.s3_bucket, temp_filename)
print "Validating template %s" % template_url
resp = self.session.client('cloudformation').validate_template(
TemplateURL = template_url
)
if details:
print 'Template {} details: {}'.format(template, json.dumps(resp, indent=2, separators=(',', ': ')))
print "Removing temporary file /%s from s3" % temp_filename
s3.delete_object(
Bucket = self.s3_bucket,
Key = temp_filename,
)
def create(self, **kwargs):
self._upload()
cloudformation = self.session.client('cloudformation')
stack_name = self.stack_name
if 'stack_name' in kwargs:
stack_name = kwargs.get('stack_name')
template_url = "https://s3.amazonaws.com/%s/%s" % (self.s3_bucket, self.s3_key)
print "Creating stack {}".format(stack_name)
resp = cloudformation.create_stack(
StackName = stack_name,
TemplateURL = template_url,
Capabilities = ['CAPABILITY_NAMED_IAM'],
OnFailure = self.on_failure,
Parameters = self._join_parameters(self.parameters, kwargs.get('parameters', None))
)
waiter = cloudformation.get_waiter('stack_create_complete')
waiter.wait(
StackName = resp['StackId']
)
return
def update(self, **kwargs):
self._upload()
cloudformation = self.session.client('cloudformation')
stack_name = self.stack_name
if 'stack_name' in kwargs:
stack_name = kwargs.get('stack_name')
template_url = "https://s3.amazonaws.com/%s/%s" % (self.s3_bucket, self.s3_key)
print "Updating stack {}".format(stack_name)
resp = cloudformation.update_stack(
StackName = stack_name,
TemplateURL = template_url,
Capabilities = ['CAPABILITY_NAMED_IAM'],
Parameters = self._join_parameters(self.parameters, kwargs.get('parameters', None))
)
waiter = cloudformation.get_waiter('stack_update_complete')
waiter.wait(
StackName = resp['StackId']
)
return
def delete(self, **kwargs):
cloudformation = self.session.client('cloudformation')
stack_name = self.stack_name
if 'stack_name' in kwargs:
stack_name = kwargs.get('stack_name')
cloudformation.delete_stack(
StackName = stack_name
)
waiter = cloudformation.get_waiter('stack_delete_complete')
waiter.wait(
StackName = stack_name
)
return
def estimate_cost(self, **kwargs):
self._upload()
cloudformation = self.session.client('cloudformation')
stack_name = self.stack_name
if 'stack_name' in kwargs:
stack_name = kwargs.get('stack_name')
template_url = "https://s3.amazonaws.com/%s/%s" % (self.s3_bucket, self.s3_key)
print "Estimating template s3://{}/{}".format(self.s3_bucket, self.s3_key)
resp = cloudformation.estimate_template_cost(
TemplateURL = template_url,
Parameters = self._join_parameters(self.parameters, kwargs.get('parameters', None))
)
        print 'Check this URL to see your template cost estimation:\n{}'.format(resp['Url'])
return
def _upload(self):
print "Uploading %s to s3://%s/%s" % (self.template, self.s3_bucket, self.s3_key)
S3Transfer(self.session.client('s3')).upload_file(
self.template,
self.s3_bucket,
self.s3_key,
extra_args={'ACL': 'bucket-owner-full-control'}
)
s3_key = self.s3_key
if not s3_key.endswith('/'):
s3_key = s3_key[:s3_key.rfind('/')+1]
for file in (self.includes + self.resources):
file_s3_key = '{}{}'.format(s3_key, os.path.basename(file))
print "Uploading %s to s3://%s/%s" % (file, self.s3_bucket, file_s3_key)
S3Transfer(self.session.client('s3')).upload_file(
file,
self.s3_bucket,
file_s3_key,
extra_args={'ACL': 'bucket-owner-full-control'}
)
def _join_parameters(self, params1, params2):
if (params1 and type(params1) != list) or (params2 and type(params2) != list):
raise Exception("Parameters argument should be a list() or None")
if not params1 and params2:
return params2
elif params1 and not params2:
return params1
elif params1 and params2:
result_d = dict()
for param in params1:
result_d[param['ParameterKey']] = param['ParameterValue']
for param in params2:
result_d[param['ParameterKey']] = param['ParameterValue']
result = list()
for key in result_d:
result.append({
'ParameterKey': key,
'ParameterValue': result_d[key]
})
return result
else:
return list()
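
# --- Hedged usage sketch (assumptions: an AWS profile named 'default', an
# existing bucket behind 's3://my-bucket/templates/', and a template file
# under ./src/main/cloudformation/; every name here is illustrative) ---
if __name__ == '__main__':
    cfn = AWSCloudFormation(
        profile_name='default',
        stack_name='demo-stack',
        template='demo.yaml',
        s3_uri='s3://my-bucket/templates/',
        parameters=[{'ParameterKey': 'Env', 'ParameterValue': 'dev'}],
    )
    cfn.validate()
    if cfn.exists():
        cfn.update()
    else:
        cfn.create()
    print cfn.output('SomeOutputKey', no_fail=True)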
|
|
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for encoder_decoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from magenta.common import sequence_example_lib
from magenta.music import encoder_decoder
from magenta.music import testing_lib
import numpy as np
import tensorflow as tf
class OneHotEventSequenceEncoderDecoderTest(tf.test.TestCase):
def setUp(self):
self.enc = encoder_decoder.OneHotEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(3, num_steps=range(3)))
def testInputSize(self):
self.assertEqual(3, self.enc.input_size)
def testNumClasses(self):
self.assertEqual(3, self.enc.num_classes)
def testEventsToInput(self):
events = [0, 1, 0, 2, 0]
self.assertEqual([1.0, 0.0, 0.0], self.enc.events_to_input(events, 0))
self.assertEqual([0.0, 1.0, 0.0], self.enc.events_to_input(events, 1))
self.assertEqual([1.0, 0.0, 0.0], self.enc.events_to_input(events, 2))
self.assertEqual([0.0, 0.0, 1.0], self.enc.events_to_input(events, 3))
self.assertEqual([1.0, 0.0, 0.0], self.enc.events_to_input(events, 4))
def testEventsToLabel(self):
events = [0, 1, 0, 2, 0]
self.assertEqual(0, self.enc.events_to_label(events, 0))
self.assertEqual(1, self.enc.events_to_label(events, 1))
self.assertEqual(0, self.enc.events_to_label(events, 2))
self.assertEqual(2, self.enc.events_to_label(events, 3))
self.assertEqual(0, self.enc.events_to_label(events, 4))
def testClassIndexToEvent(self):
events = [0, 1, 0, 2, 0]
self.assertEqual(0, self.enc.class_index_to_event(0, events))
self.assertEqual(1, self.enc.class_index_to_event(1, events))
self.assertEqual(2, self.enc.class_index_to_event(2, events))
def testLabelsToNumSteps(self):
labels = [0, 1, 0, 2, 0]
self.assertEqual(3, self.enc.labels_to_num_steps(labels))
def testEncode(self):
events = [0, 1, 0, 2, 0]
sequence_example = self.enc.encode(events)
expected_inputs = [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0]]
expected_labels = [1, 0, 2, 0]
expected_sequence_example = sequence_example_lib.make_sequence_example(
expected_inputs, expected_labels)
self.assertEqual(sequence_example, expected_sequence_example)
def testGetInputsBatch(self):
event_sequences = [[0, 1, 0, 2, 0], [0, 1, 2]]
expected_inputs_1 = [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0]]
expected_inputs_2 = [[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]
expected_full_length_inputs_batch = [expected_inputs_1, expected_inputs_2]
expected_last_event_inputs_batch = [expected_inputs_1[-1:],
expected_inputs_2[-1:]]
self.assertListEqual(
expected_full_length_inputs_batch,
self.enc.get_inputs_batch(event_sequences, True))
self.assertListEqual(
expected_last_event_inputs_batch,
self.enc.get_inputs_batch(event_sequences))
def testExtendEventSequences(self):
events1 = [0]
events2 = [0]
events3 = [0]
event_sequences = [events1, events2, events3]
softmax = [[[0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0]]]
self.enc.extend_event_sequences(event_sequences, softmax)
self.assertListEqual(list(events1), [0, 2])
self.assertListEqual(list(events2), [0, 0])
self.assertListEqual(list(events3), [0, 1])
def testEvaluateLogLikelihood(self):
events1 = [0, 1, 0]
events2 = [1, 2, 2]
event_sequences = [events1, events2]
softmax = [[[0.0, 0.5, 0.5], [0.3, 0.4, 0.3]],
[[0.0, 0.6, 0.4], [0.0, 0.4, 0.6]]]
p = self.enc.evaluate_log_likelihood(event_sequences, softmax)
self.assertListEqual([np.log(0.5) + np.log(0.3),
np.log(0.4) + np.log(0.6)], p)
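# Note (illustrative, inferred from the expectations above): encode() pairs
# each input with the *next* event's label, i.e. for events e_0..e_{n-1} it
# emits inputs [x(e_0)..x(e_{n-2})] and labels [l(e_1)..l(e_{n-1})], which is
# why the expected inputs/labels are one element shorter than the event list.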
class OneHotIndexEventSequenceEncoderDecoderTest(tf.test.TestCase):
def setUp(self):
self.enc = encoder_decoder.OneHotIndexEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(3, num_steps=range(3)))
def testInputSize(self):
self.assertEqual(1, self.enc.input_size)
def testInputDepth(self):
self.assertEqual(3, self.enc.input_depth)
def testEventsToInput(self):
events = [0, 1, 0, 2, 0]
self.assertEqual([0], self.enc.events_to_input(events, 0))
self.assertEqual([1], self.enc.events_to_input(events, 1))
self.assertEqual([0], self.enc.events_to_input(events, 2))
self.assertEqual([2], self.enc.events_to_input(events, 3))
self.assertEqual([0], self.enc.events_to_input(events, 4))
def testEncode(self):
events = [0, 1, 0, 2, 0]
sequence_example = self.enc.encode(events)
expected_inputs = [[0], [1], [0], [2]]
expected_labels = [1, 0, 2, 0]
expected_sequence_example = sequence_example_lib.make_sequence_example(
expected_inputs, expected_labels)
self.assertEqual(sequence_example, expected_sequence_example)
def testGetInputsBatch(self):
event_sequences = [[0, 1, 0, 2, 0], [0, 1, 2]]
expected_inputs_1 = [[0], [1], [0], [2], [0]]
expected_inputs_2 = [[0], [1], [2]]
expected_full_length_inputs_batch = [expected_inputs_1, expected_inputs_2]
expected_last_event_inputs_batch = [expected_inputs_1[-1:],
expected_inputs_2[-1:]]
self.assertListEqual(
expected_full_length_inputs_batch,
self.enc.get_inputs_batch(event_sequences, True))
self.assertListEqual(
expected_last_event_inputs_batch,
self.enc.get_inputs_batch(event_sequences))
class LookbackEventSequenceEncoderDecoderTest(tf.test.TestCase):
def setUp(self):
self.enc = encoder_decoder.LookbackEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(3, num_steps=range(3)), [1, 2], 2)
def testInputSize(self):
self.assertEqual(13, self.enc.input_size)
def testNumClasses(self):
self.assertEqual(5, self.enc.num_classes)
def testEventsToInput(self):
events = [0, 1, 0, 2, 0]
self.assertEqual([1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0,
1.0, -1.0, 0.0, 0.0],
self.enc.events_to_input(events, 0))
self.assertEqual([0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0,
-1.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(events, 1))
self.assertEqual([1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0,
1.0, 1.0, 0.0, 1.0],
self.enc.events_to_input(events, 2))
self.assertEqual([0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0,
-1.0, -1.0, 0.0, 0.0],
self.enc.events_to_input(events, 3))
self.assertEqual([1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0,
1.0, -1.0, 0.0, 1.0],
self.enc.events_to_input(events, 4))
def testEventsToLabel(self):
events = [0, 1, 0, 2, 0]
self.assertEqual(4, self.enc.events_to_label(events, 0))
self.assertEqual(1, self.enc.events_to_label(events, 1))
self.assertEqual(4, self.enc.events_to_label(events, 2))
self.assertEqual(2, self.enc.events_to_label(events, 3))
self.assertEqual(4, self.enc.events_to_label(events, 4))
def testClassIndexToEvent(self):
events = [0, 1, 0, 2, 0]
self.assertEqual(0, self.enc.class_index_to_event(0, events[:1]))
self.assertEqual(1, self.enc.class_index_to_event(1, events[:1]))
self.assertEqual(2, self.enc.class_index_to_event(2, events[:1]))
self.assertEqual(0, self.enc.class_index_to_event(3, events[:1]))
self.assertEqual(0, self.enc.class_index_to_event(4, events[:1]))
self.assertEqual(0, self.enc.class_index_to_event(0, events[:2]))
self.assertEqual(1, self.enc.class_index_to_event(1, events[:2]))
self.assertEqual(2, self.enc.class_index_to_event(2, events[:2]))
self.assertEqual(1, self.enc.class_index_to_event(3, events[:2]))
self.assertEqual(0, self.enc.class_index_to_event(4, events[:2]))
self.assertEqual(0, self.enc.class_index_to_event(0, events[:3]))
self.assertEqual(1, self.enc.class_index_to_event(1, events[:3]))
self.assertEqual(2, self.enc.class_index_to_event(2, events[:3]))
self.assertEqual(0, self.enc.class_index_to_event(3, events[:3]))
self.assertEqual(1, self.enc.class_index_to_event(4, events[:3]))
self.assertEqual(0, self.enc.class_index_to_event(0, events[:4]))
self.assertEqual(1, self.enc.class_index_to_event(1, events[:4]))
self.assertEqual(2, self.enc.class_index_to_event(2, events[:4]))
self.assertEqual(2, self.enc.class_index_to_event(3, events[:4]))
self.assertEqual(0, self.enc.class_index_to_event(4, events[:4]))
self.assertEqual(0, self.enc.class_index_to_event(0, events[:5]))
self.assertEqual(1, self.enc.class_index_to_event(1, events[:5]))
self.assertEqual(2, self.enc.class_index_to_event(2, events[:5]))
self.assertEqual(0, self.enc.class_index_to_event(3, events[:5]))
self.assertEqual(2, self.enc.class_index_to_event(4, events[:5]))
def testLabelsToNumSteps(self):
labels = [0, 1, 0, 2, 0]
self.assertEqual(3, self.enc.labels_to_num_steps(labels))
labels = [0, 1, 3, 2, 4]
self.assertEqual(5, self.enc.labels_to_num_steps(labels))
def testEmptyLookback(self):
enc = encoder_decoder.LookbackEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(3), [], 2)
self.assertEqual(5, enc.input_size)
self.assertEqual(3, enc.num_classes)
events = [0, 1, 0, 2, 0]
self.assertEqual([1.0, 0.0, 0.0, 1.0, -1.0],
enc.events_to_input(events, 0))
self.assertEqual([0.0, 1.0, 0.0, -1.0, 1.0],
enc.events_to_input(events, 1))
self.assertEqual([1.0, 0.0, 0.0, 1.0, 1.0],
enc.events_to_input(events, 2))
self.assertEqual([0.0, 0.0, 1.0, -1.0, -1.0],
enc.events_to_input(events, 3))
self.assertEqual([1.0, 0.0, 0.0, 1.0, -1.0],
enc.events_to_input(events, 4))
self.assertEqual(0, enc.events_to_label(events, 0))
self.assertEqual(1, enc.events_to_label(events, 1))
self.assertEqual(0, enc.events_to_label(events, 2))
self.assertEqual(2, enc.events_to_label(events, 3))
self.assertEqual(0, enc.events_to_label(events, 4))
    self.assertEqual(0, enc.class_index_to_event(0, events[:1]))
    self.assertEqual(1, enc.class_index_to_event(1, events[:1]))
    self.assertEqual(2, enc.class_index_to_event(2, events[:1]))
    self.assertEqual(0, enc.class_index_to_event(0, events[:2]))
    self.assertEqual(1, enc.class_index_to_event(1, events[:2]))
    self.assertEqual(2, enc.class_index_to_event(2, events[:2]))
    self.assertEqual(0, enc.class_index_to_event(0, events[:3]))
    self.assertEqual(1, enc.class_index_to_event(1, events[:3]))
    self.assertEqual(2, enc.class_index_to_event(2, events[:3]))
    self.assertEqual(0, enc.class_index_to_event(0, events[:4]))
    self.assertEqual(1, enc.class_index_to_event(1, events[:4]))
    self.assertEqual(2, enc.class_index_to_event(2, events[:4]))
    self.assertEqual(0, enc.class_index_to_event(0, events[:5]))
    self.assertEqual(1, enc.class_index_to_event(1, events[:5]))
    self.assertEqual(2, enc.class_index_to_event(2, events[:5]))
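# Note (illustrative, inferred from the sizes asserted above): with a 3-class
# one-hot encoding, lookback distances [1, 2] and a 2-bit binary counter,
# input_size is 13 = 3 (current event) + 2*3 (events at the two lookback
# distances) + 2 (binary position counter, +/-1 valued) + 2 (repeat-at-lookback
# flags), and num_classes is 5 = 3 + 2 extra "repeat lookback" labels.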
class ConditionalEventSequenceEncoderDecoderTest(tf.test.TestCase):
def setUp(self):
self.enc = encoder_decoder.ConditionalEventSequenceEncoderDecoder(
encoder_decoder.OneHotEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(2)),
encoder_decoder.OneHotEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(3)))
def testInputSize(self):
self.assertEqual(5, self.enc.input_size)
def testNumClasses(self):
self.assertEqual(3, self.enc.num_classes)
def testEventsToInput(self):
control_events = [1, 1, 1, 0, 0]
target_events = [0, 1, 0, 2, 0]
self.assertEqual(
[0.0, 1.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(control_events, target_events, 0))
self.assertEqual(
[0.0, 1.0, 0.0, 1.0, 0.0],
self.enc.events_to_input(control_events, target_events, 1))
self.assertEqual(
[1.0, 0.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(control_events, target_events, 2))
self.assertEqual(
[1.0, 0.0, 0.0, 0.0, 1.0],
self.enc.events_to_input(control_events, target_events, 3))
def testEventsToLabel(self):
target_events = [0, 1, 0, 2, 0]
self.assertEqual(0, self.enc.events_to_label(target_events, 0))
self.assertEqual(1, self.enc.events_to_label(target_events, 1))
self.assertEqual(0, self.enc.events_to_label(target_events, 2))
self.assertEqual(2, self.enc.events_to_label(target_events, 3))
self.assertEqual(0, self.enc.events_to_label(target_events, 4))
def testClassIndexToEvent(self):
target_events = [0, 1, 0, 2, 0]
self.assertEqual(0, self.enc.class_index_to_event(0, target_events))
self.assertEqual(1, self.enc.class_index_to_event(1, target_events))
self.assertEqual(2, self.enc.class_index_to_event(2, target_events))
def testEncode(self):
control_events = [1, 1, 1, 0, 0]
target_events = [0, 1, 0, 2, 0]
sequence_example = self.enc.encode(control_events, target_events)
expected_inputs = [[0.0, 1.0, 1.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0, 0.0],
[1.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 1.0]]
expected_labels = [1, 0, 2, 0]
expected_sequence_example = sequence_example_lib.make_sequence_example(
expected_inputs, expected_labels)
self.assertEqual(sequence_example, expected_sequence_example)
def testGetInputsBatch(self):
control_event_sequences = [[1, 1, 1, 0, 0], [1, 1, 1, 0, 0]]
target_event_sequences = [[0, 1, 0, 2], [0, 1]]
expected_inputs_1 = [[0.0, 1.0, 1.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0, 0.0],
[1.0, 0.0, 1.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 0.0, 1.0]]
expected_inputs_2 = [[0.0, 1.0, 1.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 1.0, 0.0]]
expected_full_length_inputs_batch = [expected_inputs_1, expected_inputs_2]
expected_last_event_inputs_batch = [expected_inputs_1[-1:],
expected_inputs_2[-1:]]
self.assertListEqual(
expected_full_length_inputs_batch,
self.enc.get_inputs_batch(
control_event_sequences, target_event_sequences, True))
self.assertListEqual(
expected_last_event_inputs_batch,
self.enc.get_inputs_batch(
control_event_sequences, target_event_sequences))
def testExtendEventSequences(self):
target_events_1 = [0]
target_events_2 = [0]
target_events_3 = [0]
target_event_sequences = [target_events_1, target_events_2, target_events_3]
softmax = np.array(
[[[0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0]]])
self.enc.extend_event_sequences(target_event_sequences, softmax)
self.assertListEqual(list(target_events_1), [0, 2])
self.assertListEqual(list(target_events_2), [0, 0])
self.assertListEqual(list(target_events_3), [0, 1])
def testEvaluateLogLikelihood(self):
target_events_1 = [0, 1, 0]
target_events_2 = [1, 2, 2]
target_event_sequences = [target_events_1, target_events_2]
softmax = [[[0.0, 0.5, 0.5], [0.3, 0.4, 0.3]],
[[0.0, 0.6, 0.4], [0.0, 0.4, 0.6]]]
p = self.enc.evaluate_log_likelihood(target_event_sequences, softmax)
self.assertListEqual([np.log(0.5) + np.log(0.3),
np.log(0.4) + np.log(0.6)], p)
class OptionalEventSequenceEncoderTest(tf.test.TestCase):
def setUp(self):
self.enc = encoder_decoder.OptionalEventSequenceEncoder(
encoder_decoder.OneHotEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(3)))
def testInputSize(self):
self.assertEqual(4, self.enc.input_size)
def testEventsToInput(self):
events = [(False, 0), (False, 1), (False, 0), (True, 2), (True, 0)]
self.assertEqual(
[0.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(events, 0))
self.assertEqual(
[0.0, 0.0, 1.0, 0.0],
self.enc.events_to_input(events, 1))
self.assertEqual(
[0.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(events, 2))
self.assertEqual(
[1.0, 0.0, 0.0, 0.0],
self.enc.events_to_input(events, 3))
self.assertEqual(
[1.0, 0.0, 0.0, 0.0],
self.enc.events_to_input(events, 4))
class MultipleEventSequenceEncoderTest(tf.test.TestCase):
def setUp(self):
self.enc = encoder_decoder.MultipleEventSequenceEncoder([
encoder_decoder.OneHotEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(2)),
encoder_decoder.OneHotEventSequenceEncoderDecoder(
testing_lib.TrivialOneHotEncoding(3))])
def testInputSize(self):
self.assertEqual(5, self.enc.input_size)
def testEventsToInput(self):
events = [(1, 0), (1, 1), (1, 0), (0, 2), (0, 0)]
self.assertEqual(
[0.0, 1.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(events, 0))
self.assertEqual(
[0.0, 1.0, 0.0, 1.0, 0.0],
self.enc.events_to_input(events, 1))
self.assertEqual(
[0.0, 1.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(events, 2))
self.assertEqual(
[1.0, 0.0, 0.0, 0.0, 1.0],
self.enc.events_to_input(events, 3))
self.assertEqual(
[1.0, 0.0, 1.0, 0.0, 0.0],
self.enc.events_to_input(events, 4))
if __name__ == '__main__':
tf.test.main()
|
|
import logging
import hashlib
import multiprocessing
import os
import re
import shutil
import subprocess
import time
from smqtk.utils import file_utils, string_utils
__author__ = "paul.tunison@kitware.com"
class VideoMetadata (object):
"""
Simple container for video file metadata values
"""
def __init__(self):
#: :type: None or int
self.width = None
#: :type: None or int
self.height = None
#: :type: None or float
self.fps = None
#: :type: None or float
self.duration = None
def get_metadata_info(video_filepath, ffprobe_exe='ffprobe'):
"""
Use ffmpeg to extract video file metadata parameters
:param video_filepath: File path to the video to probe.
:type video_filepath: str
:param ffprobe_exe: Path to the ffprobe executable to use. By default, we
try to use the version that's on the PATH.
:return: VideoMetadata instance
:rtype: VideoMetadata
"""
log = logging.getLogger('smqtk.utils.video_utils.get_metadata_info')
    re_float_match = r"[+-]?(?:(?:\d+\.?\d*)|(?:\.\d+))(?:[eE][+-]?\d+)?"
log.debug("Using ffprobe: %s", ffprobe_exe)
cmd = [ffprobe_exe, '-i', video_filepath]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
# ffprobe puts output to err stream
if p.returncode: # non-zero
raise RuntimeError("Failed to probe video file. Error:\n%s"
% err)
# WxH
m = re.search("Stream.*Video.* (\d+)x(\d+)", err)
if m:
width = int(m.group(1))
height = int(m.group(2))
else:
raise RuntimeError("Couldn't find width/height specification "
"for video file '%s'" % video_filepath)
# FPS
m = re.search("Stream.*Video.* (%s) fps" % re_float_match, err)
if m:
fps = float(m.group(1))
else:
# falling back on tbr measurement
log.debug("Couldn't find fps measurement, looking for TBR")
m = re.search("Stream.*Video.* (%s) tbr" % re_float_match, err)
if m:
fps = float(m.group(1))
else:
raise RuntimeError("Couldn't find tbr specification for "
"video file '%s'" % video_filepath)
# Duration
m = re.search("Duration: (\d+):(\d+):(%s)" % re_float_match, err)
if m:
duration = (
(60 * 60 * int(m.group(1))) # hours
+ (60 * int(m.group(2))) # minutes
+ float(m.group(3)) # seconds
)
else:
raise RuntimeError("Couldn't find duration specification for "
"video file '%s'" % video_filepath)
md = VideoMetadata()
md.width = width
md.height = height
md.fps = fps
md.duration = duration
return md
def ffmpeg_extract_frame(t, input_filepath, output_filepath,
ffmpeg_exe='ffmpeg'):
"""
    Extract a frame at the given time ``t`` from the input video file.
Output file may not exist or be of 0 size if we failed to extract the frame.
"""
cmd = [ffmpeg_exe, '-accurate_seek', '-ss', str(t), '-i', input_filepath,
'-frames:v', '1', output_filepath]
sPIPE = subprocess.PIPE
p = subprocess.Popen(cmd, stdout=sPIPE, stderr=sPIPE)
_, _ = p.communicate()
# if p.returncode != 0:
# raise RuntimeError("FFmpeg failed to extract frame at time %f! "
# "(return code: %d)"
# % (t, p.returncode))
def ffmpeg_extract_frame_map(working_dir, video_filepath, second_offset=0,
second_interval=0, max_duration=0, frames=(),
output_image_ext="png", parallel=None,
ffmpeg_exe='ffmpeg'):
"""
Return a mapping of video frame index to image file in the given output
format.
If frames requested have not yet been extracted (based on what's contained
in the specified output directory), they are done now. This means that this
method could take a little time to complete if there are many frames in the
video file and this is the first time this is being called.
    This may return an empty mapping if there are no frames in the video for
    the specified, or default, constraints.
Extracted frames are cached in a directory structure under the provided
``working_dir`` directory path: ``<working_dir>/VideoFrameExtraction``.
Frames are extracted into separate directories based on the SHA1 checksum of
the video file.
:raises RuntimeError: No frames were extracted.
:param working_dir: Working directory for frame extraction to occur in.
:type working_dir: str
:param video_filepath: Path to the video to extract frames from.
:type video_filepath: str
:param second_offset: Seconds into the video to start extracting
:type second_offset: float
:param second_interval: Number of seconds between extracted frames
:type second_interval: float
:param max_duration: Maximum number of seconds worth of extracted frames
(starting from the specified offset). If <=0, we extract until the end
of the video.
:type max_duration: float
:param frames: Specific exact frame numbers within the video to extract.
Providing explicit frames causes offset, interval and duration
parameters to be ignored and only the frames specified here to be
extracted and returned.
:type frames: collections.Iterable[int]
:param parallel: Number of processes to use for frame extraction. This is
None by default, meaning that all available cores/threads are used.
:type parallel: int or None
:param ffmpeg_exe: ffmpeg executable to use for frame extraction. By
        default, we attempt to use what is available on the PATH.
:type ffmpeg_exe: str or unicode
:return: Map of frame-to-filepath for requested video frames
:rtype: dict of (int, str)
"""
log = logging.getLogger('smqtk.utils.video_utils.extract_frame_map')
video_md = get_metadata_info(video_filepath)
video_sha1sum = hashlib.sha1(open(video_filepath, 'rb').read()).hexdigest()
frame_output_dir = os.path.join(
working_dir,
"VideoFrameExtraction",
*string_utils.partition_string(video_sha1sum, 10)
        # 40 hex chars split into 10 chunks of 4
)
file_utils.safe_create_dir(frame_output_dir)
def filename_for_frame(frame, ext):
"""
        Standard filename for a given frame number and file extension.
"""
return "%08d.%s" % (frame, ext.lstrip('.'))
def iter_frames_for_interval():
"""
Return a generator expression yielding frame numbers from the input
video that match the given query parameters. Indices returned are
0-based (i.e. first frame is 0, not 1).
        We are making a sensible assumption that we are not dealing with frame
        speeds of over 1000Hz, rounding frame times to the nearest thousandth
        of a second to mitigate floating point error.
        :rtype: collections.Iterable[int]
"""
_log = logging.getLogger('smqtk.utils.video_utils.extract_frame_map'
'.iter_frames_for_interval')
num_frames = int(video_md.fps * video_md.duration)
first_frame = second_offset * video_md.fps
_log.debug("First frame: %f", first_frame)
if max_duration > 0:
cutoff_frame = min(float(num_frames),
(max_duration + second_offset) * video_md.fps)
else:
cutoff_frame = float(num_frames)
_log.debug("Cutoff frame: %f", cutoff_frame)
if second_interval:
incr = second_interval * video_md.fps
else:
incr = 1.0
_log.debug("Frame increment: %f", incr)
# Interpolate
        yield int(first_frame)
next_frm = first_frame + incr
while next_frm < cutoff_frame:
_log.debug("-- adding frame: %f", next_frm)
yield int(next_frm)
next_frm += incr
def extract_frames(frames_to_process):
"""
Extract specific frames from the input video file using ffmpeg. If not
all frames could be extracted, we return what we were able to extract.
:param frames_to_process: Mapping of frame-number:filepath pairs to
extract from the input video.
:type frames_to_process: dict[int,str or unicode]
:return: List of frames that were successfully extracted.
:rtype: list[int]
"""
_log = logging.getLogger('smqtk.utils.video_utils.extract_frame_map'
'.extract_frames')
# Setup temp extraction directory
tmp_extraction_dir = os.path.join(frame_output_dir, ".TMP")
if os.path.isdir(tmp_extraction_dir):
_log.debug("Existing temp director found, removing and starting "
"over")
shutil.rmtree(tmp_extraction_dir, ignore_errors=True)
os.makedirs(tmp_extraction_dir)
p = multiprocessing.Pool(parallel)
# Mapping of frame to (result, output_filepath)
#: :type: dict of (int, (AsyncResult, str))
rmap = {}
for f, ofp in frames_to_process.iteritems():
tfp = os.path.join(tmp_extraction_dir,
filename_for_frame(f, output_image_ext))
t = f / video_md.fps
rmap[f] = (
p.apply_async(ffmpeg_extract_frame,
args=(t, video_filepath, tfp, ffmpeg_exe)),
tfp
)
p.close()
# Check for failures
extracted_frames = []
for f, ofp in frames_to_process.iteritems():
r, tfp = rmap[f]
r.get() # wait for finish
if not os.path.isfile(tfp):
_log.warn("Failed to generated file for frame %d", f)
else:
extracted_frames.append(f)
os.rename(tfp, ofp)
p.join()
del p
os.removedirs(tmp_extraction_dir)
_log.debug("Frame extraction complete")
return extracted_frames
# Determine frames to extract from video
extract_indices = set()
if frames:
log.debug("Only extracting specified frames: %s", frames)
extract_indices.update(frames)
else:
log.debug("Determining frames needed for specification: "
"offset: %f, interval: %f, max_duration: %f",
second_offset, second_interval, max_duration)
extract_indices.update(iter_frames_for_interval())
if not extract_indices:
return {}
# frame/filename map that will be returned based on requested frames
frame_map = dict(
(i, os.path.join(frame_output_dir,
filename_for_frame(i, output_image_ext)))
for i in extract_indices
)
###
# Acquire a file-base lock in output directory so that we don't conflict
# with another process extracting frames to the same directory.
#
    # NOTE: This method is prone to starvation if many processes are trying
    #       to extract the same video frames, but this has not been a problem
    #       yet given existing use cases.
#
lock_file = os.path.join(frame_output_dir, '.lock')
log.debug("Acquiring file lock in '%s'...", frame_output_dir)
while not file_utils.exclusive_touch(lock_file):
# This is sufficiently small to be fine grained, but not so small to
# burn the CPU.
time.sleep(0.01)
log.debug("Acquiring file lock -> Acquired!")
try:
###
        # Determine frames to actually extract based on existing files (if any)
#
#: :type: dict[int, str]
frames_to_process = {}
existing_frames = []
for i, img_file in sorted(frame_map.items()):
if not os.path.isfile(img_file):
log.debug('frame %d needs processing', i)
frames_to_process[i] = img_file
else:
existing_frames.append(i)
###
# Extract needed frames via hook function that provides
# implementation.
#
if frames_to_process:
frames_extracted = extract_frames(frames_to_process)
if (len(existing_frames) + len(frames_extracted)) == 0:
raise RuntimeError("Failed to extract any frames for video")
return frame_map
finally:
os.remove(lock_file)
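
# --- Hedged usage sketch (assumptions: ffmpeg/ffprobe on the PATH and a local
# file 'sample.mp4'; all names here are illustrative) ---
if __name__ == '__main__':
    md = get_metadata_info('sample.mp4')
    print "%dx%d @ %.3f fps, %.1f s" % (md.width, md.height, md.fps, md.duration)
    # Extract roughly one frame every 2 seconds under ./work/VideoFrameExtraction/
    fmap = ffmpeg_extract_frame_map('work', 'sample.mp4', second_interval=2)
    for frame, path in sorted(fmap.items()):
        print "frame %08d -> %s" % (frame, path)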
|
|
# -*- coding: utf-8 -*-
"""
The Gymkhana houses competitions running on the system.
PythonPro Limited
2013-02-21
"""
import json
import logging
from greplin import scales
from pp.latchpony.service.utils import redis_from_config
from pp.latchpony.model.competition import LocalCompetition
def get_log(extra=None):
m = "{}.{}".format(__name__, extra) if extra else __name__
return logging.getLogger(m)
class CompetitionNotFoundError(Exception):
"""Raised when a competition was not found for an org_id."""
class Gymkhana(object):
"""This manages the competitions running under the system.
What does Gymkhana mean?
* http://en.wikipedia.org/wiki/Gymkhana
"""
def __init__(self, redis):
"""
"""
self.log = logging.getLogger("{}.Gymkhana".format(__name__))
self.redis = redis
self.stats = scales.collection(
'/ghymkhana',
scales.IntDictStat('competition_add'),
scales.IntDictStat('competition_recovery'),
scales.IntDictStat('competition_update'),
scales.IntDictStat('competition_validation_failed'),
scales.IntDictStat('competition_removed'),
)
def has_competition(self, org_id):
"""Check if the gymkhana knows about the competition.
:param org_id: The unique id of the competition to look for.
:returns: True | False
False means no competition was found.
"""
return self.redis.exists(org_id)
def validate_competition(self, org_id, data):
"""Check the data produces a valid competition.
:param data: The string competition configuration.
:returns: A valid competition instance.
"""
comp = LocalCompetition.load(data)
        try:
            comp.validate()
        except Exception:
            self.stats.competition_validation_failed[org_id] += 1
            raise
return comp
def list_competitions(self):
"""Return all the competitions (org_ids) housed in the gymkhana.
:returns: A list of org_id strings.
"""
return list(self.redis.smembers('competitions'))
def get_competition(self, org_id):
"""Recover the stored competition for the given organisation.
:param org_id: The unique id of the competition to look for.
If the org_id was not found the CompetitionNotFoundError will be
raised.
:returns: A competition instance.
"""
if not self.has_competition(org_id):
self.log.error(
"get_competition: no found org_id '{0}'".format(org_id)
)
raise CompetitionNotFoundError(
"Unknown org_id: '{0}'".format(org_id)
)
# Recover the data and create a competition instance:
self.log.debug(
"get_competition: Competition() recover for '{0}'.".format(org_id)
)
        # This should never fail; if it does, someone was fiddling with
        # the stored set in redis directly!
org_data = json.loads(self.redis.get(org_id))
comp = self.validate_competition(org_id, org_data)
# update the count on the amount of times this competition has
# been used:
self.stats.competition_recovery[org_id] += 1
self.log.debug(
"get_competition: success org_id '{0}':'{1}'.".format(
org_id, comp
)
)
return comp
def add_competition(self, org_id, data):
"""House a new competition in the gymkhana.
:param org_id: The unique id of the competition to look for.
:param data: The latchpony configuration string for this competition.
E.g. the minimum set up::
... action
... anyone
... anything
... permissions
*** deny anyone to action anything wt 10
... aliases
The data will be validated before storage. If it fails
validate_competition() will raise exceptions indicating the problem.
"""
# First validate the competition before storing it. This will raise
# any exceptions preventing further progress.
self.validate_competition(org_id, data)
# OK: store the competition data for later use:
self.redis.sadd('competitions', org_id)
self.redis.set(org_id, json.dumps(data))
# update the count on the amount of times this competition has
# been added:
self.stats.competition_add[org_id] += 1
def update_competition(self, org_id, data):
"""Update a competition housed in the gymkhana.
:param org_id: The unique id of the competition to look for.
:param data: The latchpony configuration string to use instead.
E.g. the minimum set up::
... action
... anyone
... anything
... permissions
*** deny anyone to action anything wt 10
... aliases
The data will be validated before storage. If it fails
validate_competition() will raise exceptions indicating the problem.
"""
self.validate_competition(org_id, data)
self.redis.set(org_id, json.dumps(data))
# update the count on the amount of times this competition has
# been updated:
self.stats.competition_update[org_id] += 1
self.log.debug(
"update_competition: '{0}' updated ok.".format(
org_id
)
)
def remove_competition(self, org_id):
"""Finish a competition and remove it from the gymkhana.
:param org_id: The unique id of the competition to remove.
If the org_id isn't found CompetitionNotFoundError will be raised.
"""
        if not self.has_competition(org_id):
            self.log.debug(
                "remove_competition: '{0}' not present to remove.".format(
                    org_id
                )
            )
            raise CompetitionNotFoundError(
                "Unknown org_id: '{0}'".format(org_id)
            )
self.redis.srem('competitions', org_id)
self.redis.delete(org_id)
# update the count on the amount of times this competition has
# been removed:
self.stats.competition_removed[org_id] += 1
def dump_competition(self, org_id):
"""Dump out the text format version of the competition.
This could be later loaded back in for updates.
        :param org_id: The unique id of the competition to dump.
If the org_id isn't found CompetitionNotFoundError will be raised.
"""
        if not self.has_competition(org_id):
            self.log.debug(
                "dump_competition: '{0}' not present to dump.".format(
                    org_id
                )
            )
            raise CompetitionNotFoundError(
                "Unknown org_id: '{0}'".format(org_id)
            )
raw_data = json.loads(self.redis.get(org_id))
return raw_data
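
# --- Hedged usage sketch (assumptions: a redis server on localhost and the
# 'redis' client package installed; the org id is illustrative) ---
if __name__ == '__main__':
    import redis
    gym = Gymkhana(redis.StrictRedis())
    print(gym.list_competitions())
    print(gym.has_competition('org-1'))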
class PyramidGymkhana(object):
"""An interface between pyramid based service and Gymkhana.
This will appear on the request object which can then be used in views.
"""
def __init__(self, request):
"""
:param request: This is the pyramid request object.
The request is passed into the view which is using this API.
"""
self.log = get_log("PyramidGymkhana")
self._settings = request.registry.settings
self.log.info("configuring Gymkhana instance.")
strict_redis = redis_from_config(self._settings)
self.log.info("request.gym now refers to the Gymkhana instance.")
self.api = Gymkhana(strict_redis)
|
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import copy
import difflib
import itertools
import os.path
import time
import traceback
from behave import step_registry
from behave.compat.os_path import relpath
class Argument(object):
'''An argument found in a *feature file* step name and extracted using
step decorator `parameters`_.
The attributes are:
.. attribute:: original
The actual text matched in the step name.
.. attribute:: value
The potentially type-converted value of the argument.
.. attribute:: name
The name of the argument. This will be None if the parameter is
anonymous.
.. attribute:: start
The start index in the step name of the argument. Used for display.
.. attribute:: end
The end index in the step name of the argument. Used for display.
'''
def __init__(self, start, end, original, value, name=None):
self.start = start
self.end = end
self.original = original
self.value = value
self.name = name
# @total_ordering
# class FileLocation(unicode):
class FileLocation(object):
"""
Provides a value object for file location objects.
A file location consists of:
* filename
* line (number), optional
LOCATION SCHEMA:
* "{filename}:{line}" or
* "{filename}" (if line number is not present)
"""
# -- pylint: disable=R0904,R0924
# R0904: 30,0:FileLocation: Too many public methods (43/30) => unicode
# R0924: 30,0:FileLocation: Badly implemented Container, ...=> unicode
__pychecker__ = "missingattrs=line" # -- Ignore warnings for 'line'.
def __init__(self, filename, line=None):
self.filename = filename
self.line = line
# def __new__(cls, filename, line=None):
# assert isinstance(filename, basestring)
# obj = unicode.__new__(cls, filename)
# obj.line = line
# obj.__filename = filename
# return obj
#
# @property
# def filename(self):
# # -- PREVENT: Assignments via property (and avoid self-recursion).
# return self.__filename
def get(self):
return self.filename
def abspath(self):
return os.path.abspath(self.filename)
def basename(self):
return os.path.basename(self.filename)
def dirname(self):
return os.path.dirname(self.filename)
def exists(self):
return os.path.exists(self.filename)
def __eq__(self, other):
if isinstance(other, FileLocation):
return self.filename == other.filename and self.line == other.line
else:
return self.filename == other
def __ne__(self, other):
return not self == other
def __lt__(self, other):
if isinstance(other, FileLocation):
if self.filename < other.filename:
return True
elif self.filename > other.filename:
return False
else:
assert self.filename == other.filename
return self.line < other.line
else:
return self.filename < other
def __le__(self, other):
# -- SEE ALSO: python2.7, functools.total_ordering
return not other < self
def __gt__(self, other):
# -- SEE ALSO: python2.7, functools.total_ordering
if isinstance(other, FileLocation):
return other < self
else:
return self.filename > other
def __ge__(self, other):
# -- SEE ALSO: python2.7, functools.total_ordering
return not self < other
def __repr__(self):
return u'<FileLocation: filename="%s", line=%s>' % \
(self.filename, self.line)
def __str__(self):
if self.line is None:
return self.filename
else:
assert self.line >= 0
return u"%s:%d" % (self.filename, self.line)
class BasicStatement(object):
def __init__(self, filename, line, keyword, name):
filename = filename or '<string>'
filename = relpath(filename, os.getcwd()) # -- NEEDS: abspath?
self.location = FileLocation(filename, line)
assert isinstance(keyword, unicode)
assert isinstance(name, unicode)
self.keyword = keyword
self.name = name
@property
def filename(self):
# return os.path.abspath(self.location.filename)
return self.location.filename
@property
def line(self):
return self.location.line
# @property
# def location(self):
# p = relpath(self.filename, os.getcwd())
# return '%s:%d' % (p, self.line)
def __cmp__(self, other):
# -- NOTE: Ignore potential FileLocation differences.
return cmp((self.keyword, self.name), (other.keyword, other.name))
class TagStatement(BasicStatement):
def __init__(self, filename, line, keyword, name, tags):
super(TagStatement, self).__init__(filename, line, keyword, name)
self.tags = tags
class Replayable(object):
type = None
def replay(self, formatter):
getattr(formatter, self.type)(self)
class Feature(TagStatement, Replayable):
'''A `feature`_ parsed from a *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
be "Feature".
.. attribute:: name
The name of the feature (the text after "Feature".)
.. attribute:: description
The description of the feature as seen in the *feature file*. This is
stored as a list of text lines.
.. attribute:: background
The :class:`~behave.model.Background` for this feature, if any.
.. attribute:: scenarios
A list of :class:`~behave.model.Scenario` making up this feature.
.. attribute:: tags
A list of @tags (as :class:`~behave.model.Tag` which are basically
glorified strings) attached to the feature. See `controlling
things with tags`_.
.. attribute:: status
Read-Only. A summary status of the feature's run. If read before the
feature is fully tested it will return "untested" otherwise it will
return one of:
"untested"
            The feature has not been completely tested yet.
        "skipped"
            One or more steps of this feature were passed over during testing.
"passed"
The feature was tested successfully.
"failed"
One or more steps of this feature failed.
.. attribute:: duration
The time, in seconds, that it took to test this feature. If read before
the feature is tested it will return 0.0.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the feature
was found.
.. attribute:: line
The line number of the *feature file* where the feature was found.
.. _`feature`: gherkin.html#features
'''
type = "feature"
def __init__(self, filename, line, keyword, name, tags=[], description=[],
scenarios=[], background=None):
super(Feature, self).__init__(filename, line, keyword, name, tags)
self.description = description or []
self.scenarios = []
self.background = background
self.parser = None
for scenario in scenarios:
self.add_scenario(scenario)
def __repr__(self):
return '<Feature "%s": %d scenario(s)>' % \
(self.name, len(self.scenarios))
def __iter__(self):
return iter(self.scenarios)
def add_scenario(self, scenario):
scenario.feature = self
scenario.background = self.background
self.scenarios.append(scenario)
@property
def status(self):
skipped = True
for scenario_or_outline in self.scenarios:
# FIXME: Check if necessary, ScenarioOutline.status computes OK.
if isinstance(scenario_or_outline, ScenarioOutline):
for scenario in scenario_or_outline:
if scenario.status == 'failed':
return 'failed'
if scenario.status == 'untested':
return 'untested'
if scenario.status != 'skipped':
skipped = False
else:
scenario = scenario_or_outline
if scenario.status == 'failed':
return 'failed'
if scenario.status == 'untested':
return 'untested'
if scenario.status != 'skipped':
skipped = False
return skipped and 'skipped' or 'passed'
@property
def duration(self):
if self.background:
duration = self.background.duration or 0.0
else:
duration = 0.0
for scenario in self.scenarios:
duration += scenario.duration
return duration
def walk_scenarios(self, with_outlines=False):
"""
Provides a flat list of all scenarios of this feature.
A ScenarioOutline element adds its scenarios to this list.
But the ScenarioOutline element itself is only added when specified.
A flat scenario list is useful when all scenarios of a features
should be processed.
:param with_outlines: If ScenarioOutline items should be added, too.
:return: List of all scenarios of this feature.
"""
all_scenarios = []
for scenario in self.scenarios:
if isinstance(scenario, ScenarioOutline):
scenario_outline = scenario
if with_outlines:
all_scenarios.append(scenario_outline)
all_scenarios.extend(scenario_outline.scenarios)
else:
all_scenarios.append(scenario)
return all_scenarios
def should_run(self, config=None):
"""
Determines if this Feature (and its scenarios) should run.
Implements the run decision logic for a feature.
The decision depends on:
* if the Feature is marked as skipped
* if the config.tags (tag expression) enable/disable this feature
:param config: Runner configuration to use (optional).
:return: True, if scenario should run. False, otherwise.
"""
answer = self.status != "skipped"
if answer and config:
answer = self.should_run_with_tags(config.tags)
return answer
def should_run_with_tags(self, tag_expression):
'''
Determines if this feature should run when the tag expression is used.
A feature should run if:
* it should run according to its tags
* any of its scenarios should run according to its tags
:param tag_expression: Runner/config environment tags to use.
:return: True, if feature should run. False, otherwise (skip it).
'''
run_feature = tag_expression.check(self.tags)
if not run_feature:
for scenario in self:
if scenario.should_run_with_tags(tag_expression):
run_feature = True
break
return run_feature
def mark_skipped(self):
"""
Marks this feature (and all its scenarios and steps) as skipped.
"""
for scenario in self.scenarios:
scenario.mark_skipped()
assert self.status == "skipped"
def run(self, runner):
failed = False
runner.context._push()
runner.context.feature = self
# run this feature if the tags say so or any one of its scenarios
run_feature = self.should_run(runner.config)
if run_feature or runner.config.show_skipped:
for formatter in runner.formatters:
formatter.feature(self)
# current tags as a set
runner.context.tags = set(self.tags)
if not runner.config.dry_run and run_feature:
for tag in self.tags:
runner.run_hook('before_tag', runner.context, tag)
runner.run_hook('before_feature', runner.context, self)
if self.background and (run_feature or runner.config.show_skipped):
for formatter in runner.formatters:
formatter.background(self.background)
failed_count = 0
for scenario in self:
# -- OPTIONAL: Select scenario by name (regular expressions).
if (runner.config.name and
not runner.config.name_re.search(scenario.name)):
scenario.mark_skipped()
continue
failed = scenario.run(runner)
if failed:
failed_count += 1
# do we want to stop on the first failure?
if failed and runner.config.stop:
break
if run_feature:
runner.run_hook('after_feature', runner.context, self)
for tag in self.tags:
runner.run_hook('after_tag', runner.context, tag)
runner.context._pop()
if run_feature or runner.config.show_skipped:
for formatter in runner.formatters:
formatter.eof()
failed = (failed_count > 0)
return failed
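# Illustrative note (not part of behave's API docs): walk_scenarios() flattens
# ScenarioOutline children into a single list, so post-run reporting can do:
#   for scenario in feature.walk_scenarios():
#       print("%s: %s" % (scenario.name, scenario.status))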
class Background(BasicStatement, Replayable):
'''A `background`_ parsed from a *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
typically be "Background".
.. attribute:: name
The name of the background (the text after "Background:".)
.. attribute:: steps
A list of :class:`~behave.model.Step` making up this background.
.. attribute:: duration
The time, in seconds, that it took to run this background. If read
before the background is run it will return 0.0.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the scenario
was found.
.. attribute:: line
The line number of the *feature file* where the scenario was found.
.. _`background`: gherkin.html#backgrounds
'''
type = "background"
def __init__(self, filename, line, keyword, name, steps=[]):
super(Background, self).__init__(filename, line, keyword, name)
self.steps = steps or []
def __repr__(self):
return '<Background "%s">' % self.name
def __iter__(self):
return iter(self.steps)
@property
def duration(self):
duration = 0
for step in self.steps:
duration += step.duration
return duration
class Scenario(TagStatement, Replayable):
'''A `scenario`_ parsed from a *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
typically be "Scenario".
.. attribute:: name
The name of the scenario (the text after "Scenario:".)
.. attribute:: description
The description of the scenario as seen in the *feature file*.
This is stored as a list of text lines.
.. attribute:: feature
The :class:`~behave.model.Feature` this scenario belongs to.
.. attribute:: steps
A list of :class:`~behave.model.Step` making up this scenario.
.. attribute:: tags
A list of @tags (as :class:`~behave.model.Tag` which are basically
glorified strings) attached to the scenario. See `controlling
things with tags`_.
.. attribute:: status
Read-Only. A summary status of the scenario's run. If read before the
scenario is fully tested it will return "untested" otherwise it will
return one of:
"untested"
The scenario has not been completely tested yet.
"skipped"
One or more steps of this scenario were passed over during testing.
"passed"
The scenario was tested successfully.
"failed"
One or more steps of this scenario failed.
.. attribute:: duration
The time, in seconds, that it took to test this scenario. If read before
the scenario is tested it will return 0.0.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the scenario
was found.
.. attribute:: line
The line number of the *feature file* where the scenario was found.
.. _`scenario`: gherkin.html#scenarios
'''
type = "scenario"
def __init__(self, filename, line, keyword, name, tags=[], steps=[],
description=None):
super(Scenario, self).__init__(filename, line, keyword, name, tags)
self.description = description or []
self.steps = steps or []
self.background = None
self.feature = None # REFER-TO: owner=Feature
self._row = None
self.should_skip = None
self.stderr = None
self.stdout = None
self.was_dry_run = False
def __repr__(self):
return '<Scenario "%s">' % self.name
def __iter__(self):
if self.background is not None:
return itertools.chain(self.background, self.steps)
else:
return iter(self.steps)
@property
def all_steps(self):
"""Returns iterator to all steps, including background steps if any."""
return self.__iter__()
@property
def status(self):
if self.should_skip:
# -- PERFORMANCE SHORTCUT: Scenario(Outline) is marked as skipped.
return 'skipped'
for step in self.steps:
if step.status == 'failed':
return 'failed'
elif step.status == 'undefined':
if self.was_dry_run:
# -- SPECIAL CASE: In dry-run with undefined-step discovery
# Undefined steps should not cause failed scenario.
return 'untested'
else:
# -- NORMALLY: Undefined steps cause failed scenario.
return 'failed'
elif step.status == 'skipped':
return 'skipped'
elif step.status == 'untested':
return 'untested'
return 'passed'
@property
def duration(self):
duration = 0
for step in self.steps:
duration += step.duration
return duration
@property
def effective_tags(self):
"""
Effective tags for this scenario:
* own tags
* tags inherited from its feature
"""
tags = self.tags
if self.feature:
tags = self.feature.tags + self.tags
return tags
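# Editor's sketch (hypothetical tags): a scenario tagged @slow inside a
# feature tagged @web gets effective_tags == [u'web', u'slow'], so a
# tag expression like "--tags=@web" selects it via feature inheritance.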
def should_run(self, config=None):
"""
Determines if this Scenario (or ScenarioOutline) should run.
Implements the run decision logic for a scenario.
The decision depends on:
* if the Scenario is marked as skipped
* if the config.tags (tag expression) enable/disable this scenario
:param config: Runner configuration to use (optional).
:return: True, if scenario should run. False, otherwise.
"""
answer = not self.should_skip
if answer and config:
answer = self.should_run_with_tags(config.tags)
return answer
def should_run_with_tags(self, tag_expression):
"""
Determines if this scenario should run when the tag expression is used.
:param tag_expression: Runner/config environment tags to use.
:return: True, if scenario should run. False, otherwise (skip it).
"""
return tag_expression.check(self.effective_tags)
def mark_skipped(self):
"""
Marks this scenario (and all its steps) as skipped.
"""
self.should_skip = True
for step in self:
assert step.status == "untested" or step.status == "skipped"
step.status = "skipped"
assert self.status == "skipped", "OOPS: scenario.status=%s" % self.status
def run(self, runner):
failed = False
run_scenario = self.should_run(runner.config)
run_steps = run_scenario and not runner.config.dry_run
dry_run_scenario = run_scenario and runner.config.dry_run
self.was_dry_run = dry_run_scenario
if run_scenario or runner.config.show_skipped:
for formatter in runner.formatters:
formatter.scenario(self)
runner.context._push()
runner.context.scenario = self
runner.context.tags = set(self.effective_tags)
if not runner.config.dry_run and run_scenario:
for tag in self.tags:
runner.run_hook('before_tag', runner.context, tag)
runner.run_hook('before_scenario', runner.context, self)
runner.setup_capture()
if run_scenario or runner.config.show_skipped:
for step in self:
for formatter in runner.formatters:
formatter.step(step)
for step in self:
if run_steps:
if not step.run(runner):
run_steps = False
failed = True
runner.context._set_root_attribute('failed', True)
elif failed or dry_run_scenario:
# -- SKIP STEPS: After failure/undefined-step occurred.
# BUT: Detect all remaining undefined steps.
step.status = 'skipped'
if dry_run_scenario:
step.status = 'untested'
found_step = step_registry.registry.find_match(step)
if not found_step:
step.status = 'undefined'
runner.undefined.append(step)
else:
# -- SKIP STEPS: For disabled scenario.
# NOTE: Undefined steps are not detected (by intention).
step.status = 'skipped'
# Attach the stdout and stderr if generating a JUnit report
if runner.config.junit:
self.stdout = runner.context.stdout_capture.getvalue()
self.stderr = runner.context.stderr_capture.getvalue()
runner.teardown_capture()
if not runner.config.dry_run and run_scenario:
runner.run_hook('after_scenario', runner.context, self)
for tag in self.tags:
runner.run_hook('after_tag', runner.context, tag)
runner.context._pop()
return failed
class ScenarioOutline(Scenario):
'''A `scenario outline`_ parsed from a *feature file*.
A scenario outline extends the existing :class:`~behave.model.Scenario`
class with the addition of the :class:`~behave.model.Examples` tables of
data from the *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
typically be "Scenario Outline".
.. attribute:: name
The name of the scenario (the text after "Scenario Outline:".)
.. attribute:: description
The description of the `scenario outline`_ as seen in the *feature file*.
This is stored as a list of text lines.
.. attribute:: feature
The :class:`~behave.model.Feature` this scenario outline belongs to.
.. attribute:: steps
A list of :class:`~behave.model.Step` making up this scenario outline.
.. attribute:: examples
A list of :class:`~behave.model.Examples` used by this scenario outline.
.. attribute:: tags
A list of @tags (as :class:`~behave.model.Tag` which are basically
glorified strings) attached to the scenario. See `controlling
things with tags`_.
.. attribute:: status
Read-Only. A summary status of the scenario outline's run. If read
before the scenario is fully tested it will return "untested" otherwise
it will return one of:
"untested"
The scenario has not been completely tested yet.
"skipped"
One or more scenarios of this outline were passed over during testing.
"passed"
The scenario was tested successfully.
"failed"
One or more scenarios of this outline failed.
.. attribute:: duration
The time, in seconds, that it took to test the scenarios of this
outline. If read before the scenarios are tested it will return 0.0.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the scenario
was found.
.. attribute:: line
The line number of the *feature file* where the scenario was found.
.. _`scenario outline`: gherkin.html#scenario-outlines
'''
type = "scenario_outline"
def __init__(self, filename, line, keyword, name, tags=[],
steps=[], examples=[], description=None):
super(ScenarioOutline, self).__init__(filename, line, keyword, name,
tags, steps, description)
self.examples = examples or []
self._scenarios = []
@property
def scenarios(self):
'''Return the scenarios with the steps altered to take the values from
the examples.
'''
if self._scenarios:
return self._scenarios
for example in self.examples:
for row in example.table:
new_steps = []
for step in self.steps:
new_steps.append(step.set_values(row))
scenario = Scenario(self.filename, self.line, self.keyword,
self.name, self.tags, new_steps)
scenario.feature = self.feature
scenario.background = self.background
scenario._row = row
self._scenarios.append(scenario)
return self._scenarios
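# Editor's sketch: an outline whose single Examples table has 3 rows
# expands lazily (on first access) into 3 plain Scenario objects:
#   len(outline.scenarios) == 3
# Each generated scenario reuses the outline's tags/background and has
# its steps rewritten via Step.set_values() with that row's values.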
def __repr__(self):
return '<ScenarioOutline "%s">' % self.name
def __iter__(self):
return iter(self.scenarios)
@property
def status(self):
for scenario in self.scenarios:
if scenario.status == 'failed':
return 'failed'
if scenario.status == 'skipped':
return 'skipped'
if scenario.status == 'untested':
return 'untested'
return 'passed'
@property
def duration(self):
duration = 0
for scenario in self.scenarios:
duration += scenario.duration
return duration
def mark_skipped(self):
"""
Marks this scenario outline (and all its scenarios/steps) as skipped.
"""
for scenario in self.scenarios:
scenario.mark_skipped()
assert self.status == "skipped"
def run(self, runner):
failed = False
for sub in self.scenarios:
runner.context._set_root_attribute('active_outline', sub._row)
failed = sub.run(runner)
if failed and runner.config.stop:
return True  # stop on first failure: report the outline as failed
runner.context._set_root_attribute('active_outline', None)
return failed
class Examples(BasicStatement, Replayable):
'''A table parsed from a `scenario outline`_ in a *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
typically be "Example".
.. attribute:: name
The name of the example (the text after "Example:".)
.. attribute:: table
An instance of :class:`~behave.model.Table` that came with the example
in the *feature file*.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the scenario
was found.
.. attribute:: line
The line number of the *feature file* where the scenario was found.
.. _`examples`: gherkin.html#examples
'''
type = "examples"
def __init__(self, filename, line, keyword, name, table=None):
super(Examples, self).__init__(filename, line, keyword, name)
self.table = table
class Step(BasicStatement, Replayable):
'''A single `step`_ parsed from a *feature file*.
The attributes are:
.. attribute:: keyword
This is the keyword as seen in the *feature file*. In English this will
typically be "Given", "When", "Then" or a number of other words.
.. attribute:: name
The name of the step (the text after "Given" etc.)
.. attribute:: step_type
The type of step as determined by the keyword. If the keyword is "and"
then the previous keyword in the *feature file* will determine this
step's step_type.
.. attribute:: text
An instance of :class:`~behave.model.Text` that came with the step
in the *feature file*.
.. attribute:: table
An instance of :class:`~behave.model.Table` that came with the step
in the *feature file*.
.. attribute:: status
Read-Only. A summary status of the step's run. If read before the
step is tested it will return "untested" otherwise it will
return one of:
"skipped"
This step was passed over during testing.
"passed"
The step was tested successfully.
"failed"
The step failed.
.. attribute:: duration
The time, in seconds, that it took to test this step. If read before the
step is tested it will return 0.0.
.. attribute:: error_message
If the step failed then this will hold any error information, as a
single string. It will otherwise be None.
.. attribute:: filename
The file name (or "<string>") of the *feature file* where the step was
found.
.. attribute:: line
The line number of the *feature file* where the step was found.
.. _`step`: gherkin.html#steps
'''
type = "step"
def __init__(self, filename, line, keyword, step_type, name, text=None,
table=None):
super(Step, self).__init__(filename, line, keyword, name)
self.step_type = step_type
self.text = text
self.table = table
self.status = 'untested'
self.duration = 0.0
self.error_message = None
self.exception = None
def __repr__(self):
return '<%s "%s">' % (self.step_type, self.name)
def __eq__(self, other):
return (self.step_type, self.name) == (other.step_type, other.name)
def __hash__(self):
return hash(self.step_type) + hash(self.name)
def set_values(self, table_row):
result = copy.deepcopy(self)
for name, value in table_row.items():
result.name = result.name.replace("<%s>" % name, value)
if result.text:
result.text = result.text.replace("<%s>" % name, value)
if result.table:
for row in result.table:
for i, cell in enumerate(row.cells):
row.cells[i] = cell.replace("<%s>" % name, value)
return result
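# Editor's sketch (hypothetical step and row):
#   step.name == u'I log in as <name>' and row[u'name'] == u'alice'
#   step.set_values(row) -> deep copy named u'I log in as alice';
#   multiline text and table cells get the same <placeholder> rewrite.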
def run(self, runner, quiet=False, capture=True):
# access module var here to allow test mocking to work
match = step_registry.registry.find_match(self)
if match is None:
runner.undefined.append(self)
if not quiet:
for formatter in runner.formatters:
formatter.match(NoMatch())
self.status = 'undefined'
if not quiet:
for formatter in runner.formatters:
formatter.result(self)
return False
keep_going = True
if not quiet:
for formatter in runner.formatters:
formatter.match(match)
runner.run_hook('before_step', runner.context, self)
runner.start_capture()
try:
start = time.time()
# -- ENSURE:
# * runner.context.text/.table attributes are reset (#66).
# * Even EMPTY multiline text is available in context.
runner.context.text = self.text
runner.context.table = self.table
match.run(runner.context)
self.status = 'passed'
except AssertionError, e:
self.status = 'failed'
self.exception = e
if e.args:
error = u'Assertion Failed: %s' % e
else:
# no assertion text; format the exception
error = traceback.format_exc()
except Exception, e:
self.status = 'failed'
error = traceback.format_exc()
self.exception = e
self.duration = time.time() - start
runner.stop_capture()
# flesh out the failure with details
if self.status == 'failed':
if capture:
# -- CAPTURE-ONLY: Non-nested step failures.
if runner.config.stdout_capture:
output = runner.stdout_capture.getvalue()
if output:
error += '\nCaptured stdout:\n' + output
if runner.config.stderr_capture:
output = runner.stderr_capture.getvalue()
if output:
error += '\nCaptured stderr:\n' + output
if runner.config.log_capture:
output = runner.log_capture.getvalue()
if output:
error += '\nCaptured logging:\n' + output
self.error_message = error
keep_going = False
if not quiet:
for formatter in runner.formatters:
formatter.result(self)
runner.run_hook('after_step', runner.context, self)
return keep_going
class Table(Replayable):
'''A `table`_ extracted from a *feature file*.
Table instance data is accessible using a number of methods:
**iteration**
Iterating over the Table will yield the :class:`~behave.model.Row`
instances from the .rows attribute.
**indexed access**
Individual rows may be accessed directly by index on the Table instance;
table[0] gives the first non-heading row and table[-1] gives the last
row.
The attributes are:
.. attribute:: headings
The headings of the table as a list of strings.
.. attribute:: rows
A list of instances of :class:`~behave.model.Row` that make up the body
of the table in the *feature file*.
Tables are also comparable, for what that's worth. Headings and row data
are compared.
.. _`table`: gherkin.html#table
'''
type = "table"
def __init__(self, headings, line=None, rows=[]):
Replayable.__init__(self)
self.headings = headings
self.line = line
self.rows = []
for row in rows:
self.add_row(row, line)
def add_row(self, row, line=None):
self.rows.append(Row(self.headings, row, line))
def add_column(self, column_name, values=None, default_value=u""):
"""
Adds a new column to this table.
Uses :param:`default_value` for new cells (if :param:`values` are
not provided). :param:`values` are extended with :param:`default_value`
if the values list is smaller than the number of table rows.
:param column_name: Name of new column (as string).
:param values: Optional list of cell values in new column.
:param default_value: Default value for cell (if values not provided).
:returns: Index of new column (as number).
"""
# assert isinstance(column_name, unicode)
assert not self.has_column(column_name)
if values is None:
values = [default_value] * len(self.rows)
elif not isinstance(values, list):
values = list(values)
if len(values) < len(self.rows):
more_size = len(self.rows) - len(values)
more_values = [default_value] * more_size
values.extend(more_values)
new_column_index = len(self.headings)
self.headings.append(column_name)
for row, value in zip(self.rows, values):
assert len(row.cells) == new_column_index
row.cells.append(value)
return new_column_index
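# Usage sketch (editor's example, hypothetical data):
#   table = Table([u'name'], rows=[[u'web'], [u'db']])
#   table.add_column(u'port', values=[u'8080'])   # -> 1 (new index)
#   table[0][u'port'] == u'8080'
#   table[1][u'port'] == u''    # padded with default_value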
def has_column(self, column_name):
return column_name in self.headings
def get_column_index(self, column_name):
return self.headings.index(column_name)
def require_column(self, column_name):
"""
Require that a column exists in the table.
Raise an AssertionError if the column does not exist.
:param column_name: Name of new column (as string).
:return: Index of column (as number) if it exists.
"""
if not self.has_column(column_name):
columns = ", ".join(self.headings)
msg = "REQUIRE COLUMN: %s (columns: %s)" % (column_name, columns)
raise AssertionError(msg)
return self.get_column_index(column_name)
def require_columns(self, column_names):
for column_name in column_names:
self.require_column(column_name)
def ensure_column_exists(self, column_name):
"""
Ensures that a column with the given name exists.
If the column does not exist, the column is added.
:param column_name: Name of column (as string).
:return: Index of column (as number).
"""
if self.has_column(column_name):
return self.get_column_index(column_name)
else:
return self.add_column(column_name)
def __repr__(self):
return "<Table: %dx%d>" % (len(self.headings), len(self.rows))
def __eq__(self, other):
if isinstance(other, Table):
if self.headings != other.headings:
return False
# -- CHECK: row counts too; zip() alone stops at the shorter table.
if len(self.rows) != len(other.rows):
return False
for my_row, their_row in zip(self.rows, other.rows):
if my_row != their_row:
return False
else:
# -- ASSUME: table <=> raw data comparison
other_rows = other
if len(self.rows) != len(other_rows):
return False
for my_row, their_row in zip(self.rows, other_rows):
if my_row != their_row:
return False
return True
def __ne__(self, other):
return not self == other
def __iter__(self):
return iter(self.rows)
def __getitem__(self, index):
return self.rows[index]
def assert_equals(self, data):
'''Assert that this table's cells are the same as the supplied "data".
The data passed in must be a list of lists giving:
[
[row 1],
[row 2],
[row 3],
]
If the cells do not match then a useful AssertionError will be raised.
'''
assert self == data, \
"Table does not match:\n%r\n!=\n%r" % (self.rows, data)
class Row(object):
'''One row of a `table`_ parsed from a *feature file*.
Row data is accessible using a number of methods:
**iteration**
Iterating over the Row will yield the individual cells as strings.
**named access**
Individual cells may be accessed by heading name; row['name'] would give
the cell value for the column with heading "name".
**indexed access**
Individual cells may be accessed directly by index on the Row instance;
row[0] gives the first cell and row[-1] gives the last cell.
The attributes are:
.. attribute:: cells
The list of strings that form the cells of this row.
.. attribute:: headings
The headings of the table as a list of strings.
Rows are also comparable, for what that's worth. Only the cells are
compared.
.. _`table`: gherkin.html#table
'''
def __init__(self, headings, cells, line=None, comments=None):
self.headings = headings
self.comments = comments
for c in cells:
assert isinstance(c, unicode)
self.cells = cells
self.line = line
def __getitem__(self, name):
try:
index = self.headings.index(name)
except ValueError:
if isinstance(name, int):
index = name
else:
raise KeyError('"%s" is not a row heading' % name)
return self.cells[index]
def __repr__(self):
return '<Row %r>' % (self.cells,)
def __eq__(self, other):
return self.cells == other.cells
def __ne__(self, other):
return not self == other
def __len__(self):
return len(self.cells)
def __iter__(self):
return iter(self.cells)
def items(self):
return zip(self.headings, self.cells)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def as_dict(self):
"""
Converts the row and its cell data into a dictionary.
:return: Row data as dictionary (without comments, line info).
"""
from behave.compat.collections import OrderedDict
return OrderedDict(self.items())
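# Usage sketch (editor's example, hypothetical data):
#   row = Row([u'name', u'port'], [u'web', u'8080'])
#   row[u'name'] == u'web' == row[0]
#   row.as_dict() -> OrderedDict([(u'name', u'web'), (u'port', u'8080')])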
class Tag(unicode):
'''Tags may be associated with Features or Scenarios.
They're a subclass of regular strings (unicode pre-Python 3) with an
additional ``line`` number attribute (where the tag was seen in the source
feature file).
See `controlling things with tags`_.
'''
def __new__(cls, name, line):
o = unicode.__new__(cls, name)
o.line = line
return o
class Text(unicode):
'''Store multiline text from a Step definition.
The attributes are:
.. attribute:: value
The actual text parsed from the *feature file*.
.. attribute:: content_type
Currently only 'text/plain'.
'''
def __new__(cls, value, content_type=u'text/plain', line=0):
assert isinstance(value, unicode)
assert isinstance(content_type, unicode)
o = unicode.__new__(cls, value)
o.content_type = content_type
o.line = line
return o
def line_range(self):
line_count = len(self.splitlines())
return (self.line, self.line + line_count + 1)
def replace(self, old, new):
return Text(super(Text, self).replace(old, new), self.content_type,
self.line)
def assert_equals(self, expected):
'''Assert that my text is identical to the "expected" text.
A unified diff will be displayed if they do not match.
'''
if self == expected:
return True
diff = []
for line in difflib.unified_diff(self.splitlines(),
expected.splitlines()):
diff.append(line)
# strip unnecessary diff prefix
diff = ['Text does not match:'] + diff[3:]
raise AssertionError('\n'.join(diff))
class Match(Replayable):
'''A parameter-matched *feature file* step name extracted using
step decorator `parameters`_.
.. attribute:: func
The step function that this match will be applied to.
.. attribute:: arguments
A list of :class:`behave.model.Argument` instances containing the
matched parameters from the step name.
'''
type = "match"
def __init__(self, func, arguments=None):
super(Match, self).__init__()
self.func = func
self.arguments = arguments
self.location = None
if func:
self.location = self.make_location(func)
def __repr__(self):
if self.func:
func_name = self.func.__name__
else:
func_name = '<no function>'
return '<Match %s, %s>' % (func_name, self.location)
def __eq__(self, other):
if not isinstance(other, Match):
return False
return (self.func, self.location) == (other.func, other.location)
def with_arguments(self, arguments):
match = copy.copy(self)
match.arguments = arguments
return match
def run(self, context):
args = []
kwargs = {}
for arg in self.arguments:
if arg.name is not None:
kwargs[arg.name] = arg.value
else:
args.append(arg.value)
with context.user_mode():
self.func(context, *args, **kwargs)
@staticmethod
def make_location(step_function):
'''
Extracts the location information from the step function and builds
the location string (schema: "{source_filename}:{line_number}").
:param step_function: Function whose location should be determined.
:return: Step function location as string.
'''
filename = relpath(step_function.func_code.co_filename, os.getcwd())
line_number = step_function.func_code.co_firstlineno
return FileLocation(filename, line_number)
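# Editor's sketch: a step function defined at line 12 of a (hypothetical)
# features/steps/login.py yields FileLocation('features/steps/login.py', 12),
# with the path made relative to the current working directory.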
class NoMatch(Match):
'''
Used for an "undefined step" when it cannot be matched with a
step definition.
'''
def __init__(self):
Match.__init__(self, func=None)
self.func = None
self.arguments = []
self.location = None
|
|
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Grid Dynamics
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
# E1102: %s is not callable
# pylint: disable=E1102
import abc
import copy
import six
from six.moves.urllib import parse
from solumclient.common.apiclient import exceptions
from solumclient.i18n import _
from oslo_utils import strutils
def getid(obj):
"""Return id if argument is a Resource.
Abstracts the common pattern of allowing both an object or an object's ID
(UUID) as a parameter when dealing with relationships.
"""
try:
if obj.uuid:
return obj.uuid
except AttributeError:
pass
try:
return obj.id
except AttributeError:
return obj
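# Usage sketch (editor's example): getid() accepts a Resource or a bare id:
#   getid(server)    -> server.uuid (or server.id as a fallback)
#   getid('42-abc')  -> '42-abc'   # plain ids pass through unchanged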
# TODO(aababilov): call run_hooks() in HookableMixin's child classes
class HookableMixin(object):
"""Mixin so classes can register and run hooks."""
_hooks_map = {}
@classmethod
def add_hook(cls, hook_type, hook_func):
"""Add a new hook of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param hook_func: hook function
"""
if hook_type not in cls._hooks_map:
cls._hooks_map[hook_type] = []
cls._hooks_map[hook_type].append(hook_func)
@classmethod
def run_hooks(cls, hook_type, *args, **kwargs):
"""Run all hooks of specified type.
:param cls: class that registers hooks
:param hook_type: hook type, e.g., '__pre_parse_args__'
:param args: args to be passed to every hook function
:param kwargs: kwargs to be passed to every hook function
"""
hook_funcs = cls._hooks_map.get(hook_type) or []
for hook_func in hook_funcs:
hook_func(*args, **kwargs)
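# Usage sketch (editor's example, hypothetical hook function):
#   def log_args(parser):
#       print(parser)
#   SomeClient.add_hook('__pre_parse_args__', log_args)
#   SomeClient.run_hooks('__pre_parse_args__', parser)
# NOTE: _hooks_map is one dict on HookableMixin itself, so hooks
# registered through one subclass are visible to every subclass.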
class BaseManager(HookableMixin):
"""Basic manager type providing common operations.
Managers interact with a particular type of API (servers, flavors, images,
etc.) and provide CRUD operations for them.
"""
resource_class = None
def __init__(self, client):
"""Initializes BaseManager with `client`.
:param client: instance of BaseClient descendant for HTTP requests
"""
super(BaseManager, self).__init__()
self.client = client
def _list(self, url, response_key=None, obj_class=None, json=None):
"""List the collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'. If response_key is None - all response body
will be used.
:param obj_class: class for constructing the returned objects
(self.resource_class will be used by default)
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
"""
if json:
body = self.client.post(url, json=json).json()
else:
body = self.client.get(url).json()
if obj_class is None:
obj_class = self.resource_class
data = body[response_key] if response_key is not None else body
# NOTE(ja): keystone returns values as list as {'values': [ ... ]}
# unlike other services which just return the list...
try:
data = data['values']
except (KeyError, TypeError):
pass
return [obj_class(self, res, loaded=True) for res in data if res]
def _get(self, url, response_key=None):
"""Get an object from collection.
:param url: a partial URL, e.g., '/servers'
:param response_key: the key to be looked up in response dictionary,
e.g., 'server'. If response_key is None - all response body
will be used.
"""
body = self.client.get(url).json()
data = body[response_key] if response_key is not None else body
return self.resource_class(self, data, loaded=True)
def _head(self, url):
"""Retrieve request headers for an object.
:param url: a partial URL, e.g., '/servers'
"""
resp = self.client.head(url)
return resp.status_code == 204
def _post(self, url, json, response_key=None, return_raw=False):
"""Create an object.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'server'. If response_key is None - all response body
will be used.
:param return_raw: flag to force returning raw JSON instead of
Python object of self.resource_class
"""
body = self.client.post(url, json=json).json()
data = body[response_key] if response_key is not None else body
if return_raw:
return data
return self.resource_class(self, data)
def _put(self, url, json=None, response_key=None):
"""Update an object with PUT method.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'. If response_key is None - all response body
will be used.
"""
resp = self.client.put(url, json=json)
# PUT requests may not return a body
if resp.content:
body = resp.json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _patch(self, url, json=None, response_key=None):
"""Update an object with PATCH method.
:param url: a partial URL, e.g., '/servers'
:param json: data that will be encoded as JSON and passed in POST
request (GET will be sent by default)
:param response_key: the key to be looked up in response dictionary,
e.g., 'servers'. If response_key is None - all response body
will be used.
"""
body = self.client.patch(url, json=json).json()
if response_key is not None:
return self.resource_class(self, body[response_key])
else:
return self.resource_class(self, body)
def _delete(self, url):
"""Delete an object.
:param url: a partial URL, e.g., '/servers/my-server'
"""
return self.client.delete(url)
@six.add_metaclass(abc.ABCMeta)
class ManagerWithFind(BaseManager):
"""Manager with additional `find()`/`findall()` methods."""
@abc.abstractmethod
def list(self):
pass
def find(self, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
matches = self.findall(**kwargs)
num_matches = len(matches)
if num_matches == 0:
msg = _("No %(name)s matching %(args)s.") % {
'name': self.resource_class.__name__,
'args': kwargs
}
raise exceptions.NotFound(msg)
elif num_matches > 1:
raise exceptions.NoUniqueMatch()
else:
return matches[0]
def findall(self, **kwargs):
"""Find all items with attributes matching ``**kwargs``.
This isn't very efficient: it loads the entire list then filters on
the Python side.
"""
found = []
searches = kwargs.items()
for obj in self.list():
try:
if all(getattr(obj, attr) == value
for (attr, value) in searches):
found.append(obj)
except AttributeError:
continue
return found
class CrudManager(BaseManager):
"""Base manager class for manipulating entities.
Children of this class are expected to define a `collection_key` and `key`.
- `collection_key`: Usually a plural noun by convention (e.g. `entities`);
used to refer to collections in both URLs (e.g. `/v3/entities`) and JSON
objects containing a list of member resources (e.g. `{'entities': [{},
{}, {}]}`).
- `key`: Usually a singular noun by convention (e.g. `entity`); used to
refer to an individual member of the collection.
"""
collection_key = None
key = None
def build_url(self, base_url=None, **kwargs):
"""Builds a resource URL for the given kwargs.
Given an example collection where `collection_key = 'entities'` and
`key = 'entity'`, the following URLs could be generated.
By default, the URL will represent a collection of entities, e.g.::
/entities
If kwargs contains an `entity_id`, then the URL will represent a
specific member, e.g.::
/entities/{entity_id}
:param base_url: if provided, the generated URL will be appended to it
"""
url = base_url if base_url is not None else ''
url += '/%s' % self.collection_key
# do we have a specific entity?
entity_id = kwargs.get('%s_id' % self.key)
if entity_id is not None:
url += '/%s' % entity_id
return url
def _filter_kwargs(self, kwargs):
"""Drop null values and handle ids."""
for key, ref in kwargs.copy().items():
if ref is None:
kwargs.pop(key)
else:
if isinstance(ref, Resource):
kwargs.pop(key)
kwargs['%s_id' % key] = getid(ref)
return kwargs
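# Editor's sketch (hypothetical values):
#   _filter_kwargs({'entity': <Resource id=7>, 'foo': None})
#   -> {'entity_id': 7}   # None values dropped, Resources -> *_id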
def create(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._post(
self.build_url(**kwargs),
{self.key: kwargs},
self.key)
def get(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._get(
self.build_url(**kwargs),
self.key)
def head(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._head(self.build_url(**kwargs))
def list(self, base_url=None, **kwargs):
"""List the collection.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
return self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
},
self.collection_key)
def put(self, base_url=None, **kwargs):
"""Update an element.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
return self._put(self.build_url(base_url=base_url, **kwargs))
def update(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
params = kwargs.copy()
params.pop('%s_id' % self.key)
return self._patch(
self.build_url(**kwargs),
{self.key: params},
self.key)
def delete(self, **kwargs):
kwargs = self._filter_kwargs(kwargs)
return self._delete(
self.build_url(**kwargs))
def find(self, base_url=None, **kwargs):
"""Find a single item with attributes matching ``**kwargs``.
:param base_url: if provided, the generated URL will be appended to it
"""
kwargs = self._filter_kwargs(kwargs)
rl = self._list(
'%(base_url)s%(query)s' % {
'base_url': self.build_url(base_url=base_url, **kwargs),
'query': '?%s' % parse.urlencode(kwargs) if kwargs else '',
},
self.collection_key)
num = len(rl)
if num == 0:
msg = _("No %(name)s matching %(args)s.") % {
'name': self.resource_class.__name__,
'args': kwargs
}
raise exceptions.NotFound(404, msg)
elif num > 1:
raise exceptions.NoUniqueMatch
else:
return rl[0]
class Extension(HookableMixin):
"""Extension descriptor."""
SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
manager_class = None
def __init__(self, name, module):
super(Extension, self).__init__()
self.name = name
self.module = module
self._parse_extension_module()
def _parse_extension_module(self):
self.manager_class = None
for attr_name, attr_value in self.module.__dict__.items():
if attr_name in self.SUPPORTED_HOOKS:
self.add_hook(attr_name, attr_value)
else:
try:
if issubclass(attr_value, BaseManager):
self.manager_class = attr_value
except TypeError:
pass
def __repr__(self):
return "<Extension '%s'>" % self.name
class Resource(object):
"""Base class for OpenStack resources (tenant, user, etc.).
This is pretty much just a bag for attributes.
"""
HUMAN_ID = False
NAME_ATTR = 'name'
def __init__(self, manager, info, loaded=False):
"""Populate and bind to a manager.
:param manager: BaseManager object
:param info: dictionary representing resource attributes
:param loaded: prevent lazy-loading if set to True
"""
self.manager = manager
self._info = info
self._add_details(info)
self._loaded = loaded
def __repr__(self):
reprkeys = sorted(k
for k in self.__dict__.keys()
if k[0] != '_' and k != 'manager')
info = ", ".join("%s=%s" % (k, getattr(self, k)) for k in reprkeys)
return "<%s %s>" % (self.__class__.__name__, info)
@property
def human_id(self):
"""Human-readable ID which can be used for bash completion."""
if self.HUMAN_ID:
name = getattr(self, self.NAME_ATTR, None)
if name is not None:
return strutils.to_slug(name)
return None
def _add_details(self, info):
for (k, v) in info.items():
try:
setattr(self, k, v)
self._info[k] = v
except AttributeError:
# In this case we already defined the attribute on the class
pass
def __getattr__(self, k):
if k not in self.__dict__:
# NOTE(bcwaldon): disallow lazy-loading if already loaded once
if not self.is_loaded():
self.get()
return self.__getattr__(k)
raise AttributeError(k)
else:
return self.__dict__[k]
def get(self):
"""Support for lazy loading details.
Some clients, such as novaclient have the option to lazy load the
details, details which can be loaded with this function.
"""
# set_loaded() first ... so if we have to bail, we know we tried.
self.set_loaded(True)
if not hasattr(self.manager, 'get'):
return
new = self.manager.get(self.id)
if new:
self._add_details(new._info)
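# Editor's sketch of the lazy-load flow (hypothetical resource):
#   server = Resource(manager, {'id': 1})    # loaded=False
#   server.name  -> __getattr__ misses, calls self.get(), which pulls
#   the full record via manager.get(1) and retries the lookup.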
def __eq__(self, other):
if not isinstance(other, Resource):
return NotImplemented
# two resources of different types are not equal
if not isinstance(other, self.__class__):
return False
if hasattr(self, 'id') and hasattr(other, 'id'):
return self.id == other.id
return self._info == other._info
def __ne__(self, other):
return not self.__eq__(other)
def is_loaded(self):
return self._loaded
def set_loaded(self, val):
self._loaded = val
def to_dict(self):
return copy.deepcopy(self._info)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: booopooob@gmail.com
#
# Info:
#
#
import asyncio
from typing import Callable, Tuple, Dict
import constants
import settings
from constants import STAGE_SOCKS5_UDP_ASSOCIATE, \
STAGE_SOCKS5_TCP_RELAY, \
STRUCT_BBB, STRUCT_BB, STRUCT_B, STRUCT_SOCK5_REPLY
from protocol.socks5.header import Socks5AddrHeader
from util import what_type_of_the_address
class Socks5Processor(object):
def __init__(self, loop, transport,
tcp_connect_coroutine: Callable[[Socks5AddrHeader], Tuple[str, int]],
udp_connect_coroutine: Callable[[Socks5AddrHeader], Tuple[str, int]],
auth=constants.SOCKS5_METHOD_NO_AUTHENTICATION_REQUIRED,
username_passwords: Dict = None):
self.loop = loop
self.transport = transport
self.tcp_connect_coroutine = tcp_connect_coroutine
self.udp_connect_coroutine = udp_connect_coroutine
self.auth = auth
self.username_passwords = username_passwords
self.state = constants.STAGE_SOCKS5_METHOD_SELECT
def udp_relaying(self):
return self.state == constants.STAGE_SOCKS5_UDP_ASSOCIATE
def tcp_relaying(self):
return self.state == constants.STAGE_SOCKS5_TCP_RELAY
def need_more_data(self):
return self.state not in [constants.STAGE_SOCKS5_TCP_RELAY, constants.STAGE_SOCKS5_UDP_ASSOCIATE]
def feed_data(self, data):
if self.state == constants.STAGE_SOCKS5_METHOD_SELECT:
#
# request:
# +-----+----------+----------+
# | VER | NMETHODS | METHODS |
# +-----+----------+----------+
# |  1  |    1     | 1 to 255 |
# +-----+----------+----------+
#
# response:
# +-----+--------+
# | VER | METHOD |
# +-----+--------+
# | 1 | 1 |
# +-----+--------+
#
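# e.g. the common "no auth" client greeting is the three bytes
# b'\x05\x01\x00' (VER=5, NMETHODS=1, METHODS=[0x00]) -- see RFC 1928.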
if len(data) < 3:
settings.PROTO_LOG.warning('not enough data for SOCKS METHOD SELECT')
self.transport.close()
return False
version, num_of_methods = STRUCT_BB.unpack_from(data)
method_data = data[STRUCT_BB.size:]
method_data = method_data[:num_of_methods]
methods = [method for method, in STRUCT_B.iter_unpack(method_data)]
if self.auth in methods:
# The server selects from one of the methods given in METHODS, and sends a METHOD selection message
response_data = STRUCT_BB.pack(constants.SOCKS5_VERSION, self.auth)
if self.auth == constants.SOCKS5_METHOD_NO_AUTHENTICATION_REQUIRED:
self.state = constants.STAGE_SOCKS5_REQUEST
elif self.auth == constants.SOCKS5_METHOD_USERNAME_PASSWORD:
self.state = constants.STAGE_SOCKS5_USERNAME_PASSWORD_AUTHENTICATION
self.transport.write(response_data)
return True
else:
# If the selected METHOD is X'FF', none of the methods listed by the
# client are acceptable, and the client MUST close the connection.
#
response_data = STRUCT_BB.pack(constants.SOCKS5_VERSION, constants.SOCKS5_METHOD_NO_ACCEPTABLE_METHODS)
self.transport.write(response_data)
return False
elif self.state == constants.STAGE_SOCKS5_USERNAME_PASSWORD_AUTHENTICATION:
#
# +----+------+----------+------+----------+
# |VER | ULEN | UNAME | PLEN | PASSWD |
# +----+------+----------+------+----------+
# | 1  |  1   | 1 to 255 |  1   | 1 to 255 |
# +----+------+----------+------+----------+
#
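# e.g. b'\x01\x05admin\x06secret' carries VER=0x01, ULEN=5,
# UNAME='admin', PLEN=6, PASSWD='secret' (RFC 1929 subnegotiation).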
version, len_of_user = STRUCT_BB.unpack_from(data)
data = data[STRUCT_BB.size:]
user = data[:len_of_user].decode('utf-8')
data = data[len_of_user:]
len_of_password, = STRUCT_B.unpack_from(data[:1])
data = data[1:]
password = data[:len_of_password].decode('utf-8')
# +----+--------+
# |VER | STATUS |
# +----+--------+
# | 1 | 1 |
# +----+--------+
#
# A STATUS field of X'00' indicates success. If the server returns a
# `failure' (STATUS value other than X'00') status, it MUST close the
# connection.
#
if user in self.username_passwords and self.username_passwords[user] == password:
self.state = constants.STAGE_SOCKS5_REQUEST
# The VER field contains the current version of the subnegotiation,
# which is X'01'.
self.transport.write(STRUCT_BB.pack(0x01, 0))
return True
else:
self.transport.write(STRUCT_BB.pack(0x01, 0xFF))
self.transport.close()
return False
elif self.state == constants.STAGE_SOCKS5_REQUEST:
#
# request:
# +-----+-----+-------+------+----------+----------+
# | VER | CMD | RSV | ATYP | DST.ADDR | DST.PORT |
# +-----+-----+-------+------+----------+----------+
# | 1 | 1 | X'00' | 1 | Variable | 2 |
# +-----+-----+-------+------+----------+----------+
#
# response:
# +-----+-----+-------+------+----------+----------+
# | VER | REP | RSV | ATYP | BND.ADDR | BND.PORT |
# +-----+-----+-------+------+----------+----------+
# | 1 | 1 | X'00' | 1 | Variable | 2 |
# +-----+-----+-------+------+----------+----------+
#
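# e.g. a CONNECT to 1.2.3.4:80 arrives as
# b'\x05\x01\x00\x01\x01\x02\x03\x04\x00\x50'
# (VER=5, CMD=CONNECT, RSV=0x00, ATYP=IPv4, DST.ADDR=1.2.3.4,
# DST.PORT=80) -- see RFC 1928.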
ret, cmd, addr = self._parse_socks_request(data)
if not ret:
return False
elif cmd not in [constants.SOCKS5_CMD_CONNECT, constants.SOCKS5_CMD_UDP_ASSOCIATE]:
self._write_error_socks_response_with_reply_code(constants.SOCKS5_REPLY_COMMAND_NOT_SUPPORTED)
return False
else:
f = None
if cmd == constants.SOCKS5_CMD_CONNECT:
f = asyncio.ensure_future(self.tcp_connect_coroutine(addr), loop=self.loop)
elif cmd == constants.SOCKS5_CMD_UDP_ASSOCIATE:
f = asyncio.ensure_future(self.udp_connect_coroutine(addr), loop=self.loop)
def conn_completed(future):
# socks5 states: In the reply to a CONNECT, BND.PORT contains the port number that
# the server assigned to connect to the target host, while BND.ADDR contains the associated IP address.
ret, (forward_addr, forward_port) = future.result()
if not ret:
self._write_error_socks_response_with_reply_code(constants.SOCKS5_REPLY_NETWORK_UNREACHABLE)
return False
else:
# If the reply code (REP value of X'00') indicates a success,
# and the request was either a BIND or a CONNECT, the client may now start passing data.
self._write_succeed_socks_response_with_addr(Socks5AddrHeader(
addr=forward_addr,
port=forward_port,
addr_type=what_type_of_the_address(forward_addr),
))
if cmd == constants.SOCKS5_CMD_CONNECT:
self.state = STAGE_SOCKS5_TCP_RELAY
elif cmd == constants.SOCKS5_CMD_UDP_ASSOCIATE:
self.state = STAGE_SOCKS5_UDP_ASSOCIATE
# close the connection and go to the UDP relay server for data relay
self.transport.close()
f.add_done_callback(conn_completed)
return True
def _write_error_socks_response_with_reply_code(self, reply_code):
response_data = STRUCT_SOCK5_REPLY.pack(constants.SOCKS5_VERSION,
reply_code,
constants.SOCKS5_RESERVED_BYTE,
constants.SOCKS5_ADDRTYPE_IPV4,
0,
0,
)
self.transport.write(response_data)
self.transport.close()
def _write_succeed_socks_response_with_addr(self, addr):
response_data = STRUCT_BBB.pack(constants.SOCKS5_VERSION,
constants.SOCKS5_REPLY_SUCCEEDED,
constants.SOCKS5_RESERVED_BYTE,
) + addr.to_bytes()
self.transport.write(response_data)
def _parse_socks_request(self, data):
"""
Return (ret, cmd, addr): the CMD and the address extracted from
DST.ADDR and DST.PORT. On error, close the transport and return (False, None, None).
"""
if len(data) < 10:
settings.PROTO_LOG.error('not enough data for SOCKS request')
self.transport.close()
return False, None, None
version, cmd, _ = STRUCT_BBB.unpack_from(data)
addr = Socks5AddrHeader()
try:
length = addr.from_bytes(data[STRUCT_BBB.size:])
except ValueError:
settings.PROTO_LOG.exception('Fail to get addr')
self._write_error_socks_response_with_reply_code(constants.SOCKS5_REPLY_ADDRESS_TYPE_NOT_SUPPORTED)
return False, None, None
else:
return True, cmd, addr
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from django.conf import settings
from django import http
from django.test.utils import override_settings
from mox import IsA # noqa
from novaclient import exceptions as nova_exceptions
from novaclient.v1_1 import servers
import six
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class ServerWrapperTests(test.TestCase):
def test_get_base_attribute(self):
server = api.nova.Server(self.servers.first(), self.request)
self.assertEqual(self.servers.first().id, server.id)
def test_image_name(self):
image = self.images.first()
self.mox.StubOutWithMock(api.glance, 'image_get')
api.glance.image_get(IsA(http.HttpRequest),
image.id).AndReturn(image)
self.mox.ReplayAll()
server = api.nova.Server(self.servers.first(), self.request)
self.assertEqual(image.name, server.image_name)
class ComputeApiTests(test.APITestCase):
def test_server_reboot(self):
server = self.servers.first()
HARDNESS = servers.REBOOT_HARD
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.reboot(server.id, HARDNESS)
self.mox.ReplayAll()
ret_val = api.nova.server_reboot(self.request, server.id)
self.assertIsNone(ret_val)
def test_server_soft_reboot(self):
server = self.servers.first()
HARDNESS = servers.REBOOT_SOFT
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.reboot(server.id, HARDNESS)
self.mox.ReplayAll()
ret_val = api.nova.server_reboot(self.request, server.id, HARDNESS)
self.assertIsNone(ret_val)
def test_server_vnc_console(self):
server = self.servers.first()
console = self.servers.vnc_console_data
console_type = console["console"]["type"]
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get_vnc_console(server.id,
console_type).AndReturn(console)
self.mox.ReplayAll()
ret_val = api.nova.server_vnc_console(self.request,
server.id,
console_type)
self.assertIsInstance(ret_val, api.nova.VNCConsole)
def test_server_spice_console(self):
server = self.servers.first()
console = self.servers.spice_console_data
console_type = console["console"]["type"]
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get_spice_console(server.id,
console_type).AndReturn(console)
self.mox.ReplayAll()
ret_val = api.nova.server_spice_console(self.request,
server.id,
console_type)
self.assertIsInstance(ret_val, api.nova.SPICEConsole)
def test_server_rdp_console(self):
server = self.servers.first()
console = self.servers.rdp_console_data
console_type = console["console"]["type"]
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get_rdp_console(server.id,
console_type).AndReturn(console)
self.mox.ReplayAll()
ret_val = api.nova.server_rdp_console(self.request,
server.id,
console_type)
self.assertIsInstance(ret_val, api.nova.RDPConsole)
def test_server_list(self):
servers = self.servers.list()
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.list(True, {'all_tenants': True}).AndReturn(servers)
self.mox.ReplayAll()
ret_val, has_more = api.nova.server_list(self.request,
all_tenants=True)
for server in ret_val:
self.assertIsInstance(server, api.nova.Server)
def test_server_list_pagination(self):
page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 20)
servers = self.servers.list()
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.list(True,
{'all_tenants': True,
'marker': None,
'limit': page_size + 1}).AndReturn(servers)
self.mox.ReplayAll()
ret_val, has_more = api.nova.server_list(self.request,
{'marker': None,
'paginate': True},
all_tenants=True)
for server in ret_val:
self.assertIsInstance(server, api.nova.Server)
self.assertFalse(has_more)
@override_settings(API_RESULT_PAGE_SIZE=1)
def test_server_list_pagination_more(self):
page_size = getattr(settings, 'API_RESULT_PAGE_SIZE', 1)
servers = self.servers.list()
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.list(True,
{'all_tenants': True,
'marker': None,
'limit': page_size + 1}) \
.AndReturn(servers[:page_size + 1])
self.mox.ReplayAll()
ret_val, has_more = api.nova.server_list(self.request,
{'marker': None,
'paginate': True},
all_tenants=True)
for server in ret_val:
self.assertIsInstance(server, api.nova.Server)
self.assertEqual(page_size, len(ret_val))
self.assertTrue(has_more)
def test_usage_get(self):
novaclient = self.stub_novaclient()
novaclient.usage = self.mox.CreateMockAnything()
novaclient.usage.get(self.tenant.id,
'start',
'end').AndReturn(self.usages.first())
self.mox.ReplayAll()
ret_val = api.nova.usage_get(self.request, self.tenant.id,
'start', 'end')
self.assertIsInstance(ret_val, api.nova.NovaUsage)
def test_usage_list(self):
usages = self.usages.list()
novaclient = self.stub_novaclient()
novaclient.usage = self.mox.CreateMockAnything()
novaclient.usage.list('start', 'end', True).AndReturn(usages)
self.mox.ReplayAll()
ret_val = api.nova.usage_list(self.request, 'start', 'end')
for usage in ret_val:
self.assertIsInstance(usage, api.nova.NovaUsage)
def test_server_get(self):
server = self.servers.first()
novaclient = self.stub_novaclient()
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get(server.id).AndReturn(server)
self.mox.ReplayAll()
ret_val = api.nova.server_get(self.request, server.id)
self.assertIsInstance(ret_val, api.nova.Server)
def _test_absolute_limits(self, values, expected_results):
limits = self.mox.CreateMockAnything()
limits.absolute = []
for key, val in six.iteritems(values):
limit = self.mox.CreateMockAnything()
limit.name = key
limit.value = val
limits.absolute.append(limit)
novaclient = self.stub_novaclient()
novaclient.limits = self.mox.CreateMockAnything()
novaclient.limits.get(reserved=True).AndReturn(limits)
self.mox.ReplayAll()
ret_val = api.nova.tenant_absolute_limits(self.request, reserved=True)
for key in expected_results.keys():
self.assertEqual(expected_results[key], ret_val[key])
def test_absolute_limits_handle_unlimited(self):
values = {"maxTotalCores": -1, "maxTotalInstances": 10}
expected_results = {"maxTotalCores": float("inf"),
"maxTotalInstances": 10}
self._test_absolute_limits(values, expected_results)
def test_absolute_limits_negative_used_workaround(self):
values = {"maxTotalCores": -1,
"maxTotalInstances": 10,
"totalInstancesUsed": -1,
"totalCoresUsed": -1,
"totalRAMUsed": -2048,
"totalSecurityGroupsUsed": 1,
"totalFloatingIpsUsed": 0,
}
expected_results = {"maxTotalCores": float("inf"),
"maxTotalInstances": 10,
"totalInstancesUsed": 0,
"totalCoresUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 1,
"totalFloatingIpsUsed": 0,
}
self._test_absolute_limits(values, expected_results)
def test_cold_migrate_host_succeed(self):
hypervisor = self.hypervisors.first()
novaclient = self.stub_novaclient()
novaclient.hypervisors = self.mox.CreateMockAnything()
novaclient.hypervisors.search('host', True).AndReturn([hypervisor])
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.migrate("test_uuid")
self.mox.ReplayAll()
ret_val = api.nova.migrate_host(self.request, "host", False, True,
True)
self.assertTrue(ret_val)
def test_cold_migrate_host_fails(self):
hypervisor = self.hypervisors.first()
novaclient = self.stub_novaclient()
novaclient.hypervisors = self.mox.CreateMockAnything()
novaclient.hypervisors.search('host', True).AndReturn([hypervisor])
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.migrate("test_uuid").AndRaise(
nova_exceptions.ClientException(404))
self.mox.ReplayAll()
self.assertRaises(nova_exceptions.ClientException,
api.nova.migrate_host,
self.request, "host", False, True, True)
def test_live_migrate_host_with_active_vm(self):
hypervisor = self.hypervisors.first()
server = self.servers.first()
novaclient = self.stub_novaclient()
server_uuid = hypervisor.servers[0]["uuid"]
novaclient.hypervisors = self.mox.CreateMockAnything()
novaclient.hypervisors.search('host', True).AndReturn([hypervisor])
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get(server_uuid).AndReturn(server)
novaclient.servers.live_migrate(server_uuid, None, True, True)
self.mox.ReplayAll()
ret_val = api.nova.migrate_host(self.request, "host", True, True,
True)
self.assertTrue(ret_val)
def test_live_migrate_host_with_paused_vm(self):
hypervisor = self.hypervisors.first()
server = self.servers.list()[3]
novaclient = self.stub_novaclient()
server_uuid = hypervisor.servers[0]["uuid"]
novaclient.hypervisors = self.mox.CreateMockAnything()
novaclient.hypervisors.search('host', True).AndReturn([hypervisor])
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get(server_uuid).AndReturn(server)
novaclient.servers.live_migrate(server_uuid, None, True, True)
self.mox.ReplayAll()
ret_val = api.nova.migrate_host(self.request, "host", True, True,
True)
self.assertTrue(ret_val)
def test_live_migrate_host_without_running_vm(self):
hypervisor = self.hypervisors.first()
server = self.servers.list()[1]
novaclient = self.stub_novaclient()
server_uuid = hypervisor.servers[0]["uuid"]
novaclient.hypervisors = self.mox.CreateMockAnything()
novaclient.hypervisors.search('host', True).AndReturn([hypervisor])
novaclient.servers = self.mox.CreateMockAnything()
novaclient.servers.get(server_uuid).AndReturn(server)
novaclient.servers.migrate(server_uuid)
self.mox.ReplayAll()
ret_val = api.nova.migrate_host(self.request, "host", True, True,
True)
self.assertTrue(ret_val)
|
|
"""
module for accessing a USB HID YubiKey
"""
# Copyright (c) 2010, 2011, 2012 Yubico AB
# See the file COPYING for licence statement.
__all__ = [
# constants
# functions
# classes
'YubiKeyUSBHID',
'YubiKeyUSBHIDError',
'YubiKeyUSBHIDStatus',
]
from .yubico_version import __version__
from . import yubico_util
from . import yubico_exception
from . import yubikey_frame
from . import yubikey_config
from . import yubikey_defs
from . import yubikey_base
from .yubikey_defs import SLOT, YUBICO_VID, PID
from .yubikey_base import YubiKey
import struct
import time
import sys
import usb
# Various USB/HID parameters
_USB_TYPE_CLASS = (0x01 << 5)
_USB_RECIP_INTERFACE = 0x01
_USB_ENDPOINT_IN = 0x80
_USB_ENDPOINT_OUT = 0x00
_HID_GET_REPORT = 0x01
_HID_SET_REPORT = 0x09
_USB_TIMEOUT_MS = 2000
# from ykcore_backend.h
_FEATURE_RPT_SIZE = 8
_REPORT_TYPE_FEATURE = 0x03
# dict used to select command for mode+slot in _challenge_response
_CMD_CHALLENGE = {'HMAC': {1: SLOT.CHAL_HMAC1, 2: SLOT.CHAL_HMAC2},
'OTP': {1: SLOT.CHAL_OTP1, 2: SLOT.CHAL_OTP2},
}
class YubiKeyUSBHIDError(yubico_exception.YubicoError):
""" Exception raised for errors with the USB HID communication. """
class YubiKeyUSBHIDCapabilities(yubikey_base.YubiKeyCapabilities):
"""
Capture the capabilities of the various versions of YubiKeys.
Overrides just the functions from YubiKeyCapabilities() that are available
in one or more versions, leaving the other ones at False through default_answer.
"""
def __init__(self, model, version, default_answer):
super(YubiKeyUSBHIDCapabilities, self).__init__(
model=model,
version=version,
default_answer=default_answer)
def have_yubico_OTP(self):
""" Yubico OTP support has always been available in the standard YubiKey. """
return True
def have_OATH(self, mode):
""" OATH HOTP was introduced in YubiKey 2.2. """
if mode not in ['HOTP']:
return False
return (self.version >= (2, 1, 0,))
def have_challenge_response(self, mode):
""" Challenge-response was introduced in YubiKey 2.2. """
if mode not in ['HMAC', 'OTP']:
return False
return (self.version >= (2, 2, 0,))
def have_serial_number(self):
""" Reading serial number was introduced in YubiKey 2.2, but depends on extflags set too. """
return (self.version >= (2, 2, 0,))
def have_ticket_flag(self, flag):
return flag.is_compatible(model = self.model, version = self.version)
def have_config_flag(self, flag):
return flag.is_compatible(model = self.model, version = self.version)
def have_extended_flag(self, flag):
return flag.is_compatible(model = self.model, version = self.version)
def have_extended_scan_code_mode(self):
return (self.version >= (2, 0, 0,))
def have_shifted_1_mode(self):
return (self.version >= (2, 0, 0,))
def have_configuration_slot(self, slot):
return (slot in [1, 2])
class YubiKeyHIDDevice(object):
"""
High-level wrapper for low-level HID commands for a HID based YubiKey.
"""
def __init__(self, debug=False, skip=0):
"""
Find and connect to a YubiKey (USB HID).
Attributes :
skip -- number of YubiKeys to skip
debug -- True or False
"""
self.debug = debug
self._usb_handle = None
if not self._open(skip):
raise YubiKeyUSBHIDError('YubiKey USB HID initialization failed')
self.status()
def status(self):
"""
Poll YubiKey for status.
"""
data = self._read()
self._status = YubiKeyUSBHIDStatus(data)
return self._status
def __del__(self):
try:
if self._usb_handle:
self._close()
except (IOError, AttributeError):
pass
def _write_config(self, cfg, slot):
""" Write configuration to YubiKey. """
old_pgm_seq = self._status.pgm_seq
frame = cfg.to_frame(slot=slot)
self._debug("Writing %s frame :\n%s\n" % \
(yubikey_config.command2str(frame.command), cfg))
self._write(frame)
self._waitfor_clear(yubikey_defs.SLOT_WRITE_FLAG)
# make sure we have a fresh pgm_seq value
self.status()
self._debug("Programmed slot %i, sequence %i -> %i\n" % (slot, old_pgm_seq, self._status.pgm_seq))
cfgs = self._status.valid_configs()
if not cfgs and self._status.pgm_seq == 0:
return
if self._status.pgm_seq == old_pgm_seq + 1:
return
raise YubiKeyUSBHIDError('YubiKey programming failed (seq %i not increased (%i))' % \
(old_pgm_seq, self._status.pgm_seq))
def _read_response(self, may_block=False):
""" Wait for a response to become available, and read it. """
# wait for response to become available
res = self._waitfor_set(yubikey_defs.RESP_PENDING_FLAG, may_block)[:7]
# continue reading while response pending is set
while True:
this = self._read()
flags = yubico_util.ord_byte(this[7])
if flags & yubikey_defs.RESP_PENDING_FLAG:
seq = flags & 0b00011111
if res and (seq == 0):
break
res += this[:7]
else:
break
self._write_reset()
return res
def _read(self):
""" Read a USB HID feature report from the YubiKey. """
request_type = _USB_TYPE_CLASS | _USB_RECIP_INTERFACE | _USB_ENDPOINT_IN
value = _REPORT_TYPE_FEATURE << 8 # apparently required for YubiKey 1.3.2, but not 2.2.x
recv = self._usb_handle.controlMsg(request_type,
_HID_GET_REPORT,
_FEATURE_RPT_SIZE,
value = value,
timeout = _USB_TIMEOUT_MS)
if len(recv) != _FEATURE_RPT_SIZE:
self._debug("Failed reading %i bytes (got %i) from USB HID YubiKey.\n"
% (_FEATURE_RPT_SIZE, recv))
raise YubiKeyUSBHIDError('Failed reading from USB HID YubiKey')
data = b''.join(yubico_util.chr_byte(c) for c in recv)
self._debug("READ : %s" % (yubico_util.hexdump(data, colorize=True)))
return data
def _write(self, frame):
"""
Write a YubiKeyFrame to the USB HID.
Includes polling for YubiKey readiness before each write.
"""
for data in frame.to_feature_reports(debug=self.debug):
debug_str = None
if self.debug:
(data, debug_str) = data
# first, we ensure the YubiKey will accept a write
self._waitfor_clear(yubikey_defs.SLOT_WRITE_FLAG)
self._raw_write(data, debug_str)
return True
def _write_reset(self):
"""
Reset read mode by issuing a dummy write.
"""
data = b'\x00\x00\x00\x00\x00\x00\x00\x8f'
self._raw_write(data)
self._waitfor_clear(yubikey_defs.SLOT_WRITE_FLAG)
return True
def _raw_write(self, data, debug_str = None):
"""
Write data to YubiKey.
"""
if self.debug:
if not debug_str:
debug_str = ''
hexdump = yubico_util.hexdump(data, colorize=True)[:-1] # strip LF
self._debug("WRITE : %s %s\n" % (hexdump, debug_str))
request_type = _USB_TYPE_CLASS | _USB_RECIP_INTERFACE | _USB_ENDPOINT_OUT
value = _REPORT_TYPE_FEATURE << 8 # apparently required for YubiKey 1.3.2, but not 2.2.x
sent = self._usb_handle.controlMsg(request_type,
_HID_SET_REPORT,
data,
value = value,
timeout = _USB_TIMEOUT_MS)
if sent != _FEATURE_RPT_SIZE:
self.debug("Failed writing %i bytes (wrote %i) to USB HID YubiKey.\n"
% (_FEATURE_RPT_SIZE, sent))
raise YubiKeyUSBHIDError('Failed talking to USB HID YubiKey')
return sent
def _waitfor_clear(self, mask, may_block=False):
"""
Wait for the YubiKey to turn OFF the bits in 'mask' in status responses.
Returns the 8 bytes last read.
"""
return self._waitfor('nand', mask, may_block)
def _waitfor_set(self, mask, may_block=False):
"""
Wait for the YubiKey to turn ON the bits in 'mask' in status responses.
Returns the 8 bytes last read.
"""
return self._waitfor('and', mask, may_block)
def _waitfor(self, mode, mask, may_block, timeout=2):
"""
Wait for the YubiKey to either turn ON or OFF certain bits in the status byte.
mode is either 'and' or 'nand'
timeout is a number of seconds (precision about ~0.5 seconds)
"""
finished = False
sleep = 0.01
        # After six sleeps, we've slept a total of 0.63 seconds.
wait_num = (timeout * 2) - 1 + 6
resp_timeout = False # YubiKey hasn't indicated RESP_TIMEOUT (yet)
while not finished:
time.sleep(sleep)
this = self._read()
flags = yubico_util.ord_byte(this[7])
if flags & yubikey_defs.RESP_TIMEOUT_WAIT_FLAG:
if not resp_timeout:
resp_timeout = True
seconds_left = flags & yubikey_defs.RESP_TIMEOUT_WAIT_MASK
self._debug("Device indicates RESP_TIMEOUT (%i seconds left)\n" \
% (seconds_left))
if may_block:
# calculate new wait_num - never more than 20 seconds
seconds_left = min(20, seconds_left)
wait_num = (seconds_left * 2) - 1 + 6
if mode == 'nand':
if not flags & mask == mask:
finished = True
else:
self._debug("Status %s (0x%x) has not cleared bits %s (0x%x)\n"
% (bin(flags), flags, bin(mask), mask))
elif mode == 'and':
if flags & mask == mask:
finished = True
else:
self._debug("Status %s (0x%x) has not set bits %s (0x%x)\n"
% (bin(flags), flags, bin(mask), mask))
else:
                assert False, 'unknown mode %r' % (mode,)
if not finished:
wait_num -= 1
if wait_num == 0:
if mode == 'nand':
reason = 'Timed out waiting for YubiKey to clear status 0x%x' % mask
else:
reason = 'Timed out waiting for YubiKey to set status 0x%x' % mask
raise yubikey_base.YubiKeyTimeout(reason)
sleep = min(sleep + sleep, 0.5)
else:
return this
def _open(self, skip=0):
""" Perform HID initialization """
usb_device = self._get_usb_device(skip)
if usb_device:
usb_conf = usb_device.configurations[0]
self._usb_int = usb_conf.interfaces[0][0]
else:
raise YubiKeyUSBHIDError('No USB YubiKey found')
try:
self._usb_handle = usb_device.open()
self._usb_handle.detachKernelDriver(0)
except Exception as error:
if 'could not detach kernel driver from interface' in str(error):
self._debug('The in-kernel-HID driver has already been detached\n')
else:
self._debug("detachKernelDriver not supported!\n")
try:
self._usb_handle.setConfiguration(1)
except usb.USBError:
self._debug("Unable to set configuration, ignoring...\n")
self._usb_handle.claimInterface(self._usb_int)
return True
def _close(self):
"""
Release the USB interface again.
"""
self._usb_handle.releaseInterface()
try:
# If we're using PyUSB >= 1.0 we can re-attach the kernel driver here.
self._usb_handle.dev.attach_kernel_driver(0)
except:
pass
self._usb_int = None
self._usb_handle = None
return True
def _get_usb_device(self, skip=0):
"""
Get YubiKey USB device.
Optionally allows you to skip n devices, to support multiple attached YubiKeys.
"""
try:
# PyUSB >= 1.0, this is a workaround for a problem with libusbx
# on Windows.
import usb.core
import usb.legacy
devices = [usb.legacy.Device(d) for d in usb.core.find(
find_all=True, idVendor=YUBICO_VID)]
except ImportError:
# Using PyUsb < 1.0.
import usb
devices = [d for bus in usb.busses() for d in bus.devices]
for device in devices:
if device.idVendor == YUBICO_VID:
if device.idProduct in PID.all(otp=True):
if skip == 0:
return device
skip -= 1
return None
def _debug(self, out, print_prefix=True):
""" Print out to stderr, if debugging is enabled. """
if self.debug:
if print_prefix:
pre = self.__class__.__name__
if hasattr(self, 'debug_prefix'):
pre = getattr(self, 'debug_prefix')
sys.stderr.write("%s: " % pre)
sys.stderr.write(out)
class YubiKeyUSBHID(YubiKey):
"""
Class for accessing a YubiKey over USB HID.
This class is for communicating specifically with standard YubiKeys
(USB vendor id = 0x1050, product id = 0x10) using USB HID.
There is another class for the YubiKey NEO BETA, even though that
product also goes by product id 0x10 for the BETA versions. The
    expectation is that the final YubiKey NEO will have its own product id.
Tested with YubiKey versions 1.3 and 2.2.
"""
model = 'YubiKey'
description = 'YubiKey (or YubiKey NANO)'
_capabilities_cls = YubiKeyUSBHIDCapabilities
def __init__(self, debug=False, skip=0, hid_device=None):
"""
Find and connect to a YubiKey (USB HID).
Attributes :
skip -- number of YubiKeys to skip
debug -- True or False
"""
super(YubiKeyUSBHID, self).__init__(debug)
if hid_device is None:
self._device = YubiKeyHIDDevice(debug, skip)
else:
self._device = hid_device
self.capabilities = \
self._capabilities_cls(model=self.model,
version=self.version_num(),
default_answer=False)
def __repr__(self):
return '<%s instance at %s: YubiKey version %s>' % (
self.__class__.__name__,
hex(id(self)),
self.version()
)
def __str__(self):
return '%s (%s)' % (self.model, self.version())
def status(self):
"""
Poll YubiKey for status.
"""
return self._device.status()
def version_num(self):
""" Get the YubiKey version as a tuple (major, minor, build). """
return self._device._status.ykver()
def version(self):
""" Get the YubiKey version. """
return self._device._status.version()
def serial(self, may_block=True):
""" Get the YubiKey serial number (requires YubiKey 2.2). """
if not self.capabilities.have_serial_number():
raise yubikey_base.YubiKeyVersionError("Serial number unsupported in YubiKey %s" % self.version() )
return self._read_serial(may_block)
def challenge_response(self, challenge, mode='HMAC', slot=1, variable=True, may_block=True):
""" Issue a challenge to the YubiKey and return the response (requires YubiKey 2.2). """
if not self.capabilities.have_challenge_response(mode):
raise yubikey_base.YubiKeyVersionError("%s challenge-response unsupported in YubiKey %s" % (mode, self.version()) )
return self._challenge_response(challenge, mode, slot, variable, may_block)
def init_config(self, **kw):
""" Get a configuration object for this type of YubiKey. """
return YubiKeyConfigUSBHID(ykver=self.version_num(), \
capabilities = self.capabilities, \
**kw)
def write_config(self, cfg, slot=1):
""" Write a configuration to the YubiKey. """
cfg_req_ver = cfg.version_required()
if cfg_req_ver > self.version_num():
raise yubikey_base.YubiKeyVersionError('Configuration requires YubiKey version %i.%i (this is %s)' % \
(cfg_req_ver[0], cfg_req_ver[1], self.version()))
if not self.capabilities.have_configuration_slot(slot):
raise YubiKeyUSBHIDError("Can't write configuration to slot %i" % (slot))
return self._device._write_config(cfg, slot)
def _read_serial(self, may_block):
""" Read the serial number from a YubiKey > 2.2. """
frame = yubikey_frame.YubiKeyFrame(command = SLOT.DEVICE_SERIAL)
self._device._write(frame)
response = self._device._read_response(may_block=may_block)
if not yubico_util.validate_crc16(response[:6]):
raise YubiKeyUSBHIDError("Read from device failed CRC check")
# the serial number is big-endian, although everything else is little-endian
serial = struct.unpack('>lxxx', response)
return serial[0]
def _challenge_response(self, challenge, mode, slot, variable, may_block):
""" Do challenge-response with a YubiKey > 2.0. """
# Check length and pad challenge if appropriate
if mode == 'HMAC':
if len(challenge) > yubikey_defs.SHA1_MAX_BLOCK_SIZE:
raise yubico_exception.InputError('Mode HMAC challenge too big (%i/%i)' \
% (yubikey_defs.SHA1_MAX_BLOCK_SIZE, len(challenge)))
if len(challenge) < yubikey_defs.SHA1_MAX_BLOCK_SIZE:
pad_with = b'\0'
if variable and challenge[-1:] == pad_with:
pad_with = b'\xff'
challenge = challenge.ljust(yubikey_defs.SHA1_MAX_BLOCK_SIZE, pad_with)
response_len = yubikey_defs.SHA1_DIGEST_SIZE
elif mode == 'OTP':
if len(challenge) != yubikey_defs.UID_SIZE:
raise yubico_exception.InputError('Mode OTP challenge must be %i bytes (got %i)' \
% (yubikey_defs.UID_SIZE, len(challenge)))
challenge = challenge.ljust(yubikey_defs.SHA1_MAX_BLOCK_SIZE, b'\0')
response_len = 16
else:
raise yubico_exception.InputError('Invalid mode supplied (%s, valid values are HMAC and OTP)' \
% (mode))
try:
command = _CMD_CHALLENGE[mode][slot]
        except KeyError:
raise yubico_exception.InputError('Invalid slot specified (%s)' % (slot))
frame = yubikey_frame.YubiKeyFrame(command=command, payload=challenge)
self._device._write(frame)
response = self._device._read_response(may_block=may_block)
if not yubico_util.validate_crc16(response[:response_len + 2]):
raise YubiKeyUSBHIDError("Read from device failed CRC check")
return response[:response_len]
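# A minimal usage sketch (not part of the module): assumes a YubiKey 2.2+ is
# attached and that slot 2 holds an HMAC-SHA1 challenge-response
# configuration; the challenge bytes are illustrative.
#
#     yk = YubiKeyUSBHID()
#     print("%s, firmware %s" % (yk, yk.version()))
#     response = yk.challenge_response(b'Sample #2', mode='HMAC', slot=2)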
class YubiKeyUSBHIDStatus(object):
""" Class to represent the status information we get from the YubiKey. """
CONFIG1_VALID = 0x01 # Bit in touchLevel indicating that configuration 1 is valid (from firmware 2.1)
CONFIG2_VALID = 0x02 # Bit in touchLevel indicating that configuration 2 is valid (from firmware 2.1)
def __init__(self, data):
# From ykdef.h :
#
# struct status_st {
# unsigned char versionMajor; /* Firmware version information */
# unsigned char versionMinor;
# unsigned char versionBuild;
# unsigned char pgmSeq; /* Programming sequence number. 0 if no valid configuration */
# unsigned short touchLevel; /* Level from touch detector */
# };
fmt = '<x BBB B H B'
self.version_major, \
self.version_minor, \
self.version_build, \
self.pgm_seq, \
self.touch_level, \
self.flags = struct.unpack(fmt, data)
def __repr__(self):
valid_str = ''
flags_str = ''
if self.ykver() >= (2,1,0):
valid_str = ", valid=%s" % (self.valid_configs())
if self.flags:
flags_str = " (flags 0x%x)" % (self.flags)
return '<%s instance at %s: YubiKey version %s, pgm_seq=%i, touch_level=%i%s%s>' % (
self.__class__.__name__,
hex(id(self)),
self.version(),
self.pgm_seq,
self.touch_level,
valid_str,
flags_str,
)
def ykver(self):
""" Returns a tuple with the (major, minor, build) version of the YubiKey firmware. """
return (self.version_major, self.version_minor, self.version_build)
def version(self):
""" Return the YubiKey firmware version as a string. """
version = "%d.%d.%d" % (self.ykver())
return version
def valid_configs(self):
""" Return a list of slots having a valid configurtion. Requires firmware 2.1. """
if self.ykver() < (2,1,0):
raise YubiKeyUSBHIDError('Valid configs unsupported in firmware %s' % (self.version()))
res = []
if self.touch_level & self.CONFIG1_VALID == self.CONFIG1_VALID:
res.append(1)
if self.touch_level & self.CONFIG2_VALID == self.CONFIG2_VALID:
res.append(2)
return res
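# For example (illustrative value): a touch_level of 0x03 has both
# CONFIG1_VALID and CONFIG2_VALID set, so valid_configs() returns [1, 2].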
class YubiKeyConfigUSBHID(yubikey_config.YubiKeyConfig):
"""
Configuration class for USB HID YubiKeys.
"""
def __init__(self, ykver, capabilities = None, **kw):
super(YubiKeyConfigUSBHID, self).__init__(ykver=ykver, capabilities=capabilities, **kw)
|
|
#!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Miscellaneous network utility code."""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import os
import re
import socket
import ssl
import stat
from tornado.concurrent import dummy_executor, run_on_executor
from tornado.ioloop import IOLoop
from tornado.platform.auto import set_close_exec
from tornado.util import Configurable
def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128, flags=None):
"""Creates listening sockets bound to the given port and address.
Returns a list of socket objects (multiple sockets are returned if
the given address maps to multiple IP addresses, which is most common
for mixed IPv4 and IPv6 use).
Address may be either an IP address or hostname. If it's a hostname,
the server will listen on all IP addresses associated with the
name. Address may be an empty string or None to listen on all
available interfaces. Family may be set to either `socket.AF_INET`
or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
both will be used if available.
The ``backlog`` argument has the same meaning as for
`socket.listen() <socket.socket.listen>`.
``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
"""
sockets = []
if address == "":
address = None
if not socket.has_ipv6 and family == socket.AF_UNSPEC:
# Python can be compiled with --disable-ipv6, which causes
# operations on AF_INET6 sockets to fail, but does not
# automatically exclude those results from getaddrinfo
# results.
# http://bugs.python.org/issue16208
family = socket.AF_INET
if flags is None:
flags = socket.AI_PASSIVE
for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM,
0, flags)):
af, socktype, proto, canonname, sockaddr = res
sock = socket.socket(af, socktype, proto)
set_close_exec(sock.fileno())
if os.name != 'nt':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if af == socket.AF_INET6:
# On linux, ipv6 sockets accept ipv4 too by default,
# but this makes it impossible to bind to both
# 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
# separate sockets *must* be used to listen for both ipv4
# and ipv6. For consistency, always disable ipv4 on our
# ipv6 sockets and use a separate ipv4 socket when needed.
#
# Python 2.x on windows doesn't have IPPROTO_IPV6.
if hasattr(socket, "IPPROTO_IPV6"):
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
sock.setblocking(0)
sock.bind(sockaddr)
sock.listen(backlog)
sockets.append(sock)
return sockets
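# A minimal sketch (not part of the module) pairing bind_sockets with
# add_accept_handler, defined below; the port and handler are illustrative.
#
#     sockets = bind_sockets(8888, address="127.0.0.1")
#     for sock in sockets:
#         add_accept_handler(sock, lambda connection, address: connection.close())
#     IOLoop.current().start()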
if hasattr(socket, 'AF_UNIX'):
def bind_unix_socket(file, mode=0o600, backlog=128):
"""Creates a listening unix socket.
If a socket with the given name already exists, it will be deleted.
If any other file with that name exists, an exception will be
raised.
Returns a socket object (not a list of socket objects like
`bind_sockets`)
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
set_close_exec(sock.fileno())
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
try:
st = os.stat(file)
except OSError as err:
if err.errno != errno.ENOENT:
raise
else:
if stat.S_ISSOCK(st.st_mode):
os.remove(file)
else:
raise ValueError("File %s exists and is not a socket", file)
sock.bind(file)
os.chmod(file, mode)
sock.listen(backlog)
return sock
def add_accept_handler(sock, callback, io_loop=None):
"""Adds an `.IOLoop` event handler to accept new connections on ``sock``.
When a connection is accepted, ``callback(connection, address)`` will
be run (``connection`` is a socket object, and ``address`` is the
address of the other end of the connection). Note that this signature
is different from the ``callback(fd, events)`` signature used for
`.IOLoop` handlers.
"""
if io_loop is None:
io_loop = IOLoop.current()
def accept_handler(fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error as e:
if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
return
raise
callback(connection, address)
io_loop.add_handler(sock.fileno(), accept_handler, IOLoop.READ)
def is_valid_ip(ip):
"""Returns true if the given string is a well-formed IP address.
Supports IPv4 and IPv6.
"""
try:
res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC,
socket.SOCK_STREAM,
0, socket.AI_NUMERICHOST)
return bool(res)
except socket.gaierror as e:
if e.args[0] == socket.EAI_NONAME:
return False
raise
class Resolver(Configurable):
"""Configurable asynchronous DNS resolver interface.
By default, a blocking implementation is used (which simply calls
`socket.getaddrinfo`). An alternative implementation can be
chosen with the `Resolver.configure <.Configurable.configure>`
class method::
Resolver.configure('tornado.netutil.ThreadedResolver')
The implementations of this interface included with Tornado are
* `tornado.netutil.BlockingResolver`
* `tornado.netutil.ThreadedResolver`
* `tornado.netutil.OverrideResolver`
* `tornado.platform.twisted.TwistedResolver`
* `tornado.platform.caresresolver.CaresResolver`
"""
@classmethod
def configurable_base(cls):
return Resolver
@classmethod
def configurable_default(cls):
return BlockingResolver
def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None):
"""Resolves an address.
The ``host`` argument is a string which may be a hostname or a
literal IP address.
Returns a `.Future` whose result is a list of (family,
address) pairs, where address is a tuple suitable to pass to
`socket.connect <socket.socket.connect>` (i.e. a ``(host,
port)`` pair for IPv4; additional fields may be present for
IPv6). If a ``callback`` is passed, it will be run with the
result as an argument when it is complete.
"""
raise NotImplementedError()
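# Usage sketch (hostname and port are illustrative): with the default
# BlockingResolver, defined below, the lookup itself is synchronous and the
# callback runs on the next IOLoop iteration.
#
#     resolver = Resolver()
#     resolver.resolve('localhost', 80, callback=print)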
class ExecutorResolver(Resolver):
def initialize(self, io_loop=None, executor=None):
self.io_loop = io_loop or IOLoop.current()
self.executor = executor or dummy_executor
@run_on_executor
def resolve(self, host, port, family=socket.AF_UNSPEC):
addrinfo = socket.getaddrinfo(host, port, family)
results = []
for family, socktype, proto, canonname, address in addrinfo:
results.append((family, address))
return results
class BlockingResolver(ExecutorResolver):
"""Default `Resolver` implementation, using `socket.getaddrinfo`.
The `.IOLoop` will be blocked during the resolution, although the
callback will not be run until the next `.IOLoop` iteration.
"""
def initialize(self, io_loop=None):
super(BlockingResolver, self).initialize(io_loop=io_loop)
class ThreadedResolver(ExecutorResolver):
"""Multithreaded non-blocking `Resolver` implementation.
Requires the `concurrent.futures` package to be installed
(available in the standard library since Python 3.2,
installable with ``pip install futures`` in older versions).
The thread pool size can be configured with::
Resolver.configure('tornado.netutil.ThreadedResolver',
num_threads=10)
"""
def initialize(self, io_loop=None, num_threads=10):
from concurrent.futures import ThreadPoolExecutor
super(ThreadedResolver, self).initialize(
io_loop=io_loop, executor=ThreadPoolExecutor(num_threads))
class OverrideResolver(Resolver):
"""Wraps a resolver with a mapping of overrides.
This can be used to make local DNS changes (e.g. for testing)
without modifying system-wide settings.
The mapping can contain either host strings or host-port pairs.
"""
def initialize(self, resolver, mapping):
self.resolver = resolver
self.mapping = mapping
def resolve(self, host, port, *args, **kwargs):
if (host, port) in self.mapping:
host, port = self.mapping[(host, port)]
elif host in self.mapping:
host = self.mapping[host]
return self.resolver.resolve(host, port, *args, **kwargs)
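# Example mapping (hostnames and ports are illustrative): override a bare
# host, or a specific (host, port) pair, and fall through to the wrapped
# resolver for everything else.
#
#     resolver = OverrideResolver(BlockingResolver(), {
#         'www.example.com': '127.0.0.1',
#         ('api.example.com', 443): ('127.0.0.1', 8443),
#     })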
# These are the keyword arguments to ssl.wrap_socket that must be translated
# to their SSLContext equivalents (the other arguments are still passed
# to SSLContext.wrap_socket).
_SSL_CONTEXT_KEYWORDS = frozenset(['ssl_version', 'certfile', 'keyfile',
'cert_reqs', 'ca_certs', 'ciphers'])
def ssl_options_to_context(ssl_options):
"""Try to convert an ``ssl_options`` dictionary to an
`~ssl.SSLContext` object.
The ``ssl_options`` dictionary contains keywords to be passed to
`ssl.wrap_socket`. In Python 3.2+, `ssl.SSLContext` objects can
be used instead. This function converts the dict form to its
`~ssl.SSLContext` equivalent, and may be used when a component which
accepts both forms needs to upgrade to the `~ssl.SSLContext` version
to use features like SNI or NPN.
"""
if isinstance(ssl_options, dict):
assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
if (not hasattr(ssl, 'SSLContext') or
isinstance(ssl_options, ssl.SSLContext)):
return ssl_options
context = ssl.SSLContext(
ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23))
if 'certfile' in ssl_options:
context.load_cert_chain(ssl_options['certfile'], ssl_options.get('keyfile', None))
if 'cert_reqs' in ssl_options:
context.verify_mode = ssl_options['cert_reqs']
if 'ca_certs' in ssl_options:
context.load_verify_locations(ssl_options['ca_certs'])
if 'ciphers' in ssl_options:
context.set_ciphers(ssl_options['ciphers'])
return context
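# Example (certificate paths are illustrative): converting the dict form
# accepted by ssl.wrap_socket into an SSLContext.
#
#     ctx = ssl_options_to_context({
#         'certfile': 'server.crt',
#         'keyfile': 'server.key',
#     })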
def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs):
"""Returns an ``ssl.SSLSocket`` wrapping the given socket.
``ssl_options`` may be either a dictionary (as accepted by
`ssl_options_to_context`) or an `ssl.SSLContext` object.
Additional keyword arguments are passed to ``wrap_socket``
(either the `~ssl.SSLContext` method or the `ssl` module function
as appropriate).
"""
context = ssl_options_to_context(ssl_options)
if hasattr(ssl, 'SSLContext') and isinstance(context, ssl.SSLContext):
        if server_hostname is not None and getattr(ssl, 'HAS_SNI', False):
# Python doesn't have server-side SNI support so we can't
# really unittest this, but it can be manually tested with
# python3.2 -m tornado.httpclient https://sni.velox.ch
return context.wrap_socket(socket, server_hostname=server_hostname,
**kwargs)
else:
return context.wrap_socket(socket, **kwargs)
else:
return ssl.wrap_socket(socket, **dict(context, **kwargs))
if hasattr(ssl, 'match_hostname'): # python 3.2+
ssl_match_hostname = ssl.match_hostname
SSLCertificateError = ssl.CertificateError
else:
# match_hostname was added to the standard library ssl module in python 3.2.
# The following code was backported for older releases and copied from
# https://bitbucket.org/brandon/backports.ssl_match_hostname
class SSLCertificateError(ValueError):
pass
def _dnsname_to_pat(dn):
pats = []
for frag in dn.split(r'.'):
if frag == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
else:
# Otherwise, '*' matches any dotless fragment.
frag = re.escape(frag)
pats.append(frag.replace(r'\*', '[^.]*'))
return re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
def ssl_match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 rules
are mostly followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_to_pat(value).match(hostname):
return
dnsnames.append(value)
if not san:
# The subject is only checked when subjectAltName is empty
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_to_pat(value).match(hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise SSLCertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise SSLCertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise SSLCertificateError("no appropriate commonName or "
"subjectAltName fields were found")
|
|
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains all necessary methods for setting up the Windows environment."""
from distutils.spawn import find_executable
import logging
import os
import platform
import subprocess
import sys
import common
import util
# Program Files directories. The default place for installation through an exe,
# hence the first place to search for completed installation.
PROGRAM_FILES = os.environ.get("ProgramFiles")
PROGRAM_FILES_X86 = os.environ.get("ProgramFiles(x86)")
# VISUAL STUDIO
# Download link and hash for Visual Studio 2013 Community edition, which will
# be downloaded if the user does not have any version of VS installed.
VS_DEFAULT_NAME = "Microsoft Visual Studio Community 2013"
VS_DEFAULT_VERSION = "Microsoft Visual Studio 12.0"
VS_DEFAULT_URL = "https://go.microsoft.com/fwlink/?LinkId=532495&clcid=0x409"
VS_DEFAULT_HASH = "748764ba84ce2ecf29f3ee1475c8cf0da664b351"
# Versions of Visual Studio and Visual C++ that are accepted for using fplutil
VS_NAME_PREFIX = "Microsoft Visual Studio "
VS_COMPILER_PREFIX = "Microsoft Visual C++ "
VS_COMPATIBLE_TYPES = [
"Community",
"Professional"
]
# Product year: Version name
VS_COMPATIBLE_VERSIONS = {
"2010": "10.0",
"2012": "11.0",
"2013": "12.0",
"2015": "14.0"
}
VS_COMPILER_BASE_URL = ("https://www.microsoft.com/en-us/download/"
"details.aspx?id=")
VS_COMPILER_DOWNLOADS = {
"10.0": "23691",
"11.0": "30679",
"12.0": "40784",
"14.0": "40784"
}
# CMAKE
# Default directory name, latest version name, and download link and
# corresponding SHA-1 hash, and minimum version:
CMAKE_DIR = "CMake"
CMAKE_VERSION = "cmake-3.4.1-win32-x86"
CMAKE_URL = "https://cmake.org/files/v3.4/" + CMAKE_VERSION + ".zip"
CMAKE_HASH = "4894baeafc0368d6530bf2c6bfe4fc94056bd04a"
CMAKE_MIN_VERSION = 3, 4, 1
# CWEBP
# Default directory name, base download link, and OS version dependent url
# suffix with corresponding SHA-1 hash in the format:
# OS version: (download link ending, hash)
# and minimum version
CWEBP_DIR = "cwebp"
CWEBP_BASE_URL = "http://downloads.webmproject.org/releases/webp/"
CWEBP_VERSIONS = {
common.WINDOWS_32: ("libwebp-0.4.4-windows-x86",
"88f44c6434535ef9a0470d7a5352ba5a883a6342"),
common.WINDOWS_64: ("libwebp-0.4.4-windows-x64",
"59cd4347029d9acbb6eda7efb6d15fe74d1232a2")
}
CWEBP_MIN_VERSION = 0, 4, 0
# IMAGEMAGICK
# Base download link, and OS version dependent url suffix with corresponding
# SHA-1 hash in the format:
# OS version: (download link ending, hash)
IMAGEMAGICK_BASE_URL = "http://www.imagemagick.org/download/binaries/"
IMAGEMAGICK_VERSIONS = {
common.WINDOWS_32: ("ImageMagick-6.9.2-10-Q16-x86-dll.exe",
"3a6d9d6e0989771e472b084bfd1cb15b5aeed720"),
common.WINDOWS_64: ("ImageMagick-6.9.2-10-Q16-x64-dll.exe",
"19f05721960ff0b28602cc4317f3942d0e2bf705")
}
# JAVA
# Base download link, and OS version dependent url suffix with corresponding
# SHA-1 hash in the format:
# OS version: (download link ending, hash)
JAVA_URL = ("http://www.oracle.com/technetwork/java/javase/downloads/"
"jdk8-downloads-2133151.html")
JAVA_VERSIONS = {
common.WINDOWS_32: "Windows x86 jdk-8u65-windows-i586.exe",
common.WINDOWS_64: "Windows x64 jdk-8u65-windows-x64.exe"
}
# PYTHON
# Base download link, and OS version dependent url suffix with corresponding
# SHA-1 hash in the format:
# OS version: (download link ending, hash)
# and minimum version
PYTHON_BASE_URL = "https://www.python.org/ftp/python/"
PYTHON_VERSIONS = {
common.WINDOWS_32: ("2.7.8/python-2.7.8.msi",
"a945690f9837e1a954afaabb8552b79a7abfd53d5"),
common.WINDOWS_64: ("2.7.8/python-2.7.8.amd64.msi",
"a19375bc3d7ca7d3c022f2d4a42fdf2c54f79d1d")
}
PYTHON_MIN_VERSION = 2, 7, 8
# DIRECTX
# Download link and hash for DirectX June 2010 edition
DIRECTX_URL = ("https://download.microsoft.com/download/A/E/7/AE743F1F-632B"
"-4809-87A9-AA1BB3458E31/DXSDK_Jun10.exe")
DIRECTX_HASH = "8fe98c00fde0f524760bb9021f438bd7d9304a69"
class WindowsSetup(common.Setup):
"""Contains all necessary methods for setting up Windows.
Attributes:
programs: A string of all the programs that are currently installed. Used
for determining if Visual Studio has been installed or not.
path_update: A string of the update to the Windows path that needs to be
appended.
vs_version: The version of Visual Studio either has installed, or is to be
installed.
version: Whether the system is 32bit or 64bit.
program_files: The default place of installation from exe installers. Used
for searching to check when programs are installed.
java_path: A string of the path to the location of the java directory. Used
for path setting if java.exe cannot be located in any of the default
locations.
python_path: A string of the path to the location of the python directory.
Used for path setting if python.exe cannot be located in any of the
default locations.
install_vs: Whether or not Visual Studio should be checked for or installed.
    fix_directx: Whether or not to try and fix problems DirectX might be having
      with Visual Studio.
    fix_path: The Windows PATH seems easily corruptible. If set, only call
      update_path and don't reinstall anything.
Raises:
VersionUnsupportedError: If the system version is neither 32bit or 64bit
VersionTooLowError: If the OS is older than Windows 7
BadDirectoryError: If the given cwebp or cmake path does not exist.
"""
def __init__(self, options):
common.Setup.__init__(self, options)
self.programs = ""
self.path_update = ""
self.vs_version = VS_DEFAULT_VERSION
version = platform.architecture()[0]
if version == "32bit":
self.version = common.WINDOWS_32
self.program_files = PROGRAM_FILES
elif version == "64bit":
self.version = common.WINDOWS_64
self.program_files = PROGRAM_FILES_X86
else:
raise common.VersionUnsupportedError("Not 32 or 64 bit Windows")
major, minor = get_windows_os_number()
if major < 6 or (major == 6 and minor < 1): # Windows Vista and below
raise common.VersionTooLowError(platform.release())
self.java_path = options.java_location
self.python_path = options.python_location
self.install_vs = not options.no_visual_studio
self.fix_directx = options.fix_directx
self.fix_path = options.fix_path
def check_programs(self):
"""Get a list of all programs currently installed.
Raises:
PermissionDeniedError: If wmic fails for any reason.
"""
logging.info("Checking what needs to be installed..")
try:
self.programs = subprocess.check_output("wmic product get name",
shell=True)
except subprocess.CalledProcessError:
raise common.PermissionDeniedError("wmic", "Try closing cmd.exe and "
"reopening as Administrator. (Right "
"click, select 'Run as "
"Administrator')")
def windows_setup_visual_studio(self):
"""Check for compatible versions of Visual Studio and Visual C++.
If no compatible version of Visual Studio is detected, download default
version. If a compatible version is detected, check if a compatible
version of the C++ compiler has been installed.
Raises:
FileDownloadError: If the Visual Studio installer fails to download, or
is downloaded incorrectly.
"""
for line in self.programs.splitlines():
if VS_NAME_PREFIX in line:
for name in get_all_vs():
if line.strip() == name:
self.vs_version = VS_COMPATIBLE_VERSIONS.get(name.split(" ")[-1])
logging.info("Visual Studio already installed.")
self.windows_check_compiler()
return
logging.info("Visual Studio not installed. Installing " + VS_DEFAULT_NAME +
" now...")
location = os.path.join(common.BASE_DIR, "vs_community.exe")
location = util.download_file(VS_DEFAULT_URL, location,
"Visual Studio Installer", VS_DEFAULT_HASH)
if not location:
raise common.FileDownloadError("https://www.visualstudio.com/en-us/"
"downloads/download-visual-studio-vs.aspx",
"Please rerun this script after "
"completing manual installation.")
logging.info("Now lauching Visual Stusio Installer.\n*** Please ensure you "
"select \"Visual C++\" ***\nYour computer will "
"likely need to be restarted. If so, click 'Restart Now' when "
"prompted and rerun this script after reboot.\nIf no restart "
"is required, click 'Finish' and rerun script.")
subprocess.call("cmd /k " + location, shell=True)
# cmd /k will stop the script, but just in case, exit
sys.exit()
def windows_check_compiler(self):
"""check for compatible version of Visual C++.
If no compatible version is found, download the same one was the version
of Visual Studio currently installed.
Raises:
InstallFailedError: If the user does not want to install Visual C++.
WebbrowserFailedError: If the link to Visual C++ could not be opened in
the user's default browser.
InstallInterruptError: If the user cancels the wait for installation of
Visual C++.
"""
for line in self.programs.splitlines():
if VS_COMPILER_PREFIX in line:
for name in VS_COMPATIBLE_VERSIONS.iterkeys():
if line.startswith(VS_COMPILER_PREFIX + name):
logging.info("Visual C++ already installed.")
return
logging.warn("Could not find Visual C++ compiler.\nPlease open Visual "
"Studio installer now and repair installation, or continue "
"and download Visual C++.")
if not raw_input("Continue? (y/n) ").lower().startswith("y"):
raise common.InstallFailedError("Visual C++", "https://www.microsoft.com/"
"en-us/download/details.aspx?id=48145",
"If you would like to skip Visual Studio "
"installation, please rerun this script "
"with the flag\n\t--no_visual_studio")
if self.version == common.WINDOWS_32:
filename = "vcredist_x86.exe"
else:
filename = "vcredist_x64.exe"
logging.info("Opening web browser. Please download\n\t" + filename + "\n"
"Once download is complete, double click the exe and follow "
"installation instructions.")
url = VS_COMPILER_BASE_URL + VS_COMPILER_DOWNLOADS.get(self.vs_version)
if not util.open_link(url, "Visual C++"):
raise common.WebbrowserFailedError("Visual C++", url)
if not util.wait_for_installation("cl.exe", search=True,
basedir=self.program_files):
raise common.InstallInterruptError("Visual C++", "If you would like to "
"skip Visual Studio installation, "
"please rerun this script with the "
"flag\n\t--no_visual_studio")
logging.info("Visual C++ installed.")
def windows_fix_directx(self):
"""Attempt to fix problems DirectX may be having with Visual Studio.
DirectX comes pre-installed on Windows 7 and up, but having Visual C++ 2010
or higher may give an "S1023" error due to it being newer than the latest
version of DirectX, June 2010 DirectX SDK. This can be fixed by
reinstalling DirectX once Visual C++ has been established.
Raises:
FileDownloadError: If the Visual Studio installer fails to download, or
is downloaded incorrectly.
"""
logging.info("Attempting to fix problems with DirectX...")
try:
subprocess.call("MsiExec.exe /passive /X{F0C3E5D1-1ADE-321E-8167-"
"68EF0DE699A5}", shell=True)
subprocess.call("MsiExec.exe /passive /X{1D8E6291-B0D5-35EC-8441-"
"6616F567A0F7}", shell=True)
except subprocess.CalledProcessError:
logging.warning("MsiExec.exe failed. Could not resolve conflicts with "
"DirectX and Visual Studio.")
return
location = os.path.join(common.BASE_DIR, "directx.exe")
location = util.download_file(DIRECTX_URL, location, "DirectX",
DIRECTX_HASH)
if not location:
raise common.FileDownloadError("http://www.microsoft.com/en-us/download/"
"details.aspx?id=6812", "Please rerun "
"this script after completing manual "
"installation.")
subprocess.call("start cmd /c " + location, shell=True)
logging.info("DirectX successfully reinstalled.")
def windows_install_cmake(self):
"""Check for and install cmake.
Raises:
FileDownloadError: If the CMake zip fails to download, or is downloaded
incorrectly.
"""
if find_executable("cmake"):
if check_cmake_version():
logging.info("CMake already installed.")
return
else:
logging.info("CMake version not sufficient. Updating now.")
else:
location = util.check_dir(self.cmake_path, CMAKE_VERSION,
os.path.join("bin", "cmake.exe"))
if location:
logging.info("CMake already installed.")
self.cmake_path = location
return
else:
logging.info("CMake not installed. Downloading now...")
location = os.path.join(common.BASE_DIR, "cmake.zip")
location = util.download_file(CMAKE_URL, location, "cmake", CMAKE_HASH)
if not location:
raise common.FileDownloadError("https://cmake.org/download/", "Please "
"rerun this script afterwards with the "
"flag\n\t--cmake=\\path\\to\\cmake")
util.extract_zipfile(location, "r", self.cmake_path, "cmake")
logging.info("cmake successfully installed.")
def windows_install_cwebp(self):
"""Check for and install cwebp in given directory.
Raises:
FileDownloadError: If the cwebp zip fails to download, or is downloaded
incorrectly.
"""
if find_executable("cwebp"):
if check_cwebp_version():
logging.info("cwebp already installed.")
return
else:
logging.info("cwebp version not sufficient. Updating now.")
else:
      location = util.check_dir(self.cwebp_path,
                                CWEBP_VERSIONS.get(self.version)[0],
                                os.path.join("bin", "cwebp.exe"))
      if location:
        logging.info("cwebp already installed.")
        self.cwebp_path = location
        return
version, file_hash = CWEBP_VERSIONS.get(self.version)
logging.info("cwebp not installed. Downloading now...")
url = CWEBP_BASE_URL + version + ".zip"
location = os.path.join(common.BASE_DIR, "cwebp.zip")
location = util.download_file(url, location, "cwebp", file_hash)
if not location:
raise common.FileDownloadError("https://developers.google.com/speed/webp/"
"docs/precompiled", "Please rerun this "
"script afterwards with the flag\n\t"
"--cmake=\\path\\to\\cmake")
util.extract_zipfile(location, "r", self.cwebp_path, "cwebp")
logging.info("cwebp successfully installed.")
def windows_install_imagemagick(self):
"""Check for and install ImageMagick.
Raises:
FileDownloadError: If the ImageMagick installer fails to download, or is
downloaded incorrectly.
InstallInterruptError: If the user cancels the wait for installation of
ImageMagick.
"""
if find_executable("convert"):
logging.info("ImageMagick is already installed.")
return
logging.info("ImageMagick not installed. Downloading now...")
url, file_hash = IMAGEMAGICK_VERSIONS.get(self.version)
url = IMAGEMAGICK_BASE_URL + url
location = os.path.join(common.BASE_DIR, "imagemagick.exe")
location = util.download_file(url, location, "imagemagick", file_hash)
if not location:
raise common.FileDownloadError("http://www.imagemagick.org/script/binary-"
"releases.php", "Please rerun this script "
"after completing manual installation.\n")
subprocess.call("start cmd /c " + location, shell=True)
if not util.wait_for_installation("convert"):
raise common.InstallInterruptError("ImageMagick")
logging.info("ImageMagick successfully installed.")
def windows_install_java(self):
"""Check for and install Java.
    Downloading the JDK installer can't be done through Python or equivalent
    shell commands, due to some JavaScript on the download site. It instead
    has to be done through the user's default browser.
Raises:
WebbrowserFailedError: If the link to Java JDK could not be opened in
the user's default browser.
InstallInterruptError: If the user cancels the wait for installation of
Java JDK.
"""
if find_executable("java"):
logging.info("Java already installed.")
return
# Since installing Java is annoying, we want to make doubly sure the user
# doesn't have it already.
location = util.find_file(PROGRAM_FILES, "java.exe")
if not location and self.program_files == PROGRAM_FILES_X86:
# In case the user has installed the 32 bit version on a 64 bit machine
location = util.find_file(PROGRAM_FILES_X86, "java.exe")
if location:
logging.info("Java already installed at " + location + ".")
self.java_path = os.path.dirname(location)
return
logging.warn("Java not installed. Please accept the terms and conditions, "
"and download:\n\t" + JAVA_VERSIONS.get(self.version) +
"\nOnce download is complete, double click the exe and follow "
"installation instructions.")
# Java JDK can't be installed without the user accepting the terms and
# conditions, which can only be done in their browser
logging.warn("Java not installed. Opening browser...")
if not util.open_link(JAVA_URL, "Java JDK"):
raise common.WebbrowserFailedError("Java JDK", JAVA_URL)
if not util.wait_for_installation("java.exe", search=True,
basedir=PROGRAM_FILES):
raise common.InstallInterruptError("Java JDK")
logging.info("Java successfully installed.")
def windows_install_python(self):
"""Checks for and installs at least Python 2.7.8.
Raises:
FileDownloadError: If the Python installer fails to download, or is
downloaded incorrectly.
InstallInterruptError: If the user cancels the wait for installation of
ImageMagick.
InstallFailedError: If msiexec fails, or Python cannot be installed.
"""
if find_executable("python"):
if check_python_version():
logging.info("Python already installed.")
return
else:
logging.info("Python version not sufficient. Updating now.")
else:
logging.info("Python not installed. Downloading now.")
url, file_hash = PYTHON_VERSIONS.get(self.version)
url = PYTHON_BASE_URL + url
location = os.path.join(common.BASE_DIR, "python.msi")
location = util.download_file(url, location, "python", file_hash)
if not location:
raise common.FileDownloadError("https://www.python.org/downloads/release/"
"python-278/", "Please rerun this script "
"after completing manual installation.\n")
logging.info("Opening Python installer. For convenience, please select the "
"'Add python.exe to Path' option.")
try:
subprocess.call("msiexec /i " + location, shell=True)
except subprocess.CalledProcessError:
raise common.InstallFailedError("Python", "https://www.python.org/"
"downloads/release/python-278/", "Please "
"rerun this script after installating "
"Python manually.")
def update_path(self):
"""Checks PATH variable and edits it accordingly.
Update or repair Windows PATH. If called after setup, path will be updated.
If called by the flag --fix_path, path will be repaired.
"""
update = ""
# Installed by this script
if not find_executable("cwebp"):
cwebp_ver, _ = CWEBP_VERSIONS.get(self.version)
update = (os.path.join(self.cwebp_path, cwebp_ver, "bin") + os.pathsep
+ update)
if not find_executable("cl"):
update = (os.path.join(self.program_files, self.vs_version, "VC", "bin")
+ os.pathsep + update)
# Installed by exe installers
if not find_executable("cmake"):
location = util.check_dir(self.cmake_path,
os.path.join(CMAKE_VERSION, "bin"), "cmake.exe")
if not location:
location = util.find_file(self.program_files, "cmake.exe")
if location:
location = os.path.dirname(location)
if location:
update = location + os.pathsep + update
else:
logging.warn("Unable to set path for CMake. Please rerun this script "
"with additional flag:\n\t--cmake=\\path\\to\\cmake")
if not find_executable("java"):
location = util.check_dir(self.java_path, "bin", "java.exe")
if not location:
location = util.find_file(os.path.dirname(self.program_files),
"java.exe")
if location:
location = os.path.dirname(location)
if location:
update = location + os.pathsep + update
else:
logging.warn("Unable to set path for Java. Please rerun this script "
"with the additional flag:\n\t--java=\\path\\to\\java")
if not find_executable("python"):
location = util.check_dir(self.python_path, "files", "python.exe")
if not location:
location = util.find_file(os.path.dirname(self.program_files),
"python.exe")
if location:
location = os.path.dirname(location)
if location:
update = location + os.pathsep + update
else:
logging.warn("Unable to set path for Python. Please rerun this script "
"with the additional flag:\n\t--python=\\path\\to\\python")
self.path_update = update
self.bash_profile_changed = True
def get_windows_path_update(self):
"""Returns all the paths that needs to be appended to the Windows PATH."""
return self.path_update
def setup_all(self):
"""Perform all necessary setup."""
if self.fix_path:
self.update_path()
return
if self.install_vs:
self.check_programs()
self.windows_setup_visual_studio()
if self.fix_directx:
self.windows_fix_directx()
self.windows_install_cmake()
self.windows_install_cwebp()
self.windows_install_imagemagick()
self.windows_install_java()
self.windows_install_python()
self.update_path()
logging.info("Windows setup complete.")
def update_windows_path(path):
"""Performs the bash command to update the path. Must be done last."""
subprocess.call("setx PATH \"" + path)
def get_windows_os_number():
"""Gets an integer of the Windows OS number."""
build = platform.version().split(".")
return int(build[0]), int(build[1])
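# For example (an illustrative build string): platform.version() returning
# "6.1.7601" on Windows 7 yields (6, 1).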
def check_cmake_version():
"""Gets current version of CMake and checks if it's high enough.
Returns:
Boolean: True if the version is high enough, False if it is not.
"""
output = subprocess.check_output("cmake --version")
version = output.splitlines()[0].split(" ")[-1]
major, minor, build = tuple(int(x) for x in version.split("."))
if ((major > CMAKE_MIN_VERSION[0]) or
(major == CMAKE_MIN_VERSION[0] and minor > CMAKE_MIN_VERSION[1]) or
(major == CMAKE_MIN_VERSION[0] and minor == CMAKE_MIN_VERSION[1] and
build >= CMAKE_MIN_VERSION[2])):
return True
else:
return False
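# Note: because Python compares tuples lexicographically, the three-clause
# check above (and the identical ones in check_cwebp_version and
# check_python_version below) is equivalent to:
#
#     return (major, minor, build) >= CMAKE_MIN_VERSION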
def check_cwebp_version():
"""Gets current version of cwebp and checks if it's high enough.
Returns:
Boolean: True if the version is high enough, False if it is not.
"""
output = subprocess.check_output("cwebp -version")
version = output.strip()
major, minor, build = tuple(int(x) for x in version.split("."))
if ((major > CWEBP_MIN_VERSION[0]) or
(major == CWEBP_MIN_VERSION[0] and minor > CWEBP_MIN_VERSION[1]) or
(major == CWEBP_MIN_VERSION[0] and minor == CWEBP_MIN_VERSION[1] and
build >= CWEBP_MIN_VERSION[2])):
return True
else:
return False
def check_python_version():
"""Gets current version of Python and checks if it's high enough.
Python version must be obtained through the command line, rather than
Python's internal version check.
Returns:
Boolean: True if the version is high enough, False if it is not.
"""
version = subprocess.Popen("python -V", stderr=subprocess.PIPE)
version = version.stderr.read().strip().split(" ")[1]
major, minor, build = tuple(int(x) for x in version.split("."))
if ((major > PYTHON_MIN_VERSION[0]) or
(major == PYTHON_MIN_VERSION[0] and minor > PYTHON_MIN_VERSION[1]) or
(major == PYTHON_MIN_VERSION[0] and minor == PYTHON_MIN_VERSION[1] and
build >= PYTHON_MIN_VERSION[2])):
return True
else:
return False
def get_all_vs():
"""Creates a list of all the compatible versions of Visual Studio."""
all_vs = []
for vs_type in VS_COMPATIBLE_TYPES:
for year_num in VS_COMPATIBLE_VERSIONS.iterkeys():
all_vs.append(VS_NAME_PREFIX + vs_type + " " + year_num)
return all_vs
|
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import tempfile
from telemetry import decorators
from telemetry.core import exceptions
from telemetry.core import platform
from telemetry.core import util
from telemetry.core import video
from telemetry.core.platform import proc_supporting_platform_backend
from telemetry.core.platform.power_monitor import android_ds2784_power_monitor
from telemetry.core.platform.power_monitor import android_dumpsys_power_monitor
from telemetry.core.platform.power_monitor import android_temperature_monitor
from telemetry.core.platform.power_monitor import monsoon_power_monitor
from telemetry.core.platform.power_monitor import power_monitor_controller
from telemetry.core.platform.profiler import android_prebuilt_profiler_helper
# Get build/android scripts into our path.
util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'build', 'android')
from pylib import screenshot # pylint: disable=F0401
from pylib.perf import cache_control # pylint: disable=F0401
from pylib.perf import perf_control # pylint: disable=F0401
from pylib.perf import thermal_throttle # pylint: disable=F0401
try:
from pylib.perf import surface_stats_collector # pylint: disable=F0401
except Exception:
surface_stats_collector = None
_HOST_APPLICATIONS = [
'avconv',
'ipfw',
'perfhost',
]
class AndroidPlatformBackend(
proc_supporting_platform_backend.ProcSupportingPlatformBackend):
def __init__(self, device, no_performance_mode):
super(AndroidPlatformBackend, self).__init__()
self._device = device
self._surface_stats_collector = None
self._perf_tests_setup = perf_control.PerfControl(self._device)
self._thermal_throttle = thermal_throttle.ThermalThrottle(self._device)
self._no_performance_mode = no_performance_mode
self._raw_display_frame_rate_measurements = []
self._can_access_protected_file_contents = \
self._device.old_interface.CanAccessProtectedFileContents()
power_controller = power_monitor_controller.PowerMonitorController([
monsoon_power_monitor.MonsoonPowerMonitor(),
android_ds2784_power_monitor.DS2784PowerMonitor(device),
android_dumpsys_power_monitor.DumpsysPowerMonitor(device),
])
self._powermonitor = android_temperature_monitor.AndroidTemperatureMonitor(
power_controller, device)
self._video_recorder = None
self._video_output = None
if self._no_performance_mode:
logging.warning('CPU governor will not be set!')
def IsRawDisplayFrameRateSupported(self):
return True
def StartRawDisplayFrameRateMeasurement(self):
assert not self._surface_stats_collector
# Clear any leftover data from previous timed out tests
self._raw_display_frame_rate_measurements = []
self._surface_stats_collector = \
surface_stats_collector.SurfaceStatsCollector(self._device)
self._surface_stats_collector.Start()
def StopRawDisplayFrameRateMeasurement(self):
if not self._surface_stats_collector:
return
self._surface_stats_collector.Stop()
for r in self._surface_stats_collector.GetResults():
self._raw_display_frame_rate_measurements.append(
platform.Platform.RawDisplayFrameRateMeasurement(
r.name, r.value, r.unit))
self._surface_stats_collector = None
def GetRawDisplayFrameRateMeasurements(self):
ret = self._raw_display_frame_rate_measurements
self._raw_display_frame_rate_measurements = []
return ret
def SetFullPerformanceModeEnabled(self, enabled):
if self._no_performance_mode:
return
if enabled:
self._perf_tests_setup.SetHighPerfMode()
else:
self._perf_tests_setup.SetDefaultPerfMode()
def CanMonitorThermalThrottling(self):
return True
def IsThermallyThrottled(self):
return self._thermal_throttle.IsThrottled()
def HasBeenThermallyThrottled(self):
return self._thermal_throttle.HasBeenThrottled()
def GetCpuStats(self, pid):
if not self._can_access_protected_file_contents:
logging.warning('CPU stats cannot be retrieved on non-rooted device.')
return {}
return super(AndroidPlatformBackend, self).GetCpuStats(pid)
def GetCpuTimestamp(self):
if not self._can_access_protected_file_contents:
logging.warning('CPU timestamp cannot be retrieved on non-rooted device.')
return {}
return super(AndroidPlatformBackend, self).GetCpuTimestamp()
def PurgeUnpinnedMemory(self):
"""Purges the unpinned ashmem memory for the whole system.
This can be used to make memory measurements more stable. Requires root.
"""
if not self._can_access_protected_file_contents:
logging.warning('Cannot run purge_ashmem. Requires a rooted device.')
return
if not android_prebuilt_profiler_helper.InstallOnDevice(
self._device, 'purge_ashmem'):
raise Exception('Error installing purge_ashmem.')
(status, output) = self._device.old_interface.GetAndroidToolStatusAndOutput(
android_prebuilt_profiler_helper.GetDevicePath('purge_ashmem'),
log_result=True)
if status != 0:
raise Exception('Error while purging ashmem: ' + '\n'.join(output))
def GetMemoryStats(self, pid):
memory_usage = self._device.old_interface.GetMemoryUsageForPid(pid)
return {'ProportionalSetSize': memory_usage['Pss'] * 1024,
'SharedDirty': memory_usage['Shared_Dirty'] * 1024,
'PrivateDirty': memory_usage['Private_Dirty'] * 1024,
'VMPeak': memory_usage['VmHWM'] * 1024}
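  # Note: the * 1024 factors above convert the per-field values (reported
  # in kB by GetMemoryUsageForPid) into bytes.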
def GetIOStats(self, pid):
return {}
def GetChildPids(self, pid):
child_pids = []
ps = self._GetPsOutput(['pid', 'name'])
for curr_pid, curr_name in ps:
if int(curr_pid) == pid:
name = curr_name
for curr_pid, curr_name in ps:
if curr_name.startswith(name) and curr_name != name:
child_pids.append(int(curr_pid))
break
return child_pids
@decorators.Cache
def GetCommandLine(self, pid):
ps = self._GetPsOutput(['pid', 'name'], pid)
if not ps:
raise exceptions.ProcessGoneException()
return ps[0][1]
def GetOSName(self):
return 'android'
@decorators.Cache
def GetOSVersionName(self):
return self._device.old_interface.GetBuildId()[0]
def CanFlushIndividualFilesFromSystemCache(self):
return False
def FlushEntireSystemCache(self):
cache = cache_control.CacheControl(self._device)
cache.DropRamCaches()
def FlushSystemCacheForDirectory(self, directory, ignoring=None):
raise NotImplementedError()
def FlushDnsCache(self):
self._device.RunShellCommand('ndc resolver flushdefaultif', root=True)
def LaunchApplication(
self, application, parameters=None, elevate_privilege=False):
if application in _HOST_APPLICATIONS:
platform.GetHostPlatform().LaunchApplication(
application, parameters, elevate_privilege=elevate_privilege)
return
if elevate_privilege:
raise NotImplementedError("elevate_privilege isn't supported on android.")
if not parameters:
parameters = ''
self._device.RunShellCommand('am start ' + parameters + ' ' + application)
def IsApplicationRunning(self, application):
if application in _HOST_APPLICATIONS:
return platform.GetHostPlatform().IsApplicationRunning(application)
return len(self._device.old_interface.ExtractPid(application)) > 0
def CanLaunchApplication(self, application):
if application in _HOST_APPLICATIONS:
return platform.GetHostPlatform().CanLaunchApplication(application)
return True
def InstallApplication(self, application):
if application in _HOST_APPLICATIONS:
platform.GetHostPlatform().InstallApplication(application)
return
raise NotImplementedError(
'Please teach Telemetry how to install ' + application)
@decorators.Cache
def CanCaptureVideo(self):
return self.GetOSVersionName() >= 'K'
def StartVideoCapture(self, min_bitrate_mbps):
"""Starts the video capture at specified bitrate."""
min_bitrate_mbps = max(min_bitrate_mbps, 0.1)
if min_bitrate_mbps > 100:
raise ValueError('Android video capture cannot capture at %dmbps. '
'Max capture rate is 100mbps.' % min_bitrate_mbps)
self._video_output = tempfile.mkstemp()[1]
if self.is_video_capture_running:
self._video_recorder.Stop()
self._video_recorder = screenshot.VideoRecorder(
self._device, self._video_output, megabits_per_second=min_bitrate_mbps)
self._video_recorder.Start()
util.WaitFor(self._video_recorder.IsStarted, 5)
@property
def is_video_capture_running(self):
return self._video_recorder is not None
def StopVideoCapture(self):
assert self.is_video_capture_running, 'Must start video capture first'
self._video_recorder.Stop()
self._video_recorder.Pull()
self._video_recorder = None
return video.Video(self, self._video_output)
def CanMonitorPower(self):
return self._powermonitor.CanMonitorPower()
def StartMonitoringPower(self, browser):
self._powermonitor.StartMonitoringPower(browser)
def StopMonitoringPower(self):
return self._powermonitor.StopMonitoringPower()
def _GetFileContents(self, fname):
if not self._can_access_protected_file_contents:
logging.warning('%s cannot be retrieved on non-rooted device.' % fname)
return ''
return '\n'.join(
self._device.old_interface.GetProtectedFileContents(fname))
def _GetPsOutput(self, columns, pid=None):
    assert columns == ['pid', 'name'] or columns == ['pid'], \
        'Only know how to return pid and name. Requested: ' + str(columns)
command = 'ps'
if pid:
command += ' -p %d' % pid
ps = self._device.RunShellCommand(command)[1:]
output = []
for line in ps:
data = line.split()
curr_pid = data[1]
curr_name = data[-1]
if columns == ['pid', 'name']:
output.append([curr_pid, curr_name])
else:
output.append([curr_pid])
return output
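  # Parsing sketch (assumed Android ps layout, not part of the original
  # backend): a row such as 'u0_a12 1234 123 512000 64000 ffffffff S com.app'
  # yields data[1] == '1234' (pid) and data[-1] == 'com.app' (name), so
  # _GetPsOutput(['pid', 'name']) returns [['1234', 'com.app'], ...].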
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for Cloud TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves.urllib.request import Request
from six.moves.urllib.request import urlopen
from tensorflow.contrib.cluster_resolver.python.training.cluster_resolver import ClusterResolver
from tensorflow.contrib.cluster_resolver.python.training.cluster_resolver import format_master_url
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
_GOOGLE_API_CLIENT_INSTALLED = True
try:
from googleapiclient import discovery # pylint: disable=g-import-not-at-top
from oauth2client.client import GoogleCredentials # pylint: disable=g-import-not-at-top
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
_GKE_ENV_VARIABLE = 'KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS'
_ENDPOINTS_SEPARATOR = ','
_DEFAULT_ENV_VARIABLE = 'TPU_NAME'
_DISCOVERY_SERVICE_URL_ENV_VARIABLE = 'TPU_API_DISCOVERY_URL'
class TPUClusterResolver(ClusterResolver):
"""Cluster Resolver for Google Cloud TPUs.
This is an implementation of cluster resolvers for the Google Cloud TPU
  service. As Cloud TPUs are in alpha, you will need to specify an API
  definition file for this to consume, in addition to a list of Cloud TPUs in
  your Google Cloud Platform project.
"""
def _tpuService(self):
"""Creates a new Cloud TPU API object.
This works around an issue where the underlying HTTP connection sometimes
times out when the script has been running for too long. Other methods in
    this object call this method to get a new API object whenever they need
to communicate with the Cloud API.
Returns:
A Google Cloud TPU API object.
"""
if self._service:
return self._service
credentials = self._credentials
if credentials is None or credentials == 'default':
credentials = GoogleCredentials.get_application_default()
if self._discovery_url:
return discovery.build(
'tpu', 'v1alpha1',
credentials=credentials,
discoveryServiceUrl=self._discovery_url)
else:
return discovery.build(
'tpu', 'v1alpha1',
credentials=credentials)
def _requestComputeMetadata(self, path):
req = Request('http://metadata/computeMetadata/v1/%s' % path,
headers={'Metadata-Flavor': 'Google'})
resp = urlopen(req)
return compat.as_bytes(resp.read())
def _shouldResolve(self):
if isinstance(self._should_resolve_override, bool):
return self._should_resolve_override
if (self._tpu == compat.as_bytes('') or
self._tpu == compat.as_bytes('local') or
self._tpu.startswith(compat.as_bytes('/bns')) or
self._tpu.startswith(compat.as_bytes('localhost:')) or
self._tpu.startswith(compat.as_bytes('grpc://'))):
return False
return True
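  # Resolution sketch: with no override set, values such as b'', b'local',
  # b'/bns/...', b'localhost:8470' and b'grpc://10.0.0.2:8470' make
  # _shouldResolve() return False (no Cloud API lookup), while a bare TPU
  # name such as b'my-tpu' returns True.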
@staticmethod
def _inGke():
"""When running in GKE, the environment variable will be set."""
return _GKE_ENV_VARIABLE in os.environ
@staticmethod
def _gkeEndpoints():
return os.environ[_GKE_ENV_VARIABLE]
@staticmethod
def _envVarFallback():
if _DEFAULT_ENV_VARIABLE in os.environ:
return os.environ[_DEFAULT_ENV_VARIABLE]
return None
@staticmethod
def _environmentDiscoveryUrl():
return os.environ.get(_DISCOVERY_SERVICE_URL_ENV_VARIABLE)
def __init__(self,
tpu=None,
zone=None,
project=None,
job_name='worker',
coordinator_name=None,
coordinator_address=None,
credentials='default',
service=None,
discovery_url=None):
"""Creates a new TPUClusterResolver object.
The ClusterResolver will then use the parameters to query the Cloud TPU APIs
for the IP addresses and ports of each Cloud TPU listed.
Args:
tpu: Either a string, or a list of strings corresponding to the TPUs to
use. If the single string is the empty string, the string 'local', or a
string that begins with 'grpc://' or '/bns', then it is assumed to not
correspond with a Cloud TPU and will instead be passed as the session
master and no ClusterSpec propagation will be done.
zone: Zone where the TPUs are located. If omitted or empty, we will assume
that the zone of the TPU is the same as the zone of the GCE VM, which we
will try to discover from the GCE metadata service.
project: Name of the GCP project containing Cloud TPUs. If omitted or
empty, we will try to discover the project name of the GCE VM from the
GCE metadata service.
job_name: Name of the TensorFlow job the TPUs belong to.
coordinator_name: The name to use for the coordinator. Set to None if the
coordinator should not be included in the computed ClusterSpec.
coordinator_address: The address of the coordinator (typically an ip:port
pair). If set to None, a TF server will be started. If coordinator_name
is None, a TF server will not be started even if coordinator_address is
None.
      credentials: GCE Credentials. If None, then we use default credentials
        from the oauth2client library.
service: The GCE API object returned by the googleapiclient.discovery
function. If you specify a custom service object, then the credentials
parameter will be ignored.
discovery_url: A URL template that points to the location of
the discovery service. It should have two parameters {api} and
{apiVersion} that when filled in produce an absolute URL to the
discovery document for that service. The environment variable
'TPU_API_DISCOVERY_URL' will override this.
Raises:
ImportError: If the googleapiclient is not installed.
ValueError: If no TPUs are specified.
"""
if isinstance(tpu, list):
if not tpu:
raise ValueError('At least one TPU must be specified.')
if len(tpu) != 1:
raise NotImplementedError(
'Using multiple TPUs in a single session is not yet implemented')
tpu = tpu[0]
in_gke = self._inGke()
# When using GKE with Cloud TPUs, the env variable will be set.
if tpu is None:
if in_gke:
tpu = self._gkeEndpoints()
else:
tpu = self._envVarFallback()
if tpu is None:
raise ValueError('Please provide a TPU Name to connect to.')
self._tpu = compat.as_bytes(tpu) # self._tpu is always bytes
    # By default the task_type is 'worker' and the task_index is 0 (which is
    # the first worker in the task).
self.task_type = job_name
self.task_index = 0
if tpu.startswith('grpc://'):
# Cloud environment, where we are using GRPC to communicate to TPUs.
self._environment = ''
elif tpu == 'local' or not tpu:
# Google environment, where the TPU is attached to the host.
self._environment = 'google'
elif tpu.startswith('/bns'):
# Google environment, where we reach the TPU through BNS.
self._environment = 'google'
# If TPU is in the Google environment or exists locally, we don't use any
# RPC layer.
if tpu.startswith('/bns') or tpu == 'local' or not tpu:
self.rpc_layer = None
else:
self.rpc_layer = 'grpc'
# Setting this overrides the return value of self._shouldResolve()
self._should_resolve_override = None
    # We strip out the protocol if it is included, and override the
    # shouldResolve function to never resolve. We are adding the protocol back
    # in later in self.master(). Note that this resets self._tpu to a str;
    # master() below handles both bytes and str.
if self.rpc_layer is not None and tpu.startswith(self.rpc_layer + '://'):
tpu = tpu[len(self.rpc_layer + '://'):]
self._tpu = tpu
self._should_resolve_override = False
# Whether we should actually attempt to contact Cloud APIs
should_resolve = self._shouldResolve()
# We error out if we are in a non-Cloud environment which cannot talk to the
# Cloud APIs using the standard class and a special object is not passed in.
self._service = service
if (self._service is None and should_resolve and
not _GOOGLE_API_CLIENT_INSTALLED):
raise ImportError('googleapiclient and oauth2client must be installed '
'before using the TPU cluster resolver. Execute: '
'`pip install --upgrade google-api-python-client` '
'and `pip install --upgrade oauth2client` to '
'install with pip.')
# We save user-passed credentials, unless the user didn't pass in anything.
self._credentials = credentials
if (credentials == 'default' and should_resolve and
_GOOGLE_API_CLIENT_INSTALLED):
self._credentials = None
# Automatically detect project and zone if unspecified.
if not project and should_resolve:
project = compat.as_str(
self._requestComputeMetadata('project/project-id'))
if not zone and should_resolve:
zone_path = compat.as_str(self._requestComputeMetadata('instance/zone'))
zone = zone_path.split('/')[-1]
self._project = project
self._zone = zone
self._discovery_url = self._environmentDiscoveryUrl() or discovery_url
self._coordinator_name = coordinator_name
if (coordinator_name and not coordinator_address and
(should_resolve or in_gke)):
self._start_local_server()
else:
self._coordinator_address = coordinator_address
def master(self, task_type=None, task_index=None, rpc_layer=None):
"""Get the Master string to be used for the session.
    In the normal case, this returns the grpc path (grpc://1.2.3.4:8470) of
    the first instance in the ClusterSpec returned by the cluster_spec
    function.
    If a non-TPU name is used when constructing a TPUClusterResolver, that will
    be returned instead (e.g. if the tpu argument's value when constructing
    this TPUClusterResolver was 'grpc://10.240.1.2:8470',
    'grpc://10.240.1.2:8470' will be returned).
Args:
task_type: (Optional, string) The type of the TensorFlow task of the
master.
task_index: (Optional, integer) The index of the TensorFlow task of the
master.
rpc_layer: (Optional, string) The RPC protocol TensorFlow should use to
communicate with TPUs.
Returns:
string, the connection string to use when creating a session.
Raises:
ValueError: If none of the TPUs specified exists.
"""
if self._shouldResolve():
# We are going to communicate with the Cloud TPU APIs to get a Cluster.
cluster_spec = self.cluster_spec()
if task_type is not None and task_index is not None:
        # task_type and task_index come from the function parameters
master = cluster_spec.task_address(task_type, task_index)
elif self.task_type is not None and self.task_index is not None:
        # task_type and task_index come from the object
master = cluster_spec.task_address(self.task_type, self.task_index)
else:
# by default we take the first item in the cluster with the right name
job_tasks = cluster_spec.job_tasks(self.task_type)
if not job_tasks:
raise ValueError('No TPUs with the specified names exist.')
master = job_tasks[0]
else:
if isinstance(self._tpu, (bytes, bytearray)):
master = self._tpu.split(compat.as_bytes(_ENDPOINTS_SEPARATOR))[0]
else:
master = self._tpu.split(_ENDPOINTS_SEPARATOR)[0]
return format_master_url(master, rpc_layer or self.rpc_layer)
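  # Usage sketch (hypothetical addresses): a resolver constructed with
  # tpu='grpc://10.240.1.2:8470' returns that address from master() unchanged,
  # while a named TPU resolves to the first worker in cluster_spec(),
  # formatted by format_master_url with the active rpc_layer.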
def get_master(self):
return self.master()
def get_job_name(self):
if self._shouldResolve():
return self.task_type
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest TPU information.
We retrieve the information from the GCE APIs every time this method is
called.
Returns:
A ClusterSpec containing host information returned from Cloud TPUs.
Raises:
RuntimeError: If the provided TPU is not healthy.
"""
############################################################################
# There are 5 potential cases this code must handle:
# 1. [Normal case.] We should resolve the TPU name to a set of tasks, and
# a. Create a ClusterSpec that includes the coordinator job
# b. Create a ClusterSpec without the coordinator job.
# 2. [GKE / No API Access.] We should not resolve the TPU name to a set of
# tasks and
# a. Create a ClusterSpec with the coordinator
# b. Create a ClusterSpec without the coordinator
# 3. [Other (legacy non-gRPC).] We should return an empty ClusterSpec.
############################################################################
if self._shouldResolve():
# Case 1.
full_name = 'projects/%s/locations/%s/nodes/%s' % (
self._project, self._zone, compat.as_text(self._tpu))
service = self._tpuService()
request = service.projects().locations().nodes().get(name=full_name)
response = request.execute()
if 'state' in response and response['state'] != 'READY':
raise RuntimeError('TPU "%s" is not yet ready; state: "%s"' %
(compat.as_text(self._tpu), response['state']))
if 'health' in response and response['health'] != 'HEALTHY':
raise RuntimeError('TPU "%s" is unhealthy: "%s"' %
(compat.as_text(self._tpu), response['health']))
if 'networkEndpoints' in response:
worker_list = [
'%s:%s' % (endpoint['ipAddress'], endpoint['port'])
for endpoint in response['networkEndpoints']
]
else:
# Fall back to the deprecated response format
instance_url = '%s:%s' % (response['ipAddress'], response['port'])
worker_list = [instance_url]
cluster_spec = {self.task_type: worker_list}
else:
if self.rpc_layer is None:
# Case 3.
return None
# Case 2.
tpus = []
for tpu in self._tpu.split(_ENDPOINTS_SEPARATOR):
# We are working around the fact that GKE environment variable that is
# supplied to us has the protocol string embedded in it, but we want
# to strip it out for the ClusterSpec.
if (self.rpc_layer is not None and
tpu.startswith(self.rpc_layer + '://')):
tpus.append(tpu[len(self.rpc_layer + '://'):])
else:
tpus.append(tpu)
cluster_spec = {self.task_type: tpus}
if self._coordinator_address:
# {1, 2}.a
cluster_spec[self._coordinator_name] = [self._coordinator_address]
return server_lib.ClusterSpec(cluster_spec)
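  # Shape sketch (hypothetical addresses): a healthy two-endpoint TPU with a
  # coordinator yields roughly
  # ClusterSpec({'worker': ['10.2.0.2:8470', '10.2.0.3:8470'],
  #              'coordinator': ['10.128.0.5:34567']}).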
def num_accelerators_per_worker(self, session_config=None):
"""Returns the number of TPU cores per worker.
This defaults to 8 for all current TPU configurations, and we do not need
to query any remote systems for this.
Args:
session_config: Unused. Not currently necessary to query anything as this
number is 8 for all TPU configurations.
"""
del session_config # Unused. Not necessary to query anything.
return 8
@property
def environment(self):
"""Returns the current environment which TensorFlow is running in."""
return self._environment
def _start_local_server(self):
    # _requestComputeMetadata returns bytes; decode for the string formatting
    # below.
    address = compat.as_text(
        self._requestComputeMetadata('instance/network-interfaces/0/ip'))
self._server = server_lib.Server(
{
'local': ['0.0.0.0:0']
}, protocol='grpc', config=None, start=True)
# self._server.target is of the form: grpc://ipaddress:port
target = compat.as_bytes(self._server.target)
splits = target.split(compat.as_bytes(':'))
assert len(splits) == 3, self._server.target
assert splits[0] == compat.as_bytes('grpc'), self._server.target
self._coordinator_port = compat.as_text(splits[2])
self._coordinator_address = '%s:%s' % (
address, compat.as_text(self._coordinator_port))
def __deepcopy__(self, memo):
# TODO(b/73668574): Remove this once RunConfig avoids performing deepcopy.
return self
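# Minimal end-to-end sketch (assumes a reachable Cloud TPU named 'my-tpu' and
# installed googleapiclient/oauth2client; not part of the original module):
#
#   resolver = TPUClusterResolver(
#       tpu='my-tpu', zone='us-central1-b', project='my-project')
#   spec = resolver.cluster_spec()   # queries the Cloud TPU API
#   master = resolver.master()       # e.g. 'grpc://10.2.0.2:8470'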
|
|
import base64
import calendar
import datetime
import re
import unicodedata
import warnings
from binascii import Error as BinasciiError
from email.utils import formatdate
from urllib.parse import (
ParseResult, SplitResult, _coerce_args, _splitnetloc, _splitparams, quote,
quote_plus, scheme_chars, unquote, unquote_plus,
urlencode as original_urlencode, uses_params,
)
from django.core.exceptions import TooManyFieldsSent
from django.utils.datastructures import MultiValueDict
from django.utils.deprecation import RemovedInDjango21Warning
from django.utils.encoding import force_bytes
from django.utils.functional import keep_lazy_text
# based on RFC 7232, Appendix C
ETAG_MATCH = re.compile(r'''
\A( # start of string and capture group
(?:W/)? # optional weak indicator
" # opening quote
[^"]* # any sequence of non-quote characters
" # end quote
)\Z # end of string and capture group
''', re.X)
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = ":/?#[]@"
RFC3986_SUBDELIMS = "!$&'()*+,;="
FIELDS_MATCH = re.compile('[&;]')
@keep_lazy_text
def urlquote(url, safe='/'):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote() function.
(was used for unicode handling on Python 2)
"""
return quote(url, safe)
@keep_lazy_text
def urlquote_plus(url, safe=''):
"""
A legacy compatibility wrapper to Python's urllib.parse.quote_plus()
function. (was used for unicode handling on Python 2)
"""
return quote_plus(url, safe)
@keep_lazy_text
def urlunquote(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote() function.
(was used for unicode handling on Python 2)
"""
return unquote(quoted_url)
@keep_lazy_text
def urlunquote_plus(quoted_url):
"""
A legacy compatibility wrapper to Python's urllib.parse.unquote_plus()
function. (was used for unicode handling on Python 2)
"""
return unquote_plus(quoted_url)
def urlencode(query, doseq=False):
"""
A version of Python's urllib.parse.urlencode() function that can operate on
MultiValueDict and non-string values.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return original_urlencode(
[(k, [str(i) for i in v] if isinstance(v, (list, tuple)) else str(v))
for k, v in query],
doseq
)
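# Example (sketch): urlencode({'q': 'a b', 'ids': [1, 2]}, doseq=True) returns
# 'q=a+b&ids=1&ids=2'; non-string values are coerced with str() first.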
def cookie_date(epoch_seconds=None):
"""
Format the time to ensure compatibility with Netscape's cookie standard.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Format the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
`epoch_seconds` is a floating point number expressed in seconds since the
epoch, in UTC - such as that outputted by time.time(). If set to None, it
defaults to the current time.
Output a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
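# Example: http_date(784111777) == 'Sun, 06 Nov 1994 08:49:37 GMT', while
# cookie_date(784111777) == 'Sun, 06-Nov-1994 08:49:37 GMT'.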
def parse_http_date(date):
"""
Parse a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Return an integer expressed in seconds since the epoch, in UTC.
"""
    # email.utils.parsedate() does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception as exc:
raise ValueError("%r is not a valid date" % date) from exc
def parse_http_date_safe(date):
"""
Same as parse_http_date, but return None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
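# Example: parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT') == 784111777; the
# RFC850 form 'Sunday, 06-Nov-94 08:49:37 GMT' parses to the same timestamp,
# and parse_http_date_safe() returns None instead of raising on bad input.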
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Convert a base 36 string to an int. Raise ValueError if the input won't fit
into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
return int(s, 36)
def int_to_base36(i):
"""Convert an integer to a base36 string."""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
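# Round-trip example: int_to_base36(125) == '3h' and base36_to_int('3h') ==
# 125; inputs longer than 13 base36 digits are rejected before int() is
# called.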
def urlsafe_base64_encode(s):
"""
Encode a bytestring in base64 for use in URLs. Strip any trailing equal
signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decode a base64 encoded string. Add back any trailing equal signs that
might have been stripped.
"""
s = force_bytes(s)
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
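# Round-trip example: urlsafe_base64_encode(b'ab') == b'YWI' (the trailing
# '=' is stripped) and urlsafe_base64_decode('YWI') == b'ab' (padding
# restored before decoding).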
def parse_etags(etag_str):
"""
Parse a string of ETags given in an If-None-Match or If-Match header as
defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags
should be matched.
"""
if etag_str.strip() == '*':
return ['*']
else:
# Parse each ETag individually, and return any that are valid.
etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(','))
return [match.group(1) for match in etag_matches if match]
def quote_etag(etag_str):
"""
If the provided string is already a quoted ETag, return it. Otherwise, wrap
the string in quotes, making it a strong ETag.
"""
if ETAG_MATCH.match(etag_str):
return etag_str
else:
return '"%s"' % etag_str
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
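# Example: is_same_domain('foo.example.com', '.example.com') and
# is_same_domain('example.com', '.example.com') are both True; without a
# leading dot the pattern must match exactly, so
# is_same_domain('foo.example.com', 'example.com') is False.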
def is_safe_url(url, host=None, allowed_hosts=None, require_https=False):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always return ``False`` on an empty url.
If ``require_https`` is ``True``, only 'https' will be considered a valid
scheme, as opposed to 'http' and 'https' with the default, ``False``.
"""
if url is not None:
url = url.strip()
if not url:
return False
if allowed_hosts is None:
allowed_hosts = set()
if host:
warnings.warn(
"The host argument is deprecated, use allowed_hosts instead.",
RemovedInDjango21Warning,
stacklevel=2,
)
# Avoid mutating the passed in allowed_hosts.
allowed_hosts = allowed_hosts | {host}
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return (_is_safe_url(url, allowed_hosts, require_https=require_https) and
_is_safe_url(url.replace('\\', '/'), allowed_hosts, require_https=require_https))
# Copied from urllib.parse.urlparse() but uses fixed urlsplit() function.
def _urlparse(url, scheme='', allow_fragments=True):
"""Parse a URL into 6 components:
<scheme>://<netloc>/<path>;<params>?<query>#<fragment>
Return a 6-tuple: (scheme, netloc, path, params, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
splitresult = _urlsplit(url, scheme, allow_fragments)
scheme, netloc, url, query, fragment = splitresult
if scheme in uses_params and ';' in url:
url, params = _splitparams(url)
else:
params = ''
result = ParseResult(scheme, netloc, url, params, query, fragment)
return _coerce_result(result)
# Copied from urllib.parse.urlsplit() with
# https://github.com/python/cpython/pull/661 applied.
def _urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
url, scheme, _coerce_result = _coerce_args(url, scheme)
allow_fragments = bool(allow_fragments)
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
for c in url[:i]:
if c not in scheme_chars:
break
else:
scheme, url = url[:i].lower(), url[i + 1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
return _coerce_result(v)
def _is_safe_url(url, allowed_hosts, require_https=False):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
try:
url_info = _urlparse(url)
except ValueError: # e.g. invalid IPv6 addresses
return False
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but a path component.
    # However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
scheme = url_info.scheme
# Consider URLs without a scheme (e.g. //example.com/p) to be http.
if not url_info.scheme and url_info.netloc:
scheme = 'http'
valid_schemes = ['https'] if require_https else ['http', 'https']
return ((not url_info.netloc or url_info.netloc in allowed_hosts) and
(not scheme or scheme in valid_schemes))
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
errors='replace', fields_limit=None):
"""
Return a list of key/value tuples parsed from query string.
Copied from urlparse with an additional "fields_limit" argument.
Copyright (C) 2013 Python Software Foundation (see LICENSE.python).
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
fields_limit: maximum number of fields parsed or an exception
is raised. None means no limit and is the default.
"""
if fields_limit:
pairs = FIELDS_MATCH.split(qs, fields_limit)
if len(pairs) > fields_limit:
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
else:
pairs = FIELDS_MATCH.split(qs)
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
r.append((name, value))
return r
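# Example: limited_parse_qsl('a=1&b=&a=2', keep_blank_values=True) returns
# [('a', '1'), ('b', ''), ('a', '2')]; the same query string with
# fields_limit=2 raises TooManyFieldsSent.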
|
|
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from itertools import chain
from operator import itemgetter
from types import NoneType
from uuid import uuid4
from enum import Enum
from solar.computable_inputs import ComputablePassedTypes
from solar.computable_inputs.processor import get_processor
from solar.config import C
from solar.dblayer.model import check_state_for
from solar.dblayer.model import CompositeIndexField
from solar.dblayer.model import DBLayerException
from solar.dblayer.model import Field
from solar.dblayer.model import IndexedField
from solar.dblayer.model import IndexField
from solar.dblayer.model import IndexFieldWrp
from solar.dblayer.model import Model
from solar.dblayer.model import NONE
from solar.dblayer.model import SingleIndexCache
from solar.dblayer.model import StrInt
from solar.utils import detect_input_schema_by_value
from solar.utils import parse_database_conn
from solar.utils import solar_map
InputTypes = Enum('InputTypes', 'simple list hash list_hash computable')
class DBLayerSolarException(DBLayerException):
pass
class UnknownInput(DBLayerSolarException, KeyError):
def __init__(self, name):
self.name = name
def __str__(self):
return "Unknown input %s" % self.name
class InputAlreadyExists(DBLayerSolarException):
pass
class InputsFieldWrp(IndexFieldWrp):
_simple_types = (NoneType, int, float, basestring, str, unicode)
def __init__(self, *args, **kwargs):
super(InputsFieldWrp, self).__init__(*args, **kwargs)
# TODO: add cache for lookup
self.inputs_index_cache = SingleIndexCache()
self._cache = {}
def _input_type(self, resource, name):
# XXX: it could be worth to precalculate it
if ':' in name:
name = name.split(":", 1)[0]
mi = resource.meta_inputs[name]
schema = mi.get('schema', None)
is_computable = mi.get('computable', None) is not None
if is_computable:
return InputTypes.computable
if isinstance(schema, self._simple_types):
return InputTypes.simple
if isinstance(schema, list):
if len(schema) > 0 and isinstance(schema[0], dict):
return InputTypes.list_hash
return InputTypes.list
if isinstance(schema, dict):
return InputTypes.hash
raise Exception("Unknown type")
def _edges_fmt(self, vals):
for val in vals:
data = val.split('|')
dlen = len(data)
my_resource = data[0]
my_input = data[1]
other_resource = data[2]
other_input = data[3]
if dlen == 5:
meta = None
elif dlen == 7:
meta = {'destination_key': data[5], 'tag': data[4]}
else:
raise Exception("Unsupported case")
yield (other_resource, other_input), (my_resource, my_input), meta
def _edges(self):
inst = self._instance
start = inst.key
my_ind_name = '{}_recv_bin'.format(self.fname)
res = inst._get_index(my_ind_name,
startkey=start + '|',
endkey=start + '|~',
return_terms=True,
max_results=99999).results
vals = map(itemgetter(0), res)
return self._edges_fmt(vals)
def _single_edge(self, name):
inst = self._instance
self._has_own_input(name)
start = '{}|{}'.format(inst.key, name)
my_ind_name = '{}_recv_bin'.format(self.fname)
res = inst._get_index(my_ind_name,
startkey=start + '|',
endkey=start + '|~',
return_terms=True,
max_results=99999).results
vals = map(itemgetter(0), res)
return self._edges_fmt(vals)
def __contains__(self, name):
try:
self._has_own_input(name)
except Exception:
return False
else:
return True
def __iter__(self):
for name in self._instance._data_container[self.fname]:
yield name
def keys(self):
return list(self.__iter__())
def as_dict(self):
items = solar_map(lambda x: (x, self._get_field_val(x)),
[x for x in self],
concurrency=3)
return dict(items)
def _connect_my_simple(self, my_resource, my_inp_name, other_resource,
other_inp_name, my_type, other_type):
types_mapping = '|{}_{}'.format(my_type.value, other_type.value)
my_ind_name = '{}_recv_bin'.format(self.fname)
my_ind_val = '{}|{}|{}|{}'.format(my_resource.key, my_inp_name,
other_resource.key, other_inp_name)
my_ind_val += types_mapping
real_my_type = self._input_type(my_resource, my_inp_name)
if real_my_type == InputTypes.simple:
for ind_name, ind_value in my_resource._riak_object.indexes:
if ind_name == my_ind_name:
mr, mn, _ = ind_value.split('|', 2)
if mr == my_resource.key and mn == my_inp_name:
my_resource._remove_index(ind_name, ind_value)
break
my_resource._add_index(my_ind_name, my_ind_val)
return my_inp_name
def _connect_other_simple(self, my_resource, my_inp_name, other_resource,
other_inp_name, my_type, other_type):
other_ind_name = '{}_emit_bin'.format(self.fname)
real_my_type = self._input_type(my_resource, my_inp_name)
if real_my_type == InputTypes.simple or ':' not in my_inp_name:
other_ind_val = '{}|{}|{}|{}'.format(other_resource.key,
other_inp_name,
my_resource.key, my_inp_name)
for ind_name, ind_value in my_resource._riak_object.indexes:
if ind_name == other_ind_name:
try:
mr, mn = ind_value.rsplit('|')[2:]
except ValueError:
if len(ind_value.split('|')) == 6:
continue
else:
raise
if mr == my_resource.key and mn == my_inp_name:
my_resource._remove_index(ind_name, ind_value)
break
elif real_my_type in (InputTypes.list_hash, InputTypes.hash,
InputTypes.list):
my_key, my_val = my_inp_name.split(':', 1)
if '|' in my_val:
my_val, my_tag = my_val.split('|', 1)
else:
if real_my_type == InputTypes.hash:
# when single dict then set shared hash for all resources
# TODO: (jnowak) maybe we should remove tags completely
# in this and only this case
my_tag = '_single'
else:
my_tag = other_resource.name
my_inp_name = my_key
other_ind_val = '{}|{}|{}|{}|{}|{}'.format(
other_resource.key, other_inp_name, my_resource.key,
my_inp_name, my_tag, my_val)
for ind_name, ind_value in my_resource._riak_object.indexes:
if ind_name == other_ind_name:
try:
mr, mn, mt, mv = ind_value.rsplit('|')[2:]
except ValueError:
if len(ind_value.split('|')) == 4:
continue
else:
raise
if mr == my_resource.key and mn == my_inp_name \
and mt == my_tag and mv == my_val:
my_resource._remove_index(ind_name, ind_value)
break
else:
raise Exception("Unsupported connection type")
my_resource._add_index(other_ind_name, other_ind_val)
return other_inp_name
def _connect_other_hash(self, my_resource, my_inp_name, other_resource,
other_inp_name, my_type, other_type):
return self._connect_other_simple(
my_resource, my_inp_name, other_resource, other_inp_name, my_type,
other_type)
def _connect_other_list(self, my_resource, my_inp_name, other_resource,
other_inp_name, my_type, other_type):
return self._connect_other_simple(
my_resource, my_inp_name, other_resource, other_inp_name, my_type,
other_type)
def _connect_other_list_hash(self, my_resource, my_inp_name,
other_resource, other_inp_name, my_type,
other_type):
return self._connect_other_simple(
my_resource, my_inp_name, other_resource, other_inp_name, my_type,
other_type)
def _connect_other_computable(self, my_resource, my_inp_name,
other_resource, other_inp_name, my_type,
other_type):
return self._connect_other_simple(
my_resource, my_inp_name, other_resource, other_inp_name, my_type,
other_type)
def _connect_my_list(self, my_resource, my_inp_name, other_resource,
other_inp_name, my_type, other_type):
ret = self._connect_my_simple(my_resource, my_inp_name, other_resource,
other_inp_name, my_type, other_type)
return ret
def _connect_my_hash(self, my_resource, my_inp_name, other_resource,
other_inp_name, my_type, other_type):
my_key, my_val = my_inp_name.split(':', 1)
if '|' in my_val:
my_val, my_tag = my_val.split('|', 1)
else:
# when single dict then set shared hash for all resources
# TODO: (jnowak) maybe we should remove tags completely there
if my_type == InputTypes.hash:
my_tag = '_single'
else:
my_tag = other_resource.name
types_mapping = '|{}_{}'.format(my_type.value, other_type.value)
my_ind_name = '{}_recv_bin'.format(self.fname)
my_ind_val = '{}|{}|{}|{}|{}|{}'.format(my_resource.key, my_key,
other_resource.key,
other_inp_name, my_tag, my_val)
my_ind_val += types_mapping
my_resource._add_index(my_ind_name, my_ind_val)
return my_key
def _connect_my_list_hash(self, my_resource, my_inp_name, other_resource,
other_inp_name, my_type, other_type):
return self._connect_my_hash(my_resource, my_inp_name, other_resource,
other_inp_name, my_type, other_type)
def _connect_my_computable(self, my_resource, my_inp_name, other_resource,
other_inp_name, my_type, other_type):
return self._connect_my_simple(my_resource, my_inp_name,
other_resource, other_inp_name,
my_type, other_type)
def connect(self, my_inp_name, other_resource, other_inp_name):
my_resource = self._instance
other_type = self._input_type(other_resource, other_inp_name)
my_type = self._input_type(my_resource, my_inp_name)
if my_type == other_type and ':' not in my_inp_name:
# if the type is the same map 1:1, and flat
my_type = InputTypes.simple
other_type = InputTypes.simple
elif my_type == InputTypes.list_hash and other_type == InputTypes.hash:
# whole dict to list with dicts
# TODO: solve this problem
if ':' in my_inp_name:
my_type = InputTypes.hash
else:
my_type = InputTypes.list
# set my side
my_meth = getattr(self, '_connect_my_{}'.format(my_type.name))
my_affected = my_meth(my_resource, my_inp_name, other_resource,
other_inp_name, my_type, other_type)
# set other side
other_meth = getattr(self, '_connect_other_{}'.format(other_type.name))
other_meth(my_resource, my_inp_name, other_resource, other_inp_name,
my_type, other_type)
try:
del self._cache[my_affected]
except KeyError:
pass
with self.inputs_index_cache as c:
c.wipe()
return True
def disconnect(self, name):
# ind_name = '{}_recv_bin'.format(self.fname)
if ':' in name:
# disconnect from hash with tag
normalized_name, tag_and_target = name.split(':', 1)
my_val, my_tag = tag_and_target.split('|', 1)
emit_name = None
# emit_name = '{}|{}'.format(my_tag, my_val)
full_name = '{}|{}|{}'.format(normalized_name, my_tag, my_val)
name = normalized_name
elif '|' in name:
# disconnect everything from given input|resource
my_input, other_resource, other_input = name.split('|', 2)
full_name = my_input
emit_name = '{}|{}'.format(other_resource, other_input)
normalized_name = "{}|{}".format(my_input, other_resource)
name = name.split('|', 1)[0]
my_val, my_tag = None, None
else:
# disconnect everything from given input
full_name = name
emit_name = None
normalized_name = name
my_val, my_tag = None, None
indexes = self._instance._riak_object.indexes
to_dels = []
recvs = filter(lambda x: x[0] == '{}_recv_bin'.format(self.fname),
indexes)
for recv in recvs:
_, ind_value = recv
if ind_value.startswith('{}|{}|'.format(self._instance.key,
normalized_name)):
spl = ind_value.split('|')
if len(spl) == 7 and my_tag and my_val:
if spl[-3] == my_tag and spl[-2] == my_val:
to_dels.append(recv)
else:
to_dels.append(recv)
emits = filter(lambda x: x[0] == '{}_emit_bin'.format(self.fname),
indexes)
for emit in emits:
_, ind_value = emit
if ind_value.endswith('|{}|{}'.format(self._instance.key,
full_name)):
if emit_name:
if ind_value.startswith(emit_name):
to_dels.append(emit)
else:
to_dels.append(emit)
for to_del in to_dels:
self._instance._remove_index(*to_del)
try:
del self._cache[name]
except KeyError:
pass
with self.inputs_index_cache as c:
c.wipe()
def _has_own_input(self, name):
try:
return self._cache[name]
except KeyError:
pass
my_name = self._instance.key
try:
self._get_raw_field_val(name)
except KeyError:
raise DBLayerSolarException('No input {} for {}'.format(name,
my_name))
else:
return True
def _get_field_val(self, name, other=None):
# maybe it should be tco
if other:
full_name = '{}_other_{}'.format(name, other)
else:
full_name = name
try:
return self._cache[full_name]
except KeyError:
pass
with self.inputs_index_cache as c:
check_state_for('index', self._instance)
fname = self.fname
my_name = self._instance.key
self._has_own_input(name)
ind_name = '{}_recv_bin'.format(fname)
kwargs = dict(startkey='{}|'.format(my_name),
endkey='{}|~'.format(my_name),
return_terms=True)
my_type = self._input_type(self._instance, name)
if my_type == InputTypes.simple:
max_results = 1
else:
max_results = 99999
c.get_index(self._instance._get_index, ind_name, **kwargs)
recvs = tuple(c.filter(startkey="{}|{}|".format(my_name, name),
endkey="{}|{}|~".format(my_name, name),
max_results=max_results))
if not recvs:
_res = self._get_raw_field_val(name)
self._cache[name] = _res
if other:
other_res = self._get_field_val(other)
self._cache[full_name] = other_res
return other_res
return _res
my_meth = getattr(self, '_map_field_val_{}'.format(my_type.name))
return my_meth(recvs, name, my_name, other=other)
def _map_field_val_simple(self, recvs, input_name, name, other=None):
recvs = recvs[0]
index_val, obj_key = recvs
_, inp, emitter_key, emitter_inp, _mapping_type = index_val.split('|',
4)
res = Resource.get(emitter_key).inputs._get_field_val(emitter_inp,
other)
self._cache[name] = res
return res
def _map_field_val_list(self, recvs, input_name, name, other=None):
if len(recvs) == 1:
recv = recvs[0]
index_val, obj_key = recv
_, inp, emitter_key, emitter_inp, mapping_type = index_val.split(
'|', 4)
res = Resource.get(emitter_key).inputs._get_field_val(emitter_inp,
other)
if mapping_type != "{}_{}".format(InputTypes.simple.value,
InputTypes.simple.value):
res = [res]
else:
res = []
for recv in recvs:
index_val, obj_key = recv
_, _, emitter_key, emitter_inp, mapping_type = index_val.split(
'|', 4)
cres = Resource.get(emitter_key).inputs._get_field_val(
emitter_inp, other)
res.append(cres)
self._cache[name] = res
return res
def _map_field_val_hash_single(self, recvs, input_name, other):
items = []
tags = set()
for recv in recvs:
index_val, obj_key = recv
(_, _, emitter_key, emitter_inp,
my_tag, my_val, mapping_type) = index_val.split('|', 6)
cres = Resource.get(emitter_key).inputs._get_field_val(emitter_inp,
other)
items.append((my_tag, my_val, cres))
tags.add(my_tag)
return items, tags
def _map_field_val_hash(self, recvs, input_name, name, other=None):
if len(recvs) == 1:
# just one connected
recv = recvs[0]
index_val, obj_key = recv
splitted = index_val.split('|')
splen = len(splitted)
if splen == 5:
# 1:1
_, inp, emitter_key, emitter_inp, mapping_type = splitted
if mapping_type != "{}_{}".format(InputTypes.simple.value,
InputTypes.simple.value):
raise NotImplementedError()
res = Resource.get(emitter_key).inputs._get_field_val(
emitter_inp, other)
elif splen == 7:
# partial
res = {}
my_resource = self._instance
my_resource_value = my_resource.inputs._get_raw_field_val(
input_name)
if my_resource_value:
for my_val, cres in my_resource_value.iteritems():
res[my_val] = cres
(_, _, emitter_key, emitter_inp,
my_tag, my_val, mapping_type) = splitted
cres = Resource.get(emitter_key).inputs._get_field_val(
emitter_inp, other)
res[my_val] = cres
else:
raise Exception("Not supported splen %s", splen)
else:
items, tags = self._map_field_val_hash_single(recvs, input_name,
other)
my_resource = self._instance
my_resource_value = my_resource.inputs._get_raw_field_val(
input_name)
if my_resource_value:
res = my_resource_value
else:
res = {}
if len(tags) != 1:
                # TODO: also enforce this check at connect time
raise Exception("Detected dict with different tags")
for _, my_val, value in items:
res[my_val] = value
self._cache[name] = res
return res
def _map_field_val_list_hash(self, recvs, input_name, name, other=None):
items = []
for recv in recvs:
index_val, obj_key = recv
splitted_val = index_val.split('|', 6)
if len(splitted_val) == 5:
# it was list hash but with whole dict mapping
_, _, emitter_key, emitter_inp, mapping_type = splitted_val
cres = Resource.get(emitter_key).inputs._get_field_val(
emitter_inp, other)
items.append((emitter_key, None, cres))
else:
(_, _, emitter_key, emitter_inp,
my_tag, my_val, mapping_type) = splitted_val
cres = Resource.get(emitter_key).inputs._get_field_val(
emitter_inp, other)
items.append((my_tag, my_val, cres))
tmp_res = {}
for first, my_val, value in items:
if my_val is None:
tmp_res[first] = value
else:
try:
tmp_res[first][my_val] = value
except KeyError:
tmp_res[first] = {my_val: value}
res = tmp_res.values()
self._cache[name] = res
return res
def _map_field_val_computable(self, recvs, input_name, name, other=None):
to_calc = []
computable = self._instance.meta_inputs[input_name]['computable']
computable_type = computable.get('type',
ComputablePassedTypes.values.name)
for recv in recvs:
index_val, obj_key = recv
splitted = index_val.split('|', 4)
_, inp, emitter_key, emitter_inp, _ = splitted
res = Resource.get(emitter_key)
inp_value = res.inputs._get_field_val(emitter_inp,
other)
if computable_type == ComputablePassedTypes.values.name:
to_calc.append(inp_value)
else:
to_calc.append({'value': inp_value,
'resource': res.name,
'other_input': emitter_inp})
return get_processor(self._instance, input_name,
computable_type, to_calc, other)
def _get_raw_field_val(self, name):
return self._instance._data_container[self.fname][name]
def __getitem__(self, name):
try:
return self._get_field_val(name)
except KeyError:
raise UnknownInput(name)
def __delitem__(self, name):
# TODO: check if something is connected to it
self._has_own_input(name)
self._instance._field_changed(self)
try:
del self._cache[name]
except KeyError:
pass
inst = self._instance
inst._riak_object.remove_index('%s_bin' % self.fname, '{}|{}'.format(
self._instance.key, name))
del inst._data_container[self.fname][name]
def __setitem__(self, name, value):
try:
mi = self._instance.meta_inputs
except KeyError:
pass
else:
if name not in mi:
raise UnknownInput(name)
self._instance._field_changed(self)
return self._set_field_value(name, value)
def items(self):
return self._instance._data_container[self.fname].items()
def get(self, name, default=None):
if self._has_own_input(name):
return self[name]
else:
return default
def _set_field_value(self, name, value):
fname = self.fname
my_name = self._instance.key
ind_name = '{}_recv_bin'.format(fname)
recvs = self._instance._get_index(
ind_name,
startkey='{}|{}|'.format(my_name, name),
endkey='{}|{}|~'.format(my_name, name),
max_results=1,
return_terms=True).results
if recvs:
recvs = recvs[0]
res, inp, emitter_name, emitter_inp = recvs[0].split('|')[:4]
raise Exception("%s:%s is connected with resource %s:%s" %
(res, inp, emitter_name, emitter_inp))
# inst = self._instance
robj = self._instance._riak_object
if name not in robj.data[self.fname]:
self._instance._add_index('%s_bin' % self.fname, '{}|{}'.format(
my_name, name))
robj.data[self.fname][name] = value
with self.inputs_index_cache as c:
c.wipe()
self._cache[name] = value
return True
def to_dict(self):
rst = {}
for key in self._instance._data_container[self.fname].keys():
rst[key] = self[key]
return rst
def add_new(self, name, value=NONE, schema=None):
if value is not NONE and schema is None:
schema = detect_input_schema_by_value(value)
if name in self.keys():
raise InputAlreadyExists()
self._instance.meta_inputs[name] = {'schema': schema}
self[name] = value if value is not NONE else None
return True
def remove_existing(self, name):
del self[name]
del self._instance.meta_inputs[name]
return True
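# Connection bookkeeping sketch (hypothetical resources r1/r2): after
# r2.inputs.connect('ip', r1, 'ip'), a '<fname>_recv_bin' index term roughly
# of the form 'r2|ip|r1|ip|1_1' records that r2.inputs['ip'] must be resolved
# from r1, and a matching '<fname>_emit_bin' term is kept for reverse
# lookups; _get_field_val() consults these indexes before falling back to the
# locally stored value.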
class InputsField(IndexField):
_wrp_class = InputsFieldWrp
def __set__(self, instance, value):
wrp = getattr(instance, self.fname)
instance._data_container[self.fname] = self.default
for inp_name, inp_value in value.iteritems():
wrp[inp_name] = inp_value
class TagsFieldWrp(IndexFieldWrp):
def __getitem__(self, name):
raise TypeError('You cannot get tags like this')
def __setitem__(self, name, value):
raise TypeError('You cannot set tags like this')
    def __delitem__(self, name):
        raise TypeError('You cannot delete tags like this')
def __iter__(self):
return iter(self._instance._data_container[self.fname])
def as_list(self):
try:
return self._instance._data_container[self.fname][:]
except KeyError:
return []
def set(self, name, value=None):
if '=' in name and value is None:
name, value = name.split('=', 1)
if value is None:
value = ''
full_value = '{}={}'.format(name, value)
inst = self._instance
        try:
            fld = inst._data_container[self.fname]
        except KeyError:
            # _data_container is a dict, so a missing field raises KeyError.
            fld = inst._data_container[self.fname] = []
        if full_value in fld:
            return
        # indexes = inst._riak_object.indexes.copy()  # copy it
        inst._add_index('{}_bin'.format(self.fname), '{}~{}'.format(name,
                                                                    value))
        fld.append(full_value)
        return True
    def has_tag(self, name, subval=None):
        # Tags are stored as a flat list of 'name=value' strings.
        fld = self._instance._data_container[self.fname]
        values = [t.split('=', 1)[1] for t in fld
                  if t.split('=', 1)[0] == name]
        if not values:
            return False
        if subval is not None:
            return subval in values
        return True
def remove(self, name, value=None):
if '=' in name and value is None:
name, value = name.split('=', 1)
if value is None:
value = ''
inst = self._instance
fld = inst._data_container[self.fname]
full_value = '{}={}'.format(name, value)
try:
fld.remove(full_value)
except ValueError:
pass
else:
inst._remove_index('{}_bin'.format(self.fname), '{}~{}'.format(
name, value))
return True
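# Tag storage sketch: TagsFieldWrp.set('env=prod') appends 'env=prod' to the
# data container and adds an 'env~prod' term to the '<fname>_bin' index;
# remove('env=prod') undoes both, and TagsField.filter('env=pro*') matches on
# the 'name~value' index prefix.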
class TagsField(IndexField):
_wrp_class = TagsFieldWrp
def __set__(self, instance, value):
wrp = getattr(instance, self.fname)
instance._data_container[self.fname] = self.default
for val in value:
wrp.set(val)
def filter(self, name, subval=None):
check_state_for('index', self._declared_in)
if '=' in name and subval is None:
name, subval = name.split('=', 1)
if subval is None:
subval = ''
if not isinstance(subval, basestring):
subval = str(subval)
# maxresults because of riak bug with small number of results
# https://github.com/basho/riak/issues/608
declared = self._declared_in
if not subval.endswith('*'):
res = declared._get_index('{}_bin'.format(self.fname),
startkey='{}~{}'.format(name, subval),
endkey='{}~{} '.format(name, subval),
max_results=100000,
return_terms=True).results
else:
subval = subval.replace('*', '')
res = declared._get_index('{}_bin'.format(self.fname),
startkey='{}~{}'.format(name, subval),
endkey='{}~{}~'.format(name, subval),
max_results=100000,
return_terms=True).results
return set(map(itemgetter(1), res))
# class MetaInput(NestedModel):
# name = Field(str)
# schema = Field(str)
# value = None # TODO: implement it
# is_list = Field(bool)
# is_hash = Field(bool)
class Resource(Model):
name = Field(str)
version = Field(str)
base_name = Field(str)
base_path = Field(str)
actions_path = Field(str)
actions = Field(dict)
handler = Field(str)
meta_inputs = Field(dict, default=dict)
state = Field(str) # on_set/on_get would be useful
events = Field(list, default=list)
managers = Field(list, default=list)
inputs = InputsField(default=dict)
tags = TagsField(default=list)
updated = IndexedField(StrInt)
def _connect_single(self, other_inputs, other_name, my_name):
if isinstance(other_name, (list, tuple)):
        # XXX: could be parallelized
for other in other_name:
other_inputs.connect(other, self, my_name)
else:
other_inputs.connect(other_name, self, my_name)
def connect(self, other, mapping):
other_inputs = other.inputs
if mapping is None:
return
if self == other:
for k, v in mapping.items():
if k == v:
raise Exception('Trying to connect value-.* to itself')
solar_map(
lambda (my_name, other_name): self._connect_single(other_inputs,
other_name,
my_name),
mapping.iteritems(),
concurrency=2)
def disconnect(self, other, inputs):
def _to_disconnect((emitter, receiver, meta)):
if not receiver[0] == other_key:
return False
# name there?
if not emitter[0] == self.key:
return False
key = emitter[1]
if key not in converted:
return False
convs = converted[key]
for conv in convs:
if conv:
if meta['tag'] == conv['tag'] \
and meta['destination_key'] == conv['destination_key']:
return True
else:
return True
return False
def _convert_input(input):
spl = input.split('|')
spl_len = len(spl)
if spl_len == 1:
# normal input
return input, None
elif spl_len == 3:
return spl[0], {'tag': spl[1], 'destination_key': spl[2]}
else:
raise Exception("Cannot convert input %r" % input)
def _format_for_disconnect((emitter, receiver, meta)):
input = receiver[1]
if not meta:
return "{}|{}|{}".format(receiver[1], emitter[0], emitter[1])
dest_key = meta['destination_key']
tag = meta.get('tag', other.name)
return '{}:{}|{}'.format(input, dest_key, tag)
converted = defaultdict(list)
for k, v in map(_convert_input, inputs):
converted[k].append(v)
other_key = other.key
edges = other.inputs._edges()
edges = filter(_to_disconnect, edges)
inputs = map(_format_for_disconnect, edges)
solar_map(other.inputs.disconnect, inputs, concurrency=2)
def save(self, *args, **kwargs):
if self.changed():
self.updated = StrInt()
return super(Resource, self).save(*args, **kwargs)
@classmethod
def childs(cls, parents):
all_indexes = cls.bucket.get_index('inputs_recv_bin',
startkey='',
endkey='~',
return_terms=True,
max_results=999999)
tmp = defaultdict(set)
to_visit = parents[:]
visited = set()
for item in all_indexes.results:
data = item[0].split('|')
em, rcv = data[0], data[2]
tmp[rcv].add(em)
while to_visit:
n = to_visit.pop()
for child in tmp[n]:
if child not in visited:
to_visit.append(child)
visited.add(n)
return visited
def delete(self):
inputs_index = self.bucket.get_index('inputs_emit_bin',
startkey=self.key,
endkey=self.key + '~',
return_terms=True,
max_results=999999)
to_disconnect_all = defaultdict(list)
for emit_bin in inputs_index.results:
index_vals = emit_bin[0].split('|')
index_vals_len = len(index_vals)
if index_vals_len == 6: # hash
(_, my_input, other_res,
other_input, my_tag, my_val) = index_vals
to_disconnect_all[other_res].append("{}|{}|{}".format(
my_input, my_tag, my_val))
elif index_vals_len == 4:
_, my_input, other_res, other_input = index_vals
to_disconnect_all[other_res].append(other_input)
else:
raise Exception("Unknown input %r" % index_vals)
for other_obj_key, to_disconnect in to_disconnect_all.items():
other_obj = Resource.get(other_obj_key)
self.disconnect(other_obj, to_disconnect)
super(Resource, self).delete()
class CommitedResource(Model):
inputs = Field(dict, default=dict)
connections = Field(list, default=list)
base_path = Field(str)
tags = Field(list, default=list)
state = Field(str, default=lambda: 'removed')
"""
Type of operations:
- load all tasks for execution
- load single task + childs + all parents
of childs (and transitions between them)
"""
class TasksFieldWrp(IndexFieldWrp):
def add(self, task):
return True
def __iter__(self):
return iter(self._instance._data_container[self.fname])
def all(self, postprocessor=None):
if postprocessor:
return map(postprocessor, self)
return list(self)
def all_names(self):
return self.all(lambda key: key.split('~')[1])
def all_tasks(self):
return self.all(Task.get)
def _add(self, parent, child):
parent._data_container['childs'].append(child.key)
child._data_container['parents'].append(parent.key)
child._add_index('childs_bin', parent.key)
parent._add_index('parents_bin', child.key)
return True
class TasksField(IndexField):
_wrp_class = TasksFieldWrp
def __set__(self, obj, value):
wrp = getattr(obj, self.fname)
obj._data_container[self.fname] = self.default
for val in value:
wrp.add(val)
def _parse_key(self, startkey):
return startkey
class ChildFieldWrp(TasksFieldWrp):
def add(self, task):
return self._add(self._instance, task)
class ChildField(TasksField):
_wrp_class = ChildFieldWrp
class ParentFieldWrp(TasksFieldWrp):
def add(self, task):
return self._add(task, self._instance)
class ParentField(TasksField):
_wrp_class = ParentFieldWrp
class Task(Model):
"""Node object"""
name = Field(basestring)
status = Field(basestring)
target = Field(basestring, default=str)
task_type = Field(basestring)
args = Field(list)
errmsg = Field(basestring, default=str)
timelimit = Field(int, default=int)
retry = Field(int, default=int)
timeout = Field(int, default=int)
start_time = Field(float, default=float)
end_time = Field(float, default=float)
execution = IndexedField(basestring)
parents = ParentField(default=list)
childs = ChildField(default=list)
@classmethod
def new(cls, data):
key = '%s~%s' % (data['execution'], data['name'])
return Task.from_dict(key, data)
"""
system log
1. one bucket for all log items
2. separate logs for stage/history (using index)
3. last log item for resource in history
4. log item in staged log for resource|action
5. keep order of history
"""
_connection, _connection_details = parse_database_conn(C.solar_db)
if _connection.mode == 'sqlite':
class NegativeCounter(Model):
count = Field(int, default=int)
def next(self):
self.count -= 1
self.save()
return self.count
else:
class NegativeCounter(Model):
bucket_type = C.counter_bucket_type
def next(self):
ro = self._riak_object
ro.decrement(1)
ro.store()
val = ro.value
return val
@property
def count(self):
return self._riak_object.value
@classmethod
def get_or_create(cls, key):
return cls.get(key)
@classmethod
def get(cls, key):
try:
return cls._c.obj_cache.get(key)
except KeyError:
riak_object = cls.bucket.get(key)
return cls.from_riakobj(riak_object)
class LogItem(Model):
uid = IndexedField(basestring, default=lambda: str(uuid4()))
resource = Field(basestring)
action = Field(basestring)
diff = Field(list)
connections_diff = Field(list)
state = Field(basestring)
base_path = Field(basestring) # remove me
updated = Field(StrInt)
history = IndexedField(StrInt)
log = Field(basestring) # staged/history
composite = CompositeIndexField(fields=('log', 'resource', 'action'))
@property
def log_action(self):
return '.'.join((self.resource, self.action))
@classmethod
def history_last(cls):
items = cls.history.filter(StrInt.n_max(),
StrInt.n_min(),
max_results=1)
if not items:
return None
return cls.get(items[0])
def save(self):
if any(f in self._modified_fields for f in LogItem.composite.fields):
self.composite.reset()
if 'log' in self._modified_fields and self.log == 'history':
self.history = StrInt(next(NegativeCounter.get_or_create(
'history')))
return super(LogItem, self).save()
@classmethod
def new(cls, data):
        vals = {'uid': cls.uid.default}
vals.update(data)
return LogItem.from_dict(vals['uid'], vals)
class Lock(Model):
bucket_properties = {
'backend': 'lock_bitcask_mult',
}
bucket_type = C.lock_bucket_type
identity = Field(basestring)
lockers = Field(list, default=list)
@classmethod
def _reduce(cls, lockers):
# TODO: (jnowak) we could remove not needed lockers there
# not needed means already replaced by other lock.
_s = set()
for x in lockers:
_s.add(tuple(x))
res = [list(x) for x in _s]
return res
def sum_all(self):
reduced = self.reduce()
_pos = defaultdict(int)
_neg = defaultdict(int)
for locker, val, stamp in reduced:
k = (locker, stamp)
if val < 0:
if k in _pos:
del _pos[k]
else:
_neg[k] = -1
elif val > 0:
if k in _neg:
del _neg[k]
else:
_pos[k] = 1
# TODO: (jnowak) consider discard all orphaned releases
# # key_diff = set(_neg.keys()) - set(_pos.keys())
# # for k in key_diff:
# # del _neg[k]
return {locker: val for ((locker, stamp), val) in chain(
_pos.items(),
_neg.items()
)}
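    # Illustration (hypothetical lockers): once reduced, the entries
    # [['a', 1, 's1'], ['a', -1, 's1'], ['b', 1, 's2']] cancel the
    # acquire/release pair for ('a', 's1'), so sum_all() returns {'b': 1},
    # i.e. 'b' currently holds the lock.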
def reduce(self):
lockers = self.lockers
self.lockers = self._reduce(lockers)
return self.lockers
def am_i_locking(self, uid):
return self.who_is_locking() == uid
def who_is_locking(self):
try:
if self.identity:
return self.identity
return None
except KeyError:
summed = self.sum_all()
if not summed:
return None
to_max = sorted([(v, k) for (k, v) in summed.items()])[-1]
if to_max[0] > 0:
return to_max[1]
return None
def change_locking_state(self, uid, value, stamp):
try:
if self.identity:
if value:
self.identity = uid
else:
raise Exception("Unsupported operation, to release "
"this lock you need to delete it.")
return True
except KeyError:
self.lockers.append([uid, value, stamp])
self.reduce()
return True
def save(self, *args, **kwargs):
self.reduce()
res = super(Lock, self).save(*args, **kwargs)
all_lockers = []
all_lockers.extend(res.data['lockers'])
all_lockers.extend(self.lockers)
self.lockers = self._reduce(all_lockers)
return res
@staticmethod
def conflict_resolver(riak_object):
siblings = riak_object.siblings
sdatas = map(lambda x: x.data.get('lockers', []), siblings)
l = []
for data in sdatas:
l.extend(data)
reduced = Lock._reduce(l)
first_sibling = siblings[0]
first_sibling.data['lockers'] = reduced
riak_object.siblings = [first_sibling]
# del Lock._c.obj_cache[riak_object.key]
|
|
"""Test the search module"""
import pickle
import sys
from collections import Iterable, Sized
from itertools import chain, product
import numpy as np
import scipy.sparse as sp
from scipy.stats import bernoulli, expon, uniform
from sklearn.base import BaseEstimator
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from sklearn.externals.six.moves import zip
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
from sklearn.model_selection import LabelKFold
from sklearn.model_selection import LabelShuffleSplit
from sklearn.model_selection import LeaveOneLabelOut
from sklearn.model_selection import LeavePLabelOut
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.utils.fixes import sp_version
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
# TODO Import from sklearn.exceptions once merged.
from sklearn.base import ChangedBehaviorWarning
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherits from BaseEstimator,
# in order to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_labels():
# Check if ValueError (when labels is None) propagates to GridSearchCV
# And also check if labels is correctly passed to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
labels = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
label_cvs = [LeaveOneLabelOut(), LeavePLabelOut(2), LabelKFold(),
LabelShuffleSplit()]
for cv in label_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The labels parameter should not be None",
gs.fit, X, y)
gs.fit(X, y, labels)
non_label_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_label_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
    # Test that grid search will capture errors when X and y have
    # different lengths
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10,))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
    # Pass 4-d X and 3-d y arrays to GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
        # Because we set iid to False, the mean_validation_score is the
        # mean of the fold mean scores instead of the aggregate sample-wise
        # mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv.split(X, y):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with error_score != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
    # GridSearchCV with error_score == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
|
|
# Copyright (c) 2012, Calxeda Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Calxeda Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""Tool-independent mix-in for parsing IPMI results"""
from pyipmi import IpmiError
import string
import inspect
def str_to_list(val, **params):
"""convert string to list of substrings (default: single words)"""
val = val.strip()
if val == '':
return []
delimiter = params.get('delimiter', " ")
return map(string.strip, val.split(delimiter))
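# Illustrative usage: str_to_list("a b c") == ['a', 'b', 'c'],
# str_to_list("a,b", delimiter=",") == ['a', 'b'], str_to_list("") == [].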
def str2bool(val):
"""True if val is 'true', 'yes' or 'enabled, otherwise false"""
return val.lower() in ['true', 'yes', 'enabled']
def str_to_dict(val, **params):
"""Returns the contents of the string 'val' as a dictionary"""
result = {}
operator = params.get('operator', ':')
delimiter = params.get('delimiter', '\n')
value_parser = params.get('value_parser', str)
params['operator'] = params.get('value_operator', None)
params['delimiter'] = params.get('value_delimiter', None)
entries = val.split(delimiter)
for entry in entries:
key, op, value = entry.partition(operator)
result[field_to_attr(key.strip())] = value_parser(value)
return result
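# Illustrative usage: str_to_dict("Name:foo\nIP:1.2.3.4") returns
# {'name': 'foo', 'ip': '1.2.3.4'}; keys are normalized by field_to_attr()
# below, and values keep any whitespace that follows the operator.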
def paren_pair(val):
"""Convert 'foo (bar)' to ['foo', 'bar']"""
return [p.strip(' )') for p in val.split('(')]
def field_to_attr(field_name):
"""Convert a field name to an attribute name
Make the field all lowercase and replace ' ' with '_'
(replace space with underscore)
"""
result = field_name.lower()
if result[0:1].isdigit():
result = "n_" + result
result = result.replace(' ', '_')
result = result.replace('/', '_')
result = result.replace('-', '_')
result = result.replace('.', '_')
result = result.replace('+', '_plus')
return result
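# Illustrative usage: field_to_attr("IPMI Version") == 'ipmi_version',
# field_to_attr("2nd Sensor") == 'n_2nd_sensor',
# field_to_attr("RX Bytes/Sec") == 'rx_bytes_sec'.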
class ResponseParserMixIn(object):
    """Add this MixIn to a Command to enable it to parse response strings
    into response data structures.
    Supplied parse methods are parse_colon_record() (the default) and
    parse_colon_record_list(). Override the default in a derived class
    by setting the "response_parser" field to the name of the desired
    method.
    """
def parse_colon_record(self, response, err):
"""Parse records of key : value separated lines
This expects response to be a string of newline separated
field/value pairs, with each field/value being separated by a
colon and optional whitespace.
Records this parses look like this:
Sensor Data Type : Blah
Somefield : Somevalue
The type of the result returned and the conversion of key/values
in the text result to attribute names/values in the returned object
are determined by calling get_response_types on this command instance,
which gives a way for the result type and mapping to change based
on the contents of the response.
"""
result_type, mapping = self.get_response_types(response)
        if result_type is None:
return None
obj = result_type()
line, sep, rest = response.partition('\n')
left_over = []
while line != '':
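            # 10000000 is a "not found" sentinel: min() below then picks
            # whichever separator (':' or '=') occurs first on the line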
colon_index = 10000000
if line.find(':') != -1:
colon_index = line.index(':')
equal_index = 10000000
if line.find('=') != -1:
equal_index = line.index('=')
if colon_index == 10000000 and equal_index == 10000000:
line, sep, rest = rest.partition('\n')
continue
            field_separator = min([colon_index, equal_index])
            field = line[0:field_separator].strip()
            value = line[field_separator + 1:].strip()
field_info = mapping.get(field)
            if field_info is None:
left_over.append((field, value))
line, sep, rest = rest.partition('\n')
continue
lines_to_get = field_info.get('lines', 1) - 1
while lines_to_get > 0:
line, sep, rest = rest.partition('\n')
value += '\n' + line
lines_to_get -= 1
self.field_to_objval(obj, field_info, field, value)
line, sep, rest = rest.partition('\n')
return obj
def parse_colon_record_list(self, response, err):
"""Parse multiple groups of colon records
Like colon records, but with multiple groups, each separated
by a blank line (two consecutive newline characters).
This returns a list of result objects rather than a single
result object. The type of each result object can vary based
on its contents, so the list isn't always of the same type
of objects.
"""
results = []
records = response.split('\n\n')
for record in records:
obj = self.parse_colon_record(record.strip(), err)
            if obj is None:
continue
results.append(obj)
return results
def parse_single_line(self, response, err):
obj = self.result_type()
attr_name = self.response_fields['attr']
setattr(obj, attr_name, response.strip())
return obj
def field_to_objval(self, obj, field_info, field_name, value):
"""Assign a field's value to an attribute of obj
Arguments:
obj -- the object to set the attribute on. this is some record type
object - the exact varies depending on the command being
executed.
field_info -- a dict describing the field. See "Field Info" below for
more info.
field_name -- the name of the field as given in the IPMI results.
this will be used as the name of the attribute unless a
'attr' key/value is given in the field_info dict.
value -- the value of the field as given in the IPMI results. This
value will be assigned to the attribute unless a 'parser'
key/value is specified in the field_info dict
Field Info:
If an 'attr' key/value is present, the value will be used for the
attribute name of this field instead of 'field_name'.
If a 'parser' key/value is present, the value will be passed to
it, and the result will be assigned to the attribute. The default
parser is str().
"""
str_func = lambda x: str(x)
attr_name = field_info.get('attr', field_to_attr(field_name))
attr_parser = supplied_parser = field_info.get('parser', str_func)
args, varargs, keywords, defaults = inspect.getargspec(attr_parser)
        if keywords is None:
attr_parser = lambda x, **y: supplied_parser(x)
setattr(obj, attr_name, attr_parser(value, **field_info))
def get_response_types(self, response):
"""Return the result type and field mappings
The result type is the class of the result to be used. The field
mappings are given in self.response_fields, and are a
dict mapping field names to field info dicts. See 'field info' in
the doc for field_to_objval above.
Arguments:
response -- the text of the command response. It's not used in
this base method, but might be used in a subclass's version of
this method to allow different result types and mappings to be
used based on the contents of the response.
"""
return self.result_type, self.response_fields
def parse_response(self, out, err):
"""Parse the response to a command
Arguments:
out -- the text response of an IPMI command from stdout
err -- the text response of an IPMI command from stderr
"""
return self.response_parser(out, err)
def parse_results(self, out, err):
"""Parse the results if a result type is specified
        If there is no 'result_type' attribute for this command, return
None.
"""
try:
result_type = self.result_type
except AttributeError:
return None
return self.parse_response(out, err)
def handle_command_error(self, out, err):
"""Handle an error from running the command"""
part = err.partition(":")
if part[1] and part[0].strip().upper() == "ERROR":
raise IpmiError(part[2].strip())
else:
raise IpmiError(err.strip())
response_parser = parse_colon_record
|
|
from google.appengine.ext import ndb
from flask import Markup, render_template
from forms import Form
import cgi, datetime
date_format="%Y-%m-%d %H:%M"
class Field(object):
"""docstring for Field"""
    def __init__(self, name=None, the_type=None, title=None,
                 identifier=None, placeholder=None, tag="input", options=[],
                 step=False, value="", hidden=""):
super(Field, self).__init__()
self.name = name
self.the_type=the_type
self.title=title
self.identifier=identifier
self.placeholder=placeholder
self.tag=tag
self.options=options
self.step=step
self.value=value
self.hidden=hidden
class Item(ndb.Model) :
name=ndb.StringProperty()
description=ndb.TextProperty()
category=ndb.StringProperty()
price=ndb.FloatProperty()
seller_id=ndb.StringProperty()
seller_name=ndb.StringProperty()
biddable=ndb.BooleanProperty()
new_bid=ndb.BooleanProperty()
sold=ndb.BooleanProperty()
time=ndb.DateTimeProperty(auto_now_add=True)
best_offer=ndb.FloatProperty()
photo=ndb.BlobProperty()
photo_mimetype=ndb.StringProperty()
def get_amount(self) :
if self.biddable and self.best_offer>self.price :
return "$"+str(self.best_offer)
else :
return "$"+str(self.price)
def get_pretty_date_time(self) :
return self.time.strftime("%Y-%m-%d at %I:%M %p")
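    # Run the compare-and-set inside a transaction (retried up to 3 times
    # on contention) so concurrent offers cannot both read a stale
    # best_offer and silently overwrite each other.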
@ndb.transactional(retries=3)
def update_best_offer(self, new_offer_price) :
if new_offer_price> self.best_offer :
self.best_offer=new_offer_price
self.put()
return "updated"
def display(self, mine=False, detailed=False, tags=[]) :
return Markup(render_template("Item.html", item=self, mine=mine, detailed=detailed, tags=tags))
def display_row(self) :
return Markup(render_template("Item_row.html", item=self))
class Admin(ndb.Model) :
admin_id=ndb.StringProperty()
class User(ndb.Model) :
userID=ndb.StringProperty()
name=ndb.StringProperty()
email=ndb.StringProperty()
password=ndb.StringProperty()
rating=ndb.FloatProperty()
number_ratings=ndb.FloatProperty()
def display(self) :
fields=[]
options=[]
for x in xrange(1,6) :
option={}
option["name"]=x
option["value"]=x
options.append(option)
fields.append(Field(name='rating',
title="Rating",
the_type="select",
identifier="rating",
placeholder="5 is high 1 is low",
tag="select",
options=options,
value="1"))
fields.append(Field(name='reason',
title="Reason",
the_type="textarea",
identifier='reason',
placeholder='Great experience. Item description was accurate.',
tag="textarea"))
title="Write Review"
form=Form(fields=fields,
title=title)
recent_reviews=self.get_recent_reviews()
return Markup(render_template("User.html",
user=self,
reviews=recent_reviews,
user_form=form))
def get_recent_reviews(self) :
reviews=Review.query(Review.user==self.key.id()).order(-Review.time)
the_reviews=[]
count=0
for review in reviews :
if count==10 :
break
the_reviews.append(review)
count+=1
the_reviews.reverse()
return the_reviews
class Review(ndb.Model) :
user=ndb.StringProperty()
reviewer=ndb.StringProperty()
rating=ndb.IntegerProperty()
reason=ndb.TextProperty()
time=ndb.DateTimeProperty(auto_now_add=True)
flagged=ndb.BooleanProperty()
def display(self, remove=False) :
return Markup(render_template("Review.html", review=self, remove=remove))
class Sale(ndb.Model) :
seller=ndb.StringProperty()
buyer=ndb.StringProperty()
item=ndb.IntegerProperty()
price=ndb.FloatProperty()
class Message(ndb.Model) :
sender=ndb.StringProperty()
recipient=ndb.StringProperty()
body=ndb.StringProperty()
time=ndb.DateTimeProperty(auto_now_add=True)
conversation=ndb.IntegerProperty()
link=ndb.StringProperty()
class Notification(ndb.Model) :
user=ndb.StringProperty()
body=ndb.StringProperty()
ntype=ndb.StringProperty()
item=ndb.IntegerProperty()
item_category=ndb.StringProperty()
time=ndb.DateTimeProperty(auto_now_add=True)
noticed=ndb.BooleanProperty()
link=ndb.StringProperty()
def display(self):
return Markup(render_template("Notification.html", notification=self))
class Conversation(ndb.Model) :
user1=ndb.StringProperty()
user2=ndb.StringProperty()
subject=ndb.StringProperty()
item=ndb.IntegerProperty()
item_name=ndb.StringProperty()
read1=ndb.BooleanProperty()
read2=ndb.BooleanProperty()
time=ndb.DateTimeProperty(auto_now=True)
class Offer(ndb.Model) :
amount=ndb.FloatProperty()
message=ndb.TextProperty()
bidder=ndb.StringProperty()
item=ndb.IntegerProperty()
item_name=ndb.StringProperty()
bidder_name=ndb.StringProperty()
accepted=ndb.BooleanProperty()
confirmed=ndb.BooleanProperty()
time=ndb.DateTimeProperty(auto_now=True)
def display(self):
item=Item.get_by_id(self.item)
return Markup(render_template("Offer.html",
offer=self,
item=item))
class Category(ndb.Model) :
categoryID=ndb.StringProperty()
name=ndb.StringProperty()
photo=ndb.BlobProperty()
photo_mimetype=ndb.StringProperty()
def display(self, width=20):
return Markup(render_template("Category.html", category=self, width=width))
class Subcategory(ndb.Model) :
name=ndb.StringProperty()
class Tag(ndb.Model):
name=ndb.StringProperty()
class Item_Tag(ndb.Model):
item=ndb.IntegerProperty()
tag=ndb.StringProperty()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import relay
from tvm.relay import testing
from tvm.relay.backend.interpreter import ConstructorValue
from tvm.relay import create_executor
from tvm.relay.prelude import Prelude, StaticTensorArrayOps
from tvm.relay.testing import count as count_, make_nat_value, make_nat_expr
import numpy as np
prelude = p = Prelude(tvm.IRModule({}))
p.mod.import_from_std("nat.rly")
def count(e):
return count_(p, e)
dev = tvm.device("llvm", 0)
def eval(expr):
# CAUTION: These tests re-process the entire prelude for each test expression.
    # Hoisting the create_executor won't help, since preprocessing won't
    # begin until evaluate() is called.
return create_executor(mod=prelude.mod, device=dev, target="llvm").evaluate(expr)
nat, z, s = prelude.mod.get_type("nat")
double = p.mod.get_global_var("nat_double")
add = p.mod.get_global_var("nat_add")
optional, some, none = prelude.mod.get_type("Option")
rlist, cons, nil = prelude.mod.get_type("List")
hd = p.hd
tl = p.tl
nth = p.nth
update = p.update
length = p.length
map = p.map
foldl = p.foldl
foldr = p.foldr
foldr1 = p.foldr1
sum = p.sum
concat = p.concat
filter = p.filter
zip = p.zip
rev = p.rev
unfoldl = p.unfoldl
unfoldr = p.unfoldr
map_accumr = p.map_accumr
map_accuml = p.map_accuml
tree, rose = prelude.mod.get_type("Tree")
tmap = p.tmap
size = p.size
compose = p.compose
iterate = p.iterate
def to_list(l):
assert isinstance(l, ConstructorValue)
val = l
ret = []
while True:
if val.tag == cons.tag:
ret.append(val.fields[0])
val = val.fields[1]
else:
assert val.tag == nil.tag
break
return ret
def tree_to_dict(t):
assert isinstance(t, ConstructorValue)
ret = {}
assert t.tag == rose.tag
ret["member"] = t.fields[0]
ret["children"] = []
for subtree in to_list(t.fields[1]):
l = tree_to_dict(subtree)
ret["children"].append(l)
return ret
def vmobj_to_list(o, dtype="float32"):
if isinstance(o, tvm.nd.NDArray):
return [o.numpy().tolist()]
elif isinstance(o, tvm.runtime.container.ADT):
if len(o) == 0:
tensor_nil = p.get_var("tensor_nil", dtype=dtype)
if tensor_nil.tag == o.tag:
return [0]
return []
result = []
for f in o:
result.extend(vmobj_to_list(f, dtype))
return result
elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
if o.constructor.name_hint == "Cons":
tl = vmobj_to_list(o.fields[1], dtype)
hd = vmobj_to_list(o.fields[0], dtype)
hd.extend(tl)
return hd
elif o.constructor.name_hint == "Nil":
return []
elif "tensor_nil" in o.constructor.name_hint:
return [0]
elif "tensor" in o.constructor.name_hint:
return [o.fields[0].numpy()]
else:
raise RuntimeError("Unknown object type: %s" % o.constructor.name_hint)
else:
raise RuntimeError("Unknown object type: %s" % type(o))
# turns a scalar-valued relay tensor value into a python number
def get_scalar(tv):
return tv.numpy().item()
# @tvm.testing.uses_gpu
def test_nat_value():
assert count(make_nat_value(p, 10)) == 10
assert count(eval(s(s(z())))) == 2
@tvm.testing.uses_gpu
def test_nat_constructor():
func = relay.Function([], z())
test_z = relay.GlobalVar("test_z")
test_sz = relay.GlobalVar("test_sz")
prelude.mod[test_z] = func
func = relay.Function([], s(z()))
prelude.mod[test_sz] = func
ck_mod = relay.transform.InferType()(prelude.mod)
assert ck_mod[test_z].body.checked_type == nat()
assert ck_mod[test_sz].body.checked_type == nat()
@tvm.testing.uses_gpu
def test_double():
assert prelude.mod[double].checked_type == relay.FuncType([nat()], nat())
res = eval(double(s(z())))
assert count(res) == 2
@tvm.testing.uses_gpu
def test_add():
assert prelude.mod[add].checked_type == relay.FuncType([nat(), nat()], nat())
res = eval(add(s(z()), s(z())))
assert count(res) == 2
@tvm.testing.uses_gpu
def test_list_constructor():
test_consz = relay.GlobalVar("test_consz")
func = relay.Function([], cons(z(), nil()))
prelude.mod[test_consz] = func
ck_mod = relay.transform.InferType()(prelude.mod)
assert ck_mod[test_consz].body.checked_type == rlist(nat())
@tvm.testing.uses_gpu
def test_hd_tl():
expected = list(range(10))
l = nil()
for i in reversed(expected):
l = cons(make_nat_expr(prelude, i), l)
got = []
for i in range(len(expected)):
got.append(count(eval(hd(l))))
l = tl(l)
assert got == expected
@tvm.testing.uses_gpu
def test_nth():
expected = list(range(10))
l = nil()
for i in reversed(expected):
l = cons(relay.const(i), l)
for i in range(len(expected)):
nth = prelude.mod.get_global_var("nth")
item = eval(nth(l, relay.const(i)))
assert get_scalar(item) == i
@tvm.testing.uses_gpu
def test_update():
expected = list(range(10))
l = nil()
# create zero initialized list
for i in range(len(expected)):
l = cons(make_nat_expr(prelude, 0), l)
# set value
for i, v in enumerate(expected):
l = update(l, relay.const(i), make_nat_expr(prelude, v))
got = []
for i in range(len(expected)):
got.append(count(eval(nth(l, relay.const(i)))))
assert got == expected
@tvm.testing.uses_gpu
def test_length():
a = relay.TypeVar("a")
assert prelude.mod[length].checked_type == relay.FuncType(
[rlist(a)], relay.scalar_type("int32"), [a]
)
res = eval(length(cons(z(), cons(z(), cons(z(), nil())))))
assert get_scalar(res) == 3
@tvm.testing.uses_gpu
def test_map():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[map].checked_type
rhs = relay.FuncType([relay.FuncType([a], b), rlist(a)], rlist(b), [a, b])
assert lhs == rhs
x = relay.Var("x")
add_one = relay.Function([x], s(x))
res = eval(map(add_one, cons(z(), cons(z(), nil()))))
ones = to_list(res)
assert len(ones) == 2
assert count(ones[0]) == 1 and count(ones[1]) == 1
@tvm.testing.uses_gpu
def test_foldl():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[foldl].checked_type
rhs = relay.FuncType([relay.FuncType([a, b], a), a, rlist(b)], a, [a, b])
assert lhs == rhs
x = relay.Var("x")
y = relay.Var("y")
rev_dup = relay.Function([y, x], cons(x, cons(x, y)))
res = eval(
foldl(
rev_dup,
nil(),
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
),
)
)
reversed = to_list(res)
assert len(reversed) == 6
assert count(reversed[0]) == 3 and count(reversed[1]) == 3
assert count(reversed[2]) == 2 and count(reversed[3]) == 2
assert count(reversed[4]) == 1 and count(reversed[5]) == 1
@tvm.testing.uses_gpu
def test_foldr():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[foldr].checked_type
rhs = relay.FuncType([relay.FuncType([a, b], b), b, rlist(a)], b, [a, b])
assert lhs == rhs
x = relay.Var("x")
y = relay.Var("y")
identity = relay.Function([x, y], cons(x, y))
res = eval(
foldr(
identity,
nil(),
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
),
)
)
same = to_list(res)
assert len(same) == 3
assert count(same[0]) == 1 and count(same[1]) == 2 and count(same[2]) == 3
@tvm.testing.uses_gpu
def test_foldr1():
a = relay.TypeVar("a")
lhs = prelude.mod[foldr1].checked_type
rhs = relay.FuncType([relay.FuncType([a, a], a), rlist(a)], a, [a])
assert lhs == rhs
x = relay.Var("x")
y = relay.Var("y")
f = relay.Function([x, y], add(x, y))
res = eval(
foldr1(
f,
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
),
)
)
assert count(res) == 6
@tvm.testing.uses_gpu
def test_sum():
assert prelude.mod[sum].checked_type == relay.FuncType(
[rlist(relay.scalar_type("int32"))], relay.scalar_type("int32")
)
res = eval(sum(cons(relay.const(1), cons(relay.const(2), nil()))))
assert get_scalar(res) == 3
@tvm.testing.uses_gpu
def test_concat():
a = relay.TypeVar("a")
assert prelude.mod[concat].checked_type == relay.FuncType([rlist(a), rlist(a)], rlist(a), [a])
l1 = cons(make_nat_expr(prelude, 1), cons(make_nat_expr(prelude, 2), nil()))
l2 = cons(make_nat_expr(prelude, 3), cons(make_nat_expr(prelude, 4), nil()))
res = eval(concat(l1, l2))
catted = to_list(res)
assert len(catted) == 4
assert count(catted[0]) == 1
assert count(catted[1]) == 2
assert count(catted[2]) == 3
assert count(catted[3]) == 4
@tvm.testing.uses_gpu
def test_filter():
a = relay.TypeVar("a")
expected_type = relay.FuncType(
[relay.FuncType([a], relay.scalar_type("bool")), rlist(a)], rlist(a), [a]
)
assert prelude.mod[filter].checked_type == expected_type
x = relay.Var("x", nat())
greater_than_one = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(
relay.PatternConstructor(
s, [relay.PatternConstructor(s, [relay.PatternWildcard()])]
),
relay.const(True),
),
relay.Clause(relay.PatternWildcard(), relay.const(False)),
],
),
)
res = eval(
filter(
greater_than_one,
cons(
make_nat_expr(prelude, 1),
cons(
make_nat_expr(prelude, 1),
cons(
make_nat_expr(prelude, 3),
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 5), cons(make_nat_expr(prelude, 1), nil())),
),
),
),
),
)
)
filtered = to_list(res)
assert len(filtered) == 2
assert count(filtered[0]) == 3
assert count(filtered[1]) == 5
@tvm.testing.uses_gpu
def test_zip():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
expected_type = relay.FuncType([rlist(a), rlist(b)], rlist(relay.TupleType([a, b])), [a, b])
assert prelude.mod[zip].checked_type == expected_type
l1 = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
l2 = cons(nil(), cons(cons(nil(), nil()), cons(cons(nil(), cons(nil(), nil())), nil())))
res = eval(zip(l1, l2))
zipped = to_list(res)
assert len(zipped) == 3
assert count(zipped[0][0]) == 1
assert len(to_list(zipped[0][1])) == 0
assert count(zipped[1][0]) == 2
assert len(to_list(zipped[1][1])) == 1
assert count(zipped[2][0]) == 3
assert len(to_list(zipped[2][1])) == 2
# test truncation
l3 = cons(make_nat_expr(prelude, 4), cons(make_nat_expr(prelude, 5), nil()))
shorter_res = eval(zip(l3, l2))
truncated = to_list(shorter_res)
assert len(truncated) == 2
assert count(truncated[0][0]) == 4
assert len(to_list(truncated[0][1])) == 0
assert count(truncated[1][0]) == 5
assert len(to_list(truncated[1][1])) == 1
l4 = cons(nil(), nil())
shortest_res = eval(zip(l3, l4))
singleton = to_list(shortest_res)
assert len(singleton) == 1
assert count(singleton[0][0]) == 4
assert len(to_list(singleton[0][1])) == 0
@tvm.testing.uses_gpu
def test_rev():
a = relay.TypeVar("a")
assert prelude.mod[rev].checked_type == relay.FuncType([rlist(a)], rlist(a), [a])
res = eval(
rev(
cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
)
)
reversed = to_list(res)
assert len(reversed) == 3
assert count(reversed[0]) == 3
assert count(reversed[1]) == 2
assert count(reversed[2]) == 1
@tvm.testing.uses_gpu
def test_unfoldr():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
expected_type = relay.FuncType(
[relay.FuncType([a], optional(relay.TupleType([a, b]))), a], rlist(b), [a, b]
)
assert prelude.mod[unfoldr].checked_type == expected_type
x = relay.Var("x", nat())
n = relay.Var("n", nat())
count_down = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(
relay.PatternConstructor(s, [relay.PatternVar(n)]), some(relay.Tuple([n, x]))
),
relay.Clause(relay.PatternConstructor(z, []), none()),
],
),
)
res = eval(unfoldr(count_down, make_nat_expr(prelude, 3)))
unfolded = to_list(res)
assert len(unfolded) == 3
assert count(unfolded[0]) == 3
assert count(unfolded[1]) == 2
assert count(unfolded[2]) == 1
@tvm.testing.uses_gpu
def test_unfoldl():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
expected_type = relay.FuncType(
[relay.FuncType([a], optional(relay.TupleType([a, b]))), a], rlist(b), [a, b]
)
assert prelude.mod[unfoldl].checked_type == expected_type
x = relay.Var("x", nat())
n = relay.Var("n", nat())
count_down = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(
relay.PatternConstructor(s, [relay.PatternVar(n)]), some(relay.Tuple([n, x]))
),
relay.Clause(relay.PatternConstructor(z, []), none()),
],
),
)
res = eval(unfoldl(count_down, make_nat_expr(prelude, 3)))
unfolded = to_list(res)
assert len(unfolded) == 3
assert count(unfolded[0]) == 1
assert count(unfolded[1]) == 2
assert count(unfolded[2]) == 3
@tvm.testing.uses_gpu
def test_map_accumr():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
c = relay.TypeVar("c")
expected_type = relay.FuncType(
[relay.FuncType([a, b], relay.TupleType([a, c])), a, rlist(b)],
relay.TupleType([a, rlist(c)]),
[a, b, c],
)
assert prelude.mod[map_accumr].checked_type == expected_type
acc = relay.Var("acc", nat())
x = relay.Var("x", nat())
add_acc_to_each = relay.Function([acc, x], relay.Tuple([add(x, acc), add(x, acc)]))
vals = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
res = eval(map_accumr(add_acc_to_each, z(), vals))
sum = count(res[0])
new_vals = to_list(res[1])
assert sum == 6
assert len(new_vals) == 3
assert count(new_vals[0]) == 6
assert count(new_vals[1]) == 5
assert count(new_vals[2]) == 3
@tvm.testing.uses_gpu
def test_map_accuml():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
c = relay.TypeVar("c")
expected_type = relay.FuncType(
[relay.FuncType([a, b], relay.TupleType([a, c])), a, rlist(b)],
relay.TupleType([a, rlist(c)]),
[a, b, c],
)
assert prelude.mod[map_accuml].checked_type == expected_type
acc = relay.Var("acc", nat())
x = relay.Var("x", nat())
add_to_acc = relay.Function([acc, x], relay.Tuple([add(x, acc), x]))
vals = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
res = eval(map_accuml(add_to_acc, z(), vals))
sum = count(res[0])
new_vals = to_list(res[1])
assert sum == 6
assert len(new_vals) == 3
assert count(new_vals[0]) == 3
assert count(new_vals[1]) == 2
assert count(new_vals[2]) == 1
@tvm.testing.uses_gpu
def test_optional_matching():
x = relay.Var("x")
y = relay.Var("y")
v = relay.Var("v")
condense = relay.Function(
[x, y],
relay.Match(
x,
[
relay.Clause(relay.PatternConstructor(some, [relay.PatternVar(v)]), cons(v, y)),
relay.Clause(relay.PatternConstructor(none), y),
],
),
)
res = eval(
foldr(
condense,
nil(),
cons(
some(make_nat_expr(prelude, 3)),
cons(none(), cons(some(make_nat_expr(prelude, 1)), nil())),
),
)
)
reduced = to_list(res)
assert len(reduced) == 2
assert count(reduced[0]) == 3
assert count(reduced[1]) == 1
@tvm.testing.uses_gpu
def test_tmap():
a = relay.TypeVar("a")
b = relay.TypeVar("b")
lhs = prelude.mod[tmap].checked_type
rhs = relay.FuncType([relay.FuncType([a], b), tree(a)], tree(b), [a, b])
assert lhs == rhs
x = relay.Var("x")
add_one = relay.Function([x], s(x))
res = eval(tmap(add_one, rose(z(), cons(rose(z(), nil()), cons(rose(z(), nil()), nil())))))
tree_dict = tree_to_dict(res)
assert count(tree_dict["member"]) == 1
assert len(tree_dict["children"]) == 2
for subtree in tree_dict["children"]:
assert count(subtree["member"]) == 1
assert len(subtree["children"]) == 0
@tvm.testing.uses_gpu
def test_size():
a = relay.TypeVar("a")
lhs = prelude.mod[size].checked_type
rhs = relay.FuncType([tree(a)], relay.scalar_type("int32"), [a])
assert lhs == rhs
root = rose(z(), cons(rose(z(), nil()), cons(rose(z(), nil()), nil())))
t = rose(z(), cons(root, cons(root, cons(root, nil()))))
res = eval(size(t))
assert get_scalar(res) == 10
@tvm.testing.uses_gpu
def test_wildcard_match_solo():
x = relay.Var("x", nat())
copy = relay.Function([x], relay.Match(x, [relay.Clause(relay.PatternWildcard(), x)]), nat())
res = eval(copy(s(s(s(z())))))
assert count(res) == 3
@tvm.testing.uses_gpu
def test_wildcard_match_order():
x = relay.Var("x", rlist(nat()))
y = relay.Var("y")
a = relay.Var("a")
return_zero = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(relay.PatternWildcard(), z()),
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(y), relay.PatternVar(a)]), y
),
relay.Clause(relay.PatternConstructor(nil), s(z())),
],
),
nat(),
)
res = eval(return_zero(cons(s(z()), nil())))
# wildcard pattern is evaluated first
assert count(res) == 0
@tvm.testing.uses_gpu
def test_nested_matches():
a = relay.TypeVar("a")
# TODO(@jroesch): inference should be able to handle this one
x = relay.Var("x", type_annotation=rlist(rlist(a)))
y = relay.Var("y")
w = relay.Var("w")
h = relay.Var("h")
t = relay.Var("t")
flatten = relay.GlobalVar("flatten")
# flatten could be written using a fold, but this way has nested matches
inner_match = relay.Match(
y,
[
relay.Clause(relay.PatternConstructor(nil), flatten(w)),
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(h), relay.PatternVar(t)]),
cons(h, flatten(cons(t, w))),
),
],
)
prelude.mod[flatten] = relay.Function(
[x],
relay.Match(
x,
[
relay.Clause(relay.PatternConstructor(nil), nil()),
relay.Clause(
relay.PatternConstructor(cons, [relay.PatternVar(y), relay.PatternVar(w)]),
inner_match,
),
],
),
rlist(a),
[a],
)
first_list = cons(
make_nat_expr(prelude, 1),
cons(make_nat_expr(prelude, 2), cons(make_nat_expr(prelude, 3), nil())),
)
second_list = cons(
make_nat_expr(prelude, 4),
cons(make_nat_expr(prelude, 5), cons(make_nat_expr(prelude, 6), nil())),
)
final_list = cons(first_list, cons(second_list, nil()))
res = eval(flatten(final_list))
flat = to_list(res)
assert len(flat) == 6
for i in range(6):
assert count(flat[i]) == i + 1
@tvm.testing.uses_gpu
def test_match_full_var():
x = relay.Var("x")
v = relay.Var("v")
id_func = relay.Function([x], relay.Match(x, [relay.Clause(relay.PatternVar(v), v)]))
res1 = eval(id_func(nil()))
res2 = eval(id_func(cons(z(), cons(z(), nil()))))
empty = to_list(res1)
assert len(empty) == 0
zeroes = to_list(res2)
assert len(zeroes) == 2
assert count(zeroes[0]) == 0
assert count(zeroes[1]) == 0
@tvm.testing.uses_gpu
def test_nested_pattern_match():
x = relay.Var("x", rlist(nat()))
h1 = relay.Var("h1")
h2 = relay.Var("h2")
t = relay.Var("t")
match = relay.Match(
x,
[
relay.Clause(
relay.PatternConstructor(
cons,
[
relay.PatternVar(h1),
relay.PatternConstructor(cons, [relay.PatternVar(h2), relay.PatternVar(t)]),
],
),
h2,
),
relay.Clause(relay.PatternWildcard(), z()),
],
)
get_second = relay.Function([x], match)
res = eval(get_second(cons(s(z()), cons(s(s(z())), nil()))))
assert count(res) == 2
@tvm.testing.uses_gpu
def test_compose():
n = relay.Var("n")
inc = relay.Function([n], s(n))
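# compose(inc, double) applies double first and inc second, so the
# argument 2 becomes 4 and then 5 (checked below).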
res = eval(relay.Call(compose(inc, double), [s(s(z()))]))
assert count(res) == 5
@tvm.testing.uses_gpu
def test_iterate():
expr = relay.Call(iterate(double, relay.const(2)), [make_nat_expr(prelude, 3)])
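# iterate(double, 2) builds double composed with itself, so applying it
# to nat 3 yields 3 * 2 * 2 = 12 (checked below).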
res = eval(relay.Function([], expr)())
assert count(res) == 12
if __name__ == "__main__":
pytest.main([__file__])
|
|
import logging
import datetime, re, stackauth, stackexchange, stackexchange.web, unittest
import stackexchange.sites as stacksites
from stackexchange.core import StackExchangeError
# for Python 3 compatibility
try:
import htmlentitydefs
except ImportError:
import html.entities as htmlentitydefs
unichr = chr  # Python 3: unichr was folded into chr
QUESTION_ID = 4
ANSWER_ID = 98
USER_ID = 23901
API_KEY = 'pXlviKYs*UZIwKLPwJGgpg(('
_l = logging.getLogger(__name__)
def _setUp(self):
self.site = stackexchange.Site(stackexchange.StackOverflow, API_KEY, impose_throttling = True)
stackexchange.web.WebRequestManager.debug = True
htmlentitydefs.name2codepoint['#39'] = 39
def html_unescape(text):
return re.sub('&(%s);' % '|'.join(htmlentitydefs.name2codepoint),
lambda m: unichr(htmlentitydefs.name2codepoint[m.group(1)]), text)
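# Illustrative example (not part of the test suite): once _setUp has patched
# the '#39' entry above, html_unescape('it&#39;s &amp; more') returns
# "it's & more".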
class DataTests(unittest.TestCase):
def setUp(self):
_setUp(self)
def test_fetch_paged(self):
user = stackexchange.Site(stackexchange.Programmers, API_KEY).user(USER_ID)
answers = user.answers.fetch(pagesize=60)
for answer in answers:
# dummy assert: we're really testing paging here, to make sure it doesn't
# get stuck in an infinite loop. There may well be a better way of testing
# this, but this does the trick.
# This used to test for the title's presence, but title has been removed
# from the default filter.
self.assertTrue(answer.id is not None)
def test_fetch_question(self):
s = self.site.question(QUESTION_ID)
self.assertEqual(html_unescape(s.title), u"While applying opacity to a form should we use a decimal or double value?")
def test_fetch_answer(self):
s = self.site.answer(ANSWER_ID)
def test_fetch_answer_owner(self):
s = self.site.answer(ANSWER_ID)
self.assertIsInstance(s.owner_id, int)
self.assertIsNotNone(s.owner)
def test_fetch_answer_question(self):
s = self.site.answer(ANSWER_ID)
self.assertIsInstance(s.question_id, int)
self.assertIsNotNone(s.question)
def test_fetch_answer_comment(self):
# First try the comments on an answer with lots of comments
# http://stackoverflow.com/a/22389702
s = self.site.answer(22389702)
s.comments.fetch()
first_comment = s.comments[0]
self.assertNotEqual(first_comment, None)
self.assertTrue(first_comment.body)
def test_fetch_question_comment(self):
# Now try a question
# http://stackoverflow.com/a/22342854
s = self.site.question(22342854)
s.comments.fetch()
first_comment = s.comments[0]
self.assertNotEqual(first_comment, None)
self.assertTrue(first_comment.body)
def test_post_revisions(self):
a = self.site.answer(4673436)
a.revisions.fetch()
first_revision = a.revisions[0]
self.assertNotEqual(first_revision, None)
self.assertEqual(first_revision.post_id, a.id)
def test_has_body(self):
q = self.site.question(QUESTION_ID, body=True)
self.assertTrue(hasattr(q, 'body'))
self.assertNotEqual(q.body, None)
a = self.site.answer(ANSWER_ID, body=True)
self.assertTrue(hasattr(a, 'body'))
self.assertNotEqual(a.body, None)
def test_tag_synonyms(self):
syns = self.site.tag_synonyms()
self.assertTrue(len(syns) > 0)
def test_tag_wiki(self):
tag = self.site.tag('javascript')
self.assertEqual(tag.name, 'javascript')
wiki = tag.wiki.fetch()
self.assertTrue(len(wiki.excerpt) > 0)
def test_tag_wiki2(self):
wiki = self.site.tag_wiki('javascript')
self.assertEqual(wiki[0].tag_name, 'javascript')
wiki = self.site.tag_wiki('java;c++;python;android', page=1, pagesize=4)
self.assertEqual(wiki[0].tag_name, 'android')
self.assertEqual(wiki[1].tag_name, 'c++')
self.assertEqual(wiki[2].tag_name, 'java')
self.assertEqual(wiki[3].tag_name, 'python')
def test_tag_related(self):
related = self.site.tag_related('java', page=1, pagesize=40)
names = tuple(tag.name for tag in related[:10])
self.assertIn('android', names)
self.assertIn('swing', names)
def test_badge_name(self):
badge = self.site.badge(name = 'Nice Answer')
self.assertNotEqual(badge, None)
self.assertEqual(badge.name, 'Nice Answer')
def test_badge_id(self):
badge = self.site.badge(23)
self.assertEqual(badge.name, 'Nice Answer')
def test_rep_change(self):
user = self.site.user(41981)
user.reputation_detail.fetch()
recent_change = user.reputation_detail[0]
self.assertNotEqual(recent_change, None)
self.assertEqual(recent_change.user_id, user.id)
def test_timeline(self):
user = self.site.user(41981)
user.timeline.fetch()
event = user.timeline[0]
self.assertNotEqual(event, None)
self.assertEqual(event.user_id, user.id)
def test_top_tag(self):
user = self.site.user(41981)
user.top_answer_tags.fetch()
answer_tag = user.top_answer_tags[0]
self.assertNotEqual(answer_tag, None)
self.assertTrue(answer_tag.answer_count > 0)
user.top_question_tags.fetch()
question_tag = user.top_question_tags[0]
self.assertNotEqual(question_tag, None)
self.assertTrue(question_tag.question_count > 0)
def test_privilege(self):
privileges = self.site.privileges()
self.assertTrue(len(privileges) > 0)
self.assertTrue(privileges[0].reputation > 0)
def test_stackauth_site_types(self):
s = stackauth.StackAuth()
for site in s.sites():
self.assertTrue(site.site_type in (stackauth.SiteType.MainSite, stackauth.SiteType.MetaSite))
def test_stackauth_site_instantiate(self):
for defn in stackauth.StackAuth().sites():
site_ob = defn.get_site(API_KEY)
# Do the same as test_fetch_answer() and hope we don't get an exception
site_ob.answer(ANSWER_ID)
# Only do it once!
break
def test_advanced_search(self):
results = self.site.search_advanced(q = 'python')
self.assertTrue(len(results) > 0)
def test_stats(self):
results = self.site.stats()
self.assertTrue(results.total_users > 0)
def test_info_site_defn(self):
result = self.site.info(site = True)
self.assertNotEqual(result.site_definition, None)
self.assertTrue(len(result.site_definition.name) > 0)
def test_badge_recipients(self):
results = self.site.badge_recipients(22)
self.assertTrue(len(results) > 0)
self.assertTrue(hasattr(results[0], 'user'))
self.assertTrue(hasattr(results[0].user, 'id'))
def test_badge_recipients_field(self):
results = self.site.badge(22).recipients
self.assertNotEqual(next(results), None)
def test_accepted_answer(self):
# our favourite test question...
question = self.site.question(4)
self.assertEqual(type(question.accepted_answer), stackexchange.Answer)
self.assertEqual(question.accepted_answer.id, question.accepted_answer_id)
ans = question.accepted_answer
ans.fetch()
self.assertTrue(hasattr(ans, 'score'))
def test_moderators_elected(self):
moderators = self.site.moderators_elected()
self.assertGreater(len(moderators), 0)
self.assertEqual(type(moderators[0]), stackexchange.User)
class PlumbingTests(unittest.TestCase):
def setUp(self):
_setUp(self)
def test_key_ratelimit(self):
# a key was given, so check the rate limit is 10000
if not hasattr(self.site, 'rate_limit'):
self.site.question(QUESTION_ID)
self.assertTrue(self.site.rate_limit[1] == 10000)
def test_site_constants(self):
# SOFU should always be present
self.assertTrue(hasattr(stacksites, 'StackOverflow'))
self.assertTrue(hasattr(stacksites, 'ServerFault'))
self.assertTrue(hasattr(stacksites, 'SuperUser'))
def test_error(self):
try:
self.site.error(401)
except Exception as e:
self.assertEqual(type(e), StackExchangeError)
self.assertEqual(e.code, 401)
else:
self.fail('did not raise exception on error')
def test_vectorise(self):
# check different types
q = self.site.question(QUESTION_ID)
v = self.site.vectorise(('hello', 10, True, False, q), stackexchange.Question)
self.assertEqual(v, 'hello;10;true;false;%d' % QUESTION_ID)
def test_total(self):
r = self.site.search(tagged = 'python', filter = 'total')
self.assertTrue(hasattr(r, 'total'))
self.assertTrue(r.total > 0)
def test_pagesize_independence(self):
# this test is motivated by pull request #37
# a slightly odd choice of tag indeed, but it has a modest but useful
# number of questions and is unlikely to grow very quickly
qs = self.site.questions(tagged = 'dijkstra', pagesize = 37, filter = '!9YdnSQVoS')
total1 = qs.total
count1 = len(list(qs))
self.assertEqual(count1, total1)
qs = self.site.questions(tagged = 'dijkstra', pagesize = 100, filter = '!9YdnSQVoS')
total2 = qs.total
count2 = len(list(qs))
self.assertEqual(count2, total2)
self.assertEqual(count1, count2)
def test_resultset_independence(self):
# repro code for bug #4 (thanks, beaumartinez!)
# Create two different sites.
a = stackexchange.Site('api.askubuntu.com')
b = self.site
# Create two different searches from the different sites.
a_search = a.search(intitle='vim', pagesize=100)
b_search = b.search(intitle='vim', pagesize=100)
# (We demonstrate that the second search has a second page.)
self.assertEqual(len(b_search.fetch_next()), 100)
# Reset the searches.
a_search = a.search(intitle='vim', pagesize=100)
b_search = b.search(intitle='vim', pagesize=100)
# Exhaust the first search.
while len(a_search) > 0:
a_search = a_search.fetch_next()
# Try to get the next page of the second search. Before the fix for
# bug #4 it came back empty; that was the bug.
self.assertEqual(len(b_search.fetch_next()), 100)
def test_partial(self):
qn = self.site.question(4)
comment = qn.comments.fetch()[0]
owner = comment.owner.fetch()
self.assertIsNotNone(owner)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os.path
import re
import string
# Header for an assembly file.
_ASM_HEADER = """\
; Copyright {year} Google Inc. All Rights Reserved.
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
; This file is generated by {basename}, DO NOT MODIFY.
; Regenerate this file by running syzygy/agent/asan/generate_files.bat.
.386
.MODEL FLAT, C
.CODE
; Allow section and label names to begin with a leading period.
OPTION DOTNAME
"""
# Trailer for an assembly file.
_ASM_TRAILER = """\
END
"""
_REDIRECTORS_EXTERN = """\
; Declare the tail function all the stubs direct to.
EXTERN C asan_redirect_tail:PROC
"""
_REDIRECTORS_PROC_HEADER = """\
; Declare a single top-level function to prevent identical code folding from
; folding the redirectors into one. Each redirector simply calls through to
; the tail function. This allows the tail function to trivially compute the
; redirector's address, which is used to identify the invoked redirector.
asan_redirectors PROC
"""
_REDIRECTORS_PROC_TRAILER = """\
asan_redirectors ENDP
"""
# Declares external functions and data required by the probe implementations.
# Args:
# shadow: The name of the variable housing the shadow memory.
_INTERCEPTORS_PREAMBLE = """\
; Declare the global shadow memory array that probes refer to.
EXTERN C {shadow}:FAR
; Declare the string checking helper function.
EXTERN C asan_check_strings_memory_accesses:PROC
; Declare the redirect function.
EXTERN C asan_redirect_stub_entry:PROC
; Declare the error handling function.
EXTERN C asan_report_bad_memory_access:PROC
; Declares the symbols that this compiland exports.
PUBLIC asan_no_check
PUBLIC asan_string_no_check
PUBLIC asan_redirect_tail
PUBLIC asan_shadow_references"""
_INTERCEPTORS_SEGMENT_HEADER = """\
; Create a new text segment to house the memory interceptors.
.probes SEGMENT PAGE PUBLIC READ EXECUTE 'CODE'
"""
_INTERCEPTORS_SEGMENT_FOOTER = """\
.probes ENDS
"""
_RDATA_SEGMENT_HEADER = """\
; Start writing to the read-only .rdata segment.
.rdata SEGMENT PAGE PUBLIC READ 'DATA'
"""
_RDATA_SEGMENT_FOOTER = """\
.rdata ENDS
"""
# Snippets relating to shadow memory.
_SHADOW = "asan_memory_interceptors_shadow_memory"
_SHADOW_REFERENCE_TABLE_HEADER = """\
; This is a null-terminated table of pointers to all shadow memory references.
; This is emitted so that the shadow memory pointer may be rewritten at
; runtime by the dynamic RTL.
ALIGN 4
asan_shadow_references LABEL FAR"""
_SHADOW_REFERENCE_TABLE_ENTRY = """\
DWORD shadow_reference_{shadow_index!s} - 4"""
_SHADOW_REFERENCE_TABLE_FOOTER = """\
DWORD 0
"""
# Generates the single-instance assembly stubs.
_INTERCEPTORS_GLOBAL_FUNCTIONS = """\
; On entry, the address to check is in EDX and the previous contents of
; EDX are on stack. On exit the previous contents of EDX have been restored
; and popped off the stack. This function modifies no other registers,
; in particular it saves and restores EFLAGS.
ALIGN 16
asan_no_check PROC
; Restore EDX.
mov edx, DWORD PTR[esp + 4]
; And return.
ret 4
asan_no_check ENDP
; No state is saved for string instructions.
ALIGN 16
asan_string_no_check PROC
; Just return.
ret
asan_string_no_check ENDP
; On entry, the address to check is in EDX and the stack has:
; - previous contents of EDX.
; - return address to original caller.
; - return address to redirection stub.
ALIGN 16
asan_redirect_tail PROC
; Prologue, save context.
pushfd
pushad
; Normalize the string operation direction.
cld
; Compute the address of the calling function and push it.
mov eax, DWORD PTR[esp + 9 * 4]
sub eax, 5 ; Length of call instruction.
push eax
; Push the original caller's address.
push DWORD PTR[esp + 11 * 4]
call asan_redirect_stub_entry
; Clean arguments off the stack.
add esp, 8
; Overwrite access_size with the stub to return to.
mov DWORD PTR[esp + 9 * 4], eax
; Restore context.
popad
popfd
; Return to the stashed stub.
ret
asan_redirect_tail ENDP
"""
# Starts by saving EAX onto the stack and then loads the value of
# the flags into it.
#
# This is a trick for efficient saving/restoring part of the flags register.
# See http://blog.freearrow.com/archives/396.
# Flags (bits 16-31) probably need a pipeline flush on update (POPFD). Thus,
# using LAHF/SAHF instead gives better performance.
# PUSHFD/POPFD: 23.314684 ticks
# LAHF/SAHF: 8.838665 ticks
_SAVE_EFLAGS = """\
; Save the EFLAGS.
push eax
lahf
seto al"""
# Restores the flags.
#
# The previous flags value is assumed to be in EAX and we expect to have the
# previous value of EAX on the top of the stack.
# AL is set to 1 if the overflow flag was set before the call to our hook, 0
# otherwise. We add 0x7F to it so it'll restore the flag. Then we restore the
# low bytes of the flags and EAX.
_RESTORE_EFLAGS = """\
; Restore the EFLAGS.
add al, 7Fh
sahf
pop eax"""
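# Worked example (illustrative): after 'seto al', AL is 1 iff the overflow
# flag was set. 'add al, 7Fh' then produces 0x80 (0x01 + 0x7F overflows a
# signed byte, setting OF) when it was set, or 0x7F (no overflow, clearing
# OF) when it was not; 'sahf' copies AH back into SF/ZF/AF/PF/CF. Together
# this restores the arithmetic flags without a full POPFD.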
_2GB_CHECK = """\
; Divide by 8 to convert the address to a shadow index. This is a signed
; operation so the sign bit will stay positive if the address is above the 2GB
; threshold, and the check will fail.
sar edx, 3
js report_failure_{probe_index}"""
_4GB_CHECK = """\
; Divide by 8 to convert the address to a shadow index. No range check is
; needed as the address space is 4GB.
shr edx, 3"""
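# Illustrative contrast between the two checks: 'sar' preserves the sign bit,
# so an address at or above 2GB (e.g. 0x80000000 >> 3 -> 0xF0000000) keeps SF
# set and takes the 'js' branch, while 'shr' maps the full 4GB address space
# onto shadow indices with no branch at all.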
# The common part of the fast path shared between the different
# implementations of the hooks.
#
# This does the following:
# - Saves the memory location in EDX for the slow path.
# - Does an address check if necessary.
# - Checks for zero shadow for this memory location. We use the cmp
# instruction so it'll set the sign flag if the upper bit of the shadow
# value of this memory location is set to 1.
# - If the shadow byte is not equal to zero then it jumps to the slow path.
# - Otherwise it removes the memory location from the top of the stack.
_FAST_PATH = """\
push edx
{range_check}
movzx edx, BYTE PTR[edx + {shadow}]
; This is a label to the previous shadow memory reference. It will be
; referenced by the table at the end of the 'asan_probes' procedure.
shadow_reference_{shadow_index!s} LABEL NEAR
cmp dl, 0
jnz check_access_slow_{probe_index}
add esp, 4"""
# This is the common part of the slow path shared between the different
# implementations of the hooks.
#
# The memory location is expected to be on top of the stack and the shadow
# value for it is assumed to be in DL at this point.
# This also relies on the fact that the shadow non accessible byte mask has
# its upper bit set to 1 and that we jump to this macro after doing a
# "cmp shadow_byte, 0", so the sign flag would be set to 1 if the value isn't
# accessible.
# We inline the Shadow::IsAccessible function for performance reasons.
# This function does the following:
# - Checks if this byte is accessible and jumps to the error path if it's
# not.
# - Removes the memory location from the top of the stack.
_SLOW_PATH = """\
js report_failure_{probe_index}
mov dh, BYTE PTR[esp]
and dh, 7
cmp dh, dl
jae report_failure_{probe_index}
add esp, 4"""
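# Worked example (illustrative): a shadow byte k in 1..7 means only the first
# k bytes of the 8-byte run are addressable, so the check above fails ('jae')
# when (address & 7) >= k; a shadow byte with the high bit set ('js') marks
# the run as entirely inaccessible.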
# The error path.
#
# It expects to have the previous value of EDX at [ESP + 4] and the address
# of the faulty instruction at [ESP].
# This macro takes care of saving and restoring the flags.
_ERROR_PATH = """\
; Restore original value of EDX, and put memory location on stack.
xchg edx, DWORD PTR[esp + 4]
; Create an Asan registers context on the stack.
pushfd
pushad
; Fix the original value of ESP in the Asan registers context.
; Removing 12 bytes (EFLAGS / EIP / original EDX).
add DWORD PTR[esp + 12], 12
; Push ARG4: the address of Asan context on stack.
push esp
; Push ARG3: the access size.
push {access_size}
; Push ARG2: the access type.
push {access_mode_value}
; Push ARG1: the memory location.
push DWORD PTR[esp + 52]
call asan_report_bad_memory_access
; Remove 4 x ARG on stack.
add esp, 16
; Restore original registers.
popad
popfd
; Return and remove memory location on stack.
ret 4"""
# Collects the above macros and bundles them up in a dictionary so they can be
# easily expanded by the string format functions.
_MACROS = {
"AsanSaveEflags": _SAVE_EFLAGS,
"AsanRestoreEflags": _RESTORE_EFLAGS,
"AsanFastPath": _FAST_PATH,
"AsanSlowPath": _SLOW_PATH,
"AsanErrorPath": _ERROR_PATH,
}
# Generates the Asan check access functions.
#
# The name of the generated method will be
# asan_check_(@p access_size)_byte_(@p access_mode_str)().
#
# Args:
# access_size: The size of the access (in bytes).
# access_mode_str: The string representing the access mode (read_access
# or write_access).
# access_mode_value: The internal value representing this kind of
# access.
# probe_index: The index of the probe function. Used to mangle internal labels
# so that they are unique to this probes implementation.
_CHECK_FUNCTION = """\
; On entry, the address to check is in EDX and the previous contents of
; EDX are on stack. On exit the previous contents of EDX have been restored
; and popped off the stack. This function modifies no other registers,
; in particular it saves and restores EFLAGS.
ALIGN 16
asan_check_{access_size}_byte_{access_mode_str}_{mem_model} PROC \
; Probe #{probe_index}.
{AsanSaveEflags}
{AsanFastPath}
; Restore original EDX.
mov edx, DWORD PTR[esp + 8]
{AsanRestoreEflags}
ret 4
check_access_slow_{probe_index} LABEL NEAR
{AsanSlowPath}
; Restore original EDX.
mov edx, DWORD PTR[esp + 8]
{AsanRestoreEflags}
ret 4
report_failure_{probe_index} LABEL NEAR
; Restore memory location in EDX.
pop edx
{AsanRestoreEflags}
{AsanErrorPath}
asan_check_{access_size}_byte_{access_mode_str}_{mem_model} ENDP
"""
# Declare the check access function public label.
_CHECK_FUNCTION_DECL = """\
PUBLIC asan_check_{access_size}_byte_{access_mode_str}_{mem_model} ; Probe \
#{probe_index}."""
# Generates a variant of the Asan check access functions that don't save
# the flags.
#
# The name of the generated method will be
# asan_check_(@p access_size)_byte_(@p access_mode_str)_no_flags().
#
# Args:
# access_size: The size of the access (in bytes).
# access_mode_str: The string representing the access mode (read_access
# or write_access).
# access_mode_value: The internal value representing this kind of access.
# probe_index: The index of the probe function. Used to mangle internal labels
# so that they are unique to this probes implementation.
# Note: calling this function may alter only the EFLAGS register.
_CHECK_FUNCTION_NO_FLAGS = """\
; On entry, the address to check is in EDX and the previous contents of
; EDX are on stack. On exit the previous contents of EDX have been restored
; and popped off the stack. This function may modify EFLAGS, but preserves
; all other registers.
ALIGN 16
asan_check_{access_size}_byte_{access_mode_str}_no_flags_{mem_model} PROC \
; Probe #{probe_index}.
{AsanFastPath}
; Restore original EDX.
mov edx, DWORD PTR[esp + 4]
ret 4
check_access_slow_{probe_index} LABEL NEAR
{AsanSlowPath}
; Restore original EDX.
mov edx, DWORD PTR[esp + 4]
ret 4
report_failure_{probe_index} LABEL NEAR
; Restore memory location in EDX.
pop edx
{AsanErrorPath}
asan_check_{access_size}_byte_{access_mode_str}_no_flags_{mem_model} ENDP
"""
# Declare the check access function public label.
_CHECK_FUNCTION_NO_FLAGS_DECL = """\
PUBLIC asan_check_{access_size}_byte_{access_mode_str}_no_flags_{mem_model} \
; Probe #{probe_index}."""
# Generates the Asan memory accessor redirector stubs.
#
# The name of the generated method will be
# asan_redirect_(@p access_size)_byte_(@p access_mode_str)(@p suffix)().
#
# Args:
# access_size: The size of the access (in bytes).
# access_mode_str: The string representing the access mode (read_access
# or write_access).
# access_mode_value: The internal value representing this kind of
# access.
# suffix: The suffix - if any - for this function name
_REDIRECT_FUNCTION = """\
asan_redirect_{access_size}_byte_{access_mode_str}{suffix} LABEL PROC
call asan_redirect_tail"""
# Declare the public label.
_REDIRECT_FUNCTION_DECL = """\
PUBLIC asan_redirect_{access_size}_byte_{access_mode_str}{suffix}"""
# Generates the Asan check access functions for a string instruction.
#
# The name of the generated method will be
# asan_check_(@p prefix)(@p access_size)_byte_(@p inst)_access().
#
# Args:
# inst: The instruction mnemonic.
# prefix: The prefix of the instruction (repz or nothing).
# counter: The number of times the instruction must be executed (ECX).
# It may be a register or a constant.
# dst_mode: The memory access mode for the destination (EDI).
# src_mode: The memory access mode for the source (ESI).
# access_size: The size of the access (in bytes).
# compare: A flag to enable shortcut execution by comparing memory
# contents.
_CHECK_STRINGS = """\
ALIGN 16
asan_check{prefix}{access_size}_byte_{func}_access PROC ; Probe #{probe_index}.
; Prologue, save context.
pushfd
pushad
; Fix the original value of ESP in the Asan registers context.
; Removing 8 bytes (EFLAGS / EIP were on the stack).
add DWORD PTR[esp + 12], 8
; Setup increment in EBX (depends on direction flag in EFLAGS).
mov ebx, {access_size}
pushfd
pop eax
test eax, 400h
jz skip_neg_direction_{probe_index}
neg ebx
skip_neg_direction_{probe_index} LABEL NEAR
; By standard calling convention, direction flag must be forward.
cld
; Push ARG(context), the Asan registers context.
push esp
; Push ARG(compare), shortcut when memory contents differ.
push {compare}
; Push ARG(increment), increment for EDI/ESI.
push ebx
; Push ARG(access_size), the access size.
push {access_size}
; Push ARG(length), the number of memory accesses.
push {counter}
; Push ARG(src_access_mode), source access type.
push {src_mode}
; Push ARG(src), the source pointer.
push esi
; Push ARG(dst_access_mode), destination access type.
push {dst_mode}
; Push ARG(dst), the destination pointer.
push edi
; Call the generic check strings function.
call asan_check_strings_memory_accesses
add esp, 36
; Epilogue, restore context.
popad
popfd
ret
asan_check{prefix}{access_size}_byte_{func}_access ENDP
"""
# Declare the string checking probe public label.
_CHECK_STRINGS_DECL = """\
PUBLIC asan_check{prefix}{access_size}_byte_{func}_access ; Probe \
#{probe_index}."""
# Generates the Asan string memory accessor redirector stubs.
#
# The name of the generated method will be
# asan_redirect_(@p prefix)(@p access_size)_byte_(@p inst)_access().
#
# Args:
# inst: The instruction mnemonic.
# prefix: The prefix of the instruction (repz or nothing).
# counter: The number of times the instruction must be executed (ECX).
# It may be a register or a constant.
# dst_mode: The memory access mode for the destination (EDI).
# src_mode: The memory access mode for the source (ESI).
# access_size: The size of the access (in bytes).
# compare: A flag to enable shortcut execution by comparing memory
# contents.
_STRING_REDIRECT_FUNCTION = """\
asan_redirect{prefix}{access_size}_byte_{func}_access LABEL PROC
call asan_redirect_tail"""
# Declare the public label.
_STRING_REDIRECT_FUNCTION_DECL = """\
PUBLIC asan_redirect{prefix}{access_size}_byte_{func}_access"""
class MacroAssembler(string.Formatter):
"""A formatter specialization to inject the AsanXXX macros and make
them easier to use."""
def parse(self, str):
"""Override to trim whitespace on empty trailing line."""
for (lit, fld, fmt, conv) in super(MacroAssembler, self).parse(str):
# Strip trailing whitespace from the previous literal to allow natural
# use of AsanXXX macros.
m = re.match('^(.*\n)( +)$', lit)
if m:
lit = m.group(1)
yield((lit, fld, fmt, conv))
def get_value(self, key, args, kwargs):
"""Override to inject macro definitions."""
if key in _MACROS:
macro = _MACROS[key].format(*args, **kwargs)
# Trim leading whitespace to allow natural use of AsanXXX macros.
macro = macro.lstrip()
return macro
return super(MacroAssembler, self).get_value(key, args, kwargs)
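# Illustrative use: plain format fields behave as usual, while macro keys
# expand to the snippets above, e.g.
#   MacroAssembler().format('{AsanSaveEflags}')
# returns the _SAVE_EFLAGS block, and fields such as {probe_index} inside a
# macro are filled from the same format() arguments.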
# Access sizes for the memory accessors generated.
_ACCESS_SIZES = (1, 2, 4, 8, 10, 16, 32)
# These values must correspond to those defined in the agent::asan::AccessMode
# enum. See syzygy/agent/asan/error_info.h.
_ASAN_READ_ACCESS = 0
_ASAN_WRITE_ACCESS = 1
_ASAN_UNKNOWN_ACCESS = 2
# Access modes for the memory accessors generated.
_ACCESS_MODES = [
('read_access', _ASAN_READ_ACCESS),
('write_access', _ASAN_WRITE_ACCESS),
]
# Memory models for the generated accessors, and the associated address range
# checks to insert.
_MEMORY_MODELS = [
('2gb', _2GB_CHECK.lstrip()),
('4gb', _4GB_CHECK.lstrip()),
]
# The string accessors generated.
_STRING_ACCESSORS = [
("cmps", "_repz_", "ecx", _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 4, 1),
("cmps", "_repz_", "ecx", _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 2, 1),
("cmps", "_repz_", "ecx", _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 1, 1),
("cmps", "_", 1, _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 4, 1),
("cmps", "_", 1, _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 2, 1),
("cmps", "_", 1, _ASAN_READ_ACCESS, _ASAN_READ_ACCESS, 1, 1),
("movs", "_repz_", "ecx", _ASAN_WRITE_ACCESS, _ASAN_READ_ACCESS, 4, 0),
("movs", "_repz_", "ecx", _ASAN_WRITE_ACCESS, _ASAN_READ_ACCESS, 2, 0),
("movs", "_repz_", "ecx", _ASAN_WRITE_ACCESS, _ASAN_READ_ACCESS, 1, 0),
("movs", "_", 1, _ASAN_WRITE_ACCESS, _ASAN_READ_ACCESS, 4, 0),
("movs", "_", 1, _ASAN_WRITE_ACCESS, _ASAN_READ_ACCESS, 2, 0),
("movs", "_", 1, _ASAN_WRITE_ACCESS, _ASAN_READ_ACCESS, 1, 0),
("stos", "_repz_", "ecx", _ASAN_WRITE_ACCESS, _ASAN_UNKNOWN_ACCESS, 4, 0),
("stos", "_repz_", "ecx", _ASAN_WRITE_ACCESS, _ASAN_UNKNOWN_ACCESS, 2, 0),
("stos", "_repz_", "ecx", _ASAN_WRITE_ACCESS, _ASAN_UNKNOWN_ACCESS, 1, 0),
("stos", "_", 1, _ASAN_WRITE_ACCESS, _ASAN_UNKNOWN_ACCESS, 4, 0),
("stos", "_", 1, _ASAN_WRITE_ACCESS, _ASAN_UNKNOWN_ACCESS, 2, 0),
("stos", "_", 1, _ASAN_WRITE_ACCESS, _ASAN_UNKNOWN_ACCESS, 1, 0),
]
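# Each tuple above is (inst, prefix, counter, dst_mode, src_mode, access_size,
# compare), matching the unpacking in _IterateOverStringInterceptors below.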
class ToStringCounter(object):
"""A helper class that counts how often it is converted to a string."""
def __init__(self, count=0):
self._count = count
def __str__(self):
self._count += 1
return str(self._count - 1)
def count(self):
return self._count
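# Illustrative example: every str() conversion hands out the next index, so a
# format string that mentions '{shadow_index!s}' twice consumes two slots:
#   c = ToStringCounter()
#   '{0!s} {0!s}'.format(c)  # -> '0 1', after which c.count() == 2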
def _IterateOverInterceptors(parts,
formatter,
format,
format_no_flags,
probe_index=0,
shadow_index=0):
"""Helper for _GenerateInterceptorsAsmFile."""
f = formatter
# This variable hides a counter which automatically increments for every
# reference made to it. This allows the probes to use arbitrarily many
# references to the shadow memory and the generator will implicitly track
# these and emit a table entry per reference.
#
# For this mechanism to work reliably all references to 'shadow_index' in the
# formatting strings must be specified using '{shadow_index!s}'. This
# guarantees that the __str__ method of the ToStringCounter instance will be
# called.
shadow_index = ToStringCounter(shadow_index)
for mem_model, range_check in _MEMORY_MODELS:
# Iterate over the probes that have flags.
for access_size in _ACCESS_SIZES:
for access, access_name in _ACCESS_MODES:
formatted_range_check = f.format(range_check, probe_index=probe_index)
parts.append(f.format(format,
access_size=access_size,
access_mode_str=access,
access_mode_value=access_name,
mem_model=mem_model,
probe_index=probe_index,
range_check=formatted_range_check,
shadow=_SHADOW,
shadow_index=shadow_index))
probe_index += 1
for access_size in _ACCESS_SIZES:
for access, access_name in _ACCESS_MODES:
formatted_range_check = f.format(range_check, probe_index=probe_index)
parts.append(f.format(format_no_flags,
access_size=access_size,
access_mode_str=access,
access_mode_value=access_name,
mem_model=mem_model,
probe_index=probe_index,
range_check=formatted_range_check,
shadow=_SHADOW,
shadow_index=shadow_index))
probe_index += 1
# Return the probe and shadow memory reference counts.
return (probe_index, shadow_index.count())
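# For the tables above this emits, per memory model, 7 access sizes x 2 access
# modes for both the flag-saving and no-flags variants (7 * 2 * 2 = 28), i.e.
# 56 probes across the '2gb' and '4gb' models per call.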
def _IterateOverStringInterceptors(parts, formatter, format, probe_index=0):
"""Helper for _GenerateInterceptorsAsmFile."""
for (fn, p, c, dst_mode, src_mode, size, compare) in _STRING_ACCESSORS:
parts.append(formatter.format(format,
access_size=size,
compare=compare,
counter=c,
dst_mode=dst_mode,
func=fn,
prefix=p,
probe_index=probe_index,
src_mode=src_mode))
probe_index += 1
return probe_index
def _GenerateInterceptorsAsmFile():
f = MacroAssembler()
parts = [f.format(_ASM_HEADER,
basename=os.path.basename(__file__),
year=datetime.datetime.now().year)]
parts.append(f.format(_INTERCEPTORS_PREAMBLE, shadow=_SHADOW))
probe_index = 0
shadow_index = 0
# Generate the block of public label declarations.
(probe_index, shadow_index) = _IterateOverInterceptors(parts, f,
_CHECK_FUNCTION_DECL, _CHECK_FUNCTION_NO_FLAGS_DECL,
probe_index=probe_index, shadow_index=shadow_index)
probe_index = _IterateOverStringInterceptors(parts, f, _CHECK_STRINGS_DECL,
probe_index=probe_index)
parts.append('')
# Place all of the probe functions in a custom segment.
parts.append(f.format(_INTERCEPTORS_SEGMENT_HEADER))
# Generate the single-instance functions.
parts.append(f.format(_INTERCEPTORS_GLOBAL_FUNCTIONS))
# TODO(siggi): Think about the best way to allow the stubs to communicate
# their own and their alternative identities to the bottleneck function.
# A particularly nice way is to generate an array of N-tuples that can
# be used when patching up IATs, where the redirector and the
# alternatives consume a row each. Passing in the array entry to the
# bottleneck is then the nicest, but the easiest is probably to pass in
# the redirector function itself...
# Reset the probe and shadow indices.
probe_index = 0
shadow_index = 0
# Output the actual interceptors themselves
(probe_index, shadow_index) = _IterateOverInterceptors(parts, f,
_CHECK_FUNCTION, _CHECK_FUNCTION_NO_FLAGS, probe_index=probe_index,
shadow_index=shadow_index)
# Generate string operation accessors.
probe_index = _IterateOverStringInterceptors(parts, f, _CHECK_STRINGS,
probe_index=probe_index)
# Close the custom segment housing the probes.
parts.append(f.format(_INTERCEPTORS_SEGMENT_FOOTER))
# Output the table of shadow references to .rdata.
parts.append(f.format(_RDATA_SEGMENT_HEADER))
parts.append(f.format(_SHADOW_REFERENCE_TABLE_HEADER))
for i in range(0, shadow_index):
parts.append(f.format(_SHADOW_REFERENCE_TABLE_ENTRY, shadow_index=i))
parts.append(_SHADOW_REFERENCE_TABLE_FOOTER)
parts.append(f.format(_RDATA_SEGMENT_FOOTER))
parts.append(f.format(_ASM_TRAILER))
return parts
def _GenerateRedirectorsAsmFile():
f = MacroAssembler()
parts = [f.format(_ASM_HEADER,
basename=os.path.basename(__file__),
year=datetime.datetime.now().year)]
parts.append(f.format(_REDIRECTORS_EXTERN))
# Declare the memory accessor redirectors.
for suffix in ("", "_no_flags"):
for access_size in _ACCESS_SIZES:
for access, access_name in _ACCESS_MODES:
parts.append(f.format(_REDIRECT_FUNCTION_DECL,
access_size=access_size,
access_mode_str=access,
access_mode_value=access_name,
suffix=suffix))
# Declare string operation redirectors.
for (fn, p, c, dst_mode, src_mode, size, compare) in _STRING_ACCESSORS:
parts.append(f.format(_STRING_REDIRECT_FUNCTION_DECL,
func=fn,
prefix=p,
counter=c,
dst_mode=dst_mode,
src_mode=src_mode,
access_size=size,
compare=compare))
parts.append(f.format(_REDIRECTORS_PROC_HEADER))
# Generate the memory accessor redirectors.
for suffix in ("", "_no_flags"):
for access_size in _ACCESS_SIZES:
for access, access_name in _ACCESS_MODES:
parts.append(f.format(_REDIRECT_FUNCTION,
access_size=access_size,
access_mode_str=access,
access_mode_value=access_name,
suffix=suffix))
# Generate string operation redirectors.
for (fn, p, c, dst_mode, src_mode, size, compare) in _STRING_ACCESSORS:
parts.append(f.format(_STRING_REDIRECT_FUNCTION,
func=fn,
prefix=p,
counter=c,
dst_mode=dst_mode,
src_mode=src_mode,
access_size=size,
compare=compare))
parts.append(f.format(_REDIRECTORS_PROC_TRAILER))
parts.append(f.format(_ASM_TRAILER))
return parts
def _WriteFile(file_name, parts):
contents = '\n'.join(parts)
dir = os.path.dirname(__file__)
with open(os.path.join(dir, file_name), "wb") as f:
f.write(contents)
def main():
interceptors_asm = _GenerateInterceptorsAsmFile()
redirectors_asm = _GenerateRedirectorsAsmFile()
_WriteFile('gen/memory_interceptors_impl.asm', interceptors_asm)
_WriteFile('gen/memory_redirectors.asm', redirectors_asm)
if __name__ == '__main__':
main()
|
|
import py
from rpython.rtyper.lltypesystem import lltype, llmemory, llarena
from rpython.memory.gc.incminimark import IncrementalMiniMarkGC, WORD
from rpython.memory.gc.incminimark import GCFLAG_VISITED
from test_direct import BaseDirectGCTest
T = lltype.GcForwardReference()
T.become(lltype.GcStruct('pinning_test_struct2',
('someInt', lltype.Signed)))
S = lltype.GcForwardReference()
S.become(lltype.GcStruct('pinning_test_struct1',
('someInt', lltype.Signed),
('next', lltype.Ptr(T)),
('data', lltype.Ptr(T))))
class PinningGCTest(BaseDirectGCTest):
def setup_method(self, meth):
BaseDirectGCTest.setup_method(self, meth)
max = getattr(meth, 'max_number_of_pinned_objects', 20)
self.gc.max_number_of_pinned_objects = max
if not hasattr(self.gc, 'minor_collection'):
self.gc.minor_collection = self.gc._minor_collection
def test_pin_can_move(self):
# even a pinned object is considered to be movable. Only the caller
# of pin() knows if it is currently movable or not.
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
assert self.gc.can_move(adr)
assert self.gc.pin(adr)
assert self.gc.can_move(adr)
def test_pin_twice(self):
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
assert self.gc.pin(adr)
assert not self.gc.pin(adr)
def test_unpin_not_pinned(self):
# this test checks a requirement of the unpin() interface
ptr = self.malloc(S)
py.test.raises(Exception,
self.gc.unpin, llmemory.cast_ptr_to_adr(ptr))
def test__is_pinned(self):
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
assert not self.gc._is_pinned(adr)
assert self.gc.pin(adr)
assert self.gc._is_pinned(adr)
self.gc.unpin(adr)
assert not self.gc._is_pinned(adr)
def test_prebuilt_not_pinnable(self):
ptr = lltype.malloc(T, immortal=True)
self.consider_constant(ptr)
assert not self.gc.pin(llmemory.cast_ptr_to_adr(ptr))
self.gc.collect()
assert not self.gc.pin(llmemory.cast_ptr_to_adr(ptr))
# XXX test with multiple mallocs, and only part of them is pinned
def test_random(self):
# scenario: create a bunch of objects. Randomly pin them, unpin them, add
# them to the stackroots and remove them from the stackroots again.
import random
for i in xrange(10**3):
obj = self.malloc(T)
obj.someInt = 100
#
if random.random() < 0.5:
self.stackroots.append(obj)
print("+stack")
if random.random() < 0.5:
self.gc.pin(llmemory.cast_ptr_to_adr(obj))
print("+pin")
self.gc.debug_gc_step(random.randint(1, 4))
for o in self.stackroots[:]:
assert o.someInt == 100
o_adr = llmemory.cast_ptr_to_adr(o)
if random.random() < 0.1 and self.gc._is_pinned(o_adr):
print("-pin")
self.gc.unpin(o_adr)
if random.random() < 0.1:
print("-stack")
self.stackroots.remove(o)
class TestIncminimark(PinningGCTest):
from rpython.memory.gc.incminimark import IncrementalMiniMarkGC as GCClass
from rpython.memory.gc.incminimark import STATE_SCANNING, STATE_MARKING
def test_try_pin_gcref_containing_type(self):
# scenario: incminimark's object pinning can't pin objects that may
# contain GC pointers
obj = self.malloc(S)
assert not self.gc.pin(llmemory.cast_ptr_to_adr(obj))
def test_pin_old(self):
# scenario: try pinning an old object. This should not be possible and
# we want to make sure everything stays as it is.
old_ptr = self.malloc(S)
old_ptr.someInt = 900
self.stackroots.append(old_ptr)
assert self.stackroots[0] == old_ptr # test assumption
self.gc.collect()
old_ptr = self.stackroots[0]
# now we try to pin it
old_adr = llmemory.cast_ptr_to_adr(old_ptr)
assert not self.gc.is_in_nursery(old_adr)
assert not self.gc.pin(old_adr)
assert self.gc.pinned_objects_in_nursery == 0
def pin_pin_pinned_object_count(self, collect_func):
# scenario: pin two objects that are referenced from stackroots. Check
# if the pinned objects count is correct, even after another collection.
pinned1_ptr = self.malloc(T)
pinned1_ptr.someInt = 100
self.stackroots.append(pinned1_ptr)
#
pinned2_ptr = self.malloc(T)
pinned2_ptr.someInt = 200
self.stackroots.append(pinned2_ptr)
#
assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned1_ptr))
assert self.gc.pinned_objects_in_nursery == 1
assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned2_ptr))
assert self.gc.pinned_objects_in_nursery == 2
#
collect_func()
#
assert self.gc.pinned_objects_in_nursery == 2
def test_pin_pin_pinned_object_count_minor_collection(self):
self.pin_pin_pinned_object_count(self.gc.minor_collection)
def test_pin_pin_pinned_object_count_major_collection(self):
self.pin_pin_pinned_object_count(self.gc.collect)
def pin_unpin_pinned_object_count(self, collect_func):
# scenario: pin an object and check the pinned object count. Unpin it
# and check the count again.
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.stackroots.append(pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
#
assert self.gc.pinned_objects_in_nursery == 0
assert self.gc.pin(pinned_adr)
assert self.gc.pinned_objects_in_nursery == 1
collect_func()
assert self.gc.pinned_objects_in_nursery == 1
self.gc.unpin(pinned_adr)
assert self.gc.pinned_objects_in_nursery == 0
collect_func()
assert self.gc.pinned_objects_in_nursery == 0
def test_pin_unpin_pinned_object_count_minor_collection(self):
self.pin_unpin_pinned_object_count(self.gc.minor_collection)
def test_pin_unpin_pinned_object_count_major_collection(self):
self.pin_unpin_pinned_object_count(self.gc.collect)
def pinned_obj_in_stackroot(self, collect_func):
# scenario: a pinned object that is part of the stack roots. Check if
# it is not moved
#
ptr = self.malloc(T)
ptr.someInt = 100
self.stackroots.append(ptr)
assert self.stackroots[0] == ptr # validate our assumption
adr = llmemory.cast_ptr_to_adr(ptr)
assert self.gc.is_in_nursery(adr) # to be sure
assert self.gc.pin(adr)
#
# the object shouldn't move from now on
collect_func()
#
# check if it is still at the same location as expected
adr_after_collect = llmemory.cast_ptr_to_adr(self.stackroots[0])
assert self.gc.is_in_nursery(adr_after_collect)
assert adr == adr_after_collect
assert self.gc._is_pinned(adr)
assert ptr.someInt == 100
assert self.gc.pinned_objects_in_nursery == 1
def test_pinned_obj_in_stackroot_minor_collection(self):
self.pinned_obj_in_stackroot(self.gc.minor_collection)
def test_pinned_obj_in_stackroot_full_major_collection(self):
self.pinned_obj_in_stackroot(self.gc.collect)
def test_pinned_obj_in_stackroots_stepwise_major_collection(self):
# scenario: same as 'pinned_obj_in_stackroot', with the minor change
# that we do a stepwise major collection and check after each step for
# a correct state
#
ptr = self.malloc(T)
ptr.someInt = 100
self.stackroots.append(ptr)
assert self.stackroots[0] == ptr # validate our assumption
adr = llmemory.cast_ptr_to_adr(ptr)
assert self.gc.is_in_nursery(adr)
assert self.gc.pin(adr)
#
# the object shouldn't move from now on. Do a full round of major
# steps and check each time for correct state
#
# check that we start at the expected point
assert self.gc.gc_state == self.STATE_SCANNING
done = False
while not done:
self.gc.debug_gc_step()
# check that the pinned object didn't move
ptr_after_collection = self.stackroots[0]
adr_after_collection = llmemory.cast_ptr_to_adr(ptr_after_collection)
assert self.gc.is_in_nursery(adr_after_collection)
assert adr == adr_after_collection
assert self.gc._is_pinned(adr)
assert ptr.someInt == 100
assert self.gc.pinned_objects_in_nursery == 1
# as the object is referenced from the stackroots, the gc internal
# 'old_objects_pointing_to_pinned' should be empty
assert not self.gc.old_objects_pointing_to_pinned.non_empty()
#
# break condition
done = self.gc.gc_state == self.STATE_SCANNING
def pin_unpin_moved_stackroot(self, collect_func):
# scenario: test if the pinned object is moved after being unpinned.
# The second part of the scenario is the one tested here; the first
# part is already covered by other tests.
ptr = self.malloc(T)
ptr.someInt = 100
self.stackroots.append(ptr)
assert self.stackroots[0] == ptr # validate our assumption
adr = llmemory.cast_ptr_to_adr(ptr)
assert self.gc.pin(adr)
collect_func()
#
# from here on the test really starts. previous logic is already tested
#
self.gc.unpin(adr)
assert not self.gc._is_pinned(adr)
assert self.gc.is_in_nursery(adr)
#
# now we do another collection and the object should be moved out of
# the nursery.
collect_func()
new_adr = llmemory.cast_ptr_to_adr(self.stackroots[0])
assert not self.gc.is_in_nursery(new_adr)
assert self.stackroots[0].someInt == 100
with py.test.raises(RuntimeError) as exinfo:
ptr.someInt = 200
assert "freed" in str(exinfo.value)
def test_pin_unpin_moved_stackroot_minor_collection(self):
self.pin_unpin_moved_stackroot(self.gc.minor_collection)
def test_pin_unpin_moved_stackroot_major_collection(self):
self.pin_unpin_moved_stackroot(self.gc.collect)
def pin_referenced_from_old(self, collect_func):
# scenario: an old object points to a pinned one. Check if the pinned
# object is correctly kept in the nursery and not moved.
#
# create old object
old_ptr = self.malloc(S)
old_ptr.someInt = 900
self.stackroots.append(old_ptr)
assert self.stackroots[0] == old_ptr # validate our assumption
collect_func() # make it old: move it out of the nursery
old_ptr = self.stackroots[0]
assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old_ptr))
#
# create young pinned one and let the old one reference the young one
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(old_ptr, 'next', pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
assert self.gc.pin(pinned_adr)
assert self.gc.is_in_nursery(pinned_adr)
assert old_ptr.next.someInt == 100
assert self.gc.pinned_objects_in_nursery == 1
#
# do a collection run and make sure the pinned one didn't move
collect_func()
assert old_ptr.next.someInt == pinned_ptr.someInt == 100
assert llmemory.cast_ptr_to_adr(old_ptr.next) == pinned_adr
assert self.gc.is_in_nursery(pinned_adr)
def test_pin_referenced_from_old_minor_collection(self):
self.pin_referenced_from_old(self.gc.minor_collection)
def test_pin_referenced_from_old_major_collection(self):
self.pin_referenced_from_old(self.gc.collect)
def test_pin_referenced_from_old_stepwise_major_collection(self):
# scenario: same as in 'pin_referenced_from_old'. However,
# this time we do a major collection step by step and check
# between steps that the states are as expected.
#
# create old object
old_ptr = self.malloc(S)
old_ptr.someInt = 900
self.stackroots.append(old_ptr)
assert self.stackroots[0] == old_ptr # validate our assumption
self.gc.minor_collection() # make it old: move it out of the nursery
old_ptr = self.stackroots[0]
old_adr = llmemory.cast_ptr_to_adr(old_ptr)
assert not self.gc.is_in_nursery(old_adr)
#
# create young pinned one and let the old one reference the young one
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(old_ptr, 'next', pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
assert self.gc.pin(pinned_adr)
assert self.gc.is_in_nursery(pinned_adr)
assert old_ptr.next.someInt == 100
assert self.gc.pinned_objects_in_nursery == 1
#
# stepwise major collection with validation between steps
# check that we start at the expected point
assert self.gc.gc_state == self.STATE_SCANNING
done = False
while not done:
self.gc.debug_gc_step()
#
# make sure pinned object didn't move
assert old_ptr.next.someInt == pinned_ptr.someInt == 100
assert llmemory.cast_ptr_to_adr(old_ptr.next) == pinned_adr
assert self.gc.is_in_nursery(pinned_adr)
assert self.gc.pinned_objects_in_nursery == 1
#
# validate that the old object is part of the internal list
# 'old_objects_pointing_to_pinned' as expected.
should_be_old_adr = self.gc.old_objects_pointing_to_pinned.pop()
assert should_be_old_adr == old_adr
self.gc.old_objects_pointing_to_pinned.append(should_be_old_adr)
#
# break condition
done = self.gc.gc_state == self.STATE_SCANNING
def pin_referenced_from_old_remove_ref(self, collect_func):
# scenario: an old object points to a pinned one. We remove the
# reference from the old one. So nothing points to the pinned object.
# After this the pinned object should be collected (it's dead).
#
# Create the objects and get them to our initial state (this is not
# tested here, should be already tested by other tests)
old_ptr = self.malloc(S)
old_ptr.someInt = 900
self.stackroots.append(old_ptr)
assert self.stackroots[0] == old_ptr # check assumption
collect_func() # make it old
old_ptr = self.stackroots[0]
#
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(old_ptr, 'next', pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
assert self.gc.pin(pinned_adr)
#
collect_func()
# from here on we have our initial state for this test.
#
# first check some basic assumptions.
assert self.gc.is_in_nursery(pinned_adr)
assert self.gc._is_pinned(pinned_adr)
# remove the reference
self.write(old_ptr, 'next', lltype.nullptr(T))
# from now on the pinned object is dead. Do a collection and make sure
# old object still there and the pinned one is gone.
collect_func()
assert self.stackroots[0].someInt == 900
assert not self.gc.old_objects_pointing_to_pinned.non_empty()
with py.test.raises(RuntimeError) as exinfo:
pinned_ptr.someInt = 200
assert "freed" in str(exinfo.value)
def test_pin_referenced_from_old_remove_ref_minor_collection(self):
self.pin_referenced_from_old_remove_ref(self.gc.minor_collection)
def test_pin_referenced_from_old_remove_ref_major_collection(self):
self.pin_referenced_from_old_remove_ref(self.gc.collect)
def pin_referenced_from_old_remove_old(self, collect_func):
# scenario: an old object references a pinned object. After removing
# the stackroot reference to the old object, both objects (old and pinned)
# must be collected.
# This test is important as we expect unreachable pinned objects to be
# collected. At the same time we keep an internal list of objects pointing
# to pinned ones, and we must make sure the old/pinned objects do not
# survive merely because they appear on that list.
#
# create the objects and get them to the initial state for this test.
# Everything on the way to the initial state should be covered by
# other tests.
old_ptr = self.malloc(S)
old_ptr.someInt = 900
self.stackroots.append(old_ptr)
collect_func()
old_ptr = self.stackroots[0]
#
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(old_ptr, 'next', pinned_ptr)
assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr))
#
collect_func()
#
# now we have our initial state: old object referenced from stackroots.
# Old object referencing a young pinned one. Next step is to make some
# basic checks that we got the expected state.
assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old_ptr))
assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(pinned_ptr))
assert pinned_ptr == old_ptr.next
#
# now we remove the old object from the stackroots...
self.stackroots.remove(old_ptr)
# ... and do a major collection (otherwise the old object wouldn't be
# gone).
self.gc.collect()
# check that both objects are gone
assert not self.gc.old_objects_pointing_to_pinned.non_empty()
with py.test.raises(RuntimeError) as exinfo_old:
old_ptr.someInt = 800
assert "freed" in str(exinfo_old.value)
#
with py.test.raises(RuntimeError) as exinfo_pinned:
pinned_ptr.someInt = 200
assert "freed" in str(exinfo_pinned.value)
def test_pin_referenced_from_old_remove_old_minor_collection(self):
self.pin_referenced_from_old_remove_old(self.gc.minor_collection)
def test_pin_referenced_from_old_remove_old_major_collection(self):
self.pin_referenced_from_old_remove_old(self.gc.collect)
def pin_referenced_from_young_in_stackroots(self, collect_func):
# scenario: a young object is referenced from the stackroots. This
# young object points to a young pinned object. We check if everything
# behaves as expected after a collection: the young object is moved out
# of the nursery while the pinned one stays where it is.
#
root_ptr = self.malloc(S)
root_ptr.someInt = 900
self.stackroots.append(root_ptr)
assert self.stackroots[0] == root_ptr # validate assumption
#
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(root_ptr, 'next', pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
assert self.gc.pin(pinned_adr)
# check both are in nursery
assert self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr))
assert self.gc.is_in_nursery(pinned_adr)
#
# no old object yet pointing to a pinned one
assert not self.gc.old_objects_pointing_to_pinned.non_empty()
#
# now we do a collection and check if the result is as expected
collect_func()
#
# check if objects are where we expect them
root_ptr = self.stackroots[0]
assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(root_ptr))
assert self.gc.is_in_nursery(pinned_adr)
# and as the 'root_ptr' object is now old, it should be tracked specially
should_be_root_adr = self.gc.old_objects_pointing_to_pinned.pop()
assert should_be_root_adr == llmemory.cast_ptr_to_adr(root_ptr)
self.gc.old_objects_pointing_to_pinned.append(should_be_root_adr)
# check that old object still points to the pinned one as expected
assert root_ptr.next == pinned_ptr
def test_pin_referenced_from_young_in_stackroots_minor_collection(self):
self.pin_referenced_from_young_in_stackroots(self.gc.minor_collection)
def test_pin_referenced_from_young_in_stackroots_major_collection(self):
self.pin_referenced_from_young_in_stackroots(self.gc.collect)
def pin_referenced_from_prebuilt(self, collect_func):
# scenario: a prebuilt object points to a pinned object. Check if the
# pinned object doesn't move and is still accessible.
#
prebuilt_ptr = lltype.malloc(S, immortal=True)
prebuilt_ptr.someInt = 900
self.consider_constant(prebuilt_ptr)
prebuilt_adr = llmemory.cast_ptr_to_adr(prebuilt_ptr)
collect_func()
#
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
self.write(prebuilt_ptr, 'next', pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
assert self.gc.pin(pinned_adr)
#
# check if everything is as expected
assert not self.gc.is_in_nursery(prebuilt_adr)
assert self.gc.is_in_nursery(pinned_adr)
assert pinned_ptr == prebuilt_ptr.next
assert pinned_ptr.someInt == 100
#
# do a collection and check again
collect_func()
assert self.gc.is_in_nursery(pinned_adr)
assert pinned_ptr == prebuilt_ptr.next
assert pinned_ptr.someInt == 100
def test_pin_referenced_from_prebuilt_minor_collection(self):
self.pin_referenced_from_prebuilt(self.gc.minor_collection)
def test_pin_referenced_from_prebuilt_major_collection(self):
self.pin_referenced_from_prebuilt(self.gc.collect)
def test_old_objects_pointing_to_pinned_not_exploding(self):
# scenario: two old objects, each pointing twice to a pinned object.
# The internal 'old_objects_pointing_to_pinned' list should always
# contain exactly two entries.
# In a previous implementation the list exploded (it grew with every
# minor collection), hence this test.
old1_ptr = self.malloc(S)
old1_ptr.someInt = 900
self.stackroots.append(old1_ptr)
old2_ptr = self.malloc(S)
old2_ptr.someInt = 800
self.stackroots.append(old2_ptr)
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 100
assert self.gc.pin(llmemory.cast_ptr_to_adr(pinned_ptr))
self.write(old1_ptr, 'next', pinned_ptr)
self.write(old1_ptr, 'data', pinned_ptr)
self.write(old2_ptr, 'next', pinned_ptr)
self.write(old2_ptr, 'data', pinned_ptr)
self.gc.collect()
old1_ptr = self.stackroots[0]
old2_ptr = self.stackroots[1]
assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old1_ptr))
assert not self.gc.is_in_nursery(llmemory.cast_ptr_to_adr(old2_ptr))
# do multiple rounds to make sure
for _ in range(10):
assert self.gc.old_objects_pointing_to_pinned.length() == 2
self.gc.debug_gc_step()
def pin_shadow_1(self, collect_func):
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
self.stackroots.append(ptr)
ptr.someInt = 100
assert self.gc.pin(adr)
self.gc.id(ptr) # allocate shadow
collect_func()
assert self.gc.is_in_nursery(adr)
assert ptr.someInt == 100
self.gc.unpin(adr)
collect_func() # move to shadow
adr = llmemory.cast_ptr_to_adr(self.stackroots[0])
assert not self.gc.is_in_nursery(adr)
def test_pin_shadow_1_minor_collection(self):
self.pin_shadow_1(self.gc.minor_collection)
def test_pin_shadow_1_major_collection(self):
self.pin_shadow_1(self.gc.collect)
def test_malloc_different_types(self):
# scenario: malloc two objects of different type and pin them. Do a
# minor and major collection in between. This test showed a bug that was
# present in a previous implementation of pinning.
obj1 = self.malloc(T)
self.stackroots.append(obj1)
assert self.gc.pin(llmemory.cast_ptr_to_adr(obj1))
#
self.gc.collect()
#
obj2 = self.malloc(T)
self.stackroots.append(obj2)
assert self.gc.pin(llmemory.cast_ptr_to_adr(obj2))
def test_objects_to_trace_bug(self):
# scenario: In a previous implementation there was a bug because of a
# dead pointer inside 'objects_to_trace'. This was caused by the first
# major collection step that added the pointer to the list and right
# after the collection step the object is unpinned and freed by the minor
# collection, leaving a dead pointer in the list.
pinned_ptr = self.malloc(T)
pinned_ptr.someInt = 101
self.stackroots.append(pinned_ptr)
pinned_adr = llmemory.cast_ptr_to_adr(pinned_ptr)
assert self.gc.pin(pinned_adr)
self.gc.debug_gc_step()
self.gc.unpin(pinned_adr)
self.gc.debug_gc_step()
def pin_shadow_2(self, collect_func):
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
self.stackroots.append(ptr)
ptr.someInt = 100
assert self.gc.pin(adr)
self.gc.identityhash(ptr) # allocate shadow
collect_func()
assert self.gc.is_in_nursery(adr)
assert ptr.someInt == 100
self.gc.unpin(adr)
collect_func() # move to shadow
adr = llmemory.cast_ptr_to_adr(self.stackroots[0])
assert not self.gc.is_in_nursery(adr)
def test_pin_shadow_2_minor_collection(self):
self.pin_shadow_2(self.gc.minor_collection)
def test_pin_shadow_2_major_collection(self):
self.pin_shadow_2(self.gc.collect)
def test_pin_nursery_top_scenario1(self):
ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.stackroots.append(ptr1)
assert self.gc.pin(adr1)
ptr2 = self.malloc(T)
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr2.someInt = 102
self.stackroots.append(ptr2)
assert self.gc.pin(adr2)
ptr3 = self.malloc(T)
adr3 = llmemory.cast_ptr_to_adr(ptr3)
ptr3.someInt = 103
self.stackroots.append(ptr3)
assert self.gc.pin(adr3)
# scenario: no minor collection happened, only three mallocs
# and pins
#
# +- nursery
# |
# v
# +--------+--------+--------+---------------------...---+
# | pinned | pinned | pinned | empty |
# +--------+--------+--------+---------------------...---+
# ^ ^
# | |
# nursery_free -+ |
# nursery_top -+
#
assert adr3 < self.gc.nursery_free
assert self.gc.nursery_free < self.gc.nursery_top
def test_pin_nursery_top_scenario2(self):
ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.stackroots.append(ptr1)
assert self.gc.pin(adr1)
ptr2 = self.malloc(T)
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr2.someInt = 102
self.stackroots.append(ptr2)
assert self.gc.pin(adr2)
ptr3 = self.malloc(T)
adr3 = llmemory.cast_ptr_to_adr(ptr3)
ptr3.someInt = 103
self.stackroots.append(ptr3)
assert self.gc.pin(adr3)
# scenario: after the first minor collection
#
# +- nursery
# |
# v
# +--------+--------+--------+---------------------...---+
# | pinned | pinned | pinned | empty |
# +--------+--------+--------+---------------------...---+
# ^
# |
# +- nursery_free
# +- nursery_top
#
self.gc.collect()
assert self.gc.nursery_free == self.gc.nursery_top
assert self.gc.nursery_top == self.gc.nursery
assert self.gc.nursery_top < adr3
def test_pin_nursery_top_scenario3(self):
ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.stackroots.append(ptr1)
assert self.gc.pin(adr1)
ptr2 = self.malloc(T)
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr2.someInt = 102
self.stackroots.append(ptr2)
assert self.gc.pin(adr2)
ptr3 = self.malloc(T)
adr3 = llmemory.cast_ptr_to_adr(ptr3)
ptr3.someInt = 103
self.stackroots.append(ptr3)
assert self.gc.pin(adr3)
# scenario: after unpinning the first object and doing a minor
# collection
#
# +- nursery
# |
# v
# +--------+--------+--------+---------------------...---+
# | empty | pinned | pinned | empty |
# +--------+--------+--------+---------------------...---+
# ^ ^
# | |
# | +- nursery_top
# +- nursery_free
#
self.gc.unpin(adr1)
self.gc.collect()
assert self.gc.nursery_free == self.gc.nursery
assert self.gc.nursery_top > self.gc.nursery_free
assert self.gc.nursery_top < adr2
def test_pin_nursery_top_scenario4(self):
ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.stackroots.append(ptr1)
assert self.gc.pin(adr1)
ptr2 = self.malloc(T)
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr2.someInt = 102
self.stackroots.append(ptr2)
assert self.gc.pin(adr2)
ptr3 = self.malloc(T)
adr3 = llmemory.cast_ptr_to_adr(ptr3)
ptr3.someInt = 103
self.stackroots.append(ptr3)
assert self.gc.pin(adr3)
# scenario: after unpinning the first and second objects and doing a
# minor collection
#
# +- nursery
# |
# v
# +-----------------+--------+---------------------...---+
# | empty | pinned | empty |
# +-----------------+--------+---------------------...---+
# ^ ^
# | |
# | +- nursery_top
# +- nursery_free
#
self.gc.unpin(adr1)
self.gc.unpin(adr2)
self.gc.collect()
assert self.gc.nursery_free == self.gc.nursery
assert self.gc.nursery_free < self.gc.nursery_top
assert self.gc.nursery_top < adr3
def test_pin_nursery_top_scenario5(self):
ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.stackroots.append(ptr1)
assert self.gc.pin(adr1)
ptr2 = self.malloc(T)
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr2.someInt = 102
self.stackroots.append(ptr2)
assert self.gc.pin(adr2)
ptr3 = self.malloc(T)
adr3 = llmemory.cast_ptr_to_adr(ptr3)
ptr3.someInt = 103
self.stackroots.append(ptr3)
assert self.gc.pin(adr3)
# scenario: no minor collection happened, only three mallocs
# and pins
#
# +- nursery
# |
# v
# +--------+--------+--------+---------------------...---+
# | pinned | pinned | pinned | empty |
# +--------+--------+--------+---------------------...---+
# ^ ^
# | |
# nursery_free -+ |
# nursery_top -+
#
assert adr3 < self.gc.nursery_free
assert self.gc.nursery_free < self.gc.nursery_top
# scenario: unpin everything and do a minor collection
#
# +- nursery
# |
# v
# +----------------------------------+-------------...---+
# | reset arena | empty (not reset) |
# +----------------------------------+-------------...---+
# ^ ^
# | |
# +- nursery_free |
# nursery_top -+
#
self.gc.unpin(adr1)
self.gc.unpin(adr2)
self.gc.unpin(adr3)
self.gc.collect()
assert self.gc.nursery_free == self.gc.nursery
assert self.gc.nursery_top > self.gc.nursery_free
def fill_nursery_with_pinned_objects(self):
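# Compute how many `T` objects fit into the nursery: each allocation
# occupies the object's fixed size plus the size of the GC header.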
typeid = self.get_type_id(T)
size = self.gc.fixed_size(typeid) + self.gc.gcheaderbuilder.size_gc_header
raw_size = llmemory.raw_malloc_usage(size)
object_mallocs = self.gc.nursery_size // raw_size
for instance_nr in xrange(object_mallocs):
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
ptr.someInt = 100 + instance_nr
self.stackroots.append(ptr)
self.gc.pin(adr)
def test_full_pinned_nursery_pin_fail(self):
self.fill_nursery_with_pinned_objects()
# the nursery should be full now; at least there is no space left for
# another `T`. The next malloc should fail.
py.test.raises(Exception, self.malloc, T)
test_full_pinned_nursery_pin_fail.max_number_of_pinned_objects = 50
def test_full_pinned_nursery_arena_reset(self):
# there were some bugs regarding the 'arena_reset()' calls at
# the end of the minor collection. This test brought them to light.
self.fill_nursery_with_pinned_objects()
self.gc.collect()
def test_pinning_limit(self):
assert self.gc.max_number_of_pinned_objects == 5
for instance_nr in xrange(self.gc.max_number_of_pinned_objects):
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
ptr.someInt = 100 + instance_nr
self.stackroots.append(ptr)
assert self.gc.pin(adr)
#
# now we reached the maximum amount of pinned objects
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
self.stackroots.append(ptr)
assert not self.gc.pin(adr)
test_pinning_limit.max_number_of_pinned_objects = 5
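# (The 'max_number_of_pinned_objects' attribute attached to a test method,
# as above, is presumably read by the test harness to configure the GC's
# pinning limit for that particular test.)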
def test_full_pinned_nursery_pin_fail2(self):
typeid = self.get_type_id(T)
size = self.gc.fixed_size(typeid) + self.gc.gcheaderbuilder.size_gc_header
raw_size = llmemory.raw_malloc_usage(size)
object_mallocs = self.gc.nursery_size // raw_size
# just to be sure we do not run into the pinning limit, as we are testing
# not the limiter but rather the case of a nursery full of pinned objects.
assert object_mallocs < self.gc.max_number_of_pinned_objects
for instance_nr in xrange(object_mallocs):
ptr = self.malloc(T)
adr = llmemory.cast_ptr_to_adr(ptr)
ptr.someInt = 100 + instance_nr
self.stackroots.append(ptr)
self.gc.pin(adr)
#
# the nursery should be full now; at least there is no space left for
# another `T`. The next malloc should fail.
py.test.raises(Exception, self.malloc, T)
test_full_pinned_nursery_pin_fail2.max_number_of_pinned_objects = 50
def test_pin_bug1(self):
#
# * the nursery contains a pinned object 'ptr1'
#
# * outside the nursery is another object 'ptr2' pointing to 'ptr1'
#
# * during one incremental tracing step, we see 'ptr2' but don't
# trace 'ptr1' right now: it is left behind on the trace-me-later
# list
#
# * then we run the program, unpin 'ptr1', and remove it from 'ptr2'
#
# * at the next minor collection, we free 'ptr1' because we don't
# find anything pointing to it (it is removed from 'ptr2'),
# but 'ptr1' is still in the trace-me-later list
#
# * the trace-me-later list is deep enough that 'ptr1' is not
# seen right now! it is only seen at some later minor collection
#
# * at that later point, crash, because 'ptr1' in the nursery was
# overwritten
#
ptr2 = self.malloc(S)
ptr2.someInt = 102
self.stackroots.append(ptr2)
self.gc.collect()
ptr2 = self.stackroots[-1] # now outside the nursery
adr2 = llmemory.cast_ptr_to_adr(ptr2)
ptr1 = self.malloc(T)
adr1 = llmemory.cast_ptr_to_adr(ptr1)
ptr1.someInt = 101
self.write(ptr2, 'data', ptr1)
res = self.gc.pin(adr1)
assert res
self.gc.minor_collection()
assert self.gc.gc_state == self.STATE_SCANNING
self.gc.major_collection_step()
assert self.gc.objects_to_trace.tolist() == [adr2]
assert self.gc.more_objects_to_trace.tolist() == []
self.gc.TEST_VISIT_SINGLE_STEP = True
self.gc.minor_collection()
assert self.gc.gc_state == self.STATE_MARKING
self.gc.major_collection_step()
assert self.gc.objects_to_trace.tolist() == []
assert self.gc.more_objects_to_trace.tolist() == [adr2]
self.write(ptr2, 'data', lltype.nullptr(T))
self.gc.unpin(adr1)
assert ptr1.someInt == 101
self.gc.minor_collection() # should free 'ptr1'
py.test.raises(RuntimeError, "ptr1.someInt")
assert self.gc.gc_state == self.STATE_MARKING
self.gc.major_collection_step() # should not crash reading 'ptr1'!
del self.gc.TEST_VISIT_SINGLE_STEP
def test_pin_bug2(self):
#
# * we have an old object A that points to a pinned object B
#
# * we unpin B
#
# * the next minor_collection() is done in STATE_MARKING==1
# when the object A is already black
#
# * _minor_collection() => _visit_old_objects_pointing_to_pinned()
# which will move the now-unpinned B out of the nursery, to B'
#
# At that point we need to take care of colors, otherwise we
# get a black object (A) pointing to a white object (B'),
# which must never occur.
#
ptrA = self.malloc(T)
ptrA.someInt = 42
adrA = llmemory.cast_ptr_to_adr(ptrA)
res = self.gc.pin(adrA)
assert res
ptrC = self.malloc(S)
self.stackroots.append(ptrC)
ptrB = self.malloc(S)
ptrB.data = ptrA
self.stackroots.append(ptrB)
self.gc.collect()
ptrB = self.stackroots[-1] # now old and outside the nursery
ptrC = self.stackroots[-2] # another random old object, traced later
adrB = llmemory.cast_ptr_to_adr(ptrB)
self.gc.minor_collection()
assert self.gc.gc_state == self.STATE_SCANNING
self.gc.major_collection_step()
assert self.gc.gc_state == self.STATE_MARKING
assert not (self.gc.header(adrB).tid & GCFLAG_VISITED) # not black yet
self.gc.TEST_VISIT_SINGLE_STEP = True
self.gc.major_collection_step()
assert self.gc.gc_state == self.STATE_MARKING
assert self.gc.header(adrB).tid & GCFLAG_VISITED # now black
# but ptrC is not traced yet, which is why we're still in STATE_MARKING
assert self.gc.old_objects_pointing_to_pinned.tolist() == [adrB]
self.gc.unpin(adrA)
self.gc.DEBUG = 2
self.gc.minor_collection()
|
|
import pytest
from flask import Flask, url_for
from flask_sqlalchemy import SQLAlchemy
from werkzeug.wrappers import BaseResponse
from flask_marshmallow import Marshmallow
from flask_marshmallow.sqla import HyperlinkRelated
from marshmallow import ValidationError
from tests.conftest import Bunch
try:
from marshmallow_sqlalchemy import SQLAlchemySchema # noqa: F401
except ImportError:
has_sqlalchemyschema = False
else:
has_sqlalchemyschema = True
requires_sqlalchemyschema = pytest.mark.skipif(
not has_sqlalchemyschema, reason="SQLAlchemySchema not available"
)
class TestSQLAlchemy:
@pytest.fixture()
def extapp(self):
app_ = Flask("extapp")
app_.testing = True
app_.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
app_.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
SQLAlchemy(app_)
Marshmallow(app_)
@app_.route("/author/<int:id>")
def author(id):
return f"...view for author {id}..."
@app_.route("/book/<int:id>")
def book(id):
return f"...view for book {id}..."
ctx = app_.test_request_context()
ctx.push()
yield app_
ctx.pop()
@pytest.fixture()
def db(self, extapp):
return extapp.extensions["sqlalchemy"].db
@pytest.fixture()
def extma(self, extapp):
return extapp.extensions["flask-marshmallow"]
@pytest.fixture()
def models(self, db):
class AuthorModel(db.Model):
__tablename__ = "author"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(255))
@property
def url(self):
return url_for("author", id=self.id)
@property
def absolute_url(self):
return url_for("author", id=self.id, _external=True)
class BookModel(db.Model):
__tablename__ = "book"
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(255))
author_id = db.Column(db.Integer, db.ForeignKey("author.id"))
author = db.relationship("AuthorModel", backref="books")
@property
def url(self):
return url_for("book", id=self.id)
@property
def absolute_url(self):
return url_for("book", id=self.id, _external=True)
db.create_all()
yield Bunch(Author=AuthorModel, Book=BookModel)
db.drop_all()
def test_can_initialize_extensions(self, extapp):
assert "flask-marshmallow" in extapp.extensions
assert "sqlalchemy" in extapp.extensions
@requires_sqlalchemyschema
def test_can_declare_sqla_schemas(self, extma, models, db):
class AuthorSchema(extma.SQLAlchemySchema):
class Meta:
model = models.Author
id = extma.auto_field()
name = extma.auto_field()
class BookSchema(extma.SQLAlchemySchema):
class Meta:
model = models.Book
id = extma.auto_field()
title = extma.auto_field()
author_id = extma.auto_field()
author_schema = AuthorSchema()
book_schema = BookSchema()
author = models.Author(name="Chuck Paluhniuk")
book = models.Book(title="Fight Club", author=author)
author_result = author_schema.dump(author)
assert "id" in author_result
assert "name" in author_result
assert author_result["id"] == author.id
assert author_result["name"] == "Chuck Paluhniuk"
book_result = book_schema.dump(book)
assert "id" in book_result
assert "title" in book_result
assert book_result["id"] == book.id
assert book_result["title"] == book.title
assert book_result["author_id"] == book.author_id
resp = author_schema.jsonify(author)
assert isinstance(resp, BaseResponse)
@requires_sqlalchemyschema
def test_can_declare_sqla_auto_schemas(self, extma, models, db):
class AuthorSchema(extma.SQLAlchemyAutoSchema):
class Meta:
model = models.Author
class BookSchema(extma.SQLAlchemyAutoSchema):
class Meta:
model = models.Book
include_fk = True
id = extma.auto_field()
title = extma.auto_field()
author_id = extma.auto_field()
author_schema = AuthorSchema()
book_schema = BookSchema()
author = models.Author(name="Chuck Paluhniuk")
book = models.Book(title="Fight Club", author=author)
author_result = author_schema.dump(author)
assert "id" in author_result
assert "name" in author_result
assert author_result["id"] == author.id
assert author_result["name"] == "Chuck Paluhniuk"
book_result = book_schema.dump(book)
assert "id" in book_result
assert "title" in book_result
assert book_result["id"] == book.id
assert book_result["title"] == book.title
assert book_result["author_id"] == book.author_id
resp = author_schema.jsonify(author)
assert isinstance(resp, BaseResponse)
@requires_sqlalchemyschema
def test_hyperlink_related_field(self, extma, models, db, extapp):
class BookSchema(extma.SQLAlchemySchema):
class Meta:
model = models.Book
author = extma.HyperlinkRelated("author")
book_schema = BookSchema()
author = models.Author(name="Chuck Paluhniuk")
book = models.Book(title="Fight Club", author=author)
db.session.add(author)
db.session.add(book)
db.session.flush()
book_result = book_schema.dump(book)
assert book_result["author"] == author.url
deserialized = book_schema.load(book_result)
assert deserialized["author"] == author
@requires_sqlalchemyschema
def test_hyperlink_related_field_serializes_none(self, extma, models):
class BookSchema(extma.SQLAlchemySchema):
class Meta:
model = models.Book
author = extma.HyperlinkRelated("author")
book_schema = BookSchema()
book = models.Book(title="Fight Club", author=None)
book_result = book_schema.dump(book)
assert book_result["author"] is None
@requires_sqlalchemyschema
def test_hyperlink_related_field_errors(self, extma, models, db, extapp):
class BookSchema(extma.SQLAlchemySchema):
class Meta:
model = models.Book
author = HyperlinkRelated("author")
book_schema = BookSchema()
author = models.Author(name="Chuck Paluhniuk")
book = models.Book(title="Fight Club", author=author)
db.session.add(author)
db.session.add(book)
db.session.flush()
# Deserialization fails on bad endpoint
book_result = book_schema.dump(book)
book_result["author"] = book.url
with pytest.raises(ValidationError) as excinfo:
book_schema.load(book_result)
errors = excinfo.value.messages
assert 'expected "author"' in errors["author"][0]
# Deserialization fails on bad URL key
book_result = book_schema.dump(book)
book_schema.fields["author"].url_key = "pk"
with pytest.raises(ValidationError) as excinfo:
book_schema.load(book_result)
errors = excinfo.value.messages
assert 'URL pattern "pk" not found' in errors["author"][0]
@requires_sqlalchemyschema
def test_hyperlink_related_field_external(self, extma, models, db, extapp):
class BookSchema(extma.SQLAlchemySchema):
class Meta:
model = models.Book
author = HyperlinkRelated("author", external=True)
book_schema = BookSchema()
author = models.Author(name="Chuck Paluhniuk")
book = models.Book(title="Fight Club", author=author)
db.session.add(author)
db.session.add(book)
db.session.flush()
book_result = book_schema.dump(book)
assert book_result["author"] == author.absolute_url
deserialized = book_schema.load(book_result)
assert deserialized["author"] == author
@requires_sqlalchemyschema
def test_hyperlink_related_field_list(self, extma, models, db, extapp):
class AuthorSchema(extma.SQLAlchemySchema):
class Meta:
model = models.Author
books = extma.List(HyperlinkRelated("book"))
author_schema = AuthorSchema()
author = models.Author(name="Chuck Paluhniuk")
book = models.Book(title="Fight Club", author=author)
db.session.add(author)
db.session.add(book)
db.session.flush()
author_result = author_schema.dump(author)
assert author_result["books"][0] == book.url
deserialized = author_schema.load(author_result)
assert deserialized["books"][0] == book
|
|
from types import GeneratorType
from django.utils.copycompat import copy, deepcopy
class MergeDict(object):
"""
A simple class for creating new "virtual" dictionaries that actually look
up values in more than one dictionary, passed in the constructor.
If a key appears in more than one of the given dictionaries, only the
first occurrence will be used.
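For illustration (example values are arbitrary)::
>>> md = MergeDict({'a': 1, 'b': 2}, {'b': 3, 'c': 4})
>>> md['b']
2
>>> md.get('c')
4
>>> md.get('missing', 'default')
'default'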
"""
def __init__(self, *dicts):
self.dicts = dicts
def __getitem__(self, key):
for dict_ in self.dicts:
try:
return dict_[key]
except KeyError:
pass
raise KeyError(key)
def __copy__(self):
return self.__class__(*self.dicts)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def getlist(self, key):
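# NOTE: this assumes the wrapped dicts provide getlist() (as
# MultiValueDict does); a plain dict containing the key would raise
# AttributeError here.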
for dict_ in self.dicts:
if key in dict_:
return dict_.getlist(key)
return []
def iteritems(self):
seen = set()
for dict_ in self.dicts:
for item in dict_.iteritems():
k, v = item
if k in seen:
continue
seen.add(k)
yield item
def iterkeys(self):
for k, v in self.iteritems():
yield k
def itervalues(self):
for k, v in self.iteritems():
yield v
def items(self):
return list(self.iteritems())
def keys(self):
return list(self.iterkeys())
def values(self):
return list(self.itervalues())
def has_key(self, key):
for dict_ in self.dicts:
if key in dict_:
return True
return False
__contains__ = has_key
__iter__ = iterkeys
def copy(self):
"""Returns a copy of this object."""
return self.__copy__()
def __str__(self):
"""
Returns something like
"{'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}"
instead of the generic "<object meta-data>" inherited from object.
"""
return str(dict(self.items()))
def __repr__(self):
"""
Returns something like
MergeDict({'key1': 'val1', 'key2': 'val2'}, {'key3': 'val3'})
instead of the generic "<object meta-data>" inherited from object.
"""
dictreprs = ', '.join(repr(d) for d in self.dicts)
return '%s(%s)' % (self.__class__.__name__, dictreprs)
class SortedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
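For illustration (example values are arbitrary)::
>>> d = SortedDict()
>>> d['b'] = 1
>>> d['a'] = 2
>>> d.keys()
['b', 'a']
>>> d.insert(0, 'c', 3)
>>> d.keys()
['c', 'b', 'a']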
"""
def __new__(cls, *args, **kwargs):
instance = super(SortedDict, cls).__new__(cls, *args, **kwargs)
instance.keyOrder = []
return instance
def __init__(self, data=None):
if data is None:
data = {}
elif isinstance(data, GeneratorType):
# Unfortunately we need to be able to read a generator twice: once
# to get the data into self with our super().__init__ call, and a
# second time to set up keyOrder correctly.
data = list(data)
super(SortedDict, self).__init__(data)
if isinstance(data, dict):
self.keyOrder = data.keys()
else:
self.keyOrder = []
seen = set()
for key, value in data:
if key not in seen:
self.keyOrder.append(key)
seen.add(key)
def __deepcopy__(self, memo):
return self.__class__([(key, deepcopy(value, memo))
for key, value in self.iteritems()])
def __setitem__(self, key, value):
if key not in self:
self.keyOrder.append(key)
super(SortedDict, self).__setitem__(key, value)
def __delitem__(self, key):
super(SortedDict, self).__delitem__(key)
self.keyOrder.remove(key)
def __iter__(self):
return iter(self.keyOrder)
def pop(self, k, *args):
result = super(SortedDict, self).pop(k, *args)
try:
self.keyOrder.remove(k)
except ValueError:
# Key wasn't in the dictionary in the first place. No problem.
pass
return result
def popitem(self):
result = super(SortedDict, self).popitem()
self.keyOrder.remove(result[0])
return result
def items(self):
return zip(self.keyOrder, self.values())
def iteritems(self):
for key in self.keyOrder:
yield key, self[key]
def keys(self):
return self.keyOrder[:]
def iterkeys(self):
return iter(self.keyOrder)
def values(self):
return map(self.__getitem__, self.keyOrder)
def itervalues(self):
for key in self.keyOrder:
yield self[key]
def update(self, dict_):
for k, v in dict_.iteritems():
self[k] = v
def setdefault(self, key, default):
if key not in self:
self.keyOrder.append(key)
return super(SortedDict, self).setdefault(key, default)
def value_for_index(self, index):
"""Returns the value of the item at the given zero-based index."""
return self[self.keyOrder[index]]
def insert(self, index, key, value):
"""Inserts the key, value pair before the item with the given index."""
if key in self.keyOrder:
n = self.keyOrder.index(key)
del self.keyOrder[n]
if n < index:
index -= 1
self.keyOrder.insert(index, key)
super(SortedDict, self).__setitem__(key, value)
def copy(self):
"""Returns a copy of this object."""
# This way of initializing the copy means it works for subclasses, too.
obj = self.__class__(self)
obj.keyOrder = self.keyOrder[:]
return obj
def __repr__(self):
"""
Replaces the normal dict.__repr__ with a version that returns the keys
in their sorted order.
"""
return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
def clear(self):
super(SortedDict, self).clear()
self.keyOrder = []
class MultiValueDictKeyError(KeyError):
pass
class MultiValueDict(dict):
"""
A subclass of dictionary customized to handle multiple values for the
same key.
>>> d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
>>> d['name']
'Simon'
>>> d.getlist('name')
['Adrian', 'Simon']
>>> d.get('lastname', 'nonexistent')
'nonexistent'
>>> d.setlist('lastname', ['Holovaty', 'Willison'])
This class exists to solve the irritating problem raised by cgi.parse_qs,
which returns a list for every key, even though most Web forms submit
single name-value pairs.
"""
def __init__(self, key_to_list_mapping=()):
super(MultiValueDict, self).__init__(key_to_list_mapping)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__,
super(MultiValueDict, self).__repr__())
def __getitem__(self, key):
"""
Returns the last data value for this key, or [] if it's an empty list;
raises KeyError if not found.
"""
try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError("Key %r not found in %r" % (key, self))
try:
return list_[-1]
except IndexError:
return []
def __setitem__(self, key, value):
super(MultiValueDict, self).__setitem__(key, [value])
def __copy__(self):
return self.__class__([
(k, v[:])
for k, v in self.lists()
])
def __deepcopy__(self, memo=None):
import django.utils.copycompat as copy
if memo is None:
memo = {}
result = self.__class__()
memo[id(self)] = result
for key, value in dict.items(self):
dict.__setitem__(result, copy.deepcopy(key, memo),
copy.deepcopy(value, memo))
return result
def __getstate__(self):
obj_dict = self.__dict__.copy()
obj_dict['_data'] = dict([(k, self.getlist(k)) for k in self])
return obj_dict
def __setstate__(self, obj_dict):
data = obj_dict.pop('_data', {})
for k, v in data.items():
self.setlist(k, v)
self.__dict__.update(obj_dict)
def get(self, key, default=None):
"""
Returns the last data value for the passed key. If key doesn't exist
or value is an empty list, then default is returned.
"""
try:
val = self[key]
except KeyError:
return default
if val == []:
return default
return val
def getlist(self, key):
"""
Returns the list of values for the passed key. If key doesn't exist,
then an empty list is returned.
"""
try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
return []
def setlist(self, key, list_):
super(MultiValueDict, self).__setitem__(key, list_)
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return default
return self[key]
def setlistdefault(self, key, default_list=None):
if key not in self:
if default_list is None:
default_list = []
self.setlist(key, default_list)
return default_list
return self.getlist(key)
def appendlist(self, key, value):
"""Appends an item to the internal list associated with key."""
self.setlistdefault(key).append(value)
def items(self):
"""
Returns a list of (key, value) pairs, where value is the last item in
the list associated with the key.
"""
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
"""
Yields (key, value) pairs, where value is the last item in the list
associated with the key.
"""
for key in self.keys():
yield (key, self[key])
def lists(self):
"""Returns a list of (key, list) pairs."""
return super(MultiValueDict, self).items()
def iterlists(self):
"""Yields (key, list) pairs."""
return super(MultiValueDict, self).iteritems()
def values(self):
"""Returns a list of the last value on every key list."""
return [self[key] for key in self.keys()]
def itervalues(self):
"""Yield the last value on every key list."""
for key in self.iterkeys():
yield self[key]
def copy(self):
"""Returns a shallow copy of this object."""
return copy(self)
def update(self, *args, **kwargs):
"""
update() extends rather than replaces existing key lists.
Also accepts keyword args.
"""
if len(args) > 1:
raise TypeError("update expected at most 1 arguments, got %d" % len(args))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for key, value_list in other_dict.lists():
self.setlistdefault(key).extend(value_list)
else:
try:
for key, value in other_dict.items():
self.setlistdefault(key).append(value)
except TypeError:
raise ValueError("MultiValueDict.update() takes either a MultiValueDict or dictionary")
for key, value in kwargs.iteritems():
self.setlistdefault(key).append(value)
class DotExpandedDict(dict):
"""
A special dictionary constructor that takes a dictionary in which the keys
may contain dots to specify inner dictionaries. It's confusing, but this
example should make sense.
>>> d = DotExpandedDict({'person.1.firstname': ['Simon'], \
'person.1.lastname': ['Willison'], \
'person.2.firstname': ['Adrian'], \
'person.2.lastname': ['Holovaty']})
>>> d
{'person': {'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}}
>>> d['person']
{'1': {'lastname': ['Willison'], 'firstname': ['Simon']}, '2': {'lastname': ['Holovaty'], 'firstname': ['Adrian']}}
>>> d['person']['1']
{'lastname': ['Willison'], 'firstname': ['Simon']}
# Gotcha: Results are unpredictable if the dots are "uneven":
>>> DotExpandedDict({'c.1': 2, 'c.2': 3, 'c': 1})
{'c': 1}
"""
def __init__(self, key_to_list_mapping):
for k, v in key_to_list_mapping.items():
current = self
bits = k.split('.')
for bit in bits[:-1]:
current = current.setdefault(bit, {})
# Now assign value to current position
try:
current[bits[-1]] = v
except TypeError: # Special-case if current isn't a dict.
current = {bits[-1]: v}
class ImmutableList(tuple):
"""
A tuple-like object that raises useful errors when it is asked to mutate.
Example::
>>> a = ImmutableList(range(5), warning="You cannot mutate this.")
>>> a[3] = '4'
Traceback (most recent call last):
...
AttributeError: You cannot mutate this.
"""
def __new__(cls, *args, **kwargs):
if 'warning' in kwargs:
warning = kwargs['warning']
del kwargs['warning']
else:
warning = 'ImmutableList object is immutable.'
self = tuple.__new__(cls, *args, **kwargs)
self.warning = warning
return self
def complain(self, *wargs, **kwargs):
if isinstance(self.warning, Exception):
raise self.warning
else:
raise AttributeError(self.warning)
# All list mutation functions complain.
__delitem__ = complain
__delslice__ = complain
__iadd__ = complain
__imul__ = complain
__setitem__ = complain
__setslice__ = complain
append = complain
extend = complain
insert = complain
pop = complain
remove = complain
sort = complain
reverse = complain
class DictWrapper(dict):
"""
Wraps accesses to a dictionary so that certain values (those starting with
the specified prefix) are passed through a function before being returned.
The prefix is removed before looking up the real value.
Used by the SQL construction code to ensure that values are correctly
quoted before being used.
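For illustration, with an arbitrary stand-in for the quoting function::
>>> d = DictWrapper({'name': 'value'}, lambda v: v.upper(), 'f_')
>>> d['name']
'value'
>>> d['f_name']
'VALUE'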
"""
def __init__(self, data, func, prefix):
super(DictWrapper, self).__init__(data)
self.func = func
self.prefix = prefix
def __getitem__(self, key):
"""
Retrieves the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.
"""
if key.startswith(self.prefix):
use_func = True
key = key[len(self.prefix):]
else:
use_func = False
value = super(DictWrapper, self).__getitem__(key)
if use_func:
return self.func(value)
return value
|
|
"""Psyrun's task loading API."""
from __future__ import print_function
import os
import os.path
import re
import sys
import traceback
import warnings
from psyrun.backend import DefaultBackend
from psyrun.pspace import Param
from psyrun.store import DefaultStore
from psyrun.scheduler import ImmediateRun
class TaskDef(object):
"""Task defined by a Python file.
Parameters
----------
path : str
Python file to load as task.
conf : `Config`
Default values for task parameters.
Attributes
----------
TASK_PATTERN : re.RegexObject
Regular expression to match task filenames.
"""
TASK_PATTERN = re.compile(r'^task_(.*)$')
def __init__(self, path, conf=None):
if conf is None:
taskdir = os.path.dirname(path)
conffile = os.path.join(taskdir, 'psy-conf.py')
if os.path.exists(conffile):
conf = Config.load_from_file(conffile)
else:
conf = Config()
_set_public_attrs_from_dict(
self, _load_pyfile(path), only_existing=False)
self.path = path
if not hasattr(self, 'name'):
prefixed_name, _ = os.path.splitext(os.path.basename(path))
m = self.TASK_PATTERN.match(prefixed_name)
if m:
self.name = m.group(1)
else:
self.name = prefixed_name
conf.apply_as_default(self)
def _load_pyfile(filename):
with open(filename, 'r') as f:
source = f.read()
code = compile(source, filename, 'exec')
loaded = {'__file__': filename}
exec(code, loaded) # pylint: disable=exec-used
return loaded
def _set_public_attrs_from_dict(obj, d, only_existing=True):
for k, v in d.items():
if not k.startswith('_') and (not only_existing or hasattr(obj, k)):
setattr(obj, k, v)
class Config(object): # pylint: disable=too-many-instance-attributes
"""Task configuration.
Attributes
----------
backend : `Backend`, default: `DistributeBackend`
The processing backend which determines how work is distributed across
jobs.
exclude_from_result : sequence of str, default: ``[]``
Keys of items to exclude from result. This can be useful if parameters
or parts of the result cannot be saved to disk.
file_dep : sequence of str, default: ``[]``
Additional files the task depends on.
max_jobs : int, default: 100
Maximum number of jobs to start. With fewer jobs, each job has to process
more parameter assignments. It depends on the scheduler and backend
used to what degree these will run in parallel.
min_items : int, default: 1
Minimum number of parameter assignments to evaluate per job. If a single
assignment is fast to evaluate, increasing this number can improve
performance because Psyrun will not start a new job for each parameter
assignment, which saves some overhead.
overwrite_dirty : bool, default: True
Whether to overwrite dirty workdirs without a warning.
pool_size : int, default: 1
Number of parallel threads or processes each job will run. This allows
for parallelization without a proper scheduler (e.g. when using
`psyrun.scheduler.ImmediateRun`).
pspace : `ParameterSpace`, required
Parameter space to evaluate.
python : str, default: ``sys.executable``
Path to Python interpreter to use.
resultfile : str or None, default: None
Path to save the results of the finished task at. If None, this
defaults to ``'result.<ext>'`` in the *workdir*.
scheduler : `Scheduler`, default: `ImmediateRun`
Scheduler to use to submit individual jobs.
scheduler_args : dict, default: ``{}``
Additional scheduler arguments. See the documentation of the
scheduler for details.
setup : function, default: None
Function to call after starting a worker process before any parameter
sets are processed. The function gets the ID of the worker process
(usually starting at 0 and incremented by one for each process) as sole
argument. It may return a dictionary of additional arguments to pass
to the processing function. The setup function can be used to
initialize process wide resources.
store : `Store`, default: `PickleStore`
Input/output backend.
workdir : str, default: ``'psy-work'``
Working directory to store results and supporting data to process the
task.
"""
__slots__ = [
'backend', 'exclude_from_result', 'file_dep', 'max_jobs',
'min_items', 'pool_size', 'pspace', 'overwrite_dirty', 'python',
'resultfile', 'scheduler', 'scheduler_args', 'setup', 'store',
'workdir']
def __init__(self):
self.backend = DefaultBackend
self.exclude_from_result = []
self.file_dep = []
self.max_jobs = 100
self.min_items = 1
self.overwrite_dirty = True
self.pool_size = 1
self.pspace = Param()
self.python = sys.executable
self.resultfile = None
self.scheduler = ImmediateRun()
self.scheduler_args = dict()
self.setup = None
self.store = DefaultStore()
self.workdir = os.path.abspath('psy-work')
@classmethod
def load_from_file(cls, filename):
"""Load the configuration values from a Python file.
Parameters
----------
filename : str
Python file to load.
"""
conf = cls()
loaded_conf = _load_pyfile(filename)
_set_public_attrs_from_dict(conf, loaded_conf)
return conf
def apply_as_default(self, task):
"""Copies the attributes to a different object given they are not set
in that object.
Parameters
----------
task : obj
Object to copy the attributes to.
"""
for attr in self.__slots__:
if not hasattr(task, attr):
setattr(task, attr, getattr(self, attr))
class PackageLoader(object):
"""Loads tasks from Python files.
Filenames have to match the regular expression defined in
`TaskDef.TASK_PATTERN`. See `Config` for supported module
level variables in the task definition.
It is possible to set these variables for all tasks by setting them in
the file ``psy-conf.py`` in the *taskdir*.
Parameters
----------
taskdir : str
Directory to load task files from.
Attributes
----------
taskdir : str
Directory to load task files from.
conf : `Config`
Default values for module level task variables.
"""
def __init__(self, taskdir):
super(PackageLoader, self).__init__()
self.taskdir = taskdir
conffile = os.path.join(self.taskdir, 'psy-conf.py')
if os.path.exists(conffile):
self.conf = Config.load_from_file(conffile)
else:
self.conf = Config()
def load_task_defs(self):
"""Load task definitions.
Returns
-------
list of `TaskDef`
Task definitions.
"""
task_defs = []
for filename in os.listdir(self.taskdir):
root, ext = os.path.splitext(filename)
if TaskDef.TASK_PATTERN.match(root) and ext == '.py':
path = os.path.join(self.taskdir, filename)
try:
task_defs.append(TaskDef(path, self.conf))
except Exception: # pylint: disable=broad-except
traceback.print_exc()
warnings.warn("Task {path!r} could not be loaded.".format(
path=path))
return task_defs
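# A minimal usage sketch (the 'tasks' directory and the attributes printed
# below are illustrative assumptions, not part of this module):
#
# loader = PackageLoader('tasks')
# for task_def in loader.load_task_defs():
# print(task_def.name, task_def.workdir)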
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function
import mxnet as mx
import mxnet.ndarray as nd
import numpy
import os
import pickle
from collections import OrderedDict
import logging
from utils import *
logger = logging.getLogger(__name__)
class Base(object):
"""Basic wrapper for the symbols
Parameters
----------
data_shapes : dict
The shapes of the tensor variables.
sym_gen : mx.sym.Symbol or callable
Symbol of the network, or a function that builds the symbol from the
bucket key.
params : None or dict, optional
Initial parameter values; if None, they are created and filled by
`initializer`.
aux_states : None or dict, optional
Auxiliary states of the network.
default_bucket_kwargs : None or dict, optional
Keyword arguments identifying the default bucket.
learn_init_keys : None or list of str, optional
Names of inputs whose initial values are learned as parameters.
initializer : mx.init.Initializer, optional
Initializer used for parameters that are not given explicitly.
ctx : mx.Context, optional
Device context to allocate parameters and executors on.
name : str, optional
Name of the network.
"""
def __init__(self, data_shapes, sym_gen, params=None, aux_states=None,
default_bucket_kwargs=None, learn_init_keys=None,
initializer=mx.init.Xavier(factor_type="in", rnd_type="gaussian", magnitude=2),
ctx=mx.gpu(), name='Net'):
self.sym_gen = sym_gen
bucket_kwargs = default_bucket_kwargs.copy() if \
default_bucket_kwargs is not None else dict()
self.curr_bucket_key = None
self.ctx = ctx
self.name = name
self.initializer = initializer
if params is None:
self.params = None
self.params_grad = None
else:
self.params = OrderedDict([(k, v.copyto(ctx)) for k, v in params.items()])
self.params_grad = OrderedDict([(n, nd.empty(v.shape, ctx=ctx))
for n, v in self.params.items()])
if aux_states is not None:
self.aux_states = OrderedDict([(k, v.copyto(ctx)) for k, v in aux_states.items()])
else:
self.aux_states = None
self._buckets = dict()
self.learn_init_keys = learn_init_keys if learn_init_keys is not None else []
self.learn_init_key_shapes = {k: data_shapes[k] for k in self.learn_init_keys}
self.switch_bucket(bucket_kwargs=bucket_kwargs, data_shapes=data_shapes)
self.acc_grad = None
@property
def exe(self):
"""Get the current executor
Returns
-------
exe : mxnet.executor.Executor
"""
return self._buckets[self.curr_bucket_key]['exe'][tuple(self.data_shapes.items())]
@property
def data_shapes(self):
return self._buckets[self.curr_bucket_key]['data_shapes']
@property
def sym(self):
return self._buckets[self.curr_bucket_key]['sym']
def switch_bucket(self, bucket_kwargs=None, data_shapes=None):
if bucket_kwargs is not None:
self.curr_bucket_key = get_bucket_key(bucket_kwargs=bucket_kwargs)
# 1. Check if bucket key exists
if self.curr_bucket_key in self._buckets:
if data_shapes is not None:
if tuple(data_shapes.items()) not in self._buckets[self.curr_bucket_key]['exe']:
#TODO Optimize the reshaping functionality!
self._buckets[self.curr_bucket_key]['exe'][tuple(data_shapes.items())] = \
self.exe.reshape(partial_shaping=True, allow_up_sizing=True, **data_shapes)
self._buckets[self.curr_bucket_key]['data_shapes'] = data_shapes
else:
self._buckets[self.curr_bucket_key]['data_shapes'] = data_shapes
return
# 2. If the bucket key does not exist, create new symbol + executor
assert data_shapes is not None, "Must set data_shapes for new bucket!"
if isinstance(self.sym_gen, mx.symbol.Symbol):
sym = self.sym_gen
else:
sym = self.sym_gen(**dict(self.curr_bucket_key))
arg_names = sym.list_arguments()
aux_names = sym.list_auxiliary_states()
param_names = [n for n in arg_names
if n in self.learn_init_keys or (n not in data_shapes.keys())]
for k, v in data_shapes.items():
assert isinstance(v, tuple), "data_shapes values must be tuples! Found k=%s, v=%s, " \
"data_shapes=%s" % (k, str(v), str(data_shapes))
arg_shapes, _, aux_shapes = sym.infer_shape(**data_shapes)
arg_name_shape = OrderedDict([(k, s) for k, s in zip(arg_names, arg_shapes)])
if self.params is None:
self.params = OrderedDict([(n, nd.empty(arg_name_shape[n], ctx=self.ctx))
for n in param_names])
self.params_grad = OrderedDict([(n, nd.empty(arg_name_shape[n], ctx=self.ctx))
for n in param_names])
if len(self.params) > 0:
assert self.initializer is not None, \
'We must set the initializer if we do not manually ' \
'initialize the free parameters of the network!'
for k, v in self.params.items():
self.initializer(k, v)
else:
assert set(arg_name_shape.items()) == \
set(list(data_shapes.items()) + list([(k, v.shape) for k, v in self.params.items()]))
if self.aux_states is None:
self.aux_states = OrderedDict([(k, nd.empty(s, ctx=self.ctx))
for k, s in zip(aux_names, aux_shapes)])
data_inputs = {k: mx.nd.empty(data_shapes[k], ctx=self.ctx)
for k in set(data_shapes.keys()) - set(self.learn_init_keys)}
if len(self._buckets) > 0:
shared_exe = list(list(self._buckets.values())[0]['exe'].values())[0]
else:
shared_exe = None
self._buckets[self.curr_bucket_key] = {
'exe': {tuple(data_shapes.items()):
sym.bind(ctx=self.ctx,
args=dict(self.params, **data_inputs),
args_grad=dict(self.params_grad.items()),
aux_states=self.aux_states,
shared_exec=shared_exe)
},
'data_shapes': data_shapes,
'sym': sym
}
def save_params(self, dir_path="", epoch=None):
param_saving_path = save_params(dir_path=dir_path, name=self.name, epoch=epoch,
params=self.params,
aux_states=self.aux_states)
misc_saving_path = save_misc(dir_path=dir_path, epoch=epoch, name=self.name,
content={'data_shapes': {k: map(int, v) for k, v in self.data_shapes.items()}})
logging.info('Saving %s, params: \"%s\", misc: \"%s\"',
self.name, param_saving_path, misc_saving_path)
def load_params(self, name="", dir_path="", epoch=None):
params, aux_states, param_loading_path = load_params(dir_path=dir_path, epoch=epoch, name=name)
logging.info('Loading params from \"%s\" to %s' % (param_loading_path, self.name))
for k, v in params.items():
if k in self.params:
logging.debug(' Loading %s %s' %(k, str(v.shape)))
self.params[k][:] = v
else:
logging.warning("Found unused param in the saved model file: %s" % k)
for k, v in aux_states.items():
self.aux_states[k][:] = v
@property
def internal_sym_names(self):
return self.sym.get_internals().list_outputs()
@property
def output_keys(self):
return self.sym.list_outputs()
def compute_internal(self, sym_name, bucket_kwargs=None, **arg_dict):
"""
View the internal symbols using the forward function.
:param sym_name:
:param bucket_kwargs:
:param input_dict:
:return:
"""
data_shapes = {k: v.shape for k, v in arg_dict.items()}
self.switch_bucket(bucket_kwargs=bucket_kwargs,
data_shapes=data_shapes)
internal_sym = self.sym.get_internals()[sym_name]
data_inputs = {k: mx.nd.empty(v, ctx=self.ctx)
for k, v in self.data_shapes.items()
if k in internal_sym.list_arguments()}
params = {k: v for k, v in self.params.items() if
k in internal_sym.list_arguments()}
aux_states = {k: v for k, v in self.aux_states.items()
if k in internal_sym.list_auxiliary_states()}
exe = internal_sym.bind(ctx=self.ctx,
args=dict(params, **data_inputs),
args_grad=None,
grad_req='null',
aux_states=aux_states,
shared_exec=self.exe)
for k, v in arg_dict.items():
exe.arg_dict[k][:] = v
exe.forward(is_train=False)
assert 1 == len(exe.outputs)
for output in exe.outputs:
output.wait_to_read()
return exe.outputs[0]
def forward(self, is_train=False, bucket_kwargs=None, **arg_dict):
data_shapes = {k: v.shape for k, v in arg_dict.items()}
for name in self.learn_init_keys:
data_shapes[name] = self.learn_init_key_shapes[name]
self.switch_bucket(bucket_kwargs=bucket_kwargs,
data_shapes=data_shapes)
for k, v in arg_dict.items():
assert self.exe.arg_dict[k].shape == v.shape,\
"Shape not match: key %s, need %s, received %s" \
%(k, str(self.exe.arg_dict[k].shape), str(v.shape))
self.exe.arg_dict[k][:] = v
self.exe.forward(is_train=is_train)
for output in self.exe.outputs:
output.wait_to_read()
return self.exe.outputs
def backward(self, out_grads=None, **arg_dict):
for k, v in arg_dict.items():
assert self.exe.arg_dict[k].shape == v.shape, \
"Shape not match: key %s, need %s, received %s" \
% (k, str(self.exe.arg_dict[k].shape), str(v.shape))
self.exe.arg_dict[k][:] = v
self.exe.backward(out_grads=out_grads)
def forward_backward(self, bucket_kwargs=None, out_grads=None, **arg_dict):
data_shapes = {k: v.shape for k, v in arg_dict.items()}
for name in self.learn_init_keys:
data_shapes[name] = self.learn_init_key_shapes[name]
self.switch_bucket(bucket_kwargs=bucket_kwargs,
data_shapes=data_shapes)
for k, v in arg_dict.items():
self.exe.arg_dict[k][:] = v
self.exe.forward(is_train=True)
self.exe.backward(out_grads=out_grads)
for output in self.exe.outputs:
output.wait_to_read()
return self.exe.outputs
def update(self, updater, params_grad=None):
if params_grad is None:
params_grad = self.params_grad
assert type(params_grad) is OrderedDict
for ind, k in enumerate(self.params.keys()):
updater(index=ind, grad=params_grad[k], weight=self.params[k])
def update_acc_grad(self):
if self.acc_grad is None:
self.acc_grad = OrderedDict([(n, nd.zeros(v.shape, ctx=self.ctx))
for n, v in self.params_grad.items()])
for k, v in self.acc_grad.items():
v[:] = v + self.params_grad[k]
def reset_acc_grad(self):
for v in self.acc_grad.values():
v[:] = 0
def copy(self, name=None, ctx=None):
if ctx is None:
ctx = self.ctx
if name is None:
name = self.name + '-copy-' + str(ctx)
return Base(data_shapes=self.data_shapes,
sym_gen=self.sym_gen,
default_bucket_kwargs=dict(self.curr_bucket_key),
params=self.params,
aux_states=self.aux_states, ctx=ctx, name=name)
def copy_params_to(self, dst):
for k, v in self.params.items():
dst.params[k][:] = v
# TODO `wait_to_read()` here seems unnecessary, remove it in the future!
dst.params[k].wait_to_read()
@property
def total_param_num(self):
return sum(v.size for v in self.params.values())
def print_stat(self):
logging.info("Name: %s" % self.name)
assert self.params is not None, "Fatal Error!"
logging.info("Params: ")
for k, v in self.params.items():
logging.info(" %s: %s" % (k, v.shape))
if self.aux_states is None or 0 == len(self.aux_states):
logging.info("Aux States: None")
else:
logging.info("Aux States: " + ' '.join(
["%s:%s" % (str(k), str(v.shape)) for k, v in self.aux_states.items()]))
logging.info("Total Parameter Num: " + str(self.total_param_num))
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
import os
import tarfile
import tempfile
import time
import warnings
from datetime import datetime
from functools import partial
from typing import Any, Callable, Dict, Generator, List, Optional, Set, cast
from botocore.exceptions import ClientError
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.hooks.logs import AwsLogsHook
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.utils import timezone
class LogState:
"""
Enum-style class holding all possible states of CloudWatch log streams.
https://sagemaker.readthedocs.io/en/stable/session.html#sagemaker.session.LogState
"""
STARTING = 1
WAIT_IN_PROGRESS = 2
TAILING = 3
JOB_COMPLETE = 4
COMPLETE = 5
# Position is a tuple that includes the last read timestamp and the number of items that were read
# at that time. This is used to figure out which event to start with on the next read.
Position = collections.namedtuple('Position', ['timestamp', 'skip'])
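# For example, Position(timestamp=1462650600000, skip=2) means: resume at
# that timestamp and skip the first 2 events there, since they were already
# read on the previous pass.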
def argmin(arr, f: Callable) -> Optional[int]:
"""Return the index, i, in arr that minimizes f(arr[i])"""
min_value = None
min_idx = None
for idx, item in enumerate(arr):
if item is not None:
if min_value is None or f(item) < min_value:
min_value = f(item)
min_idx = idx
return min_idx
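# For example, argmin([3, None, 1, 2], lambda x: x) returns 2, because
# arr[2] == 1 minimizes the identity function among the non-None items.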
def secondary_training_status_changed(current_job_description: dict, prev_job_description: dict) -> bool:
"""
Returns True if the training job's secondary status message has changed.
:param current_job_description: Current job description, returned from DescribeTrainingJob call.
:param prev_job_description: Previous job description, returned from DescribeTrainingJob call.
:return: Whether the secondary status message of a training job changed or not.
"""
current_secondary_status_transitions = current_job_description.get('SecondaryStatusTransitions')
if current_secondary_status_transitions is None or len(current_secondary_status_transitions) == 0:
return False
prev_job_secondary_status_transitions = (
prev_job_description.get('SecondaryStatusTransitions') if prev_job_description is not None else None
)
last_message = (
prev_job_secondary_status_transitions[-1]['StatusMessage']
if prev_job_secondary_status_transitions is not None
and len(prev_job_secondary_status_transitions) > 0
else ''
)
message = current_job_description['SecondaryStatusTransitions'][-1]['StatusMessage']
return message != last_message
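# Illustrative sketch (hypothetical job descriptions): only the latest
# StatusMessage of the two descriptions is compared.
def _secondary_status_changed_example():
    prev = {'SecondaryStatusTransitions': [{'StatusMessage': 'Downloading data'}]}
    curr = {'SecondaryStatusTransitions': [{'StatusMessage': 'Training in progress'}]}
    return secondary_training_status_changed(curr, prev)  # -> True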
def secondary_training_status_message(
job_description: Dict[str, List[Any]], prev_description: Optional[dict]
) -> str:
"""
Returns a string contains start time and the secondary training job status message.
:param job_description: Returned response from DescribeTrainingJob call
:param prev_description: Previous job description from DescribeTrainingJob call
:return: Job status string to be printed.
"""
current_transitions = job_description.get('SecondaryStatusTransitions')
if current_transitions is None or len(current_transitions) == 0:
return ''
prev_transitions_num = 0
if prev_description is not None:
if prev_description.get('SecondaryStatusTransitions') is not None:
prev_transitions_num = len(prev_description['SecondaryStatusTransitions'])
transitions_to_print = (
current_transitions[-1:]
if len(current_transitions) == prev_transitions_num
else current_transitions[prev_transitions_num - len(current_transitions) :]
)
status_strs = []
for transition in transitions_to_print:
message = transition['StatusMessage']
time_str = timezone.convert_to_utc(cast(datetime, job_description['LastModifiedTime'])).strftime(
'%Y-%m-%d %H:%M:%S'
)
status_strs.append(f"{time_str} {transition['Status']} - {message}")
return '\n'.join(status_strs)
class SageMakerHook(AwsBaseHook):
"""
Interact with Amazon SageMaker.
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
:class:`~airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
"""
non_terminal_states = {'InProgress', 'Stopping'}
endpoint_non_terminal_states = {'Creating', 'Updating', 'SystemUpdating', 'RollingBack', 'Deleting'}
failed_states = {'Failed'}
def __init__(self, *args, **kwargs):
super().__init__(client_type='sagemaker', *args, **kwargs)
self.s3_hook = S3Hook(aws_conn_id=self.aws_conn_id)
self.logs_hook = AwsLogsHook(aws_conn_id=self.aws_conn_id)
def tar_and_s3_upload(self, path: str, key: str, bucket: str) -> None:
"""
Tar the local file or directory and upload to s3
:param path: local file or directory
:param key: s3 key
:param bucket: s3 bucket
:return: None
"""
with tempfile.TemporaryFile() as temp_file:
if os.path.isdir(path):
files = [os.path.join(path, name) for name in os.listdir(path)]
else:
files = [path]
with tarfile.open(mode='w:gz', fileobj=temp_file) as tar_file:
for f in files:
tar_file.add(f, arcname=os.path.basename(f))
temp_file.seek(0)
self.s3_hook.load_file_obj(temp_file, key, bucket, replace=True)
def configure_s3_resources(self, config: dict) -> None:
"""
Extract the S3 operations from the configuration and execute them.
:param config: config of SageMaker operation
:return: None
"""
s3_operations = config.pop('S3Operations', None)
if s3_operations is not None:
create_bucket_ops = s3_operations.get('S3CreateBucket', [])
upload_ops = s3_operations.get('S3Upload', [])
for op in create_bucket_ops:
self.s3_hook.create_bucket(bucket_name=op['Bucket'])
for op in upload_ops:
if op['Tar']:
self.tar_and_s3_upload(op['Path'], op['Key'], op['Bucket'])
else:
self.s3_hook.load_file(op['Path'], op['Key'], op['Bucket'])
def check_s3_url(self, s3url: str) -> bool:
"""
Check if an S3 URL exists
:param s3url: S3 url
:rtype: bool
"""
bucket, key = S3Hook.parse_s3_url(s3url)
if not self.s3_hook.check_for_bucket(bucket_name=bucket):
raise AirflowException(f"The input S3 Bucket {bucket} does not exist ")
if (
key
and not self.s3_hook.check_for_key(key=key, bucket_name=bucket)
and not self.s3_hook.check_for_prefix(prefix=key, bucket_name=bucket, delimiter='/')
):
# check if s3 key exists in the case user provides a single file
# or if s3 prefix exists in the case user provides multiple files in
# a prefix
raise AirflowException(
f"The input S3 Key or Prefix {s3url} does not exist in the Bucket {bucket}"
)
return True
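# Usage sketch (hypothetical URL): raises AirflowException when the bucket
# or the key/prefix is missing, returns True otherwise.
#   hook.check_s3_url('s3://my-bucket/training/data.csv')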
def check_training_config(self, training_config: dict) -> None:
"""
Check if a training configuration is valid
:param training_config: training_config
:return: None
"""
if "InputDataConfig" in training_config:
for channel in training_config['InputDataConfig']:
if "S3DataSource" in channel['DataSource']:
self.check_s3_url(channel['DataSource']['S3DataSource']['S3Uri'])
def check_tuning_config(self, tuning_config: dict) -> None:
"""
Check if a tuning configuration is valid
:param tuning_config: tuning_config
:return: None
"""
for channel in tuning_config['TrainingJobDefinition']['InputDataConfig']:
if "S3DataSource" in channel['DataSource']:
self.check_s3_url(channel['DataSource']['S3DataSource']['S3Uri'])
def get_log_conn(self):
"""
This method is deprecated.
Please use :py:meth:`airflow.providers.amazon.aws.hooks.logs.AwsLogsHook.get_conn` instead.
"""
warnings.warn(
"Method `get_log_conn` has been deprecated. "
"Please use `airflow.providers.amazon.aws.hooks.logs.AwsLogsHook.get_conn` instead.",
category=DeprecationWarning,
stacklevel=2,
)
return self.logs_hook.get_conn()
def log_stream(self, log_group, stream_name, start_time=0, skip=0):
"""
This method is deprecated.
Please use
:py:meth:`airflow.providers.amazon.aws.hooks.logs.AwsLogsHook.get_log_events` instead.
"""
warnings.warn(
"Method `log_stream` has been deprecated. "
"Please use "
"`airflow.providers.amazon.aws.hooks.logs.AwsLogsHook.get_log_events` instead.",
category=DeprecationWarning,
stacklevel=2,
)
return self.logs_hook.get_log_events(log_group, stream_name, start_time, skip)
def multi_stream_iter(self, log_group: str, streams: list, positions=None) -> Generator:
"""
Iterate over the available events coming from a set of log streams in a single log group
interleaving the events from each stream so they're yielded in timestamp order.
:param log_group: The name of the log group.
:param streams: A list of the log stream names. The position of the stream in this list is
the stream number.
:param positions: A dict mapping each stream name to a Position(timestamp, skip)
    tuple describing the last record read from that stream.
:return: A generator yielding tuples of (stream number, cloudwatch log event).
"""
positions = positions or {s: Position(timestamp=0, skip=0) for s in streams}
event_iters = [
self.logs_hook.get_log_events(log_group, s, positions[s].timestamp, positions[s].skip)
for s in streams
]
events: List[Optional[Any]] = []
for event_stream in event_iters:
if not event_stream:
events.append(None)
continue
try:
events.append(next(event_stream))
except StopIteration:
events.append(None)
while any(events):
i = argmin(events, lambda x: x['timestamp'] if x else 9999999999) or 0
yield i, events[i]
try:
events[i] = next(event_iters[i])
except StopIteration:
events[i] = None
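# Interleaving sketch (illustrative): given stream A with timestamps [1, 3]
# and stream B with [2], this yields (A, t=1), (B, t=2), (A, t=3), i.e.
# events ordered by timestamp across all streams.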
def create_training_job(
self,
config: dict,
wait_for_completion: bool = True,
print_log: bool = True,
check_interval: int = 30,
max_ingestion_time: Optional[int] = None,
):
"""
Create a training job
:param config: the config for training
:param wait_for_completion: if the program should keep running until job finishes
:param print_log: if the CloudWatch logs should be printed while waiting
    for completion
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to training job creation
"""
self.check_training_config(config)
response = self.get_conn().create_training_job(**config)
if print_log:
self.check_training_status_with_log(
config['TrainingJobName'],
self.non_terminal_states,
self.failed_states,
wait_for_completion,
check_interval,
max_ingestion_time,
)
elif wait_for_completion:
describe_response = self.check_status(
config['TrainingJobName'],
'TrainingJobStatus',
self.describe_training_job,
check_interval,
max_ingestion_time,
)
billable_time = (
describe_response['TrainingEndTime'] - describe_response['TrainingStartTime']
) * describe_response['ResourceConfig']['InstanceCount']
self.log.info('Billable seconds: %d', int(billable_time.total_seconds()) + 1)
return response
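# Usage sketch (hypothetical config and connection id; the config must be a
# valid boto3 CreateTrainingJob request):
#   hook = SageMakerHook(aws_conn_id='aws_default')
#   hook.create_training_job(config, wait_for_completion=True,
#                            print_log=True, check_interval=30)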
def create_tuning_job(
self,
config: dict,
wait_for_completion: bool = True,
check_interval: int = 30,
max_ingestion_time: Optional[int] = None,
):
"""
Create a tuning job
:param config: the config for tuning
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to tuning job creation
"""
self.check_tuning_config(config)
response = self.get_conn().create_hyper_parameter_tuning_job(**config)
if wait_for_completion:
self.check_status(
config['HyperParameterTuningJobName'],
'HyperParameterTuningJobStatus',
self.describe_tuning_job,
check_interval,
max_ingestion_time,
)
return response
def create_transform_job(
self,
config: dict,
wait_for_completion: bool = True,
check_interval: int = 30,
max_ingestion_time: Optional[int] = None,
):
"""
Create a transform job
:param config: the config for transform job
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to transform job creation
"""
if "S3DataSource" in config['TransformInput']['DataSource']:
self.check_s3_url(config['TransformInput']['DataSource']['S3DataSource']['S3Uri'])
response = self.get_conn().create_transform_job(**config)
if wait_for_completion:
self.check_status(
config['TransformJobName'],
'TransformJobStatus',
self.describe_transform_job,
check_interval,
max_ingestion_time,
)
return response
def create_processing_job(
self,
config: dict,
wait_for_completion: bool = True,
check_interval: int = 30,
max_ingestion_time: Optional[int] = None,
):
"""
Create a processing job
:param config: the config for processing job
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to processing job creation
"""
response = self.get_conn().create_processing_job(**config)
if wait_for_completion:
self.check_status(
config['ProcessingJobName'],
'ProcessingJobStatus',
self.describe_processing_job,
check_interval,
max_ingestion_time,
)
return response
def create_model(self, config: dict):
"""
Create a model job
:param config: the config for model
:return: A response to model creation
"""
return self.get_conn().create_model(**config)
def create_endpoint_config(self, config: dict):
"""
Create an endpoint config
:param config: the config for endpoint-config
:return: A response to endpoint config creation
"""
return self.get_conn().create_endpoint_config(**config)
def create_endpoint(
self,
config: dict,
wait_for_completion: bool = True,
check_interval: int = 30,
max_ingestion_time: Optional[int] = None,
):
"""
Create an endpoint
:param config: the config for endpoint
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to endpoint creation
"""
response = self.get_conn().create_endpoint(**config)
if wait_for_completion:
self.check_status(
config['EndpointName'],
'EndpointStatus',
self.describe_endpoint,
check_interval,
max_ingestion_time,
non_terminal_states=self.endpoint_non_terminal_states,
)
return response
def update_endpoint(
self,
config: dict,
wait_for_completion: bool = True,
check_interval: int = 30,
max_ingestion_time: Optional[int] = None,
):
"""
Update an endpoint
:param config: the config for endpoint
:param wait_for_completion: if the program should keep running until job finishes
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: A response to endpoint update
"""
response = self.get_conn().update_endpoint(**config)
if wait_for_completion:
self.check_status(
config['EndpointName'],
'EndpointStatus',
self.describe_endpoint,
check_interval,
max_ingestion_time,
non_terminal_states=self.endpoint_non_terminal_states,
)
return response
def describe_training_job(self, name: str):
"""
Return the training job info associated with the name
:param name: the name of the training job
:return: A dict contains all the training job info
"""
return self.get_conn().describe_training_job(TrainingJobName=name)
def describe_training_job_with_log(
self,
job_name: str,
positions,
stream_names: list,
instance_count: int,
state: int,
last_description: dict,
last_describe_job_call: float,
):
"""Return the training job info associated with job_name and print CloudWatch logs"""
log_group = '/aws/sagemaker/TrainingJobs'
if len(stream_names) < instance_count:
# Log streams are created whenever a container starts writing to stdout/err, so this list
# may be dynamic until we have a stream for every instance.
logs_conn = self.logs_hook.get_conn()
try:
streams = logs_conn.describe_log_streams(
logGroupName=log_group,
logStreamNamePrefix=job_name + '/',
orderBy='LogStreamName',
limit=instance_count,
)
stream_names = [s['logStreamName'] for s in streams['logStreams']]
positions.update(
[(s, Position(timestamp=0, skip=0)) for s in stream_names if s not in positions]
)
except logs_conn.exceptions.ResourceNotFoundException:
# On the very first training job run on an account, there's no log group until
# the container starts logging, so ignore any errors thrown about that
pass
if len(stream_names) > 0:
for idx, event in self.multi_stream_iter(log_group, stream_names, positions):
self.log.info(event['message'])
ts, count = positions[stream_names[idx]]
if event['timestamp'] == ts:
positions[stream_names[idx]] = Position(timestamp=ts, skip=count + 1)
else:
positions[stream_names[idx]] = Position(timestamp=event['timestamp'], skip=1)
if state == LogState.COMPLETE:
return state, last_description, last_describe_job_call
if state == LogState.JOB_COMPLETE:
state = LogState.COMPLETE
elif time.monotonic() - last_describe_job_call >= 30:
description = self.describe_training_job(job_name)
last_describe_job_call = time.monotonic()
if secondary_training_status_changed(description, last_description):
self.log.info(secondary_training_status_message(description, last_description))
last_description = description
status = description['TrainingJobStatus']
if status not in self.non_terminal_states:
state = LogState.JOB_COMPLETE
return state, last_description, last_describe_job_call
def describe_tuning_job(self, name: str) -> dict:
"""
Return the tuning job info associated with the name
:param name: the name of the tuning job
:return: A dict contains all the tuning job info
"""
return self.get_conn().describe_hyper_parameter_tuning_job(HyperParameterTuningJobName=name)
def describe_model(self, name: str) -> dict:
"""
Return the SageMaker model info associated with the name
:param name: the name of the SageMaker model
:return: A dict contains all the model info
"""
return self.get_conn().describe_model(ModelName=name)
def describe_transform_job(self, name: str) -> dict:
"""
Return the transform job info associated with the name
:param name: the name of the transform job
:return: A dict contains all the transform job info
"""
return self.get_conn().describe_transform_job(TransformJobName=name)
def describe_processing_job(self, name: str) -> dict:
"""
Return the processing job info associated with the name
:param name: the name of the processing job
:return: A dict contains all the processing job info
"""
return self.get_conn().describe_processing_job(ProcessingJobName=name)
def describe_endpoint_config(self, name: str) -> dict:
"""
Return the endpoint config info associated with the name
:param name: the name of the endpoint config
:return: A dict contains all the endpoint config info
"""
return self.get_conn().describe_endpoint_config(EndpointConfigName=name)
def describe_endpoint(self, name: str) -> dict:
"""
:param name: the name of the endpoint
:return: A dict contains all the endpoint info
"""
return self.get_conn().describe_endpoint(EndpointName=name)
def check_status(
self,
job_name: str,
key: str,
describe_function: Callable,
check_interval: int,
max_ingestion_time: Optional[int] = None,
non_terminal_states: Optional[Set] = None,
):
"""
Check status of a SageMaker job
:param job_name: name of the job to check status
:param key: the key of the response dict that points to the state
:param describe_function: the function used to retrieve the status
:param check_interval: the time interval in seconds which the operator
will check the status of any SageMaker job
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:param non_terminal_states: the set of nonterminal states
:return: response of describe call after job is done
"""
if not non_terminal_states:
non_terminal_states = self.non_terminal_states
sec = 0
running = True
while running:
time.sleep(check_interval)
sec += check_interval
try:
response = describe_function(job_name)
status = response[key]
self.log.info('Job still running for %s seconds... current status is %s', sec, status)
except KeyError:
raise AirflowException('Could not get status of the SageMaker job')
except ClientError:
raise AirflowException('AWS request failed, check logs for more info')
if status in non_terminal_states:
running = True
elif status in self.failed_states:
raise AirflowException(f"SageMaker job failed because {response['FailureReason']}")
else:
running = False
if max_ingestion_time and sec > max_ingestion_time:
# ensure that the job gets killed if the max ingestion time is exceeded
raise AirflowException(f'SageMaker job took more than {max_ingestion_time} seconds')
self.log.info('SageMaker Job completed')
response = describe_function(job_name)
return response
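# Usage sketch (hypothetical job name): poll a transform job every 60 seconds
# and fail if it runs longer than an hour.
#   hook.check_status('my-transform-job', 'TransformJobStatus',
#                     hook.describe_transform_job, check_interval=60,
#                     max_ingestion_time=3600)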
def check_training_status_with_log(
self,
job_name: str,
non_terminal_states: set,
failed_states: set,
wait_for_completion: bool,
check_interval: int,
max_ingestion_time: Optional[int] = None,
):
"""
Display the logs for a given training job, optionally tailing them until the
job is complete.
:param job_name: name of the training job to check status and display logs for
:param non_terminal_states: the set of non_terminal states
:param failed_states: the set of failed states
:param wait_for_completion: Whether to keep looking for new log entries
until the job completes
:param check_interval: The interval in seconds between polling for new log entries and job completion
:param max_ingestion_time: the maximum ingestion time in seconds. Any
SageMaker jobs that run longer than this will fail. Setting this to
None implies no timeout for any SageMaker job.
:return: None
"""
sec = 0
description = self.describe_training_job(job_name)
self.log.info(secondary_training_status_message(description, None))
instance_count = description['ResourceConfig']['InstanceCount']
status = description['TrainingJobStatus']
stream_names: list = [] # The list of log streams
positions: dict = {} # The current position in each stream, map of stream name -> position
job_already_completed = status not in non_terminal_states
state = LogState.TAILING if wait_for_completion and not job_already_completed else LogState.COMPLETE
# The loop below implements a state machine that alternates between checking the job status and
# reading whatever is available in the logs at this point. Note that if we were called with
# wait_for_completion == False, we never check the job status.
#
# If wait_for_completion == True and the job is not completed, the initial state is TAILING
# If wait_for_completion == False, the initial state is COMPLETE
# (doesn't matter if the job really is complete).
#
# The state table:
#
# STATE ACTIONS CONDITION NEW STATE
# ---------------- ---------------- ----------------- ----------------
# TAILING Read logs, Pause, Get status Job complete JOB_COMPLETE
# Else TAILING
# JOB_COMPLETE Read logs, Pause Any COMPLETE
# COMPLETE Read logs, Exit N/A
#
# Notes:
# - The JOB_COMPLETE state forces us to do an extra pause and read any items that
# got to Cloudwatch after the job was marked complete.
last_describe_job_call = time.monotonic()
last_description = description
while True:
time.sleep(check_interval)
sec += check_interval
state, last_description, last_describe_job_call = self.describe_training_job_with_log(
job_name,
positions,
stream_names,
instance_count,
state,
last_description,
last_describe_job_call,
)
if state == LogState.COMPLETE:
break
if max_ingestion_time and sec > max_ingestion_time:
# ensure that the job gets killed if the max ingestion time is exceeded
raise AirflowException(f'SageMaker job took more than {max_ingestion_time} seconds')
if wait_for_completion:
status = last_description['TrainingJobStatus']
if status in failed_states:
reason = last_description.get('FailureReason', '(No reason provided)')
raise AirflowException(f'Error training {job_name}: {status} Reason: {reason}')
billable_time = (
last_description['TrainingEndTime'] - last_description['TrainingStartTime']
) * instance_count
self.log.info('Billable seconds: %d', int(billable_time.total_seconds()) + 1)
def list_training_jobs(
self, name_contains: Optional[str] = None, max_results: Optional[int] = None, **kwargs
) -> List[Dict]:
"""
This method wraps boto3's `list_training_jobs`. The training job name and max results are configurable
via arguments. Other arguments are not, and should be provided via kwargs. Note boto3 expects these in
CamelCase format, for example:
.. code-block:: python
list_training_jobs(name_contains="myjob", StatusEquals="Failed")
.. seealso::
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.list_training_jobs
:param name_contains: (optional) partial name to match
:param max_results: (optional) maximum number of results to return. None returns infinite results
:param kwargs: (optional) kwargs to boto3's list_training_jobs method
:return: results of the list_training_jobs request
"""
config = {}
if name_contains:
if "NameContains" in kwargs:
raise AirflowException("Either name_contains or NameContains can be provided, not both.")
config["NameContains"] = name_contains
if "MaxResults" in kwargs and kwargs["MaxResults"] is not None:
if max_results:
raise AirflowException("Either max_results or MaxResults can be provided, not both.")
# Unset MaxResults, we'll use the SageMakerHook's internal method for iteratively fetching results
max_results = kwargs["MaxResults"]
del kwargs["MaxResults"]
config.update(kwargs)
list_training_jobs_request = partial(self.get_conn().list_training_jobs, **config)
results = self._list_request(
list_training_jobs_request, "TrainingJobSummaries", max_results=max_results
)
return results
def list_processing_jobs(self, **kwargs) -> List[Dict]:
"""
This method wraps boto3's `list_processing_jobs`. All arguments should be provided via kwargs.
Note boto3 expects these in CamelCase format, for example:
.. code-block:: python
list_processing_jobs(NameContains="myjob", StatusEquals="Failed")
.. seealso::
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sagemaker.html#SageMaker.Client.list_processing_jobs
:param kwargs: (optional) kwargs to boto3's list_processing_jobs method
:return: results of the list_processing_jobs request
"""
list_processing_jobs_request = partial(self.get_conn().list_processing_jobs, **kwargs)
results = self._list_request(
list_processing_jobs_request, "ProcessingJobSummaries", max_results=kwargs.get("MaxResults")
)
return results
def _list_request(
self, partial_func: Callable, result_key: str, max_results: Optional[int] = None
) -> List[Dict]:
"""
All AWS boto3 list_* requests return results in batches (if the key "NextToken" is contained in the
result, there are more results to fetch). The default AWS batch size is 10, and configurable up to
100. This function iteratively loads all results (or up to a given maximum).
Each boto3 list_* function returns the results in a list with a different name. The key of this
structure must be given to iterate over the results, e.g. "TransformJobSummaries" for
list_transform_jobs().
:param partial_func: boto3 function with arguments
:param result_key: the result key to iterate over
:param max_results: maximum number of results to return (None = infinite)
:return: Results of the list_* request
"""
sagemaker_max_results = 100 # Fixed number set by AWS
results: List[Dict] = []
next_token = None
while True:
kwargs = {}
if next_token is not None:
kwargs["NextToken"] = next_token
if max_results is None:
kwargs["MaxResults"] = sagemaker_max_results
else:
kwargs["MaxResults"] = min(max_results - len(results), sagemaker_max_results)
response = partial_func(**kwargs)
self.log.debug("Fetched %s results.", len(response[result_key]))
results.extend(response[result_key])
if "NextToken" not in response or (max_results is not None and len(results) == max_results):
# Return when there are no results left (no NextToken) or when we've reached max_results.
return results
else:
next_token = response["NextToken"]
def find_processing_job_by_name(self, processing_job_name: str) -> bool:
"""Query processing job by name"""
try:
self.get_conn().describe_processing_job(ProcessingJobName=processing_job_name)
return True
except ClientError as e:
if e.response['Error']['Code'] in ['ValidationException', 'ResourceNotFound']:
return False
raise
def delete_model(self, model_name: str):
"""Delete SageMaker model
:param model_name: name of the model
"""
try:
self.get_conn().delete_model(ModelName=model_name)
except Exception as general_error:
self.log.error("Failed to delete model, error: %s", general_error)
raise
|
|
#!/usr/bin/env python
"""Preprocess a C source file using gcc and convert the result into
a token stream
Reference is C99:
* http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1124.pdf
"""
__docformat__ = "restructuredtext"
import os, re, shlex, sys, tokenize, traceback, subprocess
import ctypes
from . import lex, yacc
from .lex import TOKEN, LexError
from . import pplexer
# --------------------------------------------------------------------------
# Lexers
# --------------------------------------------------------------------------
class PreprocessorLexer(lex.Lexer):
def __init__(self):
lex.Lexer.__init__(self)
self.filename = "<input>"
self.in_define = False
def input(self, data, filename=None):
if filename:
self.filename = filename
self.lasttoken = None
self.input_stack = []
lex.Lexer.input(self, data)
def push_input(self, data, filename):
self.input_stack.append((self.lexdata, self.lexpos, self.filename, self.lineno))
self.lexdata = data
self.lexpos = 0
self.lineno = 1
self.filename = filename
self.lexlen = len(self.lexdata)
def pop_input(self):
self.lexdata, self.lexpos, self.filename, self.lineno = self.input_stack.pop()
self.lexlen = len(self.lexdata)
def token(self):
result = lex.Lexer.token(self)
while result is None and self.input_stack:
self.pop_input()
result = lex.Lexer.token(self)
if result:
self.lasttoken = result.type
result.filename = self.filename
else:
self.lasttoken = None
return result
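# Include-handling sketch (illustrative): push_input/pop_input form a stack,
# so tokens from an included file are produced before the includer resumes.
#   lexer.input('int a;', filename='main.h')
#   lexer.push_input('int b;', 'inner.h')  # token() now reads inner.h
#   # once inner.h is drained, token() pops the stack and resumes main.h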
class TokenListLexer(object):
def __init__(self, tokens):
self.tokens = tokens
self.pos = 0
def token(self):
if self.pos < len(self.tokens):
t = self.tokens[self.pos]
self.pos += 1
return t
else:
return None
def symbol_to_token(sym):
if isinstance(sym, yacc.YaccSymbol):
return sym.value
elif isinstance(sym, lex.LexToken):
return sym
else:
assert False, "Not a symbol: %r" % sym
def create_token(type, value, production=None):
"""Create a token of type and value, at the position where 'production'
was reduced. Don't specify production if the token is built-in"""
t = lex.LexToken()
t.type = type
t.value = value
t.lexpos = -1
if production:
t.lineno = production.slice[1].lineno
t.filename = production.slice[1].filename
else:
t.lineno = -1
t.filename = "<builtin>"
return t
# --------------------------------------------------------------------------
# Grammars
# --------------------------------------------------------------------------
class PreprocessorParser(object):
def __init__(self, options, cparser):
self.defines = [
"inline=",
"__inline__=",
"__extension__=",
"__const=const",
"__asm__(x)=",
"__asm(x)=",
"CTYPESGEN=1",
]
# On OSX, explicitly add these defines to keep from getting syntax
# errors in the OSX standard headers.
if sys.platform == "darwin":
self.defines += ["__uint16_t=uint16_t", "__uint32_t=uint32_t", "__uint64_t=uint64_t"]
self.matches = []
self.output = []
optimize = options.optimize_lexer if hasattr(options, "optimize_lexer") else False
self.lexer = lex.lex(
cls=PreprocessorLexer,
optimize=optimize,
lextab="lextab",
outputdir=os.path.dirname(__file__),
module=pplexer,
)
self.options = options
self.cparser = cparser # An instance of CParser
def parse(self, filename):
"""Parse a file and save its output"""
cmd = self.options.cpp
cmd += " -U __GNUC__ -dD"
for undefine in self.options.cpp_undefines:
cmd += " -U%s" % undefine
# This fixes Issue #6 where OS X 10.6+ adds a C extension that breaks
# the parser. Blocks shouldn't be needed for ctypesgen support anyway.
if sys.platform == "darwin":
cmd += " -U __BLOCKS__"
for path in self.options.include_search_paths:
cmd += ' -I"%s"' % path
for define in self.defines + self.options.cpp_defines:
cmd += ' "-D%s"' % define
cmd += ' "' + filename + '"'
self.cparser.handle_status(cmd)
pp = subprocess.Popen(
cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
ppout, pperr = pp.communicate()
for line in pperr.split("\n"):
if line:
self.cparser.handle_pp_error(line)
# We separate lines into two groups: directives and C source. Note that
# #pragma directives actually belong to the source category for this.
# This is necessary because some source files intermix preprocessor
# directives with source--this is not tolerated by ctypesgen's single
# grammar.
# We put all the source lines first, then all the #define lines.
source_lines = []
define_lines = []
first_token_reg = re.compile(r"^#\s*([^ ]+)($|\s)")
for line in ppout.split("\n"):
line += "\n"
search = first_token_reg.match(line)
hash_token = search.group(1) if search else None
if (not hash_token) or hash_token == "pragma":
source_lines.append(line)
define_lines.append("\n")
elif hash_token.isdigit():
# Line number information has to go with both groups
source_lines.append(line)
define_lines.append(line)
else: # hash_token in ("define", "undef"):
source_lines.append("\n")
define_lines.append(line)
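# Classification example (illustrative preprocessor output):
#   '# 1 "foo.h"'   -> line marker, kept in both groups
#   '#define X 1'   -> define_lines only
#   '#pragma pack'  -> source_lines only
#   'int x;'        -> source_lines only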
text = "".join(source_lines + define_lines)
if self.options.save_preprocessed_headers:
self.cparser.handle_status(
"Saving preprocessed headers to %s." % self.options.save_preprocessed_headers
)
try:
with open(self.options.save_preprocessed_headers, "w") as f:
f.write(text)
except IOError:
self.cparser.handle_error("Couldn't save headers.")
self.lexer.input(text)
self.output = []
try:
while True:
token = self.lexer.token()
if token is not None:
self.output.append(token)
else:
break
except LexError as e:
self.cparser.handle_error("{}; {}".format(e, e.text.partition("\n")[0]), filename, 0)
|
|
# Copyright (c) 2021, Frappe Technologies and contributors
# License: MIT. See LICENSE
import frappe, json
from frappe.model.document import Document
from frappe.permissions import (get_valid_perms, update_permission_property)
from frappe import _
from frappe.utils import cstr
from frappe.core.utils import find
from frappe.desk.form.linked_with import get_linked_doctypes
class UserPermission(Document):
def validate(self):
self.validate_user_permission()
self.validate_default_permission()
def on_update(self):
frappe.cache().hdel('user_permissions', self.user)
frappe.publish_realtime('update_user_permissions')
def on_trash(self): # pylint: disable=no-self-use
frappe.cache().hdel('user_permissions', self.user)
frappe.publish_realtime('update_user_permissions')
def validate_user_permission(self):
''' checks for duplicate user permission records'''
duplicate_exists = frappe.db.get_all(self.doctype, filters={
'allow': self.allow,
'for_value': self.for_value,
'user': self.user,
'applicable_for': cstr(self.applicable_for),
'apply_to_all_doctypes': self.apply_to_all_doctypes,
'name': ['!=', self.name]
}, limit=1)
if duplicate_exists:
frappe.throw(_("User permission already exists"), frappe.DuplicateEntryError)
def validate_default_permission(self):
''' validate user permission overlap for default value of a particular doctype '''
overlap_exists = []
if self.is_default:
overlap_exists = frappe.get_all(self.doctype, filters={
'allow': self.allow,
'user': self.user,
'is_default': 1,
'name': ['!=', self.name]
}, or_filters={
'applicable_for': cstr(self.applicable_for),
'apply_to_all_doctypes': 1,
}, limit=1)
if overlap_exists:
ref_link = frappe.get_desk_link(self.doctype, overlap_exists[0].name)
frappe.throw(_("{0} has already assigned default value for {1}.").format(ref_link, self.allow))
@frappe.whitelist()
def get_user_permissions(user=None):
'''Get all user permissions for the user as a dict keyed by doctype'''
# if this is called from client-side,
# user can access only his/her user permissions
if frappe.request and frappe.local.form_dict.cmd == 'get_user_permissions':
user = frappe.session.user
if not user:
user = frappe.session.user
if not user or user in ("Administrator", "Guest"):
return {}
cached_user_permissions = frappe.cache().hget("user_permissions", user)
if cached_user_permissions is not None:
return cached_user_permissions
out = {}
def add_doc_to_perm(perm, doc_name, is_default):
# group rules for each type
# for example if allow is "Customer", then build all allowed customers
# in a list
if not out.get(perm.allow):
out[perm.allow] = []
out[perm.allow].append(frappe._dict({
'doc': doc_name,
'applicable_for': perm.get('applicable_for'),
'is_default': is_default
}))
try:
for perm in frappe.get_all('User Permission',
fields=['allow', 'for_value', 'applicable_for', 'is_default', 'hide_descendants'],
filters=dict(user=user)):
meta = frappe.get_meta(perm.allow)
add_doc_to_perm(perm, perm.for_value, perm.is_default)
if meta.is_nested_set() and not perm.hide_descendants:
descendants = frappe.db.get_descendants(perm.allow, perm.for_value)
for doc in descendants:
add_doc_to_perm(perm, doc, False)
out = frappe._dict(out)
frappe.cache().hset("user_permissions", user, out)
except frappe.db.SQLError as e:
if frappe.db.is_table_missing(e):
# called from patch
pass
return out
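# Illustrative shape of the returned mapping (hypothetical values):
#   {'Customer': [
#       {'doc': 'CUST-0001', 'applicable_for': None, 'is_default': 1},
#       {'doc': 'CUST-0002', 'applicable_for': 'Sales Invoice', 'is_default': 0}]}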
def user_permission_exists(user, allow, for_value, applicable_for=None):
'''Checks if similar user permission already exists'''
user_permissions = get_user_permissions(user).get(allow, [])
if not user_permissions: return None
has_same_user_permission = find(user_permissions, lambda perm:perm["doc"] == for_value and perm.get('applicable_for') == applicable_for)
return has_same_user_permission
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def get_applicable_for_doctype_list(doctype, txt, searchfield, start, page_len, filters):
linked_doctypes_map = get_linked_doctypes(doctype, True)
linked_doctypes = []
for linked_doctype, linked_doctype_values in linked_doctypes_map.items():
linked_doctypes.append(linked_doctype)
child_doctype = linked_doctype_values.get("child_doctype")
if child_doctype:
linked_doctypes.append(child_doctype)
linked_doctypes += [doctype]
if txt:
linked_doctypes = [d for d in linked_doctypes if txt.lower() in d.lower()]
linked_doctypes.sort()
return_list = []
for doctype in linked_doctypes[start:start + page_len]:
return_list.append([doctype])
return return_list
def get_permitted_documents(doctype):
''' Returns permitted documents from the given doctype for the session user '''
# sort permissions so that default permissions come first in the list
user_perm_list = sorted(get_user_permissions().get(doctype, []), key=lambda x: x.get('is_default'), reverse=True)
return [d.get('doc') for d in user_perm_list \
if d.get('doc')]
@frappe.whitelist()
def check_applicable_doc_perm(user, doctype, docname):
frappe.only_for('System Manager')
applicable = []
doc_exists = frappe.get_all('User Permission',
fields=['name'],
filters={"user": user,
"allow": doctype,
"for_value": docname,
"apply_to_all_doctypes":1,
}, limit=1)
if doc_exists:
applicable = get_linked_doctypes(doctype).keys()
else:
data = frappe.get_all('User Permission',
fields=['applicable_for'],
filters={"user": user,
"allow": doctype,
"for_value":docname,
})
for permission in data:
applicable.append(permission.applicable_for)
return applicable
@frappe.whitelist()
def clear_user_permissions(user, for_doctype):
frappe.only_for("System Manager")
total = frappe.db.count("User Permission", {"user": user, "allow": for_doctype})
if total:
frappe.db.delete("User Permission", {
"allow": for_doctype,
"user": user,
})
frappe.clear_cache()
return total
@frappe.whitelist()
def add_user_permissions(data):
''' Add and update the user permissions '''
frappe.only_for('System Manager')
if isinstance(data, str):
data = json.loads(data)
data = frappe._dict(data)
# get all doctypes on whom this permission is applied
perm_applied_docs = check_applicable_doc_perm(data.user, data.doctype, data.docname)
exists = frappe.db.exists("User Permission", {
"user": data.user,
"allow": data.doctype,
"for_value": data.docname,
"apply_to_all_doctypes": 1
})
if data.apply_to_all_doctypes == 1 and not exists:
remove_applicable(perm_applied_docs, data.user, data.doctype, data.docname)
insert_user_perm(data.user, data.doctype, data.docname, data.is_default, data.hide_descendants, apply_to_all=1)
return 1
elif len(data.applicable_doctypes) > 0 and data.apply_to_all_doctypes != 1:
remove_apply_to_all(data.user, data.doctype, data.docname)
update_applicable(perm_applied_docs, data.applicable_doctypes, data.user, data.doctype, data.docname)
for applicable in data.applicable_doctypes:
if applicable not in perm_applied_docs:
insert_user_perm(data.user, data.doctype, data.docname, data.is_default, data.hide_descendants, applicable=applicable)
elif exists:
insert_user_perm(data.user, data.doctype, data.docname, data.is_default, data.hide_descendants, applicable=applicable)
return 1
return 0
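# Illustrative payload for add_user_permissions (hypothetical values):
#   {"user": "test@example.com", "doctype": "Company", "docname": "My Company",
#    "apply_to_all_doctypes": 0, "applicable_doctypes": ["Sales Invoice"],
#    "is_default": 0, "hide_descendants": 0}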
def insert_user_perm(user, doctype, docname, is_default=0, hide_descendants=0, apply_to_all=None, applicable=None):
user_perm = frappe.new_doc("User Permission")
user_perm.user = user
user_perm.allow = doctype
user_perm.for_value = docname
user_perm.is_default = is_default
user_perm.hide_descendants = hide_descendants
if applicable:
user_perm.applicable_for = applicable
user_perm.apply_to_all_doctypes = 0
else:
user_perm.apply_to_all_doctypes = 1
user_perm.insert()
def remove_applicable(perm_applied_docs, user, doctype, docname):
for applicable_for in perm_applied_docs:
frappe.db.delete("User Permission", {
"applicable_for": applicable_for,
"for_value": docname,
"allow": doctype,
"user": user,
})
def remove_apply_to_all(user, doctype, docname):
frappe.db.delete("User Permission", {
"apply_to_all_doctypes": 1,
"for_value": docname,
"allow": doctype,
"user": user,
})
def update_applicable(already_applied, to_apply, user, doctype, docname):
for applied in already_applied:
if applied not in to_apply:
frappe.db.delete("User Permission", {
"applicable_for": applied,
"for_value": docname,
"allow": doctype,
"user": user,
})
|
|
# Copyright (c) 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import hashlib
import json
import os
import stat
import tempfile
import time
from oslo.config import cfg
from cinder.brick.remotefs import remotefs
from cinder import compute
from cinder import db
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers import nfs
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('glusterfs_shares_config',
default='/etc/cinder/glusterfs_shares',
help='File with the list of available gluster shares'),
cfg.BoolOpt('glusterfs_sparsed_volumes',
default=True,
help=('Create volumes as sparsed files which take no space. '
'If set to False, volume is created as a regular file. '
'In such case volume creation takes a lot of time.')),
cfg.BoolOpt('glusterfs_qcow2_volumes',
default=False,
help=('Create volumes as QCOW2 files rather than raw files.')),
cfg.StrOpt('glusterfs_mount_point_base',
default='$state_path/mnt',
help='Base dir containing mount points for gluster shares.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
CONF.import_opt('volume_name_template', 'cinder.db')
class GlusterfsDriver(nfs.RemoteFsDriver):
"""Gluster based cinder driver. Creates file on Gluster share for using it
as block device on hypervisor.
Operations such as create/delete/extend volume/snapshot use locking on a
per-process basis to prevent multiple threads from modifying qcow2 chains
or the snapshot .info file simultaneously.
"""
driver_volume_type = 'glusterfs'
driver_prefix = 'glusterfs'
volume_backend_name = 'GlusterFS'
VERSION = '1.1.1'
def __init__(self, execute=processutils.execute, *args, **kwargs):
self._remotefsclient = None
super(GlusterfsDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(volume_opts)
self._nova = None
self.base = getattr(self.configuration,
'glusterfs_mount_point_base',
CONF.glusterfs_mount_point_base)
self._remotefsclient = remotefs.RemoteFsClient(
'glusterfs',
execute,
glusterfs_mount_point_base=self.base)
def set_execute(self, execute):
super(GlusterfsDriver, self).set_execute(execute)
if self._remotefsclient:
self._remotefsclient.set_execute(execute)
def do_setup(self, context):
"""Any initialization the volume driver does while starting."""
super(GlusterfsDriver, self).do_setup(context)
self._nova = compute.API()
config = self.configuration.glusterfs_shares_config
if not config:
msg = (_("There's no Gluster config file configured (%s)") %
'glusterfs_shares_config')
LOG.warn(msg)
raise exception.GlusterfsException(msg)
if not os.path.exists(config):
msg = (_("Gluster config file at %(config)s doesn't exist") %
{'config': config})
LOG.warn(msg)
raise exception.GlusterfsException(msg)
self.shares = {}
try:
self._execute('mount.glusterfs', check_exit_code=False)
except OSError as exc:
if exc.errno == errno.ENOENT:
raise exception.GlusterfsException(
_('mount.glusterfs is not installed'))
else:
raise
self._refresh_mounts()
def _unmount_shares(self):
self._load_shares_config(self.configuration.glusterfs_shares_config)
for share in self.shares.keys():
try:
self._do_umount(True, share)
except Exception as exc:
LOG.warning(_('Exception during unmounting %s') % (exc))
def _do_umount(self, ignore_not_mounted, share):
mount_path = self._get_mount_point_for_share(share)
command = ['umount', mount_path]
try:
self._execute(*command, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ignore_not_mounted and 'not mounted' in exc.stderr:
LOG.info(_("%s is already umounted"), share)
else:
LOG.error(_("Failed to umount %(share)s, reason=%(stderr)s"),
{'share': share, 'stderr': exc.stderr})
raise
def _refresh_mounts(self):
try:
self._unmount_shares()
except processutils.ProcessExecutionError as exc:
if 'target is busy' in exc.stderr:
LOG.warn(_("Failed to refresh mounts, reason=%s") %
exc.stderr)
else:
raise
self._ensure_shares_mounted()
def check_for_setup_error(self):
"""Just to override parent behavior."""
pass
def _local_volume_dir(self, volume):
hashed = self._get_hash_str(volume['provider_location'])
path = '%s/%s' % (self.configuration.glusterfs_mount_point_base,
hashed)
return path
def _local_path_volume(self, volume):
path_to_disk = '%s/%s' % (
self._local_volume_dir(volume),
volume['name'])
return path_to_disk
def _local_path_volume_info(self, volume):
return '%s%s' % (self._local_path_volume(volume), '.info')
def _qemu_img_info(self, path):
"""Sanitize image_utils' qemu_img_info.
This code expects to deal only with relative filenames.
"""
info = image_utils.qemu_img_info(path)
if info.image:
info.image = os.path.basename(info.image)
if info.backing_file:
info.backing_file = os.path.basename(info.backing_file)
return info
def get_active_image_from_info(self, volume):
"""Returns filename of the active image from the info file."""
info_file = self._local_path_volume_info(volume)
snap_info = self._read_info_file(info_file, empty_if_missing=True)
if snap_info == {}:
# No info file = no snapshots exist
vol_path = os.path.basename(self._local_path_volume(volume))
return vol_path
return snap_info['active']
@utils.synchronized('glusterfs', external=False)
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
LOG.info(_('Cloning volume %(src)s to volume %(dst)s') %
{'src': src_vref['id'],
'dst': volume['id']})
if src_vref['status'] != 'available':
msg = _("Volume status must be 'available'.")
raise exception.InvalidVolume(msg)
volume_name = CONF.volume_name_template % volume['id']
volume_info = {'provider_location': src_vref['provider_location'],
'size': src_vref['size'],
'id': volume['id'],
'name': volume_name,
'status': src_vref['status']}
temp_snapshot = {'volume_name': volume_name,
'size': src_vref['size'],
'volume_size': src_vref['size'],
'name': 'clone-snap-%s' % src_vref['id'],
'volume_id': src_vref['id'],
'id': 'tmp-snap-%s' % src_vref['id'],
'volume': src_vref}
self._create_snapshot(temp_snapshot)
try:
self._copy_volume_from_snapshot(temp_snapshot,
volume_info,
src_vref['size'])
finally:
self._delete_snapshot(temp_snapshot)
return {'provider_location': src_vref['provider_location']}
@utils.synchronized('glusterfs', external=False)
def create_volume(self, volume):
"""Creates a volume."""
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
LOG.info(_('casted to %s') % volume['provider_location'])
self._do_create_volume(volume)
return {'provider_location': volume['provider_location']}
@utils.synchronized('glusterfs', external=False)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
Snapshot must not be the active snapshot. (offline)
"""
if snapshot['status'] != 'available':
msg = _('Snapshot status must be "available" to clone.')
raise exception.InvalidSnapshot(msg)
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
self._do_create_volume(volume)
self._copy_volume_from_snapshot(snapshot,
volume,
snapshot['volume_size'])
return {'provider_location': volume['provider_location']}
def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
"""Copy data from snapshot to destination volume.
This is done with a qemu-img convert to raw/qcow2 from the snapshot
qcow2.
"""
LOG.debug("snapshot: %(snap)s, volume: %(vol)s, "
"volume_size: %(size)s"
% {'snap': snapshot['id'],
'vol': volume['id'],
'size': volume_size})
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path)
vol_path = self._local_volume_dir(snapshot['volume'])
forward_file = snap_info[snapshot['id']]
forward_path = os.path.join(vol_path, forward_file)
# Find the file which backs this file, which represents the point
# when this snapshot was created.
img_info = self._qemu_img_info(forward_path)
path_to_snap_img = os.path.join(vol_path, img_info.backing_file)
path_to_new_vol = self._local_path_volume(volume)
LOG.debug("will copy from snapshot at %s" % path_to_snap_img)
if self.configuration.glusterfs_qcow2_volumes:
out_format = 'qcow2'
else:
out_format = 'raw'
image_utils.convert_image(path_to_snap_img,
path_to_new_vol,
out_format)
self._set_rw_permissions_for_all(path_to_new_vol)
@utils.synchronized('glusterfs', external=False)
def delete_volume(self, volume):
"""Deletes a logical volume."""
if not volume['provider_location']:
LOG.warn(_('Volume %s does not have provider_location specified, '
'skipping'), volume['name'])
return
self._ensure_share_mounted(volume['provider_location'])
volume_dir = self._local_volume_dir(volume)
mounted_path = os.path.join(volume_dir,
self.get_active_image_from_info(volume))
self._execute('rm', '-f', mounted_path, run_as_root=True)
# If an exception (e.g. timeout) occurred during delete_snapshot, the
# base volume may linger around, so just delete it if it exists
base_volume_path = self._local_path_volume(volume)
fileutils.delete_if_exists(base_volume_path)
info_path = self._local_path_volume_info(volume)
fileutils.delete_if_exists(info_path)
@utils.synchronized('glusterfs', external=False)
def create_snapshot(self, snapshot):
"""Apply locking to the create snapshot operation."""
return self._create_snapshot(snapshot)
def _create_snapshot(self, snapshot):
"""Create a snapshot.
If volume is attached, call to Nova to create snapshot,
providing a qcow2 file.
Otherwise, create locally with qemu-img.
A file named volume-<uuid>.info is stored with the volume
data and is a JSON table which contains a mapping between
Cinder snapshot UUIDs and filenames, as these associations
will change as snapshots are deleted.
Basic snapshot operation:
1. Initial volume file:
volume-1234
2. Snapshot created:
volume-1234 <- volume-1234.aaaa
volume-1234.aaaa becomes the new "active" disk image.
If the volume is not attached, this filename will be used to
attach the volume to a VM at volume-attach time.
If the volume is attached, the VM will switch to this file as
part of the snapshot process.
Note that volume-1234.aaaa represents changes after snapshot
'aaaa' was created. So the data for snapshot 'aaaa' is actually
in the backing file(s) of volume-1234.aaaa.
This file has a qcow2 header recording the fact that volume-1234 is
its backing file. Delta changes since the snapshot was created are
stored in this file, and the backing file (volume-1234) does not
change.
info file: { 'active': 'volume-1234.aaaa',
'aaaa': 'volume-1234.aaaa' }
3. Second snapshot created:
volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb
volume-1234.bbbb now becomes the "active" disk image, recording
changes made to the volume.
info file: { 'active': 'volume-1234.bbbb',
'aaaa': 'volume-1234.aaaa',
'bbbb': 'volume-1234.bbbb' }
4. First snapshot deleted:
volume-1234 <- volume-1234.aaaa(* now with bbbb's data)
volume-1234.aaaa is removed (logically) from the snapshot chain.
The data from volume-1234.bbbb is merged into it.
(*) Since bbbb's data was committed into the aaaa file, we have
"removed" aaaa's snapshot point but the .aaaa file now
represents snapshot with id "bbbb".
info file: { 'active': 'volume-1234.bbbb',
'bbbb': 'volume-1234.aaaa' (* changed!)
}
5. Second snapshot deleted:
volume-1234
volume-1234.bbbb is removed from the snapshot chain, as above.
The base image, volume-1234, becomes the active image for this
volume again. If in-use, the VM begins using the volume-1234.bbbb
file immediately as part of the snapshot delete process.
info file: { 'active': 'volume-1234' }
For the above operations, Cinder handles manipulation of qcow2 files
when the volume is detached. When attached, Cinder creates and deletes
qcow2 files, but Nova is responsible for transitioning the VM between
them and handling live transfers of data between files as required.
"""
status = snapshot['volume']['status']
if status not in ['available', 'in-use']:
msg = _('Volume status must be "available" or "in-use"'
' for snapshot. (is %s)') % status
raise exception.InvalidVolume(msg)
if status == 'in-use':
# Perform online snapshot via Nova
context = snapshot['context']
backing_filename = self.get_active_image_from_info(
snapshot['volume'])
path_to_disk = self._local_path_volume(snapshot['volume'])
new_snap_path = '%s.%s' % (
self._local_path_volume(snapshot['volume']),
snapshot['id'])
self._create_qcow2_snap_file(snapshot,
backing_filename,
new_snap_path)
connection_info = {
'type': 'qcow2',
'new_file': os.path.basename(new_snap_path),
'snapshot_id': snapshot['id']
}
try:
result = self._nova.create_volume_snapshot(
context,
snapshot['volume_id'],
connection_info)
LOG.debug('nova call result: %s' % result)
except Exception as e:
LOG.error(_('Call to Nova to create snapshot failed'))
LOG.exception(e)
raise e
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 600
while True:
s = db.snapshot_get(context, snapshot['id'])
if s['status'] == 'creating':
if s['progress'] == '90%':
# Nova tasks completed successfully
break
time.sleep(increment)
seconds_elapsed += increment
elif s['status'] == 'error':
msg = _('Nova returned "error" status '
'while creating snapshot.')
raise exception.GlusterfsException(msg)
LOG.debug('Status of snapshot %(id)s is now %(status)s' % {
'id': snapshot['id'],
'status': s['status']
})
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for creation of snapshot %s.') % snapshot['id']
raise exception.GlusterfsException(msg)
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path, empty_if_missing=True)
snap_info['active'] = os.path.basename(new_snap_path)
snap_info[snapshot['id']] = os.path.basename(new_snap_path)
self._write_info_file(info_path, snap_info)
return
LOG.debug('create snapshot: %s' % snapshot)
LOG.debug('volume id: %s' % snapshot['volume_id'])
path_to_disk = self._local_path_volume(snapshot['volume'])
self._create_snapshot_offline(snapshot, path_to_disk)
def _create_qcow2_snap_file(self, snapshot, backing_filename,
new_snap_path):
"""Create a QCOW2 file backed by another file.
:param snapshot: snapshot reference
:param backing_filename: filename of file that will back the
new qcow2 file
:param new_snap_path: filename of new qcow2 file
"""
backing_path_full_path = '%s/%s' % (
self._local_volume_dir(snapshot['volume']),
backing_filename)
command = ['qemu-img', 'create', '-f', 'qcow2', '-o',
'backing_file=%s' % backing_path_full_path, new_snap_path]
self._execute(*command, run_as_root=True)
info = self._qemu_img_info(backing_path_full_path)
backing_fmt = info.file_format
command = ['qemu-img', 'rebase', '-u',
'-b', backing_filename,
'-F', backing_fmt,
new_snap_path]
self._execute(*command, run_as_root=True)
self._set_rw_permissions_for_all(new_snap_path)
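# Illustrative commands produced for a raw base volume (hypothetical paths):
#   qemu-img create -f qcow2 -o backing_file=/mnt/<hash>/volume-1234 \
#       /mnt/<hash>/volume-1234.aaaa
#   qemu-img rebase -u -b volume-1234 -F raw /mnt/<hash>/volume-1234.aaaa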
def _create_snapshot_offline(self, snapshot, path_to_disk):
"""Create snapshot (offline case)."""
# Requires volume status = 'available'
new_snap_path = '%s.%s' % (path_to_disk, snapshot['id'])
backing_filename = self.get_active_image_from_info(snapshot['volume'])
self._create_qcow2_snap_file(snapshot,
backing_filename,
new_snap_path)
# Update info file
info_path = self._local_path_volume_info(snapshot['volume'])
snap_info = self._read_info_file(info_path,
empty_if_missing=True)
snap_info['active'] = os.path.basename(new_snap_path)
snap_info[snapshot['id']] = os.path.basename(new_snap_path)
self._write_info_file(info_path, snap_info)
def _read_file(self, filename):
"""This method is to make it easier to stub out code for testing.
Returns a string representing the contents of the file.
"""
with open(filename, 'r') as f:
return f.read()
def _read_info_file(self, info_path, empty_if_missing=False):
"""Return dict of snapshot information."""
if not os.path.exists(info_path):
if empty_if_missing is True:
return {}
return json.loads(self._read_file(info_path))
def _write_info_file(self, info_path, snap_info):
if 'active' not in snap_info.keys():
msg = _("'active' must be present when writing snap_info.")
raise exception.GlusterfsException(msg)
with open(info_path, 'w') as f:
json.dump(snap_info, f, indent=1, sort_keys=True)
def _get_matching_backing_file(self, backing_chain, snapshot_file):
return next(f for f in backing_chain
if f.get('backing-filename', '') == snapshot_file)
@utils.synchronized('glusterfs', external=False)
def delete_snapshot(self, snapshot):
"""Apply locking to the delete snapshot operation."""
self._delete_snapshot(snapshot)
def _delete_snapshot(self, snapshot):
"""Delete a snapshot.
If volume status is 'available', delete snapshot here in Cinder
using qemu-img.
If volume status is 'in-use', calculate what qcow2 files need to
merge, and call to Nova to perform this operation.
:raises: InvalidVolume if status not acceptable
:raises: GlusterfsException(msg) if operation fails
:returns: None
"""
LOG.debug('deleting snapshot %s' % snapshot['id'])
volume_status = snapshot['volume']['status']
if volume_status not in ['available', 'in-use']:
msg = _('Volume status must be "available" or "in-use".')
raise exception.InvalidVolume(msg)
self._ensure_share_writable(
self._local_volume_dir(snapshot['volume']))
# Determine the true snapshot file for this snapshot
# based on the .info file
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path, empty_if_missing=True)
if snapshot['id'] not in snap_info:
# If snapshot info file is present, but snapshot record does not
# exist, do not attempt to delete.
# (This happens, for example, if snapshot_create failed due to lack
# of permission to write to the share.)
LOG.info(_('Snapshot record for %s is not present, allowing '
'snapshot_delete to proceed.') % snapshot['id'])
return
snapshot_file = snap_info[snapshot['id']]
LOG.debug('snapshot_file for this snap is %s' % snapshot_file)
snapshot_path = '%s/%s' % (self._local_volume_dir(snapshot['volume']),
snapshot_file)
snapshot_path_img_info = self._qemu_img_info(snapshot_path)
vol_path = self._local_volume_dir(snapshot['volume'])
# Find what file has this as its backing file
active_file = self.get_active_image_from_info(snapshot['volume'])
active_file_path = '%s/%s' % (vol_path, active_file)
if volume_status == 'in-use':
# Online delete
context = snapshot['context']
base_file = snapshot_path_img_info.backing_file
if base_file is None:
# There should always be at least the original volume
# file as base.
msg = _('No backing file found for %s, allowing snapshot '
'to be deleted.') % snapshot_path
LOG.warn(msg)
# Snapshot may be stale, so just delete it and update the
# info file instead of blocking
return self._delete_stale_snapshot(snapshot)
base_path = os.path.join(
self._local_volume_dir(snapshot['volume']), base_file)
base_file_img_info = self._qemu_img_info(base_path)
new_base_file = base_file_img_info.backing_file
base_id = None
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
for key, value in snap_info.iteritems():
if value == base_file and key != 'active':
base_id = key
break
if base_id is None:
# This means we are deleting the oldest snapshot
                msg = ('No base_id found for snapshot file %s.'
                       % snapshot_file)
LOG.debug(msg)
online_delete_info = {
'active_file': active_file,
'snapshot_file': snapshot_file,
'base_file': base_file,
'base_id': base_id,
'new_base_file': new_base_file
}
return self._delete_snapshot_online(context,
snapshot,
online_delete_info)
if snapshot_file == active_file:
# Need to merge snapshot_file into its backing file
# There is no top file
            #      T0       |        T1         |
            #     base      |   snapshot_file   |    None
            # (guaranteed to|  (being deleted)  |
            #    exist)     |                   |
base_file = snapshot_path_img_info.backing_file
self._qemu_img_commit(snapshot_path)
self._execute('rm', '-f', snapshot_path, run_as_root=True)
# Remove snapshot_file from info
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
del(snap_info[snapshot['id']])
# Active file has changed
snap_info['active'] = base_file
self._write_info_file(info_path, snap_info)
else:
            #      T0        |       T1        |       T2        |      T3
            #     base       |  snapshot_file  |   higher_file   |  highest_file
            # (guaranteed to | (being deleted) | (guaranteed to  |  (may exist,
            #  exist, not    |                 |  exist, being   |   needs ptr
            #  used here)    |                 | committed down) |   update if so)
backing_chain = self._get_backing_chain_for_path(
snapshot['volume'], active_file_path)
# This file is guaranteed to exist since we aren't operating on
# the active file.
higher_file = next((os.path.basename(f['filename'])
for f in backing_chain
if f.get('backing-filename', '') ==
snapshot_file),
None)
if higher_file is None:
msg = _('No file found with %s as backing file.') %\
snapshot_file
raise exception.GlusterfsException(msg)
snap_info = self._read_info_file(info_path)
higher_id = next((i for i in snap_info
if snap_info[i] == higher_file
and i != 'active'),
None)
if higher_id is None:
msg = _('No snap found with %s as backing file.') %\
higher_file
raise exception.GlusterfsException(msg)
# Is there a file depending on higher_file?
highest_file = next((os.path.basename(f['filename'])
for f in backing_chain
if f.get('backing-filename', '') ==
higher_file),
None)
if highest_file is None:
msg = 'No file depends on %s.' % higher_file
LOG.debug(msg)
# Committing higher_file into snapshot_file
# And update pointer in highest_file
higher_file_path = '%s/%s' % (vol_path, higher_file)
self._qemu_img_commit(higher_file_path)
if highest_file is not None:
highest_file_path = '%s/%s' % (vol_path, highest_file)
info = self._qemu_img_info(snapshot_path)
snapshot_file_fmt = info.file_format
backing_fmt = ('-F', snapshot_file_fmt)
self._execute('qemu-img', 'rebase', '-u',
'-b', snapshot_file,
highest_file_path, *backing_fmt,
run_as_root=True)
self._execute('rm', '-f', higher_file_path, run_as_root=True)
# Remove snapshot_file from info
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
del(snap_info[snapshot['id']])
snap_info[higher_id] = snapshot_file
if higher_file == active_file:
if highest_file is not None:
msg = _('Check condition failed: '
'%s expected to be None.') % 'highest_file'
raise exception.GlusterfsException(msg)
# Active file has changed
snap_info['active'] = snapshot_file
self._write_info_file(info_path, snap_info)
def _delete_snapshot_online(self, context, snapshot, info):
# Update info over the course of this method
# active file never changes
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
if info['active_file'] == info['snapshot_file']:
# blockRebase/Pull base into active
# info['base'] => snapshot_file
file_to_delete = info['base_file']
if info['base_id'] is None:
# Passing base=none to blockRebase ensures that
# libvirt blanks out the qcow2 backing file pointer
new_base = None
else:
new_base = info['new_base_file']
snap_info[info['base_id']] = info['snapshot_file']
delete_info = {'file_to_merge': new_base,
'merge_target_file': None, # current
'type': 'qcow2',
'volume_id': snapshot['volume']['id']}
del(snap_info[snapshot['id']])
else:
# blockCommit snapshot into base
# info['base'] <= snapshot_file
# delete record of snapshot
file_to_delete = info['snapshot_file']
delete_info = {'file_to_merge': info['snapshot_file'],
'merge_target_file': info['base_file'],
'type': 'qcow2',
'volume_id': snapshot['volume']['id']}
del(snap_info[snapshot['id']])
try:
self._nova.delete_volume_snapshot(
context,
snapshot['id'],
delete_info)
except Exception as e:
LOG.error(_('Call to Nova delete snapshot failed'))
LOG.exception(e)
raise e
# Loop and wait for result
# Nova will call Cinderclient to update the status in the database
# An update of progress = '90%' means that Nova is done
seconds_elapsed = 0
increment = 1
timeout = 7200
while True:
s = db.snapshot_get(context, snapshot['id'])
if s['status'] == 'deleting':
if s['progress'] == '90%':
# Nova tasks completed successfully
break
else:
msg = ('status of snapshot %s is '
'still "deleting"... waiting') % snapshot['id']
LOG.debug(msg)
time.sleep(increment)
seconds_elapsed += increment
else:
msg = _('Unable to delete snapshot %(id)s, '
'status: %(status)s.') % {'id': snapshot['id'],
'status': s['status']}
raise exception.GlusterfsException(msg)
if 10 < seconds_elapsed <= 20:
increment = 2
elif 20 < seconds_elapsed <= 60:
increment = 5
elif 60 < seconds_elapsed:
increment = 10
if seconds_elapsed > timeout:
msg = _('Timed out while waiting for Nova update '
'for deletion of snapshot %(id)s.') %\
{'id': snapshot['id']}
raise exception.GlusterfsException(msg)
# Write info file updated above
self._write_info_file(info_path, snap_info)
# Delete stale file
path_to_delete = os.path.join(
self._local_volume_dir(snapshot['volume']), file_to_delete)
self._execute('rm', '-f', path_to_delete, run_as_root=True)
def _delete_stale_snapshot(self, snapshot):
info_path = self._local_path_volume(snapshot['volume']) + '.info'
snap_info = self._read_info_file(info_path)
if snapshot['id'] in snap_info:
snapshot_file = snap_info[snapshot['id']]
active_file = self.get_active_image_from_info(snapshot['volume'])
snapshot_path = os.path.join(
self._local_volume_dir(snapshot['volume']), snapshot_file)
if (snapshot_file == active_file):
return
LOG.info(_('Deleting stale snapshot: %s') % snapshot['id'])
fileutils.delete_if_exists(snapshot_path)
del(snap_info[snapshot['id']])
self._write_info_file(info_path, snap_info)
def _get_backing_chain_for_path(self, volume, path):
"""Returns list of dicts containing backing-chain information.
Includes 'filename', and 'backing-filename' for each
applicable entry.
Consider converting this to use --backing-chain and --output=json
when environment supports qemu-img 1.5.0.
:param volume: volume reference
:param path: path to image file at top of chain
"""
output = []
info = self._qemu_img_info(path)
new_info = {}
new_info['filename'] = os.path.basename(path)
new_info['backing-filename'] = info.backing_file
output.append(new_info)
while new_info['backing-filename']:
filename = new_info['backing-filename']
path = os.path.join(self._local_volume_dir(volume), filename)
info = self._qemu_img_info(path)
backing_filename = info.backing_file
new_info = {}
new_info['filename'] = filename
new_info['backing-filename'] = backing_filename
output.append(new_info)
return output
def _qemu_img_commit(self, path):
return self._execute('qemu-img', 'commit', path, run_as_root=True)
def ensure_export(self, ctx, volume):
"""Synchronously recreates an export for a logical volume."""
self._ensure_share_mounted(volume['provider_location'])
def create_export(self, ctx, volume):
"""Exports the volume."""
pass
def remove_export(self, ctx, volume):
"""Removes an export for a logical volume."""
pass
def validate_connector(self, connector):
pass
@utils.synchronized('glusterfs', external=False)
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info."""
# Find active qcow2 file
active_file = self.get_active_image_from_info(volume)
path = '%s/%s/%s' % (self.configuration.glusterfs_mount_point_base,
self._get_hash_str(volume['provider_location']),
active_file)
data = {'export': volume['provider_location'],
'name': active_file}
if volume['provider_location'] in self.shares:
data['options'] = self.shares[volume['provider_location']]
# Test file for raw vs. qcow2 format
info = self._qemu_img_info(path)
data['format'] = info.file_format
if data['format'] not in ['raw', 'qcow2']:
msg = _('%s must be a valid raw or qcow2 image.') % path
raise exception.InvalidVolume(msg)
return {
'driver_volume_type': 'glusterfs',
'data': data,
'mount_point_base': self._get_mount_point_base()
}
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
pass
@utils.synchronized('glusterfs', external=False)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
# If snapshots exist, flatten to a temporary image, and upload it
active_file = self.get_active_image_from_info(volume)
active_file_path = '%s/%s' % (self._local_volume_dir(volume),
active_file)
info = self._qemu_img_info(active_file_path)
backing_file = info.backing_file
if backing_file:
snapshots_exist = True
else:
snapshots_exist = False
root_file_fmt = info.file_format
temp_path = None
try:
if snapshots_exist or (root_file_fmt != 'raw'):
# Convert due to snapshots
# or volume data not being stored in raw format
# (upload_volume assumes raw format input)
temp_path = '%s/%s.temp_image.%s' % (
self._local_volume_dir(volume),
volume['id'],
image_meta['id'])
image_utils.convert_image(active_file_path, temp_path, 'raw')
upload_path = temp_path
else:
upload_path = active_file_path
image_utils.upload_volume(context,
image_service,
image_meta,
upload_path)
finally:
if temp_path is not None:
self._execute('rm', '-f', temp_path)
@utils.synchronized('glusterfs', external=False)
def extend_volume(self, volume, size_gb):
volume_path = self.local_path(volume)
volume_filename = os.path.basename(volume_path)
# Ensure no snapshots exist for the volume
active_image = self.get_active_image_from_info(volume)
if volume_filename != active_image:
msg = _('Extend volume is only supported for this'
' driver when no snapshots exist.')
raise exception.InvalidVolume(msg)
info = self._qemu_img_info(volume_path)
backing_fmt = info.file_format
if backing_fmt not in ['raw', 'qcow2']:
msg = _('Unrecognized backing format: %s')
raise exception.InvalidVolume(msg % backing_fmt)
# qemu-img can resize both raw and qcow2 files
image_utils.resize_image(volume_path, size_gb)
def _do_create_volume(self, volume):
"""Create a volume on given glusterfs_share.
:param volume: volume reference
"""
volume_path = self.local_path(volume)
volume_size = volume['size']
LOG.debug("creating new volume at %s" % volume_path)
if os.path.exists(volume_path):
msg = _('file already exists at %s') % volume_path
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
if self.configuration.glusterfs_qcow2_volumes:
self._create_qcow2_file(volume_path, volume_size)
else:
if self.configuration.glusterfs_sparsed_volumes:
self._create_sparsed_file(volume_path, volume_size)
else:
self._create_regular_file(volume_path, volume_size)
self._set_rw_permissions_for_all(volume_path)
def _ensure_shares_mounted(self):
"""Mount all configured GlusterFS shares."""
self._mounted_shares = []
self._load_shares_config(self.configuration.glusterfs_shares_config)
for share in self.shares.keys():
try:
self._ensure_share_mounted(share)
self._mounted_shares.append(share)
except Exception as exc:
LOG.error(_('Exception during mounting %s') % (exc,))
LOG.debug('Available shares: %s' % self._mounted_shares)
def _ensure_share_writable(self, path):
"""Ensure that the Cinder user can write to the share.
If not, raise an exception.
:param path: path to test
:raises: GlusterfsException
:returns: None
"""
prefix = '.cinder-write-test-' + str(os.getpid()) + '-'
try:
tempfile.NamedTemporaryFile(prefix=prefix, dir=path)
except OSError:
msg = _('GlusterFS share at %(dir)s is not writable by the '
'Cinder volume service. Snapshot operations will not be '
'supported.') % {'dir': path}
raise exception.GlusterfsException(msg)
def _ensure_share_mounted(self, glusterfs_share):
"""Mount GlusterFS share.
:param glusterfs_share: string
"""
mount_path = self._get_mount_point_for_share(glusterfs_share)
self._mount_glusterfs(glusterfs_share, mount_path, ensure=True)
# Ensure we can write to this share
group_id = os.getegid()
current_group_id = utils.get_file_gid(mount_path)
current_mode = utils.get_file_mode(mount_path)
if group_id != current_group_id:
cmd = ['chgrp', group_id, mount_path]
self._execute(*cmd, run_as_root=True)
if not (current_mode & stat.S_IWGRP):
cmd = ['chmod', 'g+w', mount_path]
self._execute(*cmd, run_as_root=True)
self._ensure_share_writable(mount_path)
def _find_share(self, volume_size_for):
"""Choose GlusterFS share among available ones for given volume size.
Current implementation looks for greatest capacity.
:param volume_size_for: int size in GB
"""
if not self._mounted_shares:
raise exception.GlusterfsNoSharesMounted()
greatest_size = 0
greatest_share = None
for glusterfs_share in self._mounted_shares:
capacity = self._get_available_capacity(glusterfs_share)[0]
if capacity > greatest_size:
greatest_share = glusterfs_share
greatest_size = capacity
if volume_size_for * units.Gi > greatest_size:
raise exception.GlusterfsNoSuitableShareFound(
volume_size=volume_size_for)
return greatest_share
def _get_hash_str(self, base_str):
"""Return a string that represents hash of base_str
(in a hex format).
"""
return hashlib.md5(base_str).hexdigest()
def _get_mount_point_for_share(self, glusterfs_share):
"""Return mount point for share.
:param glusterfs_share: example 172.18.194.100:/var/glusterfs
"""
return self._remotefsclient.get_mount_point(glusterfs_share)
def _get_available_capacity(self, glusterfs_share):
"""Calculate available space on the GlusterFS share.
:param glusterfs_share: example 172.18.194.100:/var/glusterfs
"""
mount_point = self._get_mount_point_for_share(glusterfs_share)
out, _ = self._execute('df', '--portability', '--block-size', '1',
mount_point, run_as_root=True)
out = out.splitlines()[1]
size = int(out.split()[1])
available = int(out.split()[3])
return available, size
def _get_capacity_info(self, glusterfs_share):
available, size = self._get_available_capacity(glusterfs_share)
return size, available, size - available
def _mount_glusterfs(self, glusterfs_share, mount_path, ensure=False):
"""Mount GlusterFS share to mount path."""
self._execute('mkdir', '-p', mount_path)
command = ['mount', '-t', 'glusterfs', glusterfs_share,
mount_path]
if self.shares.get(glusterfs_share) is not None:
command.extend(self.shares[glusterfs_share].split())
self._do_mount(command, ensure, glusterfs_share)
def _get_mount_point_base(self):
return self.base
def backup_volume(self, context, backup, backup_service):
"""Create a new backup from an existing volume.
Allow a backup to occur only if no snapshots exist.
Check both Cinder and the file on-disk. The latter is only
a safety mechanism to prevent further damage if the snapshot
information is already inconsistent.
"""
snapshots = self.db.snapshot_get_all_for_volume(context,
backup['volume_id'])
snap_error_msg = _('Backup is not supported for GlusterFS '
'volumes with snapshots.')
if len(snapshots) > 0:
raise exception.InvalidVolume(snap_error_msg)
volume = self.db.volume_get(context, backup['volume_id'])
volume_dir = self._local_volume_dir(volume)
active_file_path = os.path.join(
volume_dir,
self.get_active_image_from_info(volume))
info = self._qemu_img_info(active_file_path)
if info.backing_file is not None:
msg = _('No snapshots found in database, but '
'%(path)s has backing file '
'%(backing_file)s!') % {'path': active_file_path,
'backing_file': info.backing_file}
LOG.error(msg)
raise exception.InvalidVolume(snap_error_msg)
if info.file_format != 'raw':
msg = _('Backup is only supported for raw-formatted '
'GlusterFS volumes.')
raise exception.InvalidVolume(msg)
return super(GlusterfsDriver, self).backup_volume(
context, backup, backup_service)
|
|
import base64
from datetime import timedelta
import os
import shutil
import string
import tempfile
import unittest
import warnings
from django.conf import settings
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import SessionStore as CacheDBSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import SessionStore as CookieSession
from django.contrib.sessions.models import Session
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.cache import get_cache
from django.core import management
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponse
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings, patch_logger
from django.utils import six
from django.utils import timezone
from django.contrib.sessions.exceptions import InvalidSessionKey
class SessionTestsMixin(object):
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertFalse(self.session.modified)
self.assertFalse(self.session.accessed)
def test_get_empty(self):
self.assertEqual(self.session.get('cat'), None)
def test_store(self):
self.session['cat'] = "dog"
self.assertTrue(self.session.modified)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.session['some key'] = 'exists'
# Need to reset these to pretend we haven't accessed it:
        self.session.accessed = False
        self.session.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('some key'), None)
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
'does not exist')
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_update(self):
self.session.update({'update key': 1})
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertIn('some key', self.session)
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_values(self):
self.assertEqual(list(self.session.values()), [])
self.assertTrue(self.session.accessed)
self.session['some key'] = 1
self.assertEqual(list(self.session.values()), [1])
def test_iterkeys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iterkeys(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), ['x'])
def test_itervalues(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.itervalues(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [1])
def test_iteritems(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iteritems(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [('x', 1)])
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [('x', 1)])
self.session.clear()
self.assertEqual(list(self.session.items()), [])
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_save(self):
        if (hasattr(self.session, '_cache') and 'DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND']):
raise unittest.SkipTest("Session saving tests require a real cache backend")
self.session.save()
self.assertTrue(self.session.exists(self.session.session_key))
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertFalse(self.session.exists(self.session.session_key))
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertFalse(self.session.exists(prev_key))
self.assertNotEqual(self.session.session_key, prev_key)
self.assertTrue(self.session.modified)
self.assertTrue(self.session.accessed)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = list(self.session.items())
self.session.cycle_key()
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(list(self.session.items()), prev_data)
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend('1')
try:
session.save()
except AttributeError:
self.fail("The session object did not save properly. Middleware may be saving cache items without namespaces.")
self.assertNotEqual(session.session_key, '1')
self.assertEqual(session.get('cat'), None)
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete('1')
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
self.assertRaises(AttributeError, set_session_key, self.session)
# Custom session expiry
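    # For reference, set_expiry() accepts several argument types:
    #   int       -> expire this many seconds from now
    #   timedelta -> expire this far from now
    #   datetime  -> expire at that exact moment
    #   0         -> expire at browser close
    #   None      -> fall back to the global SESSION_COOKIE_AGE policy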
def test_default_expiry(self):
        # A normal session has a max age equal to settings.SESSION_COOKIE_AGE
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
modification = timezone.now()
self.session.set_expiry(10)
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_timedelta(self):
modification = timezone.now()
# Mock timezone.now, because set_expiry calls it on this code path.
original_now = timezone.now
try:
timezone.now = lambda: modification
self.session.set_expiry(timedelta(seconds=10))
finally:
timezone.now = original_now
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_datetime(self):
modification = timezone.now()
self.session.set_expiry(modification + timedelta(seconds=10))
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertFalse(self.session.get_expire_at_browser_close())
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertTrue(self.session.get_expire_at_browser_close())
def test_decode(self):
# Ensure we can decode what we encode
data = {'a test key': 'a test value'}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_decode_failure_logged_to_security(self):
bad_encode = base64.b64encode(b'flaskdj:alkdjf')
with patch_logger('django.security.SuspiciousSession', 'warning') as calls:
self.assertEqual({}, self.session.decode(bad_encode))
# check that the failed decode is logged
self.assertEqual(len(calls), 1)
self.assertTrue('corrupted' in calls[0])
def test_actual_expiry(self):
# Regression test for #19200
old_session_key = None
new_session_key = None
try:
self.session['foo'] = 'bar'
self.session.set_expiry(-timedelta(seconds=10))
self.session.save()
old_session_key = self.session.session_key
# With an expiry date in the past, the session expires instantly.
new_session = self.backend(self.session.session_key)
new_session_key = new_session.session_key
self.assertNotIn('foo', new_session)
finally:
self.session.delete(old_session_key)
self.session.delete(new_session_key)
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
def test_session_get_decoded(self):
"""
Test we can use Session.get_decoded to retrieve data stored
in normal way
"""
self.session['x'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
self.assertEqual(s.get_decoded(), {'x': 1})
def test_sessionmanager_save(self):
"""
Test SessionManager.save method
"""
# Create a session
self.session['y'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
# Change it
Session.objects.save(s.session_key, {'y': 2}, s.expire_date)
# Clear cache, so that it will be retrieved from DB
del self.session._session_cache
self.assertEqual(self.session['y'], 2)
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.db")
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
self.assertEqual(0, Session.objects.count())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the database before clearsessions...
self.assertEqual(2, Session.objects.count())
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, Session.objects.count())
@override_settings(USE_TZ=True)
class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests):
pass
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
@unittest.skipIf('DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND'],
"Session saving tests require a real cache backend")
def test_exists_searches_cache_first(self):
self.session.save()
with self.assertNumQueries(0):
self.assertTrue(self.session.exists(self.session.session_key))
def test_load_overlong_key(self):
# Some backends might issue a warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
pass
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
backend = FileSession
def setUp(self):
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
# Reset the file session backend's internal caches
if hasattr(self.backend, '_storage_path'):
del self.backend._storage_path
super(FileSessionTests, self).setUp()
def tearDown(self):
super(FileSessionTests, self).tearDown()
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
@override_settings(
SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer")
def test_configuration_check(self):
del self.backend._storage_path
# Make sure the file backend checks for a good storage dir
self.assertRaises(ImproperlyConfigured, self.backend)
def test_invalid_key_backslash(self):
# Ensure we don't allow directory-traversal.
# This is tested directly on _key_to_file, as load() will swallow
# a SuspiciousOperation in the same way as an IOError - by creating
# a new session, making it unclear whether the slashes were detected.
self.assertRaises(InvalidSessionKey,
self.backend()._key_to_file, "a\\b\\c")
def test_invalid_key_forwardslash(self):
# Ensure we don't allow directory-traversal
self.assertRaises(InvalidSessionKey,
self.backend()._key_to_file, "a/b/c")
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.file")
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
storage_path = self.backend._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
def count_sessions():
return len([session_file for session_file in os.listdir(storage_path)
if session_file.startswith(file_prefix)])
self.assertEqual(0, count_sessions())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the filesystem before clearsessions...
self.assertEqual(2, count_sessions())
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, count_sessions())
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
def test_load_overlong_key(self):
# Some backends might issue a warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
def test_default_cache(self):
self.session.save()
self.assertNotEqual(get_cache('default').get(self.session.cache_key), None)
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'sessions': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
}, SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
        # Re-initialize the session backend to make use of overridden settings.
self.session = self.backend()
self.session.save()
self.assertEqual(get_cache('default').get(self.session.cache_key), None)
self.assertNotEqual(get_cache('sessions').get(self.session.cache_key), None)
class SessionMiddlewareTests(unittest.TestCase):
@override_settings(SESSION_COOKIE_SECURE=True)
def test_secure_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['secure'])
@override_settings(SESSION_COOKIE_HTTPONLY=True)
def test_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertIn('httponly',
str(response.cookies[settings.SESSION_COOKIE_NAME]))
@override_settings(SESSION_COOKIE_HTTPONLY=False)
def test_no_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertFalse(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertNotIn('httponly',
str(response.cookies[settings.SESSION_COOKIE_NAME]))
def test_session_save_on_500(self):
request = RequestFactory().get('/')
response = HttpResponse('Horrible error')
response.status_code = 500
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the value wasn't saved above.
self.assertNotIn('hello', request.session.load())
class CookieSessionTests(SessionTestsMixin, TestCase):
backend = CookieSession
def test_save(self):
"""
This test tested exists() in the other session backends, but that
doesn't make sense for us.
"""
pass
def test_cycle(self):
"""
This test tested cycle_key() which would create a new session
key for the same session data. But we can't invalidate previously
signed cookies (other than letting them expire naturally) so
testing for this behavior is meaningless.
"""
pass
@unittest.expectedFailure
def test_actual_expiry(self):
# The cookie backend doesn't handle non-default expiry dates, see #19201
super(CookieSessionTests, self).test_actual_expiry()
|
|
#!/usr/bin/env python
import os
import sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table, vstack, Column
from astropy.time import Time
import healpy as hp
from dlnpyutils import utils as dln, coords, bindata
import subprocess
import time
from argparse import ArgumentParser
import socket
from dustmaps.sfd import SFDQuery
from astropy.coordinates import SkyCoord
from sklearn.cluster import DBSCAN
from scipy.optimize import least_squares
from scipy.interpolate import interp1d
import sqlite3
import gc
import psutil
def writecat2db(cat,dbfile):
""" Write a catalog to the database """
ncat = dln.size(cat)
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
#db = sqlite3.connect('test.db')
#db.text_factory = lambda x: str(x, 'latin1')
#db.row_factory = sqlite3.Row
c = db.cursor()
# Create the table
# the primary key ROWID is automatically generated
if len(c.execute('SELECT name from sqlite_master where type= "table" and name="meas"').fetchall()) < 1:
c.execute('''CREATE TABLE meas(measid TEXT, objlabel INTEGER, exposure TEXT, ccdnum INTEGER, filter TEXT, mjd REAL,
ra REAL, raerr REAL, dec REAL, decerr REAL, mag_auto REAL, magerr_auto REAL, asemi REAL, asemierr REAL,
bsemi REAL, bsemierr REAL, theta REAL, thetaerr REAL, fwhm REAL, flags INTEGER, class_star REAL)''')
data = list(zip(cat['measid'],np.zeros(ncat,int)-1,cat['exposure'],cat['ccdnum'],cat['filter'],cat['mjd'],cat['ra'],
cat['raerr'],cat['dec'],cat['decerr'],cat['mag_auto'],cat['magerr_auto'],cat['asemi'],cat['asemierr'],
cat['bsemi'],cat['bsemierr'],cat['theta'],cat['thetaerr'],cat['fwhm'],cat['flags'],cat['class_star']))
c.executemany('''INSERT INTO meas(measid,objlabel,exposure,ccdnum,filter,mjd,ra,raerr,dec,decerr,mag_auto,magerr_auto,
asemi,asemierr,bsemi,bsemierr,theta,thetaerr,fwhm,flags,class_star)
VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''', data)
db.commit()
db.close()
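# Example usage (filenames hypothetical); the table is created on the first
# call and subsequent calls append rows:
#   cat = fits.getdata('exposure1_meas.fits', 1)
#   writecat2db(cat, '/tmp/1234_combine.db')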
def getdbcoords(dbfile):
""" Get the coordinates and ROWID from the database """
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
c.execute('''SELECT rowid,ra,dec FROM meas''')
data = c.fetchall()
db.close()
    # Convert to a numpy structured array
dtype = np.dtype([('ROWID',int),('RA',np.float64),('DEC',np.float64)])
cat = np.zeros(len(data),dtype=dtype)
cat[...] = data
del data
return cat
def createindexdb(dbfile,col='measid',table='meas',unique=True):
""" Index a column in the database """
t0 = time.time()
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
index_name = 'idx_'+col+'_'+table
# Check if the index exists first
c.execute('select name from sqlite_master')
d = c.fetchall()
for nn in d:
if nn[0]==index_name:
print(index_name+' already exists')
return
# Create the index
print('Indexing '+col)
if unique:
c.execute('CREATE UNIQUE INDEX '+index_name+' ON '+table+'('+col+')')
else:
c.execute('CREATE INDEX '+index_name+' ON '+table+'('+col+')')
data = c.fetchall()
db.close()
print('indexing done after '+str(time.time()-t0)+' sec')
def insertobjlabelsdb(rowid,labels,dbfile):
""" Insert objectlabel values into the database """
print('Inserting object labels')
t0 = time.time()
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
data = list(zip(labels,rowid))
c.executemany('''UPDATE meas SET objlabel=? WHERE rowid=?''', data)
db.commit()
db.close()
print('inserting done after '+str(time.time()-t0)+' sec')
def updatecoldb(selcolname,selcoldata,updcolname,updcoldata,table,dbfile):
""" Update column in database """
print('Updating '+updcolname+' column in '+table+' table using '+selcolname)
t0 = time.time()
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
data = list(zip(updcoldata,selcoldata))
c.executemany('''UPDATE '''+table+''' SET '''+updcolname+'''=? WHERE '''+selcolname+'''=?''', data)
db.commit()
db.close()
print('updating done after '+str(time.time()-t0)+' sec')
def deleterowsdb(colname,coldata,table,dbfile):
""" Delete rows from the database using rowid"""
print('Deleting rows from '+table+' table using '+colname)
t0 = time.time()
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
data = list(zip(coldata))
c.executemany('''DELETE from '''+table+''' WHERE '''+colname+'''=?''', data)
db.commit()
db.close()
print('deleting done after '+str(time.time()-t0)+' sec')
def writeidstr2db(cat,dbfile):
""" Insert IDSTR database values """
t0 = time.time()
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
# Create the table
# the primary key ROWID is automatically generated
if len(c.execute('SELECT name from sqlite_master where type= "table" and name="idstr"').fetchall()) < 1:
c.execute('''CREATE TABLE idstr(measid TEXT, exposure TEXT, objectid TEXT, objectindex INTEGER)''')
data = list(zip(cat['measid'],cat['exposure'],cat['objectid'],cat['objectindex']))
c.executemany('''INSERT INTO idstr(measid,exposure,objectid,objectindex)
VALUES(?,?,?,?)''', data)
db.commit()
db.close()
#print('inserting done after '+str(time.time()-t0)+' sec')
def querydb(dbfile,table='meas',cols='rowid,*',where=None):
""" Query database table """
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
cur = db.cursor()
cmd = 'SELECT '+cols+' FROM '+table
if where is not None: cmd += ' WHERE '+where
cur.execute(cmd)
data = cur.fetchall()
db.close()
# Return results
return data
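# Example usage of querydb (filename and label hypothetical):
#   rows = querydb('/tmp/1234_combine.db', table='meas',
#                  cols='rowid,ra,dec', where='objlabel=5')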
def executedb(dbfile,cmd):
""" Execute a database command """
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
cur = db.cursor()
cur.execute(cmd)
data = cur.fetchall()
db.close()
return data
def getdatadb(dbfile,table='meas',cols='rowid,*',objlabel=None,rar=None,decr=None,verbose=False):
""" Get measurements for an object(s) from the database """
t0 = time.time()
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
cur = db.cursor()
cmd = 'SELECT '+cols+' FROM '+table
# OBJLABEL constraints
if objlabel is not None:
if cmd.find('WHERE') == -1:
cmd += ' WHERE '
else:
cmd += ' AND '
if dln.size(objlabel)==2:
cmd += 'objlabel>='+str(objlabel[0])+' AND objlabel<='+str(objlabel[1])
else:
cmd += 'objlabel='+str(objlabel)
# RA constraints
if rar is not None:
if cmd.find('WHERE') == -1:
cmd += ' WHERE '
else:
cmd += ' AND '
cmd += 'ra>='+str(rar[0])+' AND ra<'+str(rar[1])
# DEC constraints
if decr is not None:
if cmd.find('WHERE') == -1:
cmd += ' WHERE '
else:
cmd += ' AND '
cmd += 'dec>='+str(decr[0])+' AND dec<'+str(decr[1])
# Execute the select command
#print('CMD = '+cmd)
cur.execute(cmd)
data = cur.fetchall()
db.close()
# No results
if len(data)==0:
return np.array([])
# Convert to numpy structured array
dtype_hicat = np.dtype([('ROWID',int),('MEASID',np.str,30),('OBJLABEL',int),('EXPOSURE',np.str,40),('CCDNUM',int),('FILTER',np.str,3),
('MJD',float),('RA',float),('RAERR',float),('DEC',float),('DECERR',float),
('MAG_AUTO',float),('MAGERR_AUTO',float),('ASEMI',float),('ASEMIERR',float),('BSEMI',float),('BSEMIERR',float),
('THETA',float),('THETAERR',float),('FWHM',float),('FLAGS',int),('CLASS_STAR',float)])
cat = np.zeros(len(data),dtype=dtype_hicat)
cat[...] = data
del data
if verbose: print('got data in '+str(time.time()-t0)+' sec.')
return cat
def getradecrangedb(dbfile):
""" Get RA/DEC ranges from database """
sqlite3.register_adapter(np.int16, int)
sqlite3.register_adapter(np.int64, int)
sqlite3.register_adapter(np.float64, float)
sqlite3.register_adapter(np.float32, float)
db = sqlite3.connect(dbfile, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
c = db.cursor()
c.execute('''SELECT MIN(ra),MAX(ra),MIN(dec),MAX(dec) FROM meas''')
data = c.fetchall()
db.close()
return data[0]
def add_elements(cat,nnew=300000):
""" Add more elements to a catalog"""
ncat = len(cat)
old = cat.copy()
nnew = dln.gt(nnew,ncat)
cat = np.zeros(ncat+nnew,dtype=old.dtype)
cat[0:ncat] = old
del old
return cat
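# Example (sizes illustrative): the original rows are preserved in
# cat[0:ncat] and zeroed rows are appended (dln.gt reconciles the
# requested pad count with the catalog size):
#   cat = add_elements(cat)   # pad the catalog with empty rows for growth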
# Combine data for one NSC healpix region
if __name__ == "__main__":
parser = ArgumentParser(description='Combine NSC data for one healpix region.')
parser.add_argument('pix', type=str, nargs=1, help='HEALPix pixel number')
parser.add_argument('version', type=str, nargs=1, help='Version number')
args = parser.parse_args()
t0 = time.time()
hostname = socket.gethostname()
host = hostname.split('.')[0]
radeg = np.float64(180.00) / np.pi
# Inputs
pix = int(args.pix[0])
version = args.version[0]
# on thing/hulk use
if (host == "thing") or (host == "hulk"):
dir = "/dl1/users/dnidever/nsc/instcal/"+version+"/"
mssdir = "/mss1/"
localdir = "/d0/"
tmproot = localdir+"dnidever/nsc/instcal/"+version+"/tmp/"
# on gp09 use
if (host == "gp09") or (host == "gp08") or (host == "gp07") or (host == "gp06") or (host == "gp05"):
dir = "/net/dl1/users/dnidever/nsc/instcal/"+version+"/"
mssdir = "/net/mss1/"
localdir = "/data0/"
tmproot = localdir+"dnidever/nsc/instcal/"+version+"/tmp/"
t0 = time.time()
outdir=dir+'combine/'
subdir = str(int(pix)//1000) # use the thousands to create subdirectory grouping
# IDSTR schema
dtype_idstr = np.dtype([('measid',np.str,200),('exposure',np.str,200),('objectid',np.str,200),('objectindex',int)])
# OBJ schema
dtype_obj = np.dtype([('objectid',np.str,100),('pix',int),('ra',np.float64),('dec',np.float64),('raerr',np.float32),('decerr',np.float32),
('pmra',np.float32),('pmdec',np.float32),('pmraerr',np.float32),('pmdecerr',np.float32),('mjd',np.float64),
('deltamjd',np.float32),('ndet',np.int16),('nphot',np.int16),
('ndetu',np.int16),('nphotu',np.int16),('umag',np.float32),('urms',np.float32),('uerr',np.float32),
('uasemi',np.float32),('ubsemi',np.float32),('utheta',np.float32),
('ndetg',np.int16),('nphotg',np.int16),('gmag',np.float32),('grms',np.float32),('gerr',np.float32),
('gasemi',np.float32),('gbsemi',np.float32),('gtheta',np.float32),
('ndetr',np.int16),('nphotr',np.int16),('rmag',np.float32),('rrms',np.float32),('rerr',np.float32),
('rasemi',np.float32),('rbsemi',np.float32),('rtheta',np.float32),
('ndeti',np.int16),('nphoti',np.int16),('imag',np.float32),('irms',np.float32),('ierr',np.float32),
('iasemi',np.float32),('ibsemi',np.float32),('itheta',np.float32),
('ndetz',np.int16),('nphotz',np.int16),('zmag',np.float32),('zrms',np.float32),('zerr',np.float32),
('zasemi',np.float32),('zbsemi',np.float32),('ztheta',np.float32),
('ndety',np.int16),('nphoty',np.int16),('ymag',np.float32),('yrms',np.float32),('yerr',np.float32),
('yasemi',np.float32),('ybsemi',np.float32),('ytheta',np.float32),
('ndetvr',np.int16),('nphotvr',np.int16),('vrmag',np.float32),('vrrms',np.float32),('vrerr',np.float32),
('vrasemi',np.float32),('vrbsemi',np.float32),('vrtheta',np.float32),
('asemi',np.float32),('asemierr',np.float32),('bsemi',np.float32),('bsemierr',np.float32),
('theta',np.float32),('thetaerr',np.float32),('fwhm',np.float32),('flags',np.int16),('class_star',np.float32),
('ebv',np.float32),('rmsvar',np.float32),('madvar',np.float32),('iqrvar',np.float32),('etavar',np.float32),
('jvar',np.float32),('kvar',np.float32),('chivar',np.float32),('romsvar',np.float32),
('variable10sig',np.int16),('nsigvar',np.float32)])
dbfile = tmproot+str(pix)+'_combine.db'
# IDSTR database file
dbfile_idstr = outdir+'/'+subdir+'/'+str(pix)+'_idstr.db'
usedb = True
# Load the object structured array
obj = fits.getdata(outdir+'/'+subdir+'/'+str(pix)+'.fits.gz',2)
nobj = len(obj)
# Initialize the OBJ structured array
#obj = np.zeros(nobj,dtype=dtype_obj)
#obj['objectid'] = dln.strjoin( str(pix)+'.', ((np.arange(nobj)+1).astype(np.str)) )
#obj['pix'] = pix
## all bad to start
#for f in ['pmra','pmraerr','pmdec','pmdecerr','asemi','bsemi','theta','asemierr',
# 'bsemierr','thetaerr','fwhm','class_star','rmsvar','madvar','iqrvar',
# 'etavar','jvar','kvar','chivar','romsvar']: obj[f]=np.nan
#for f in ['u','g','r','i','z','y','vr']:
# obj[f+'mag'] = 99.99
# obj[f+'err'] = 9.99
# obj[f+'rms'] = np.nan
# obj[f+'asemi'] = np.nan
# obj[f+'bsemi'] = np.nan
# obj[f+'theta'] = np.nan
#obj['variable10sig'] = 0
#obj['nsigvar'] = np.nan
#idstr = np.zeros(ncat,dtype=dtype_idstr)
## Higher precision catalog
#dtype_hicat = np.dtype([('MEASID',np.str,30),('EXPOSURE',np.str,40),('CCDNUM',int),('FILTER',np.str,3),
# ('MJD',float),('RA',float),('RAERR',float),('DEC',float),('DECERR',float),
# ('MAG_AUTO',float),('MAGERR_AUTO',float),('ASEMI',float),('ASEMIERR',float),('BSEMI',float),('BSEMIERR',float),
# ('THETA',float),('THETAERR',float),('FWHM',float),('FLAGS',int),('CLASS_STAR',float)])
    ## Convert to a numpy structured array
#dtype_hicatdb = np.dtype([('MEASID',np.str,30),('OBJLABEL',int),('EXPOSURE',np.str,40),('CCDNUM',int),('FILTER',np.str,3),
# ('MJD',float),('RA',float),('RAERR',float),('DEC',float),('DECERR',float),
# ('MAG_AUTO',float),('MAGERR_AUTO',float),('ASEMI',float),('ASEMIERR',float),('BSEMI',float),('BSEMIERR',float),
# ('THETA',float),('THETAERR',float),('FWHM',float),('FLAGS',int),('CLASS_STAR',float)])
dtype_sumstr = np.dtype([('objectid',np.str,100),('maxdist',float)])
sumstr = np.zeros(nobj,dtype=dtype_sumstr)
t1 = time.time()
# Loop over the objects
meascount = 0
ngroup = -1
grpcount = 0
maxmeasload = 50000
ngrpcat = 0
ncat1 = 0
fidmag = np.zeros(nobj,float)+np.nan # fiducial magnitude
for i,objid in enumerate(obj['objectid']):
if (i % 1000)==0: print(i)
# Get meas data for this object
if usedb is False:
oindx = np.arange(objstr['LO'][i],objstr['HI'][i]+1) # this fails if start,stop are the same
if objstr['NMEAS'][i]==1: oindx=np.atleast_1d(objstr['LO'][i])
ncat1 = dln.size(oindx)
cat1_orig = cat[oindx]
# Upgrade precisions of catalog
cat1 = np.zeros(ncat1,dtype=dtype_hicat)
cat1[...] = cat1_orig # stuff in the data
#for n in dtype_hicat.names: cat1[n] = cat1_orig[n]
del cat1_orig
# Get from the database
else:
# Get next group of object measurements
if grpcount>=ngroup:
# Use maxmeasload to figure out how many objects we can load
if i==0:
ngroup = np.max(np.where(meascumcount[i:]<=maxmeasload)[0])+1
else:
ngroup = np.max(np.where((meascumcount[i:]-meascumcount[i-1])<=maxmeasload)[0])+1
ngroup = np.max([1,ngroup]) # need to load at least 1
objid0 = objid
objid1 = obj['objectid'][np.min([i+ngroup-1,nobj-1])]
#lab1 = labelindex['value'][np.min([i+ngroup-1,nobj-1])]
if ngrpcat>0: del grpcat
if ncat1>0: del cat1
grpcat = getdatadb(dbfile,objectid=[objid0,objid1])
ngrpcat = dln.size(grpcat)
grpindex = dln.create_index(grpcat['OBJLABEL'])
#ngroup = len(grpindex['value'])
grpcount = 0
# Get the measurement data for this object
gindx = grpindex['index'][grpindex['lo'][grpcount]:grpindex['hi'][grpcount]+1]
cat1 = np.atleast_1d(grpcat[gindx])
ncat1 = len(cat1)
grpcount += 1
oindx = np.arange(ncat1)+meascount
meascount += ncat1
        # Compute maximum spherical distance of the measurements from the
        # mean position. Assumed implementation: a plain mean of RA/DEC
        # (ignores RA wrap at 0/360) and astropy great-circle separations.
        mncoo = SkyCoord(ra=np.mean(cat1['RA']), dec=np.mean(cat1['DEC']), unit='deg')
        dist = SkyCoord(ra=cat1['RA'], dec=cat1['DEC'], unit='deg').separation(mncoo).deg
        sumstr['objectid'][i] = obj['objectid'][i]
        sumstr['maxdist'][i] = np.max(dist)
#obj['ndet'][i] = ncat1
# Write the output file
outfile = outdir+'/'+subdir+'/'+str(pix)+'_summary.fits'
print('Writing combined catalog to '+outfile)
Table(sumstr).write(outfile) # first, summary table
dt = time.time()-t0
print('dt = '+str(dt)+' sec.')
|
|
# (c) Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from __future__ import absolute_import, division, print_function, unicode_literals
from logging import getLogger
import os
from os.path import abspath, basename, exists, isdir
from . import common
from .common import check_non_admin
from .. import CondaError
from .._vendor.auxlib.ish import dals
from ..base.constants import ROOT_ENV_NAME
from ..base.context import context, locate_prefix_by_name
from ..common.compat import on_win, text_type
from ..core.index import calculate_channel_urls, get_index
from ..core.prefix_data import PrefixData
from ..core.solve import Solver
from ..exceptions import (CondaExitZero, CondaImportError, CondaOSError, CondaSystemExit,
CondaValueError, DirectoryNotFoundError, DryRunExit,
EnvironmentLocationNotFound,
PackageNotInstalledError, PackagesNotFoundError, TooManyArgumentsError,
UnsatisfiableError)
from ..misc import clone_env, explicit, touch_nonadmin
from ..models.match_spec import MatchSpec
from ..plan import (revert_actions)
from ..resolve import ResolvePackageNotFound
log = getLogger(__name__)
stderrlog = getLogger('conda.stderr')
def check_prefix(prefix, json=False):
name = basename(prefix)
error = None
if name == ROOT_ENV_NAME:
error = "'%s' is a reserved environment name" % name
if exists(prefix):
if isdir(prefix) and 'conda-meta' not in os.listdir(prefix):
return None
error = "prefix already exists: %s" % prefix
if error:
raise CondaValueError(error, json)
if ' ' in prefix:
stderrlog.warn("WARNING: A space was detected in your requested environment path\n"
"'%s'\n"
"Spaces in paths can sometimes be problematic." % prefix)
def clone(src_arg, dst_prefix, json=False, quiet=False, index_args=None):
if os.sep in src_arg:
src_prefix = abspath(src_arg)
if not isdir(src_prefix):
raise DirectoryNotFoundError(src_arg)
else:
assert context._argparse_args.clone is not None
src_prefix = locate_prefix_by_name(context._argparse_args.clone)
if not json:
print("Source: %s" % src_prefix)
print("Destination: %s" % dst_prefix)
actions, untracked_files = clone_env(src_prefix, dst_prefix,
verbose=not json,
quiet=quiet,
index_args=index_args)
if json:
common.stdout_json_success(
actions=actions,
untracked_files=list(untracked_files),
src_prefix=src_prefix,
dst_prefix=dst_prefix
)
def print_activate(env_name_or_prefix): # pragma: no cover
if not context.quiet and not context.json:
if 'CONDA_SHLVL' in os.environ or os.path.split(os.environ.get('SHELL', ''))[-1] == 'fish':
message = dals("""
#
# To activate this environment, use
#
# $ conda activate %s
#
# To deactivate an active environment, use
#
# $ conda deactivate
""") % env_name_or_prefix
elif on_win:
message = dals("""
#
# To activate this environment, use:
# > activate %s
#
# To deactivate an active environment, use:
# > deactivate
#
# * for power-users using bash, you must source
#
""") % env_name_or_prefix
else:
message = dals("""
#
# To activate this environment, use:
# > source activate %s
#
# To deactivate an active environment, use:
# > source deactivate
#
""") % env_name_or_prefix
print(message) # TODO: use logger
def get_revision(arg, json=False):
try:
return int(arg)
except ValueError:
raise CondaValueError("expected revision number, not: '%s'" % arg, json)
def install(args, parser, command='install'):
"""
conda install, conda update, and conda create
"""
context.validate_configuration()
check_non_admin()
newenv = bool(command == 'create')
isupdate = bool(command == 'update')
isinstall = bool(command == 'install')
if newenv:
common.ensure_name_or_prefix(args, command)
prefix = context.target_prefix
if newenv:
check_prefix(prefix, json=context.json)
if context.force_32bit and prefix == context.root_prefix:
raise CondaValueError("cannot use CONDA_FORCE_32BIT=1 in base env")
if isupdate and not (args.file or args.all or args.packages):
raise CondaValueError("""no package names supplied
# If you want to update to a newer version of Anaconda, type:
#
# $ conda update --prefix %s anaconda
""" % prefix)
args_packages = [s.strip('"\'') for s in args.packages]
if newenv and not args.no_default_packages:
# Override defaults if they are specified at the command line
# TODO: rework in 4.4 branch using MatchSpec
args_packages_names = [pkg.replace(' ', '=').split('=', 1)[0] for pkg in args_packages]
for default_pkg in context.create_default_packages:
default_pkg_name = default_pkg.replace(' ', '=').split('=', 1)[0]
if default_pkg_name not in args_packages_names:
args_packages.append(default_pkg)
index_args = {
'use_cache': args.use_index_cache,
'channel_urls': context.channels,
'unknown': args.unknown,
'prepend': not args.override_channels,
'use_local': args.use_local
}
num_cp = sum(s.endswith('.tar.bz2') for s in args_packages)
if num_cp:
if num_cp == len(args_packages):
explicit(args_packages, prefix, verbose=not context.quiet)
return
else:
raise CondaValueError("cannot mix specifications with conda package"
" filenames")
specs = []
if args.file:
for fpath in args.file:
specs.extend(common.specs_from_url(fpath, json=context.json))
if '@EXPLICIT' in specs:
explicit(specs, prefix, verbose=not context.quiet, index_args=index_args)
return
specs.extend(common.specs_from_args(args_packages, json=context.json))
if isinstall and args.revision:
get_revision(args.revision, json=context.json)
elif isinstall and not (args.file or args_packages):
raise CondaValueError("too few arguments, "
"must supply command line package specs or --file")
# for 'conda update', make sure the requested specs actually exist in the prefix
# and that they are name-only specs
if isupdate and not args.all:
prefix_data = PrefixData(prefix)
for spec in specs:
spec = MatchSpec(spec)
if not spec.is_name_only_spec:
raise CondaError("Invalid spec for 'conda update': %s\n"
"Use 'conda install' instead." % spec)
if not prefix_data.get(spec.name, None):
raise PackageNotInstalledError(prefix, spec.name)
if newenv and args.clone:
if args.packages:
raise TooManyArgumentsError(0, len(args.packages), list(args.packages),
'did not expect any arguments for --clone')
clone(args.clone, prefix, json=context.json, quiet=context.quiet, index_args=index_args)
touch_nonadmin(prefix)
print_activate(args.name if args.name else prefix)
return
if not isdir(prefix) and not newenv:
if args.mkdir:
try:
os.makedirs(prefix)
except OSError:
raise CondaOSError("Error: could not create directory: %s" % prefix)
else:
raise EnvironmentLocationNotFound(prefix)
try:
if isinstall and args.revision:
index = get_index(channel_urls=index_args['channel_urls'],
prepend=index_args['prepend'], platform=None,
use_local=index_args['use_local'], use_cache=index_args['use_cache'],
unknown=index_args['unknown'], prefix=prefix)
unlink_link_transaction = revert_actions(prefix, get_revision(args.revision), index)
else:
solver = Solver(prefix, context.channels, context.subdirs, specs_to_add=specs)
unlink_link_transaction = solver.solve_for_transaction(
force_reinstall=context.force,
)
except ResolvePackageNotFound as e:
channels_urls = tuple(calculate_channel_urls(
channel_urls=index_args['channel_urls'],
prepend=index_args['prepend'],
platform=None,
use_local=index_args['use_local'],
))
raise PackagesNotFoundError(e.bad_deps, channels_urls)
except (UnsatisfiableError, SystemExit) as e:
# Unsatisfiable package specifications/no such revision/import error
if e.args and 'could not import' in e.args[0]:
raise CondaImportError(text_type(e))
raise
handle_txn(unlink_link_transaction, prefix, args, newenv)
def handle_txn(unlink_link_transaction, prefix, args, newenv, remove_op=False):
if unlink_link_transaction.nothing_to_do:
if remove_op:
# No packages found to remove from environment
raise PackagesNotFoundError(args.package_names)
elif not newenv:
if context.json:
common.stdout_json_success(message='All requested packages already installed.')
else:
print('\n# All requested packages already installed.\n')
return
if not context.json:
unlink_link_transaction.print_transaction_summary()
common.confirm_yn()
elif context.dry_run:
actions = unlink_link_transaction._make_legacy_action_groups()[0]
common.stdout_json_success(prefix=prefix, actions=actions, dry_run=True)
raise DryRunExit()
try:
unlink_link_transaction.download_and_extract()
if context.download_only:
raise CondaExitZero('Package caches prepared. UnlinkLinkTransaction cancelled with '
'--download-only option.')
unlink_link_transaction.execute()
except SystemExit as e:
raise CondaSystemExit('Exiting', e)
if newenv:
touch_nonadmin(prefix)
print_activate(args.name if args.name else prefix)
if context.json:
actions = unlink_link_transaction._make_legacy_action_groups()[0]
common.stdout_json_success(prefix=prefix, actions=actions)
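# Illustrative sketch (assumption, not original source): the programmatic flow
# that install() drives above, reduced to its core --
#   solver = Solver(prefix, context.channels, context.subdirs, specs_to_add=['numpy'])
#   txn = solver.solve_for_transaction(force_reinstall=context.force)
#   handle_txn(txn, prefix, args, newenv=False)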
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.servicedirectory_v1beta1.types import lookup_service
from .base import LookupServiceTransport, DEFAULT_CLIENT_INFO
class LookupServiceGrpcTransport(LookupServiceTransport):
"""gRPC backend transport for LookupService.
Service Directory API for looking up service data at runtime.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "servicedirectory.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "servicedirectory.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def resolve_service(
self,
) -> Callable[
[lookup_service.ResolveServiceRequest], lookup_service.ResolveServiceResponse
]:
r"""Return a callable for the resolve service method over gRPC.
Returns a
[service][google.cloud.servicedirectory.v1beta1.Service] and its
associated endpoints. Resolving a service is not considered an
active developer method.
Returns:
Callable[[~.ResolveServiceRequest],
~.ResolveServiceResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "resolve_service" not in self._stubs:
self._stubs["resolve_service"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.LookupService/ResolveService",
request_serializer=lookup_service.ResolveServiceRequest.serialize,
response_deserializer=lookup_service.ResolveServiceResponse.deserialize,
)
return self._stubs["resolve_service"]
def close(self):
self.grpc_channel.close()
__all__ = ("LookupServiceGrpcTransport",)
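# Example (hypothetical usage sketch, not part of the generated file): the
# transport is normally created for you by LookupServiceClient, but it can be
# exercised directly --
#   from google.cloud.servicedirectory_v1beta1.types import lookup_service
#   transport = LookupServiceGrpcTransport()  # falls back to application default credentials
#   request = lookup_service.ResolveServiceRequest(
#       name="projects/p/locations/l/namespaces/n/services/s")
#   response = transport.resolve_service(request)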
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ConnectionMonitorsOperations(object):
"""ConnectionMonitorsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
parameters, # type: "_models.ConnectionMonitor"
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConnectionMonitor')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
parameters, # type: "_models.ConnectionMonitor"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ConnectionMonitorResult"]
"""Create or update a connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param parameters: Parameters that define the operation to create a connection monitor.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.ConnectionMonitor
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectionMonitorResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.ConnectionMonitorResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorResult"
"""Gets a connection monitor by name.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionMonitorResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.ConnectionMonitorResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorResult"
"""Update tags of the specified connection monitor.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param parameters: Parameters supplied to update connection monitor tags.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionMonitorResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.ConnectionMonitorResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def _stop_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
def begin_stop(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Stops the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
def _start_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
def begin_start(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Starts the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._start_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
def _query_initial(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionMonitorQueryResult"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._query_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_query_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
def begin_query(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
connection_monitor_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ConnectionMonitorQueryResult"]
"""Query a snapshot of the most recent connection states.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name given to the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ConnectionMonitorQueryResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_05_01.models.ConnectionMonitorQueryResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._query_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
def list(
self,
resource_group_name, # type: str
network_watcher_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ConnectionMonitorListResult"]
"""Lists all connection monitors for the specified Network Watcher.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ConnectionMonitorListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_05_01.models.ConnectionMonitorListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors'} # type: ignore
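    # Illustrative usage sketch (assumption, not part of the generated file):
    #   ops = network_client.connection_monitors  # a ConnectionMonitorsOperations
    #   poller = ops.begin_create_or_update('rg', 'watcher', 'monitor', parameters)
    #   result = poller.result()          # blocks until the LRO completes
    #   for monitor in ops.list('rg', 'watcher'):
    #       print(monitor.name)           # ItemPaged handles nextLink paging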
|
|
import sys
import time
from django.conf import settings
from django.db.backends.base.creation import BaseDatabaseCreation
from django.db.utils import DatabaseError
from django.utils.functional import cached_property
from django.utils.six.moves import input
TEST_DATABASE_PREFIX = 'test_'
PASSWORD = 'Im_a_lumberjack'
class DatabaseCreation(BaseDatabaseCreation):
@cached_property
def _maindb_connection(self):
"""
This is analogous to other backends' `_nodb_connection` property,
which allows access to an "administrative" connection which can
be used to manage the test databases.
For Oracle, the only connection that can be used for that purpose
is the main (non-test) connection.
"""
settings_dict = settings.DATABASES[self.connection.alias]
user = settings_dict.get('SAVED_USER') or settings_dict['USER']
password = settings_dict.get('SAVED_PASSWORD') or settings_dict['PASSWORD']
settings_dict = settings_dict.copy()
settings_dict.update(USER=user, PASSWORD=password)
DatabaseWrapper = type(self.connection)
return DatabaseWrapper(settings_dict, alias=self.connection.alias)
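    # Sketch (assumption, not original): callers use this administrative
    # connection for DDL that the test user cannot run itself, e.g.
    #   cursor = self._maindb_connection.cursor()
    #   cursor.execute('CREATE USER %(user)s IDENTIFIED BY %(password)s' % parameters)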
def _create_test_db(self, verbosity=1, autoclobber=False, keepdb=False):
parameters = self._get_test_db_params()
cursor = self._maindb_connection.cursor()
if self._test_database_create():
try:
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception as e:
# if we want to keep the db, then no need to do any of the below,
# just return and skip it all.
if keepdb:
return
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = input(
"It appears the test database, %s, already exists. "
"Type 'yes' to delete it, or 'no' to cancel: " % parameters['user'])
if autoclobber or confirm == 'yes':
if verbosity >= 1:
print("Destroying old test database for alias '%s'..." % self.connection.alias)
try:
self._execute_test_db_destruction(cursor, parameters, verbosity)
except DatabaseError as e:
if 'ORA-29857' in str(e):
self._handle_objects_preventing_db_destruction(cursor, parameters,
verbosity, autoclobber)
else:
# Ran into a database error that isn't about leftover objects in the tablespace
sys.stderr.write("Got an error destroying the old test database: %s\n" % e)
sys.exit(2)
except Exception as e:
sys.stderr.write("Got an error destroying the old test database: %s\n" % e)
sys.exit(2)
try:
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
if self._test_user_create():
if verbosity >= 1:
print("Creating test user...")
try:
self._create_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error creating the test user: %s\n" % e)
if not autoclobber:
confirm = input(
"It appears the test user, %s, already exists. Type "
"'yes' to delete it, or 'no' to cancel: " % parameters['user'])
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
if verbosity >= 1:
print("Creating test user...")
self._create_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error recreating the test user: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
self._maindb_connection.close() # done with main user -- test user and tablespaces created
self._switch_to_test_user(parameters)
return self.connection.settings_dict['NAME']
def _switch_to_test_user(self, parameters):
"""
Oracle doesn't have the concept of separate databases under the same user.
Thus, we use a separate user (see _create_test_db). This method is used
to switch to that user. We will need the main user again for clean-up when
we end testing, so we keep its credentials in SAVED_USER/SAVED_PASSWORD
entries in the settings dict.
"""
real_settings = settings.DATABASES[self.connection.alias]
real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = \
self.connection.settings_dict['USER']
real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = \
self.connection.settings_dict['PASSWORD']
real_test_settings = real_settings['TEST']
test_settings = self.connection.settings_dict['TEST']
real_test_settings['USER'] = real_settings['USER'] = test_settings['USER'] = \
self.connection.settings_dict['USER'] = parameters['user']
real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = parameters['password']
def set_as_test_mirror(self, primary_settings_dict):
"""
Set this database up to be used in testing as a mirror of a primary database
whose settings are given.
"""
self.connection.settings_dict['USER'] = primary_settings_dict['USER']
self.connection.settings_dict['PASSWORD'] = primary_settings_dict['PASSWORD']
def _handle_objects_preventing_db_destruction(self, cursor, parameters, verbosity, autoclobber):
# There are objects in the test tablespace which prevent dropping it
# The easy fix is to drop the test user -- but are we allowed to do so?
print("There are objects in the old test database which prevent its destruction.")
print("If they belong to the test user, deleting the user will allow the test "
"database to be recreated.")
print("Otherwise, you will need to find and remove each of these objects, "
"or use a different tablespace.\n")
if self._test_user_create():
if not autoclobber:
confirm = input("Type 'yes' to delete user %s: " % parameters['user'])
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error destroying the test user: %s\n" % e)
sys.exit(2)
try:
if verbosity >= 1:
print("Destroying old test database for alias '%s'..." % self.connection.alias)
self._execute_test_db_destruction(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error destroying the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled -- test database cannot be recreated.")
sys.exit(1)
else:
print("Django is configured to use pre-existing test user '%s',"
" and will not attempt to delete it.\n" % parameters['user'])
print("Tests cancelled -- test database cannot be recreated.")
sys.exit(1)
def _destroy_test_db(self, test_database_name, verbosity=1):
"""
Destroy a test database, reconnecting as the main (non-test) user first
so that the test user and test tablespaces can be dropped.
"""
self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
self.connection.close()
parameters = self._get_test_db_params()
cursor = self._maindb_connection.cursor()
time.sleep(1) # To avoid "database is being accessed by other users" errors.
if self._test_user_create():
if verbosity >= 1:
print('Destroying test user...')
self._destroy_test_user(cursor, parameters, verbosity)
if self._test_database_create():
if verbosity >= 1:
print('Destroying test database tables...')
self._execute_test_db_destruction(cursor, parameters, verbosity)
self._maindb_connection.close()
def _execute_test_db_creation(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_create_test_db(): dbname = %s" % parameters['user'])
statements = [
"""CREATE TABLESPACE %(tblspace)s
DATAFILE '%(datafile)s' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE %(maxsize)s
""",
"""CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
TEMPFILE '%(datafile_tmp)s' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE %(maxsize_tmp)s
""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _create_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_create_test_user(): username = %s" % parameters['user'])
statements = [
"""CREATE USER %(user)s
IDENTIFIED BY %(password)s
DEFAULT TABLESPACE %(tblspace)s
TEMPORARY TABLESPACE %(tblspace_temp)s
QUOTA UNLIMITED ON %(tblspace)s
""",
"""GRANT CREATE SESSION,
CREATE TABLE,
CREATE SEQUENCE,
CREATE PROCEDURE,
CREATE TRIGGER
TO %(user)s""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
# Most test-suites can be run without the create-view privilege. But some need it.
extra = "GRANT CREATE VIEW TO %(user)s"
try:
self._execute_statements(cursor, [extra], parameters, verbosity, allow_quiet_fail=True)
except DatabaseError as err:
description = str(err)
if 'ORA-01031' in description:
if verbosity >= 2:
print("Failed to grant CREATE VIEW permission to test user. This may be ok.")
else:
raise
def _execute_test_db_destruction(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_execute_test_db_destruction(): dbname=%s" % parameters['user'])
statements = [
'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _destroy_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_destroy_test_user(): user=%s" % parameters['user'])
print("Be patient. This can take some time...")
statements = [
'DROP USER %(user)s CASCADE',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_statements(self, cursor, statements, parameters, verbosity, allow_quiet_fail=False):
for template in statements:
stmt = template % parameters
if verbosity >= 2:
print(stmt)
try:
cursor.execute(stmt)
except Exception as err:
if (not allow_quiet_fail) or verbosity >= 2:
sys.stderr.write("Failed (%s)\n" % (err))
raise
def _get_test_db_params(self):
return {
'dbname': self._test_database_name(),
'user': self._test_database_user(),
'password': self._test_database_passwd(),
'tblspace': self._test_database_tblspace(),
'tblspace_temp': self._test_database_tblspace_tmp(),
'datafile': self._test_database_tblspace_datafile(),
'datafile_tmp': self._test_database_tblspace_tmp_datafile(),
'maxsize': self._test_database_tblspace_size(),
'maxsize_tmp': self._test_database_tblspace_tmp_size(),
}
def _test_settings_get(self, key, default=None, prefixed=None):
"""
Return a value from the test settings dict,
or a given default,
or a prefixed entry from the main settings dict
"""
settings_dict = self.connection.settings_dict
val = settings_dict['TEST'].get(key, default)
if val is None:
val = TEST_DATABASE_PREFIX + settings_dict[prefixed]
return val
def _test_database_name(self):
return self._test_settings_get('NAME', prefixed='NAME')
def _test_database_create(self):
return self._test_settings_get('CREATE_DB', default=True)
def _test_user_create(self):
return self._test_settings_get('CREATE_USER', default=True)
def _test_database_user(self):
return self._test_settings_get('USER', prefixed='USER')
def _test_database_passwd(self):
return self._test_settings_get('PASSWORD', default=PASSWORD)
def _test_database_tblspace(self):
return self._test_settings_get('TBLSPACE', prefixed='USER')
def _test_database_tblspace_tmp(self):
settings_dict = self.connection.settings_dict
return settings_dict['TEST'].get('TBLSPACE_TMP',
TEST_DATABASE_PREFIX + settings_dict['USER'] + '_temp')
def _test_database_tblspace_datafile(self):
tblspace = '%s.dbf' % self._test_database_tblspace()
return self._test_settings_get('DATAFILE', default=tblspace)
def _test_database_tblspace_tmp_datafile(self):
tblspace = '%s.dbf' % self._test_database_tblspace_tmp()
return self._test_settings_get('DATAFILE_TMP', default=tblspace)
def _test_database_tblspace_size(self):
return self._test_settings_get('DATAFILE_MAXSIZE', default='500M')
def _test_database_tblspace_tmp_size(self):
return self._test_settings_get('DATAFILE_TMP_MAXSIZE', default='500M')
def _get_test_db_name(self):
"""
We need to return the 'production' DB name to get the test DB creation
machinery to work. This isn't a big deal in this case because DB
names as handled by Django don't have real counterparts in Oracle.
"""
return self.connection.settings_dict['NAME']
def test_db_signature(self):
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME'],
self._test_database_user(),
)
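# Editor's note (worked example with hypothetical settings): with USER='app'
# and an empty TEST dict, _test_settings_get falls back from the TEST dict,
# to the explicit default, to TEST_DATABASE_PREFIX + the main settings entry.
# The test user and default tablespace both resolve to 'test_app', the
# temporary tablespace to 'test_app_temp', the password to PASSWORD, and
# both datafile maximum sizes to '500M'.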
|
|
from typing import List, Union, Text, Optional, Any, Tuple, Dict
import logging
import scipy.sparse
import numpy as np
import tensorflow as tf
from rasa.utils.tensorflow.constants import SEQUENCE, BALANCED
from rasa.utils.tensorflow.model_data import RasaModelData, Data, FeatureArray
logger = logging.getLogger(__name__)
class RasaDataGenerator(tf.keras.utils.Sequence):
"""Abstract data generator."""
def __init__(
self,
model_data: RasaModelData,
batch_size: Union[int, List[int]],
batch_strategy: Text = SEQUENCE,
shuffle: bool = True,
):
"""Initializes the data generator.
Args:
model_data: The model data to use.
batch_size: The batch size(s).
batch_strategy: The batch strategy.
shuffle: If 'True', data should be shuffled.
"""
self.model_data = model_data
self.batch_size = batch_size
self.shuffle = shuffle
self.batch_strategy = batch_strategy
def __len__(self) -> int:
"""Number of batches in the Sequence.
Returns:
The number of batches in the Sequence.
"""
raise NotImplementedError
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""Gets batch at position `index`.
Arguments:
index: position of the batch in the Sequence.
Returns:
A batch (tuple of input data and target data).
"""
raise NotImplementedError
def on_epoch_end(self) -> None:
"""Update the data after every epoch."""
raise NotImplementedError
def _shuffle_and_balance(self, batch_size: int) -> Data:
data = self.model_data.data
if self.shuffle:
data = self.model_data.shuffled_data(data)
if self.batch_strategy == BALANCED:
data = self.model_data.balanced_data(data, batch_size, self.shuffle)
# do not override self.model_data.data, because we need original data for
# balancing on the next epoch
return data
@staticmethod
def prepare_batch(
data: Data,
start: Optional[int] = None,
end: Optional[int] = None,
tuple_sizes: Optional[Dict[Text, int]] = None,
) -> Tuple[Optional[np.ndarray], ...]:
"""Slices model data into batch using given start and end value.
Args:
data: The data to prepare.
start: The start index of the batch
end: The end index of the batch
tuple_sizes: If a feature is not present, the batch is padded with
None values; tuple_sizes maps each feature key to the number of
None values to add for it.
Returns:
The features of the batch.
"""
batch_data = []
for key, attribute_data in data.items():
for sub_key, f_data in attribute_data.items():
# add None for not present values during processing
if not f_data:
if tuple_sizes:
batch_data += [None] * tuple_sizes[key]
else:
batch_data.append(None)
continue
for v in f_data:
if start is not None and end is not None:
_data = v[start:end]
elif start is not None:
_data = v[start:]
elif end is not None:
_data = v[:end]
else:
_data = v[:]
if _data.is_sparse:
batch_data.extend(
RasaDataGenerator._scipy_matrix_to_values(_data)
)
else:
batch_data.append(RasaDataGenerator._pad_dense_data(_data))
# len of batch_data is equal to the number of keys in model data
return tuple(batch_data)
@staticmethod
def _pad_dense_data(array_of_dense: FeatureArray) -> np.ndarray:
"""Pad data of different lengths.
Sequential data is padded with zeros. Zeros are added to the end of data.
Args:
array_of_dense: The array to pad.
Returns:
The padded array.
"""
if array_of_dense.number_of_dimensions == 4:
return RasaDataGenerator._pad_4d_dense_data(array_of_dense)
if array_of_dense[0].ndim < 2:
# data doesn't contain a sequence
return array_of_dense.astype(np.float32)
data_size = len(array_of_dense)
max_seq_len = max([x.shape[0] for x in array_of_dense])
data_padded = np.zeros(
[data_size, max_seq_len, array_of_dense[0].shape[-1]],
dtype=array_of_dense[0].dtype,
)
for i in range(data_size):
data_padded[i, : array_of_dense[i].shape[0], :] = array_of_dense[i]
return data_padded.astype(np.float32)
@staticmethod
def _pad_4d_dense_data(array_of_array_of_dense: FeatureArray) -> np.ndarray:
# in case of dialogue data we may have 4 dimensions:
# batch size x dialogue history length x sequence length x number of features
# as transformers cannot handle 4D tensors, pad and reshape the data
# so that the resulting tensor is 3D with shape
# (sum of dialogue history lengths over the batch x max sequence length
# x number of features)
# the original shape and the original dialogue lengths are passed on to the
# model, which can use them to transform the 3D tensor back into 4D
# to create 4D tensor inputs, "fake" zero features were created for
# nonexistent inputs; to save computation we filter these features out
# before passing the data to tf methods
number_of_features = array_of_array_of_dense[0][0].shape[-1]
array_of_array_of_dense = RasaDataGenerator._filter_out_fake_inputs(
array_of_array_of_dense
)
if not array_of_array_of_dense:
# return empty 3d array with appropriate last dims
return np.zeros((0, 0, number_of_features), dtype=np.float32)
combined_dialogue_len = sum(
len(array_of_dense) for array_of_dense in array_of_array_of_dense
)
max_seq_len = max(
[
x.shape[0]
for array_of_dense in array_of_array_of_dense
for x in array_of_dense
]
)
data_padded = np.zeros(
[combined_dialogue_len, max_seq_len, number_of_features],
dtype=array_of_array_of_dense[0][0].dtype,
)
current_sum_dialogue_len = 0
for i, array_of_dense in enumerate(array_of_array_of_dense):
for j, dense in enumerate(array_of_dense):
data_padded[current_sum_dialogue_len + j, : dense.shape[0], :] = dense
current_sum_dialogue_len += len(array_of_dense)
return data_padded.astype(np.float32)
@staticmethod
def _scipy_matrix_to_values(array_of_sparse: FeatureArray) -> List[np.ndarray]:
"""Convert a scipy matrix into indices, data, and shape.
Args:
array_of_sparse: The sparse data array.
Returns:
A list of dense numpy arrays representing the sparse data.
"""
if array_of_sparse.number_of_dimensions == 4:
return RasaDataGenerator._4d_scipy_matrix_to_values(array_of_sparse)
# we need to make sure that the matrices are coo_matrices otherwise the
# transformation does not work (e.g. you cannot access x.row, x.col)
if not isinstance(array_of_sparse[0], scipy.sparse.coo_matrix):
array_of_sparse = [x.tocoo() for x in array_of_sparse]
max_seq_len = max([x.shape[0] for x in array_of_sparse])
# get the indices of values
indices = np.hstack(
[
np.vstack([i * np.ones_like(x.row), x.row, x.col])
for i, x in enumerate(array_of_sparse)
]
).T
data = np.hstack([x.data for x in array_of_sparse])
number_of_features = array_of_sparse[0].shape[-1]
shape = np.array((len(array_of_sparse), max_seq_len, number_of_features))
return [
indices.astype(np.int64),
data.astype(np.float32),
shape.astype(np.int64),
]
@staticmethod
def _4d_scipy_matrix_to_values(
array_of_array_of_sparse: FeatureArray,
) -> List[np.ndarray]:
# in case of dialogue data we may have 4 dimensions:
# batch size x dialogue history length x sequence length x number of features
# transformers cannot handle 4D tensors, therefore pad and reshape the data
# so that the resulting tensor is 3D with shape
# (sum of dialogue history lengths over the batch x max sequence length
# x number of features)
# the original shape and the original dialogue lengths are passed on to the
# model, which can use them to transform the 3D tensor back into 4D
# to create 4D tensor inputs, "fake" zero features were created for
# nonexistent inputs; to save computation we filter these features out
# before passing the data to tf methods
number_of_features = array_of_array_of_sparse[0][0].shape[-1]
array_of_array_of_sparse = RasaDataGenerator._filter_out_fake_inputs(
array_of_array_of_sparse
)
if not array_of_array_of_sparse:
# create empty array with appropriate last dims
return [
np.empty((0, 3), dtype=np.int64),
np.array([], dtype=np.float32),
np.array([0, 0, number_of_features], dtype=np.int64),
]
# we need to make sure that the matrices are coo_matrices otherwise the
# transformation does not work (e.g. you cannot access x.row, x.col)
if not isinstance(array_of_array_of_sparse[0][0], scipy.sparse.coo_matrix):
array_of_array_of_sparse = [
[x.tocoo() for x in array_of_sparse]
for array_of_sparse in array_of_array_of_sparse
]
dialogue_len = [
len(array_of_sparse) for array_of_sparse in array_of_array_of_sparse
]
combined_dialogue_len = sum(dialogue_len)
max_seq_len = max(
[
x.shape[0]
for array_of_sparse in array_of_array_of_sparse
for x in array_of_sparse
]
)
# get the indices of values
indices = np.hstack(
[
np.vstack(
[sum(dialogue_len[:i]) + j * np.ones_like(x.row), x.row, x.col]
)
for i, array_of_sparse in enumerate(array_of_array_of_sparse)
for j, x in enumerate(array_of_sparse)
]
).T
data = np.hstack(
[
x.data
for array_of_sparse in array_of_array_of_sparse
for x in array_of_sparse
]
)
shape = np.array((combined_dialogue_len, max_seq_len, number_of_features))
return [
indices.astype(np.int64),
data.astype(np.float32),
shape.astype(np.int64),
]
@staticmethod
def _filter_out_fake_inputs(
array_of_array_of_features: FeatureArray,
) -> Union[List[List[np.ndarray]], List[List[scipy.sparse.spmatrix]]]:
return list(
filter(
# filter out empty lists created by the inner filter below
lambda x: len(x) > 0,
[
# filter out all the "fake" inputs; an input is known to be "fake"
# when its sequence dimension is `0`
list(filter(lambda x: x.shape[0] > 0, array_of_features))
for array_of_features in array_of_array_of_features
],
)
)
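# Editor's sketch (an assumption about intended use, not part of the original
# module): the [indices, data, shape] triplet produced by
# `_scipy_matrix_to_values` above matches the `tf.SparseTensor` constructor;
# this helper shows the same conversion for a single 2D coo_matrix.
def _sketch_coo_to_sparse_tensor(matrix: scipy.sparse.coo_matrix) -> tf.SparseTensor:
    """Convert one scipy coo_matrix into a tf.SparseTensor."""
    # stack (row, col) pairs into an (nnz, 2) int64 index array
    indices = np.vstack([matrix.row, matrix.col]).T.astype(np.int64)
    return tf.SparseTensor(indices, matrix.data.astype(np.float32), matrix.shape)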
class RasaBatchDataGenerator(RasaDataGenerator):
"""Data generator with an optional increasing batch size."""
def __init__(
self,
model_data: RasaModelData,
batch_size: Union[List[int], int],
epochs: int = 1,
batch_strategy: Text = SEQUENCE,
shuffle: bool = True,
):
"""Initializes the increasing batch size data generator.
Args:
model_data: The model data to use.
batch_size: The batch size.
epochs: The total number of epochs.
batch_strategy: The batch strategy.
shuffle: If 'True', data will be shuffled.
"""
super().__init__(model_data, batch_size, batch_strategy, shuffle)
if isinstance(batch_size, list):
logger.debug(
"The provided batch size is a list, so this data generator will use a "
"linearly increasing batch size."
)
self._epochs = epochs
# we use `on_epoch_end` method to prepare data for the next epoch
# set current epoch to `-1`, so that `on_epoch_end` will increase it to `0`
self._current_epoch = -1
# actual batch size will be set inside `on_epoch_end`
self._current_batch_size = 0
# create separate data variable that will store modified data for each batch
self._data = {}
self.on_epoch_end()
def __len__(self) -> int:
"""Number of batches in the Sequence.
Returns:
The number of batches in the Sequence.
"""
# data was rebalanced, so need to recalculate number of examples
num_examples = self.model_data.number_of_examples(self._data)
batch_size = self._current_batch_size
return num_examples // batch_size + int(num_examples % batch_size > 0)
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""Gets batch at position `index`.
Arguments:
index: position of the batch in the Sequence.
Returns:
A batch (tuple of input data and target data).
"""
start = index * self._current_batch_size
end = start + self._current_batch_size
# return input and target data; as our target data is inside the input
# data, return None for the target data
return self.prepare_batch(self._data, start, end), None
def on_epoch_end(self) -> None:
"""Update the data after every epoch."""
self._current_epoch += 1
self._current_batch_size = self._linearly_increasing_batch_size()
self._data = self._shuffle_and_balance(self._current_batch_size)
def _linearly_increasing_batch_size(self) -> int:
"""Linearly increase batch size with every epoch.
The idea comes from https://arxiv.org/abs/1711.00489.
Returns:
The batch size to use in this epoch.
"""
if not isinstance(self.batch_size, list):
return int(self.batch_size)
if self._epochs > 1:
return int(
self.batch_size[0]
+ self._current_epoch
* (self.batch_size[1] - self.batch_size[0])
/ (self._epochs - 1)
)
else:
return int(self.batch_size[0])
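# Editor's sketch (not part of the original module): the interpolation inside
# `_linearly_increasing_batch_size` reduces to this standalone helper, shown
# with a worked example below.
def _sketch_linear_batch_size(batch_size, epochs, current_epoch):
    """Linearly interpolate between batch_size[0] and batch_size[1]."""
    if not isinstance(batch_size, list):
        return int(batch_size)
    if epochs > 1:
        return int(
            batch_size[0]
            + current_epoch * (batch_size[1] - batch_size[0]) / (epochs - 1)
        )
    return int(batch_size[0])
# e.g. batch_size=[64, 256] over epochs=5 yields 64, 112, 160, 208, 256
# for epochs 0 through 4.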
|
|
import pytest
from tests.utils import async
import io
import hashlib
import aiohttpretty
from waterbutler.core import streams
from waterbutler.core import metadata
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.s3 import S3Provider
from waterbutler.providers.s3.metadata import S3FileMetadata
from waterbutler.providers.s3.metadata import S3FolderMetadata
@pytest.fixture
def auth():
return {
'name': 'cat',
'email': 'cat@cat.com',
}
@pytest.fixture
def credentials():
return {
'access_key': 'Dont dead',
'secret_key': 'open inside',
}
@pytest.fixture
def settings():
return {'bucket': 'that kerning'}
@pytest.fixture
def provider(auth, credentials, settings):
return S3Provider(auth, credentials, settings)
@pytest.fixture
def file_content():
return b'sleepy'
@pytest.fixture
def file_like(file_content):
return io.BytesIO(file_content)
@pytest.fixture
def file_stream(file_like):
return streams.FileStreamReader(file_like)
@pytest.fixture
def folder_metadata():
return b'''<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucket</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>my-image.jpg</Key>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"fba9dede5f27731c9771645a39863328"</ETag>
<Size>434234</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Contents>
<Contents>
<Key>my-third-image.jpg</Key>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"1b2cf535f27731c974343645a3985328"</ETag>
<Size>64994</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Contents>
<CommonPrefixes>
<Prefix> photos/</Prefix>
</CommonPrefixes>
</ListBucketResult>'''
@pytest.fixture
def just_a_folder_metadata():
return b'''<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucket</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>naptime/</Key>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"fba9dede5f27731c9771645a39863328"</ETag>
<Size>0</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Contents>
</ListBucketResult>'''
@pytest.fixture
def contents_and_self():
return b'''<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucket</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>thisfolder/</Key>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"fba9dede5f27731c9771645a39863328"</ETag>
<Size>0</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Contents>
<Contents>
<Key>thisfolder/item1</Key>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"fba9dede5f27731c9771645a39863328"</ETag>
<Size>0</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Contents>
<Contents>
<Key>thisfolder/item2</Key>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"fba9dede5f27731c9771645a39863328"</ETag>
<Size>0</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Contents>
</ListBucketResult>'''
@pytest.fixture
def folder_empty_metadata():
return b'''<?xml version="1.0" encoding="UTF-8"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucket</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>
<IsTruncated>false</IsTruncated>
</ListBucketResult>'''
@pytest.fixture
def file_metadata():
return {
'Content-Length': 9001,
'Last-Modified': 'SomeTime',
'Content-Type': 'binary/octet-stream',
'ETag': '"fba9dede5f27731c9771645a39863328"'
}
@pytest.fixture
def version_metadata():
return b'''<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>bucket</Name>
<Prefix>my</Prefix>
<KeyMarker/>
<VersionIdMarker/>
<MaxKeys>5</MaxKeys>
<IsTruncated>false</IsTruncated>
<Version>
<Key>my-image.jpg</Key>
<VersionId>3/L4kqtJl40Nr8X8gdRQBpUMLUo</VersionId>
<IsLatest>true</IsLatest>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>"fba9dede5f27731c9771645a39863328"</ETag>
<Size>434234</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Version>
<Version>
<Key>my-image.jpg</Key>
<VersionId>QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</VersionId>
<IsLatest>false</IsLatest>
<LastModified>2009-10-10T17:50:30.000Z</LastModified>
<ETag>"9b2cf535f27731c974343645a3985328"</ETag>
<Size>166434</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Version>
<Version>
<Key>my-image.jpg</Key>
<VersionId>UIORUnfndfhnw89493jJFJ</VersionId>
<IsLatest>false</IsLatest>
<LastModified>2009-10-11T12:50:30.000Z</LastModified>
<ETag>"772cf535f27731c974343645a3985328"</ETag>
<Size>64</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Version>
</ListVersionsResult>'''
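# Editor's sketch (an assumption about `tests.utils.async`, which is imported
# above but not shown here): in this era of WaterButler the decorator ran
# generator-based coroutine tests to completion, roughly like this.
def _sketch_async(func):
    import asyncio
    import functools

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        coro = asyncio.coroutine(func)  # wrap a `yield from` style generator
        loop = asyncio.new_event_loop()
        try:
            return loop.run_until_complete(coro(*args, **kwargs))
        finally:
            loop.close()
    return wrapper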
class TestValidatePath:
@async
def test_normal_name(self, provider):
path = yield from provider.validate_path('/this/is/a/path.txt')
assert path.name == 'path.txt'
assert path.parent.name == 'a'
assert path.is_file
assert not path.is_dir
assert not path.is_root
@async
def test_folder(self, provider):
path = yield from provider.validate_path('/this/is/a/folder/')
assert path.name == 'folder'
assert path.parent.name == 'a'
assert not path.is_file
assert path.is_dir
assert not path.is_root
@async
def test_root(self, provider):
path = yield from provider.validate_path('/')
assert not path.is_file
assert path.is_dir
assert path.is_root
class TestCRUD:
@async
@pytest.mark.aiohttpretty
def test_download(self, provider):
path = WaterButlerPath('/muhtriangle')
url = provider.bucket.new_key(path.path).generate_url(100, response_headers={'response-content-disposition': 'attachment'})
aiohttpretty.register_uri('GET', url, body=b'delicious', auto_length=True)
result = yield from provider.download(path)
content = yield from result.read()
assert content == b'delicious'
@async
@pytest.mark.aiohttpretty
def test_download_version(self, provider):
path = WaterButlerPath('/muhtriangle')
url = provider.bucket.new_key(path.path).generate_url(
100,
query_parameters={'versionId': 'someversion'},
response_headers={'response-content-disposition': 'attachment'},
)
aiohttpretty.register_uri('GET', url, body=b'delicious', auto_length=True)
result = yield from provider.download(path, version='someversion')
content = yield from result.read()
assert content == b'delicious'
@async
@pytest.mark.aiohttpretty
def test_download_display_name(self, provider):
path = WaterButlerPath('/muhtriangle')
url = provider.bucket.new_key(path.path).generate_url(100, response_headers={'response-content-disposition': "attachment; filename*=UTF-8''tuna"})
aiohttpretty.register_uri('GET', url, body=b'delicious', auto_length=True)
result = yield from provider.download(path, displayName='tuna')
content = yield from result.read()
assert content == b'delicious'
@async
@pytest.mark.aiohttpretty
def test_download_not_found(self, provider):
path = WaterButlerPath('/muhtriangle')
url = provider.bucket.new_key(path.path).generate_url(100, response_headers={'response-content-disposition': 'attachment'})
aiohttpretty.register_uri('GET', url, status=404)
with pytest.raises(exceptions.DownloadError):
yield from provider.download(path)
@async
@pytest.mark.aiohttpretty
def test_download_folder_400s(self, provider):
with pytest.raises(exceptions.DownloadError) as e:
yield from provider.download(WaterButlerPath('/cool/folder/mom/'))
assert e.value.code == 400
@async
@pytest.mark.aiohttpretty
def test_upload_update(self, provider, file_content, file_stream, file_metadata):
path = WaterButlerPath('/foobah')
content_md5 = hashlib.md5(file_content).hexdigest()
url = provider.bucket.new_key(path.path).generate_url(100, 'PUT')
metadata_url = provider.bucket.new_key(path.path).generate_url(100, 'HEAD')
aiohttpretty.register_uri('HEAD', metadata_url, headers=file_metadata)
aiohttpretty.register_uri('PUT', url, status=201, headers={'ETag': '"{}"'.format(content_md5)})
metadata, created = yield from provider.upload(file_stream, path)
assert metadata.kind == 'file'
assert not created
assert aiohttpretty.has_call(method='PUT', uri=url)
assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)
@async
@pytest.mark.aiohttpretty
def test_delete(self, provider):
path = WaterButlerPath('/some-file')
url = provider.bucket.new_key(path.path).generate_url(100, 'DELETE')
aiohttpretty.register_uri('DELETE', url, status=200)
yield from provider.delete(path)
assert aiohttpretty.has_call(method='DELETE', uri=url)
@async
@pytest.mark.aiohttpretty
def test_accepts_url(self, provider):
path = WaterButlerPath('/my-image')
url = provider.bucket.new_key(path.path).generate_url(100, 'GET', response_headers={'response-content-disposition': 'attachment'})
ret_url = yield from provider.download(path, accept_url=True)
assert ret_url == url
class TestMetadata:
@async
@pytest.mark.aiohttpretty
def test_metadata_folder(self, provider, folder_metadata):
path = WaterButlerPath('/darp/')
url = provider.bucket.generate_url(100)
aiohttpretty.register_uri('GET', url, body=folder_metadata, headers={'Content-Type': 'application/xml'})
result = yield from provider.metadata(path)
assert isinstance(result, list)
assert len(result) == 3
assert result[0].name == ' photos'
assert result[1].name == 'my-image.jpg'
assert result[2].extra['md5'] == '1b2cf535f27731c974343645a3985328'
@async
@pytest.mark.aiohttpretty
def test_metadata_folder_self_listing(self, provider, contents_and_self):
path = WaterButlerPath('/thisfolder/')
url = provider.bucket.generate_url(100)
aiohttpretty.register_uri('GET', url, body=contents_and_self)
result = yield from provider.metadata(path)
assert isinstance(result, list)
assert len(result) == 2
for fobj in result:
assert fobj.name != path.path
@async
@pytest.mark.aiohttpretty
def test_just_a_folder_metadata_folder(self, provider, just_a_folder_metadata):
path = WaterButlerPath('/')
url = provider.bucket.generate_url(100)
aiohttpretty.register_uri('GET', url, body=just_a_folder_metadata, headers={'Content-Type': 'application/xml'})
result = yield from provider.metadata(path)
assert isinstance(result, list)
assert len(result) == 1
assert result[0].kind == 'folder'
# @async
# @pytest.mark.aiohttpretty
# def test_must_have_slash(self, provider, just_a_folder_metadata):
# with pytest.raises(exceptions.InvalidPathError):
# yield from provider.metadata('')
@async
@pytest.mark.aiohttpretty
def test_metadata_file(self, provider, file_metadata):
path = WaterButlerPath('/Foo/Bar/my-image.jpg')
url = provider.bucket.new_key(path.path).generate_url(100, 'HEAD')
aiohttpretty.register_uri('HEAD', url, headers=file_metadata)
result = yield from provider.metadata(path)
assert isinstance(result, metadata.BaseFileMetadata)
assert result.path == str(path)
assert result.name == 'my-image.jpg'
assert result.extra['md5'] == 'fba9dede5f27731c9771645a39863328'
@async
@pytest.mark.aiohttpretty
def test_metadata_file_missing(self, provider):
path = WaterButlerPath('/notfound.txt')
url = provider.bucket.new_key(path.path).generate_url(100, 'HEAD')
aiohttpretty.register_uri('HEAD', url, status=404)
with pytest.raises(exceptions.MetadataError):
yield from provider.metadata(path)
@async
@pytest.mark.aiohttpretty
def test_upload(self, provider, file_content, file_stream, file_metadata):
path = WaterButlerPath('/foobah')
content_md5 = hashlib.md5(file_content).hexdigest()
url = provider.bucket.new_key(path.path).generate_url(100, 'PUT')
metadata_url = provider.bucket.new_key(path.path).generate_url(100, 'HEAD')
aiohttpretty.register_uri(
'HEAD',
metadata_url,
responses=[
{'status': 404},
{'headers': file_metadata},
],
)
aiohttpretty.register_uri('PUT', url, status=200, headers={'ETag': '"{}"'.format(content_md5)})
metadata, created = yield from provider.upload(file_stream, path)
assert metadata.kind == 'file'
assert created
assert aiohttpretty.has_call(method='PUT', uri=url)
assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)
class TestCreateFolder:
@async
@pytest.mark.aiohttpretty
def test_raise_409(self, provider, just_a_folder_metadata):
path = WaterButlerPath('/alreadyexists/')
url = provider.bucket.generate_url(100, 'GET')
aiohttpretty.register_uri('GET', url, body=just_a_folder_metadata, headers={'Content-Type': 'application/xml'})
with pytest.raises(exceptions.FolderNamingConflict) as e:
yield from provider.create_folder(path)
assert e.value.code == 409
assert e.value.message == 'Cannot create folder "alreadyexists" because a file or folder already exists at path "/alreadyexists/"'
@async
@pytest.mark.aiohttpretty
def test_must_start_with_slash(self, provider):
path = WaterButlerPath('/alreadyexists')
with pytest.raises(exceptions.CreateFolderError) as e:
yield from provider.create_folder(path)
assert e.value.code == 400
assert e.value.message == 'Path must be a directory'
@async
@pytest.mark.aiohttpretty
def test_errors_out(self, provider):
path = WaterButlerPath('/alreadyexists/')
url = provider.bucket.generate_url(100, 'GET')
create_url = provider.bucket.new_key(path.path).generate_url(100, 'PUT')
aiohttpretty.register_uri('GET', url, status=404)
aiohttpretty.register_uri('PUT', create_url, status=403)
with pytest.raises(exceptions.CreateFolderError) as e:
yield from provider.create_folder(path)
assert e.value.code == 403
@async
@pytest.mark.aiohttpretty
def test_errors_out_metadata(self, provider):
path = WaterButlerPath('/alreadyexists/')
url = provider.bucket.generate_url(100, 'GET')
aiohttpretty.register_uri('GET', url, status=403)
with pytest.raises(exceptions.MetadataError) as e:
yield from provider.create_folder(path)
assert e.value.code == 403
@async
@pytest.mark.aiohttpretty
def test_creates(self, provider):
path = WaterButlerPath('/doesntalreadyexists/')
url = provider.bucket.generate_url(100, 'GET')
create_url = provider.bucket.new_key(path.path).generate_url(100, 'PUT')
aiohttpretty.register_uri('GET', url, status=404)
aiohttpretty.register_uri('PUT', create_url, status=200)
resp = yield from provider.create_folder(path)
assert resp.kind == 'folder'
assert resp.name == 'doesntalreadyexists'
assert resp.path == '/doesntalreadyexists/'
class TestOperations:
# @async
# @pytest.mark.aiohttpretty
# def test_copy(self, provider, file_metadata):
# dest_path = WaterButlerPath('/dest')
# source_path = WaterButlerPath('/source')
# headers = {'x-amz-copy-source': '/{}/{}'.format(provider.settings['bucket'], source_path.path)}
# metadata_url = provider.bucket.new_key(dest_path.path).generate_url(100, 'HEAD')
# url = provider.bucket.new_key(dest_path.path).generate_url(100, 'PUT', headers=headers)
# aiohttpretty.register_uri('PUT', url, status=200)
# aiohttpretty.register_uri('HEAD', metadata_url, headers=file_metadata)
# resp = yield from provider.copy(provider, source_path, dest_path)
# # TODO: matching url content for request
# assert resp['kind'] == 'file'
# assert aiohttpretty.has_call(method='HEAD', uri=metadata_url)
# assert aiohttpretty.has_call(method='PUT', uri=url, headers=headers)
@async
@pytest.mark.aiohttpretty
def test_version_metadata(self, provider, version_metadata):
path = WaterButlerPath('/my-image.jpg')
url = provider.bucket.generate_url(100, 'GET', query_parameters={'versions': ''})
aiohttpretty.register_uri('GET', url, status=200, body=version_metadata)
data = yield from provider.revisions(path)
assert isinstance(data, list)
assert len(data) == 3
for item in data:
assert hasattr(item, 'extra')
assert hasattr(item, 'version')
assert hasattr(item, 'version_identifier')
assert aiohttpretty.has_call(method='GET', uri=url)
def test_equality(self, provider):
assert provider.can_intra_copy(provider)
assert provider.can_intra_move(provider)
class TestWebView:
@async
def test_web_view(self, provider):
path = WaterButlerPath('/my-image.jpg')
with pytest.raises(exceptions.UnsupportedError):
yield from provider.web_view(path=path)
|
|
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import textwrap
import mock
import pep8
from nova.hacking import checks
from nova import test
class HackingTestCase(test.NoDBTestCase):
"""This class tests the hacking checks in nova.hacking.checks by passing
strings to the check methods like the pep8/flake8 parser would. The parser
loops over each line in the file and then passes the parameters to the
check method. The parameter names in the check method dictate what type of
object is passed to the check method. The parameter types are::
logical_line: A processed line with the following modifications:
- Multi-line statements converted to a single line.
- Stripped left and right.
- Contents of strings replaced with "xxx" of same length.
- Comments removed.
physical_line: Raw line of text from the input file.
lines: a list of the raw lines from the input file
tokens: the tokens that contribute to this logical line
line_number: line number in the input file
total_lines: number of lines in the input file
blank_lines: blank lines before this one
indent_char: indentation character in this file (" " or "\t")
indent_level: indentation (with tabs expanded to multiples of 8)
previous_indent_level: indentation on previous line
previous_logical: previous logical line
filename: Path of the file being run through pep8
When running a test on a check method, the return will be False/None if
there is no violation in the sample input. If there is an error, a tuple is
returned with a position in the line and a message. So to check the result,
use assertTrue if the check is expected to fail and assertFalse if it
should pass.
"""
def test_virt_driver_imports(self):
expect = (0, "N311: importing code from other virt drivers forbidden")
self.assertEqual(expect, checks.import_no_virt_driver_import_deps(
"from nova.virt.libvirt import utils as libvirt_utils",
"./nova/virt/xenapi/driver.py"))
self.assertEqual(expect, checks.import_no_virt_driver_import_deps(
"import nova.virt.libvirt.utils as libvirt_utils",
"./nova/virt/xenapi/driver.py"))
self.assertIsNone(checks.import_no_virt_driver_import_deps(
"from nova.virt.libvirt import utils as libvirt_utils",
"./nova/virt/libvirt/driver.py"))
self.assertIsNone(checks.import_no_virt_driver_import_deps(
"import nova.virt.firewall",
"./nova/virt/libvirt/firewall.py"))
def test_virt_driver_config_vars(self):
self.assertIsInstance(checks.import_no_virt_driver_config_deps(
"CONF.import_opt('volume_drivers', "
"'nova.virt.libvirt.driver', group='libvirt')",
"./nova/virt/xenapi/driver.py"), tuple)
self.assertIsNone(checks.import_no_virt_driver_config_deps(
"CONF.import_opt('volume_drivers', "
"'nova.virt.libvirt.driver', group='libvirt')",
"./nova/virt/libvirt/volume.py"))
def test_no_vi_headers(self):
lines = ['Line 1\n', 'Line 2\n', 'Line 3\n', 'Line 4\n', 'Line 5\n',
'Line 6\n', 'Line 7\n', 'Line 8\n', 'Line 9\n', 'Line 10\n',
'Line 11\n', 'Line 12\n', 'Line 13\n', 'Line14\n', 'Line15\n']
self.assertIsNone(checks.no_vi_headers(
"Test string foo", 1, lines))
self.assertEqual(len(list(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
2, lines))), 2)
self.assertIsNone(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
6, lines))
self.assertIsNone(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
9, lines))
self.assertEqual(len(list(checks.no_vi_headers(
"# vim: et tabstop=4 shiftwidth=4 softtabstop=4",
14, lines))), 2)
self.assertIsNone(checks.no_vi_headers(
"Test end string for vi",
15, lines))
def test_assert_true_instance(self):
self.assertEqual(len(list(checks.assert_true_instance(
"self.assertTrue(isinstance(e, "
"exception.BuildAbortException))"))), 1)
self.assertEqual(
len(list(checks.assert_true_instance("self.assertTrue()"))), 0)
def test_assert_equal_type(self):
self.assertEqual(len(list(checks.assert_equal_type(
"self.assertEqual(type(als['QuicAssist']), list)"))), 1)
self.assertEqual(
len(list(checks.assert_equal_type("self.assertTrue()"))), 0)
def test_assert_equal_in(self):
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(a in b, True)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual('str' in 'string', True)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(any(a==1 for a in b), True)"))), 0)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(True, a in b)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(True, 'str' in 'string')"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(True, any(a==1 for a in b))"))), 0)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(a in b, False)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual('str' in 'string', False)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(any(a==1 for a in b), False)"))), 0)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(False, a in b)"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(False, 'str' in 'string')"))), 1)
self.assertEqual(len(list(checks.assert_equal_in(
"self.assertEqual(False, any(a==1 for a in b))"))), 0)
def test_assert_equal_none(self):
self.assertEqual(len(list(checks.assert_equal_none(
"self.assertEqual(A, None)"))), 1)
self.assertEqual(len(list(checks.assert_equal_none(
"self.assertEqual(None, A)"))), 1)
self.assertEqual(
len(list(checks.assert_equal_none("self.assertIsNone()"))), 0)
def test_assert_true_or_false_with_in_or_not_in(self):
self.assertEqual(len(list(checks.assert_equal_none(
"self.assertEqual(A, None)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in B)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(A in B)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A not in B)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(A not in B)"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in B, 'some message')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(A in B, 'some message')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A not in B, 'some message')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(A not in B, 'some message')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in 'some string with spaces')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in 'some string with spaces')"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in ['1', '2', '3'])"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(A in [1, 2, 3])"))), 1)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(any(A > 5 for A in B))"))), 0)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertTrue(any(A > 5 for A in B), 'some message')"))), 0)
self.assertEqual(len(list(checks.assert_true_or_false_with_in(
"self.assertFalse(some in list1 and some2 in list2)"))), 0)
def test_no_translate_debug_logs(self):
self.assertEqual(len(list(checks.no_translate_debug_logs(
"LOG.debug(_('foo'))", "nova/scheduler/foo.py"))), 1)
self.assertEqual(len(list(checks.no_translate_debug_logs(
"LOG.debug('foo')", "nova/scheduler/foo.py"))), 0)
self.assertEqual(len(list(checks.no_translate_debug_logs(
"LOG.info(_('foo'))", "nova/scheduler/foo.py"))), 0)
def test_no_setting_conf_directly_in_tests(self):
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option = 1", "nova/tests/test_foo.py"))), 1)
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.group.option = 1", "nova/tests/test_foo.py"))), 1)
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option = foo = 1", "nova/tests/test_foo.py"))), 1)
# Shouldn't fail with comparisons
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option == 'foo'", "nova/tests/test_foo.py"))), 0)
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option != 1", "nova/tests/test_foo.py"))), 0)
# Shouldn't fail since not in nova/tests/
self.assertEqual(len(list(checks.no_setting_conf_directly_in_tests(
"CONF.option = 1", "nova/compute/foo.py"))), 0)
def test_log_translations(self):
logs = ['audit', 'error', 'info', 'warning', 'critical', 'warn',
'exception']
levels = ['_LI', '_LW', '_LE', '_LC']
debug = "LOG.debug('OK')"
self.assertEqual(
0, len(list(checks.validate_log_translations(debug, debug, 'f'))))
for log in logs:
bad = 'LOG.%s("Bad")' % log
self.assertEqual(1,
len(list(
checks.validate_log_translations(bad, bad, 'f'))))
ok = "LOG.%s('OK') # noqa" % log
self.assertEqual(0,
len(list(
checks.validate_log_translations(ok, ok, 'f'))))
ok = "LOG.%s(variable)" % log
self.assertEqual(0,
len(list(
checks.validate_log_translations(ok, ok, 'f'))))
for level in levels:
ok = "LOG.%s(%s('OK'))" % (log, level)
self.assertEqual(0,
len(list(
checks.validate_log_translations(ok, ok, 'f'))))
def test_no_mutable_default_args(self):
self.assertEqual(1, len(list(checks.no_mutable_default_args(
"def get_info_from_bdm(virt_type, bdm, mapping=[])"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined = []"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined, undefined = [], {}"))))
def test_check_explicit_underscore_import(self):
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"LOG.info(_('My info message'))",
"cinder/tests/other_files.py"))), 1)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files.py"))), 1)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"from cinder.i18n import _",
"cinder/tests/other_files.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"LOG.info(_('My info message'))",
"cinder/tests/other_files.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"from cinder.i18n import _, _LW",
"cinder/tests/other_files2.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files2.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"_ = translations.ugettext",
"cinder/tests/other_files3.py"))), 0)
self.assertEqual(len(list(checks.check_explicit_underscore_import(
"msg = _('My message')",
"cinder/tests/other_files3.py"))), 0)
def test_use_jsonutils(self):
def __get_msg(fun):
msg = ("N324: jsonutils.%(fun)s must be used instead of "
"json.%(fun)s" % {'fun': fun})
return [(0, msg)]
for method in ('dump', 'dumps', 'load', 'loads'):
self.assertEqual(
__get_msg(method),
list(checks.use_jsonutils("json.%s(" % method,
"./nova/virt/xenapi/driver.py")))
self.assertEqual(0,
len(list(checks.use_jsonutils("json.%s(" % method,
"./plugins/xenserver/script.py"))))
self.assertEqual(0,
len(list(checks.use_jsonutils("jsonx.%s(" % method,
"./nova/virt/xenapi/driver.py"))))
self.assertEqual(0,
len(list(checks.use_jsonutils("json.dumb",
"./nova/virt/xenapi/driver.py"))))
# We are patching pep8 so that only the check under test is actually
# installed.
@mock.patch('pep8._checks',
{'physical_line': {}, 'logical_line': {}, 'tree': {}})
def _run_check(self, code, checker, filename=None):
pep8.register_check(checker)
lines = textwrap.dedent(code).strip().splitlines(True)
checker = pep8.Checker(filename=filename, lines=lines)
checker.check_all()
checker.report._deferred_print.sort()
return checker.report._deferred_print
def _assert_has_errors(self, code, checker, expected_errors=None,
filename=None):
actual_errors = [e[:3] for e in
self._run_check(code, checker, filename)]
self.assertEqual(expected_errors or [], actual_errors)
def _assert_has_no_errors(self, code, checker, filename=None):
self._assert_has_errors(code, checker, filename=filename)
def test_str_unicode_exception(self):
checker = checks.CheckForStrUnicodeExc
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
p = str(e)
return p
"""
errors = [(5, 16, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
try:
p = unicode(a) + str(b)
except ValueError as e:
p = e
return p
"""
self._assert_has_no_errors(code, checker)
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
p = unicode(e)
return p
"""
errors = [(5, 20, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
try:
p = unicode(a) + unicode(b)
except ValueError as ve:
p = str(e) + str(ve)
p = e
return p
"""
errors = [(8, 20, 'N325'), (8, 29, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
try:
p = str(a) + str(b)
except ValueError as e:
try:
p = unicode(a) + unicode(b)
except ValueError as ve:
p = str(e) + unicode(ve)
p = str(e)
return p
"""
errors = [(8, 20, 'N325'), (8, 33, 'N325'), (9, 16, 'N325')]
self._assert_has_errors(code, checker, expected_errors=errors)
def test_api_version_decorator_check(self):
code = """
@some_other_decorator
@wsgi.api_version("2.5")
def my_method():
pass
"""
self._assert_has_errors(code, checks.check_api_version_decorator,
expected_errors=[(2, 0, "N332")])
def test_oslo_assert_raises_regexp(self):
code = """
self.assertRaisesRegexp(ValueError,
"invalid literal for.*XYZ'$",
int,
'XYZ')
"""
self._assert_has_errors(code, checks.assert_raises_regexp,
expected_errors=[(1, 0, "N335")])
def test_api_version_decorator_check_no_errors(self):
code = """
class ControllerClass():
@wsgi.api_version("2.5")
def my_method():
pass
"""
self._assert_has_no_errors(code, checks.check_api_version_decorator)
def test_trans_add(self):
checker = checks.CheckForTransAdd
code = """
def fake_tran(msg):
return msg
_ = fake_tran
_LI = _
_LW = _
_LE = _
_LC = _
def f(a, b):
msg = _('test') + 'add me'
msg = _LI('test') + 'add me'
msg = _LW('test') + 'add me'
msg = _LE('test') + 'add me'
msg = _LC('test') + 'add me'
msg = 'add to me' + _('test')
return msg
"""
errors = [(13, 10, 'N326'), (14, 10, 'N326'), (15, 10, 'N326'),
(16, 10, 'N326'), (17, 10, 'N326'), (18, 24, 'N326')]
self._assert_has_errors(code, checker, expected_errors=errors)
code = """
def f(a, b):
msg = 'test' + 'add me'
return msg
"""
self._assert_has_no_errors(code, checker)
def test_dict_constructor_with_list_copy(self):
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dict([(i, connect_info[i])"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" attrs = dict([(k, _from_json(v))"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" type_names = dict((value, key) for key, value in"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dict((value, key) for key, value in"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
"foo(param=dict((k, v) for k, v in bar.items()))"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dict([[i,i] for i in range(3)])"))))
self.assertEqual(1, len(list(checks.dict_constructor_with_list_copy(
" dd = dict([i,i] for i in range(3))"))))
self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
" create_kwargs = dict(snapshot=snapshot,"))))
self.assertEqual(0, len(list(checks.dict_constructor_with_list_copy(
" self._render_dict(xml, data_el, data.__dict__)"))))
def test_check_http_not_implemented(self):
code = """
except NotImplementedError:
common.raise_http_not_implemented_error()
"""
filename = "nova/api/openstack/compute/plugins/v3/test.py"
self._assert_has_no_errors(code, checks.check_http_not_implemented,
filename=filename)
code = """
except NotImplementedError:
msg = _("Unable to set password on instance")
raise exc.HTTPNotImplemented(explanation=msg)
"""
errors = [(3, 4, 'N339')]
self._assert_has_errors(code, checks.check_http_not_implemented,
expected_errors=errors, filename=filename)
filename = "nova/api/openstack/compute/contrib/test.py"
self._assert_has_no_errors(code, checks.check_http_not_implemented,
filename=filename)
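# Editor's sketch (illustrative only, not a real nova check): the checks
# exercised by _run_check above are plain functions registered with
# pep8.register_check; a logical-line check yields an (offset, message)
# tuple for each violation it finds, e.g.:
def _sketch_no_todo_marker(logical_line):
    """N999: a toy logical-line check flagging TODO markers."""
    pos = logical_line.find('TODO')
    if pos >= 0:
        yield (pos, 'N999: TODO marker found')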
|
|
#
# Copyright (c) 2012 Patrice Munger
# This file is part of pynetdicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pynetdicom.googlecode.com
#
import dsutils
from DIMSEparameters import *
import DIMSEprovider
import ACSEprovider
import time
import logging
logger = logging.getLogger(__name__)
class Status(object):
def __init__(self, Type, Description, CodeRange):
self.Type = Type
self.Description = Description
self.CodeRange = CodeRange
def __int__(self):
return self.CodeRange[0]
def __repr__(self):
return self.Type + ' ' + self.Description
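# Usage sketch: a Status coerces to the first code in its range via __int__,
# e.g. int(Status('Success', '', xrange(0x0000, 0x0001))) evaluates to 0x0000.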
class ServiceClass(object):
def __init__(self):
pass
def Code2Status(self, code):
        for dd in dir(self):
            obj = getattr(self, dd)
            if obj.__class__ == Status:
                if code in obj.CodeRange:
                    return obj
# unknown status ...
return None
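# Usage sketch (hypothetical `svc` instance): svc.Code2Status(0xA700) scans
# the class's Status attributes and returns the one whose CodeRange contains
# 0xA700, or None when the code is unknown.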
class VerificationServiceClass(ServiceClass):
Success = Status(
'Success',
'',
xrange(0x0000, 0x0000 + 1)
)
def __init__(self):
ServiceClass.__init__(self)
def SCU(self, id):
cecho = C_ECHO_ServiceParameters()
cecho.MessageID = id
cecho.AffectedSOPClassUID = self.UID
self.DIMSE.Send(cecho, self.pcid, self.maxpdulength)
ans, id = self.DIMSE.Receive(Wait=True)
return self.Code2Status(ans.Status)
def SCP(self, msg):
rsp = C_ECHO_ServiceParameters()
rsp.MessageIDBeingRespondedTo = msg.MessageID.value
        rsp.Status = int(self.Success)
# send response
try:
self.AE.OnReceiveEcho(self)
        except Exception:
            logger.exception(
                "There was an exception in the OnReceiveEcho callback")
self.DIMSE.Send(rsp, self.pcid, self.ACSE.MaxPDULength)
class StorageServiceClass(ServiceClass):
OutOfResources = Status(
'Failure',
'Refused: Out of resources',
xrange(0xA700, 0xA7FF + 1)
)
DataSetDoesNotMatchSOPClassFailure = Status(
'Failure',
'Error: Data Set does not match SOP Class',
xrange(0xA900, 0xA9FF + 1)
)
CannotUnderstand = Status(
'Failure',
'Error: Cannot understand',
xrange(0xC000, 0xCFFF + 1)
)
CoercionOfDataElements = Status(
'Warning',
'Coercion of Data Elements',
xrange(0xB000, 0xB000 + 1)
)
DataSetDoesNotMatchSOPClassWarning = Status(
'Warning',
'Data Set does not match SOP Class',
xrange(0xB007, 0xB007 + 1)
)
ElementDiscarted = Status(
'Warning',
        'Element Discarded',
xrange(0xB006, 0xB006 + 1)
)
Success = Status(
'Success',
'',
xrange(0x0000, 0x0000 + 1)
)
def SCU(self, dataset, msgid):
# build C-STORE primitive
csto = C_STORE_ServiceParameters()
csto.MessageID = msgid
csto.AffectedSOPClassUID = dataset.SOPClassUID
csto.AffectedSOPInstanceUID = dataset.SOPInstanceUID
csto.Priority = 0x0002
csto.DataSet = dsutils.encode(dataset,
self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
# send cstore request
self.DIMSE.Send(csto, self.pcid, self.maxpdulength)
# wait for c-store response
ans, id = self.DIMSE.Receive(Wait=True)
return self.Code2Status(ans.Status.value)
def __init__(self):
ServiceClass.__init__(self)
def SCP(self, msg):
status = None
try:
DS = dsutils.decode(msg.DataSet,
self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
        except Exception:
status = self.CannotUnderstand
# make response
rsp = C_STORE_ServiceParameters()
rsp.MessageIDBeingRespondedTo = msg.MessageID
rsp.AffectedSOPInstanceUID = msg.AffectedSOPInstanceUID
rsp.AffectedSOPClassUID = msg.AffectedSOPClassUID
# callback
if not status:
try:
status = self.AE.OnReceiveStore(self, DS)
            except Exception:
                logger.exception(
                    "There was an exception in the OnReceiveStore callback")
status = self.CannotUnderstand
raise
rsp.Status = int(status)
self.DIMSE.Send(rsp, self.pcid, self.ACSE.MaxPDULength)
class QueryRetrieveServiceClass(ServiceClass):
pass
class QueryRetrieveFindSOPClass(QueryRetrieveServiceClass):
OutOfResources = Status(
'Failure',
'Refused: Out of resources',
xrange(0xA700, 0xA700 + 1)
)
IdentifierDoesNotMatchSOPClass = Status(
'Failure',
'Identifier does not match SOP Class',
xrange(0xA900, 0xA900 + 1)
)
UnableToProcess = Status(
'Failure',
'Unable to process',
xrange(0xC000, 0xCFFF + 1)
)
MatchingTerminatedDueToCancelRequest = Status(
'Cancel',
'Matching terminated due to Cancel request',
xrange(0xFE00, 0xFE00 + 1)
)
Success = Status(
'Success',
'Matching is complete - No final Identifier is supplied',
xrange(0x0000, 0x0000 + 1)
)
    Pending = Status(
        'Pending',
        'Matches are continuing - Current Match is supplied '
        'and any Optional Keys were supported in the same manner as '
        'Required Keys',
        xrange(0xFF00, 0xFF00 + 1)
    )
    PendingWarning = Status(
        'Pending',
        'Matches are continuing - Warning that one or more Optional '
        'Keys were not supported for existence and/or matching for '
        'this identifier',
        xrange(0xFF01, 0xFF01 + 1)
    )
def SCU(self, ds, msgid):
# build C-FIND primitive
cfind = C_FIND_ServiceParameters()
cfind.MessageID = msgid
cfind.AffectedSOPClassUID = self.UID
cfind.Priority = 0x0002
cfind.Identifier = dsutils.encode(ds,
self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
# send c-find request
self.DIMSE.Send(cfind, self.pcid, self.maxpdulength)
while 1:
time.sleep(0.001)
# wait for c-find responses
ans, id = self.DIMSE.Receive(Wait=False)
if not ans:
continue
d = dsutils.decode(
ans.Identifier, self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
try:
status = self.Code2Status(ans.Status.value).Type
            except Exception:
status = None
if status != 'Pending':
break
yield status, d
yield status, d
def SCP(self, msg):
ds = dsutils.decode(msg.Identifier, self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
# make response
rsp = C_FIND_ServiceParameters()
rsp.MessageIDBeingRespondedTo = msg.MessageID
rsp.AffectedSOPClassUID = msg.AffectedSOPClassUID
gen = self.AE.OnReceiveFind(self, ds)
try:
while 1:
time.sleep(0.001)
IdentifierDS, status = gen.next()
rsp.Status = int(status)
rsp.Identifier = dsutils.encode(
IdentifierDS,
self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
# send response
self.DIMSE.Send(rsp, self.pcid, self.ACSE.MaxPDULength)
except StopIteration:
# send final response
rsp = C_FIND_ServiceParameters()
rsp.MessageIDBeingRespondedTo = msg.MessageID
rsp.AffectedSOPClassUID = msg.AffectedSOPClassUID
rsp.Status = int(self.Success)
self.DIMSE.Send(rsp, self.pcid, self.ACSE.MaxPDULength)
class QueryRetrieveGetSOPClass(QueryRetrieveServiceClass):
OutOfResourcesNumberOfMatches = Status(
'Failure',
        'Refused: Out of resources - Unable to calculate number of matches',
xrange(0xA701, 0xA701 + 1)
)
OutOfResourcesUnableToPerform = Status(
'Failure',
'Refused: Out of resources - Unable to perform sub-operations',
xrange(0xA702, 0xA702 + 1)
)
IdentifierDoesNotMatchSOPClass = Status(
'Failure',
'Identifier does not match SOP Class',
xrange(0xA900, 0xA900 + 1)
)
UnableToProcess = Status(
'Failure',
'Unable to process',
xrange(0xC000, 0xCFFF + 1)
)
Cancel = Status(
'Cancel',
'Sub-operations terminated due to Cancel indication',
xrange(0xFE00, 0xFE00 + 1)
)
Warning = Status(
'Warning',
'Sub-operations Complete - One or more Failures or Warnings',
xrange(0xB000, 0xB000 + 1)
)
Success = Status(
'Success',
'Sub-operations Complete - No Failure or Warnings',
xrange(0x0000, 0x0000 + 1)
)
Pending = Status(
'Pending',
'Sub-operations are continuing',
xrange(0xFF00, 0xFF00 + 1)
)
CannotUnderstand = Status(
'Failure',
'Error: Cannot understand',
xrange(0xC000, 0xCFFF + 1)
)
def SCP(self, msg):
ds = dsutils.decode(msg.Identifier, self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
# make response
rsp = C_GET_ServiceParameters()
rsp.MessageIDBeingRespondedTo = msg.MessageID.value
rsp.AffectedSOPClassUID = msg.AffectedSOPClassUID.value
rsp.Status = int(self.Pending)
rsp.NumberOfRemainingSubOperations = 0
rsp.NumberOfCompletedSubOperations = 0
rsp.NumberOfFailedSubOperations = 0
rsp.NumberOfWarningSubOperations = 0
self.DIMSE.Send(rsp, self.pcid, self.maxpdulength)
gen = self.AE.OnReceiveGet(self, ds)
        # build C-STORE primitive
csto = C_STORE_ServiceParameters()
csto.MessageID = 0
for ds in gen:
csto.AffectedSOPClassUID = ds.SOPClassUID
csto.AffectedSOPInstanceUID = ds.SOPInstanceUID
csto.Priority = 0x0002
csto.DataSet = dsutils.encode(ds,
self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
# send cstore request
self.DIMSE.Send(csto, self.pcid, self.maxpdulength)
# wait for c-store response
ans, id = self.DIMSE.Receive(Wait=True)
# TODO: Handle the answers coming back
if self.Code2Status(ans.Status.value).Type != 'Success':
pass
# TODO: Set various values on the rsp here
rsp.Status = int(self.Success)
self.DIMSE.Send(rsp, self.pcid, self.maxpdulength)
def SCU(self, ds, msgid):
# build C-GET primitive
cget = C_GET_ServiceParameters()
cget.MessageID = msgid
cget.AffectedSOPClassUID = self.UID
cget.Priority = 0x0002
cget.Identifier = dsutils.encode(ds,
self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
# send c-get primitive
self.DIMSE.Send(cget, self.pcid, self.maxpdulength)
while 1:
# receive c-store
msg, id = self.DIMSE.Receive(Wait=True)
if msg.__class__ == C_GET_ServiceParameters:
if self.Code2Status(msg.Status.value).Type == 'Pending':
# pending. intermediate C-GET response
pass
else:
# last answer
break
elif msg.__class__ == C_STORE_ServiceParameters:
# send c-store response
rsp = C_STORE_ServiceParameters()
rsp.MessageIDBeingRespondedTo = msg.MessageID
rsp.AffectedSOPInstanceUID = msg.AffectedSOPInstanceUID
rsp.AffectedSOPClassUID = msg.AffectedSOPClassUID
try:
d = dsutils.decode(
msg.DataSet, self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
SOPClass = UID2SOPClass(d.SOPClassUID)
status = self.AE.OnReceiveStore(SOPClass, d)
                except Exception:
# cannot understand
status = self.CannotUnderstand
rsp.Status = int(status)
self.DIMSE.Send(rsp, id, self.maxpdulength)
class QueryRetrieveMoveSOPClass(QueryRetrieveServiceClass):
OutOfResourcesNumberOfMatches = Status(
'Failure',
        'Refused: Out of resources - Unable to calculate number of matches',
xrange(0xA701, 0xA701 + 1)
)
OutOfResourcesUnableToPerform = Status(
'Failure',
'Refused: Out of resources - Unable to perform sub-operations',
xrange(0xA702, 0xA702 + 1)
)
MoveDestinationUnknown = Status(
'Failure',
'Refused: Move destination unknown',
xrange(0xA801, 0xA801 + 1)
)
IdentifierDoesNotMatchSOPClass = Status(
'Failure',
'Identifier does not match SOP Class',
xrange(0xA900, 0xA900 + 1)
)
UnableToProcess = Status(
'Failure',
'Unable to process',
xrange(0xC000, 0xCFFF + 1)
)
Cancel = Status(
'Cancel',
'Sub-operations terminated due to Cancel indication',
xrange(0xFE00, 0xFE00 + 1)
)
Warning = Status(
'Warning',
'Sub-operations Complete - One or more Failures or Warnings',
xrange(0xB000, 0xB000 + 1)
)
Success = Status(
'Success',
'Sub-operations Complete - No Failure or Warnings',
xrange(0x0000, 0x0000 + 1)
)
Pending = Status(
'Pending',
'Sub-operations are continuing',
xrange(0xFF00, 0xFF00 + 1)
)
def SCU(self, ds, destaet, msgid):
        # build C-MOVE primitive
cmove = C_MOVE_ServiceParameters()
cmove.MessageID = msgid
cmove.AffectedSOPClassUID = self.UID
cmove.MoveDestination = destaet
cmove.Priority = 0x0002
cmove.Identifier = dsutils.encode(
ds, self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
        # send c-move request
self.DIMSE.Send(cmove, self.pcid, self.maxpdulength)
while 1:
# wait for c-move responses
time.sleep(0.001)
ans, id = self.DIMSE.Receive(Wait=False)
if not ans:
continue
status = self.Code2Status(ans.Status.value).Type
if status != 'Pending':
break
yield status
def SCP(self, msg):
ds = dsutils.decode(msg.Identifier, self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
# make response
rsp = C_MOVE_ServiceParameters()
rsp.MessageIDBeingRespondedTo = msg.MessageID.value
rsp.AffectedSOPClassUID = msg.AffectedSOPClassUID.value
gen = self.AE.OnReceiveMove(self, ds, msg.MoveDestination.value)
# first value returned by callback must be the complete remote AE specs
remoteAE = gen.next()
# request association to move destination
ass = self.AE.RequestAssociation(remoteAE)
nop = gen.next()
try:
            nfailed = 0
            nwarning = 0
            ncompleted = 0
while 1:
DataSet = gen.next()
# request an association with destination
# send C-STORE
s = str(UID2SOPClass(DataSet.SOPClassUID))
ind = len(s) - s[::-1].find('.')
obj = getattr(ass, s[ind:-2])
status = obj.SCU(DataSet, ncompleted)
                if status.Type == 'Failure':
nfailed += 1
if status.Type == 'Warning':
nwarning += 1
rsp.Status = int(self.Pending)
rsp.NumberOfRemainingSubOperations = nop - ncompleted
rsp.NumberOfCompletedSubOperations = ncompleted
rsp.NumberOfFailedSubOperations = nfailed
rsp.NumberOfWarningSubOperations = nwarning
ncompleted += 1
# send response
self.DIMSE.Send(rsp, self.pcid, self.ACSE.MaxPDULength)
except StopIteration:
# send final response
rsp = C_MOVE_ServiceParameters()
rsp.MessageIDBeingRespondedTo = msg.MessageID.value
rsp.AffectedSOPClassUID = msg.AffectedSOPClassUID.value
rsp.NumberOfRemainingSubOperations = nop - ncompleted
rsp.NumberOfCompletedSubOperations = ncompleted
rsp.NumberOfFailedSubOperations = nfailed
rsp.NumberOfWarningSubOperations = nwarning
rsp.Status = int(self.Success)
self.DIMSE.Send(rsp, self.pcid, self.ACSE.MaxPDULength)
ass.Release(0)
class BasicWorklistServiceClass (ServiceClass):
pass
class ModalityWorklistServiceSOPClass (BasicWorklistServiceClass):
OutOfResources = Status(
'Failure',
'Refused: Out of resources',
xrange(0xA700, 0xA700 + 1)
)
IdentifierDoesNotMatchSOPClass = Status(
'Failure',
'Identifier does not match SOP Class',
xrange(0xA900, 0xA900 + 1)
)
UnableToProcess = Status(
'Failure',
'Unable to process',
xrange(0xC000, 0xCFFF + 1)
)
MatchingTerminatedDueToCancelRequest = Status(
'Cancel',
'Matching terminated due to Cancel request',
xrange(0xFE00, 0xFE00 + 1)
)
Success = Status(
'Success',
'Matching is complete - No final Identifier is supplied',
xrange(0x0000, 0x0000 + 1)
)
Pending = Status(
'Pending',
        'Matches are continuing - Current Match is supplied '
        'and any Optional Keys were supported in the same manner as '
        'Required Keys',
xrange(0xFF00, 0xFF00 + 1)
)
PendingWarning = Status(
'Pending',
        'Matches are continuing - Warning that one or more Optional '
        'Keys were not supported for existence and/or matching for '
        'this identifier',
xrange(0xFF01, 0xFF01 + 1)
)
def SCU(self, ds, msgid):
# build C-FIND primitive
cfind = C_FIND_ServiceParameters()
cfind.MessageID = msgid
cfind.AffectedSOPClassUID = self.UID
cfind.Priority = 0x0002
cfind.Identifier = dsutils.encode(ds,
self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
# send c-find request
self.DIMSE.Send(cfind, self.pcid, self.maxpdulength)
while 1:
time.sleep(0.001)
# wait for c-find responses
ans, id = self.DIMSE.Receive(Wait=False)
if not ans:
continue
d = dsutils.decode(
ans.Identifier, self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
try:
status = self.Code2Status(ans.Status.value).Type
            except Exception:
status = None
if status != 'Pending':
break
yield status, d
yield status, d
def SCP(self, msg):
ds = dsutils.decode(msg.Identifier, self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
# make response
rsp = C_FIND_ServiceParameters()
rsp.MessageIDBeingRespondedTo = msg.MessageID
rsp.AffectedSOPClassUID = msg.AffectedSOPClassUID
gen = self.AE.OnReceiveFind(self, ds)
try:
while 1:
time.sleep(0.001)
IdentifierDS, status = gen.next()
rsp.Status = int(status)
rsp.Identifier = dsutils.encode(
IdentifierDS,
self.transfersyntax.is_implicit_VR,
self.transfersyntax.is_little_endian)
# send response
self.DIMSE.Send(rsp, self.pcid, self.ACSE.MaxPDULength)
except StopIteration:
# send final response
rsp = C_FIND_ServiceParameters()
rsp.MessageIDBeingRespondedTo = msg.MessageID
rsp.AffectedSOPClassUID = msg.AffectedSOPClassUID
rsp.Status = int(self.Success)
self.DIMSE.Send(rsp, self.pcid, self.ACSE.MaxPDULength)
# VERIFICATION SOP CLASSES
class VerificationSOPClass(VerificationServiceClass):
UID = '1.2.840.10008.1.1'
# STORAGE SOP CLASSES
# Slowly adding everything from http://www.dicomlibrary.com/dicom/sop/
class StorageSOPClass(StorageServiceClass):
pass
class MRImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.4'
class EnhancedMRImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.4.1'
class MRSpectroscopyStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.4.2'
class CTImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.2'
class PositronEmissionTomographyImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.128'
class CRImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.1'
class DigitalXRayImagePresentationStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.1.1'
class DigitalXRayImageProcessingStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.1.1.1'
class DigitalMammographyXRayImagePresentationStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.1.2'
class DigitalMammographyXRayImageProcessingStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.1.2.1'
class DigitalIntraOralXRayImagePresentationStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.1.3'
class DigitalIntraOralXRayImageProcessingStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.1.3.1'
class EncapsulatedPDFStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.104.1'
class GrayscaleSoftcopyPresentationStateStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.11.1'
class ColorSoftcopyPresentationStateStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.11.2'
class PseudocolorSoftcopyPresentationStageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.11.3'
class BlendingSoftcopyPresentationStateStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.11.4'
class XRayAngiographicImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.12.1'
class EnhancedXAImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.12.1.1'
class XRayRadiofluoroscopicImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.12.2'
class EnhancedXRFImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.12.2.1'
class EnhancedCTImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.2.1'
class NMImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.20'
class UltrasoundMultiframeImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.3.1'
class SCImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.7'
class MultiframeSingleBitSecondaryCaptureImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.7.1'
class MultiframeGrayscaleByteSecondaryCaptureImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.7.2'
class MultiframeGrayscaleWordSecondaryCaptureImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.7.3'
class MultiframeTrueColorSecondaryCaptureImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.7.4'
class RTImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.481.1'
class RTDoseStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.481.2'
class RTStructureSetStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.481.3'
class RTPlanStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.481.5'
class VLEndoscopicImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.77.1.1'
class SpatialRegistrationSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.66.1'
class EnhancedSRSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.88.22'
class DigitalXRayImageStorageForPresentationSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.1.1'
class DigitalXRayImageStorageForProcessingSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.1.1.1'
class DigitalMammographyXRayImageStorageForPresentationSOPClass(StorageSOPClass): # noqa
UID = '1.2.840.10008.5.1.4.1.1.1.2'
class DigitalMammographyXRayImageStorageForProcessingSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.1.2.1'
class DigitalIntraOralXRayImageStorageForPresentationSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.1.3'
class DigitalIntraOralXRayImageStorageForProcessingSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.1.3.1'
class VideoEndoscopicImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.77.1.1.1'
class VLMicroscopicImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.77.1.2'
class VideoMicroscopicImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.77.1.2.1'
class VLSlideCoordinatesMicroscopicImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.77.1.3'
class VLPhotographicImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.77.1.4'
class VideoPhotographicImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.77.1.4.1'
class OphthalmicPhotography8BitImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.77.1.5.1'
class OphthalmicPhotography16BitImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.77.1.5.2'
class StereometricRelationshipStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.77.1.5.3'
class UltrasoundImageStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.6.1'
class RawDataStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.66'
class SpatialRegistrationStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.66.1'
class SpatialFiducialsStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.66.2'
class DeformableSpatialRegistrationStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.66.3'
class SegmentationStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.66.4'
class RealWorldValueMappingStorageSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.67'
class XRayRadiationDoseStructuredReportSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.88.67'
class EnhancedStructuredReportSOPClass(StorageSOPClass):
UID = '1.2.840.10008.5.1.4.1.1.88.22'
# QUERY RETRIEVE SOP Classes
class QueryRetrieveSOPClass(QueryRetrieveServiceClass):
pass
class PatientRootQueryRetrieveSOPClass(QueryRetrieveSOPClass):
pass
class StudyRootQueryRetrieveSOPClass(QueryRetrieveSOPClass):
pass
class PatientStudyOnlyQueryRetrieveSOPClass(QueryRetrieveSOPClass):
pass
class PatientRootFindSOPClass(PatientRootQueryRetrieveSOPClass,
QueryRetrieveFindSOPClass):
UID = '1.2.840.10008.5.1.4.1.2.1.1'
class PatientRootMoveSOPClass(PatientRootQueryRetrieveSOPClass,
QueryRetrieveMoveSOPClass):
UID = '1.2.840.10008.5.1.4.1.2.1.2'
class PatientRootGetSOPClass(PatientRootQueryRetrieveSOPClass,
QueryRetrieveGetSOPClass):
UID = '1.2.840.10008.5.1.4.1.2.1.3'
class StudyRootFindSOPClass(StudyRootQueryRetrieveSOPClass,
QueryRetrieveFindSOPClass):
UID = '1.2.840.10008.5.1.4.1.2.2.1'
class StudyRootMoveSOPClass(StudyRootQueryRetrieveSOPClass,
QueryRetrieveMoveSOPClass):
UID = '1.2.840.10008.5.1.4.1.2.2.2'
class StudyRootGetSOPClass(StudyRootQueryRetrieveSOPClass,
QueryRetrieveGetSOPClass):
UID = '1.2.840.10008.5.1.4.1.2.2.3'
class PatientStudyOnlyFindSOPClass(PatientStudyOnlyQueryRetrieveSOPClass,
QueryRetrieveFindSOPClass):
UID = '1.2.840.10008.5.1.4.1.2.3.1'
class PatientStudyOnlyMoveSOPClass(PatientStudyOnlyQueryRetrieveSOPClass,
QueryRetrieveMoveSOPClass):
UID = '1.2.840.10008.5.1.4.1.2.3.2'
class PatientStudyOnlyGetSOPClass(PatientStudyOnlyQueryRetrieveSOPClass,
QueryRetrieveGetSOPClass):
UID = '1.2.840.10008.5.1.4.1.2.3.3'
# BASIC WORKLIST SOP Classes
class BasicWorklistSOPClass(BasicWorklistServiceClass):
pass
class ModalityWorklistInformationFindSOPClass(BasicWorklistSOPClass,
ModalityWorklistServiceSOPClass):
UID = '1.2.840.10008.5.1.4.31'
d = dir()
def UID2SOPClass(UID):
    """Returns the SOP class object matching the given UID, or None."""
    for ss in d:
        obj = globals()[ss]
        if getattr(obj, 'UID', None) == UID:
            return obj
    return None
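# Minimal self-check sketch for UID2SOPClass; it executes only when this
# module is run directly, so importing the module is unaffected.
if __name__ == '__main__':
    assert UID2SOPClass('1.2.840.10008.1.1') is VerificationSOPClass
    assert UID2SOPClass('not-a-valid-uid') is None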
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.core.files.uploadedfile import SimpleUploadedFile
from django.forms import *
from django.test import TestCase
from django.utils.safestring import mark_safe
class AssertFormErrorsMixin(object):
def assertFormErrors(self, expected, the_callable, *args, **kwargs):
try:
the_callable(*args, **kwargs)
            self.fail("Testing the 'clean' method on %s failed to raise a ValidationError." % the_callable)
except ValidationError as e:
self.assertEqual(e.messages, expected)
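    # Usage sketch: assertFormErrors([u'REQUIRED'], f.clean, '') asserts that
    # f.clean('') raises ValidationError with messages == [u'REQUIRED'], and
    # fails the test if no ValidationError is raised at all.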
class FormsErrorMessagesTestCase(TestCase, AssertFormErrorsMixin):
def test_charfield(self):
e = {
'required': 'REQUIRED',
'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
}
f = CharField(min_length=5, max_length=10, error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'LENGTH 4, MIN LENGTH 5'], f.clean, '1234')
self.assertFormErrors([u'LENGTH 11, MAX LENGTH 10'], f.clean, '12345678901')
def test_integerfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_value': 'MIN VALUE IS %(limit_value)s',
'max_value': 'MAX VALUE IS %(limit_value)s',
}
f = IntegerField(min_value=5, max_value=10, error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'INVALID'], f.clean, 'abc')
self.assertFormErrors([u'MIN VALUE IS 5'], f.clean, '4')
self.assertFormErrors([u'MAX VALUE IS 10'], f.clean, '11')
def test_floatfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_value': 'MIN VALUE IS %(limit_value)s',
'max_value': 'MAX VALUE IS %(limit_value)s',
}
f = FloatField(min_value=5, max_value=10, error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'INVALID'], f.clean, 'abc')
self.assertFormErrors([u'MIN VALUE IS 5'], f.clean, '4')
self.assertFormErrors([u'MAX VALUE IS 10'], f.clean, '11')
def test_decimalfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_value': 'MIN VALUE IS %(limit_value)s',
'max_value': 'MAX VALUE IS %(limit_value)s',
'max_digits': 'MAX DIGITS IS %s',
'max_decimal_places': 'MAX DP IS %s',
'max_whole_digits': 'MAX DIGITS BEFORE DP IS %s',
}
f = DecimalField(min_value=5, max_value=10, error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'INVALID'], f.clean, 'abc')
self.assertFormErrors([u'MIN VALUE IS 5'], f.clean, '4')
self.assertFormErrors([u'MAX VALUE IS 10'], f.clean, '11')
f2 = DecimalField(max_digits=4, decimal_places=2, error_messages=e)
self.assertFormErrors([u'MAX DIGITS IS 4'], f2.clean, '123.45')
self.assertFormErrors([u'MAX DP IS 2'], f2.clean, '1.234')
self.assertFormErrors([u'MAX DIGITS BEFORE DP IS 2'], f2.clean, '123.4')
def test_datefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
}
f = DateField(error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'INVALID'], f.clean, 'abc')
def test_timefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
}
f = TimeField(error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'INVALID'], f.clean, 'abc')
def test_datetimefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
}
f = DateTimeField(error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'INVALID'], f.clean, 'abc')
def test_regexfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
}
f = RegexField(r'^\d+$', min_length=5, max_length=10, error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'INVALID'], f.clean, 'abcde')
self.assertFormErrors([u'LENGTH 4, MIN LENGTH 5'], f.clean, '1234')
self.assertFormErrors([u'LENGTH 11, MAX LENGTH 10'], f.clean, '12345678901')
def test_emailfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'min_length': 'LENGTH %(show_value)s, MIN LENGTH %(limit_value)s',
'max_length': 'LENGTH %(show_value)s, MAX LENGTH %(limit_value)s',
}
f = EmailField(min_length=8, max_length=10, error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'INVALID'], f.clean, 'abcdefgh')
self.assertFormErrors([u'LENGTH 7, MIN LENGTH 8'], f.clean, 'a@b.com')
self.assertFormErrors([u'LENGTH 11, MAX LENGTH 10'], f.clean, 'aye@bee.com')
def test_filefield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
'missing': 'MISSING',
'empty': 'EMPTY FILE',
}
f = FileField(error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'INVALID'], f.clean, 'abc')
self.assertFormErrors([u'EMPTY FILE'], f.clean, SimpleUploadedFile('name', None))
self.assertFormErrors([u'EMPTY FILE'], f.clean, SimpleUploadedFile('name', ''))
def test_urlfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID',
}
f = URLField(error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'INVALID'], f.clean, 'abc.c')
def test_booleanfield(self):
e = {
'required': 'REQUIRED',
}
f = BooleanField(error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
def test_choicefield(self):
e = {
'required': 'REQUIRED',
'invalid_choice': '%(value)s IS INVALID CHOICE',
}
f = ChoiceField(choices=[('a', 'aye')], error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'b IS INVALID CHOICE'], f.clean, 'b')
def test_multiplechoicefield(self):
e = {
'required': 'REQUIRED',
'invalid_choice': '%(value)s IS INVALID CHOICE',
'invalid_list': 'NOT A LIST',
}
f = MultipleChoiceField(choices=[('a', 'aye')], error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'NOT A LIST'], f.clean, 'b')
self.assertFormErrors([u'b IS INVALID CHOICE'], f.clean, ['b'])
def test_splitdatetimefield(self):
e = {
'required': 'REQUIRED',
'invalid_date': 'INVALID DATE',
'invalid_time': 'INVALID TIME',
}
f = SplitDateTimeField(error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'INVALID DATE', u'INVALID TIME'], f.clean, ['a', 'b'])
def test_ipaddressfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID IP ADDRESS',
}
f = IPAddressField(error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'INVALID IP ADDRESS'], f.clean, '127.0.0')
def test_generic_ipaddressfield(self):
e = {
'required': 'REQUIRED',
'invalid': 'INVALID IP ADDRESS',
}
f = GenericIPAddressField(error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'INVALID IP ADDRESS'], f.clean, '127.0.0')
def test_subclassing_errorlist(self):
class TestForm(Form):
first_name = CharField()
last_name = CharField()
birthday = DateField()
def clean(self):
raise ValidationError("I like to be awkward.")
class CustomErrorList(util.ErrorList):
def __unicode__(self):
return self.as_divs()
def as_divs(self):
if not self: return u''
return mark_safe(u'<div class="error">%s</div>' % ''.join([u'<p>%s</p>' % e for e in self]))
# This form should print errors the default way.
form1 = TestForm({'first_name': 'John'})
self.assertHTMLEqual(str(form1['last_name'].errors), '<ul class="errorlist"><li>This field is required.</li></ul>')
self.assertHTMLEqual(str(form1.errors['__all__']), '<ul class="errorlist"><li>I like to be awkward.</li></ul>')
# This one should wrap error groups in the customized way.
form2 = TestForm({'first_name': 'John'}, error_class=CustomErrorList)
self.assertHTMLEqual(str(form2['last_name'].errors), '<div class="error"><p>This field is required.</p></div>')
self.assertHTMLEqual(str(form2.errors['__all__']), '<div class="error"><p>I like to be awkward.</p></div>')
class ModelChoiceFieldErrorMessagesTestCase(TestCase, AssertFormErrorsMixin):
def test_modelchoicefield(self):
# Create choices for the model choice field tests below.
from regressiontests.forms.models import ChoiceModel
c1 = ChoiceModel.objects.create(pk=1, name='a')
c2 = ChoiceModel.objects.create(pk=2, name='b')
c3 = ChoiceModel.objects.create(pk=3, name='c')
# ModelChoiceField
e = {
'required': 'REQUIRED',
'invalid_choice': 'INVALID CHOICE',
}
f = ModelChoiceField(queryset=ChoiceModel.objects.all(), error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'INVALID CHOICE'], f.clean, '4')
# ModelMultipleChoiceField
e = {
'required': 'REQUIRED',
'invalid_choice': '%s IS INVALID CHOICE',
'list': 'NOT A LIST OF VALUES',
}
f = ModelMultipleChoiceField(queryset=ChoiceModel.objects.all(), error_messages=e)
self.assertFormErrors([u'REQUIRED'], f.clean, '')
self.assertFormErrors([u'NOT A LIST OF VALUES'], f.clean, '3')
self.assertFormErrors([u'4 IS INVALID CHOICE'], f.clean, ['4'])
|
|
"""
Use DeepFool to craft adversarials on MNIST.
"""
import os
import numpy as np
import matplotlib
matplotlib.use('Agg') # noqa: E402
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import tensorflow as tf
from attacks import deepfool
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
img_size = 28
img_chan = 1
n_classes = 10
print('\nLoading MNIST')
mnist = tf.keras.datasets.mnist
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = np.reshape(X_train, [-1, img_size, img_size, img_chan])
X_train = X_train.astype(np.float32) / 255
X_test = np.reshape(X_test, [-1, img_size, img_size, img_chan])
X_test = X_test.astype(np.float32) / 255
to_categorical = tf.keras.utils.to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print('\nSplitting data')
ind = np.random.permutation(X_train.shape[0])
X_train, y_train = X_train[ind], y_train[ind]
VALIDATION_SPLIT = 0.1
n = int(X_train.shape[0] * (1-VALIDATION_SPLIT))
X_valid = X_train[n:]
X_train = X_train[:n]
y_valid = y_train[n:]
y_train = y_train[:n]
print('\nConstructing graph')
def model(x, logits=False, training=False):
with tf.variable_scope('conv0'):
z = tf.layers.conv2d(x, filters=32, kernel_size=[3, 3],
padding='same', activation=tf.nn.relu)
z = tf.layers.max_pooling2d(z, pool_size=[2, 2], strides=2)
with tf.variable_scope('conv1'):
z = tf.layers.conv2d(z, filters=64, kernel_size=[3, 3],
padding='same', activation=tf.nn.relu)
z = tf.layers.max_pooling2d(z, pool_size=[2, 2], strides=2)
with tf.variable_scope('flatten'):
shape = z.get_shape().as_list()
z = tf.reshape(z, [-1, np.prod(shape[1:])])
with tf.variable_scope('mlp'):
z = tf.layers.dense(z, units=128, activation=tf.nn.relu)
z = tf.layers.dropout(z, rate=0.25, training=training)
logits_ = tf.layers.dense(z, units=10, name='logits')
y = tf.nn.softmax(logits_, name='ybar')
if logits:
return y, logits_
return y
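# Usage sketch: model(x) returns softmax probabilities of shape (batch, 10);
# model(x, logits=True) also returns the pre-softmax logits tensor, which is
# what the cross-entropy loss below consumes.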
class Dummy:
pass
env = Dummy()
with tf.variable_scope('model'):
env.x = tf.placeholder(tf.float32, (None, img_size, img_size, img_chan),
name='x')
env.y = tf.placeholder(tf.float32, (None, n_classes), name='y')
env.training = tf.placeholder_with_default(False, (), name='mode')
env.ybar, logits = model(env.x, logits=True, training=env.training)
with tf.variable_scope('acc'):
count = tf.equal(tf.argmax(env.y, axis=1), tf.argmax(env.ybar, axis=1))
env.acc = tf.reduce_mean(tf.cast(count, tf.float32), name='acc')
with tf.variable_scope('loss'):
xent = tf.nn.softmax_cross_entropy_with_logits(labels=env.y,
logits=logits)
env.loss = tf.reduce_mean(xent, name='loss')
with tf.variable_scope('train_op'):
optimizer = tf.train.AdamOptimizer()
env.train_op = optimizer.minimize(env.loss)
env.saver = tf.train.Saver()
with tf.variable_scope('model', reuse=True):
env.adv_epochs = tf.placeholder(tf.int32, (), name='adv_epochs')
env.noise = deepfool(model, env.x, epochs=env.adv_epochs, batch=True,
noise=True)
print('\nInitializing graph')
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
def evaluate(sess, env, X_data, y_data, batch_size=128):
"""
Evaluate TF model by running env.loss and env.acc.
"""
print('\nEvaluating')
n_sample = X_data.shape[0]
n_batch = int((n_sample+batch_size-1) / batch_size)
loss, acc = 0, 0
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
start = batch * batch_size
end = min(n_sample, start + batch_size)
cnt = end - start
batch_loss, batch_acc = sess.run(
[env.loss, env.acc],
feed_dict={env.x: X_data[start:end],
env.y: y_data[start:end]})
loss += batch_loss * cnt
acc += batch_acc * cnt
loss /= n_sample
acc /= n_sample
print(' loss: {0:.4f} acc: {1:.4f}'.format(loss, acc))
return loss, acc
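# Usage sketch: loss, acc = evaluate(sess, env, X_test, y_test) accumulates
# per-batch loss/accuracy weighted by batch size and returns dataset averages.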
def train(sess, env, X_data, y_data, X_valid=None, y_valid=None, epochs=1,
load=False, shuffle=True, batch_size=128, name='model'):
"""
Train a TF model by running env.train_op.
"""
if load:
if not hasattr(env, 'saver'):
return print('\nError: cannot find saver op')
print('\nLoading saved model')
return env.saver.restore(sess, 'model/{}'.format(name))
print('\nTrain model')
n_sample = X_data.shape[0]
n_batch = int((n_sample+batch_size-1) / batch_size)
for epoch in range(epochs):
print('\nEpoch {0}/{1}'.format(epoch + 1, epochs))
if shuffle:
print('\nShuffling data')
ind = np.arange(n_sample)
np.random.shuffle(ind)
X_data = X_data[ind]
y_data = y_data[ind]
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
start = batch * batch_size
end = min(n_sample, start + batch_size)
sess.run(env.train_op, feed_dict={env.x: X_data[start:end],
env.y: y_data[start:end],
env.training: True})
if X_valid is not None:
evaluate(sess, env, X_valid, y_valid)
if hasattr(env, 'saver'):
print('\n Saving model')
os.makedirs('model', exist_ok=True)
env.saver.save(sess, 'model/{}'.format(name))
def predict(sess, env, X_data, batch_size=128):
"""
Do inference by running env.ybar.
"""
print('\nPredicting')
n_classes = env.ybar.get_shape().as_list()[1]
n_sample = X_data.shape[0]
n_batch = int((n_sample+batch_size-1) / batch_size)
yval = np.empty((n_sample, n_classes))
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
start = batch * batch_size
end = min(n_sample, start + batch_size)
y_batch = sess.run(env.ybar, feed_dict={env.x: X_data[start:end]})
yval[start:end] = y_batch
print()
return yval
def make_deepfool(sess, env, X_data, epochs=1, eps=0.01, batch_size=128):
"""
    Generate DeepFool adversarial noise by running env.noise.
"""
print('\nMaking adversarials via DeepFool')
n_sample = X_data.shape[0]
n_batch = int((n_sample + batch_size - 1) / batch_size)
X_noise = np.empty_like(X_data)
for batch in range(n_batch):
print(' batch {0}/{1}'.format(batch + 1, n_batch), end='\r')
start = batch * batch_size
end = min(n_sample, start + batch_size)
noise = sess.run(env.noise, feed_dict={env.x: X_data[start:end],
env.adv_epochs: epochs})
X_noise[start:end] = noise
print()
return X_noise
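# Usage sketch: X_noise = make_deepfool(sess, env, X_test, epochs=3) returns
# per-sample DeepFool perturbations; the script below adds them to X_test and
# clips to [0, 1] to obtain the adversarial images.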
print('\nTraining')
train(sess, env, X_train, y_train, X_valid, y_valid, load=False, epochs=5,
name='mnist')
print('\nEvaluating on clean data')
evaluate(sess, env, X_test, y_test)
print('\nGenerating adversarial data')
X_noise = make_deepfool(sess, env, X_test, epochs=3)
X_adv = np.clip(X_test + 1.02*X_noise, 0, 1)
print(np.min(X_noise), np.max(X_noise))
print('\nEvaluating on adversarial data')
evaluate(sess, env, X_adv, y_test)
print('\nRandomly sample adversarial data from each category')
y1 = predict(sess, env, X_test)
y2 = predict(sess, env, X_adv)
z0 = np.argmax(y_test, axis=1)
z1 = np.argmax(y1, axis=1)
z2 = np.argmax(y2, axis=1)
print('\nPlotting results')
fig = plt.figure(figsize=(10, 2.2))
gs = gridspec.GridSpec(2, 10, wspace=0.05, hspace=0.05)
for i in range(10):
print('Target {0}'.format(i))
ind, = np.where(np.all([z0 == i, z1 == i, z2 != i], axis=0))
ind = np.random.choice(ind)
xcur = [X_test[ind], X_adv[ind]]
ycur = y2[ind]
zcur = z2[ind]
for j in range(2):
img = np.squeeze(xcur[j])
ax = fig.add_subplot(gs[j, i])
ax.imshow(img, cmap='gray', interpolation='none')
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlabel('{0} ({1:.2f})'.format(zcur, ycur[zcur]), fontsize=12)
print('\nSaving figure')
gs.tight_layout(fig)
os.makedirs('img', exist_ok=True)
plt.savefig('img/deepfool_mnist.png')
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import exceptions
from tempest.test import attr
class ServerMetadataTestJSON(base.BaseComputeTest):
_interface = 'json'
@classmethod
def setUpClass(cls):
super(ServerMetadataTestJSON, cls).setUpClass()
cls.client = cls.servers_client
cls.quotas = cls.quotas_client
cls.admin_client = cls._get_identity_admin_client()
resp, tenants = cls.admin_client.list_tenants()
cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
cls.client.tenant_name][0]
resp, server = cls.create_server(meta={}, wait_until='ACTIVE')
cls.server_id = server['id']
def setUp(self):
super(ServerMetadataTestJSON, self).setUp()
meta = {'key1': 'value1', 'key2': 'value2'}
resp, _ = self.client.set_server_metadata(self.server_id, meta)
self.assertEqual(resp.status, 200)
@attr(type='gate')
def test_list_server_metadata(self):
# All metadata key/value pairs for a server should be returned
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
# Verify the expected metadata items are in the list
self.assertEqual(200, resp.status)
expected = {'key1': 'value1', 'key2': 'value2'}
self.assertEqual(expected, resp_metadata)
@attr(type='gate')
def test_set_server_metadata(self):
# The server's metadata should be replaced with the provided values
# Create a new set of metadata for the server
req_metadata = {'meta2': 'data2', 'meta3': 'data3'}
resp, metadata = self.client.set_server_metadata(self.server_id,
req_metadata)
self.assertEqual(200, resp.status)
# Verify the expected values are correct, and that the
# previous values have been removed
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
self.assertEqual(resp_metadata, req_metadata)
@attr(type='gate')
def test_server_create_metadata_key_too_long(self):
# Attempt to start a server with a meta-data key that is > 255
# characters
# Try a few values
for sz in [256, 257, 511, 1023]:
key = "k" * sz
meta = {key: 'data1'}
self.assertRaises(exceptions.OverLimit,
self.create_server,
meta=meta)
# no teardown - all creates should fail
@attr(type='gate')
def test_update_server_metadata(self):
# The server's metadata values should be updated to the
# provided values
meta = {'key1': 'alt1', 'key3': 'value3'}
resp, metadata = self.client.update_server_metadata(self.server_id,
meta)
self.assertEqual(200, resp.status)
# Verify the values have been updated to the proper values
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
expected = {'key1': 'alt1', 'key2': 'value2', 'key3': 'value3'}
self.assertEqual(expected, resp_metadata)
@attr(type='gate')
def test_update_metadata_empty_body(self):
# The original metadata should not be lost if empty metadata body is
# passed
meta = {}
_, metadata = self.client.update_server_metadata(self.server_id, meta)
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
expected = {'key1': 'value1', 'key2': 'value2'}
self.assertEqual(expected, resp_metadata)
@attr(type='gate')
def test_get_server_metadata_item(self):
# The value for a specific metadata key should be returned
resp, meta = self.client.get_server_metadata_item(self.server_id,
'key2')
self.assertEqual('value2', meta['key2'])
@attr(type='gate')
def test_set_server_metadata_item(self):
# The item's value should be updated to the provided value
# Update the metadata value
meta = {'nova': 'alt'}
resp, body = self.client.set_server_metadata_item(self.server_id,
'nova', meta)
self.assertEqual(200, resp.status)
# Verify the meta item's value has been updated
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
expected = {'key1': 'value1', 'key2': 'value2', 'nova': 'alt'}
self.assertEqual(expected, resp_metadata)
@attr(type='gate')
def test_delete_server_metadata_item(self):
# The metadata value/key pair should be deleted from the server
resp, meta = self.client.delete_server_metadata_item(self.server_id,
'key1')
self.assertEqual(204, resp.status)
# Verify the metadata item has been removed
resp, resp_metadata = self.client.list_server_metadata(self.server_id)
expected = {'key2': 'value2'}
self.assertEqual(expected, resp_metadata)
@attr(type=['negative', 'gate'])
def test_server_metadata_negative(self):
# Blank key should trigger an error.
meta = {'': 'data1'}
self.assertRaises(exceptions.BadRequest,
self.create_server,
meta=meta)
# GET on a non-existent server should not succeed
self.assertRaises(exceptions.NotFound,
self.client.get_server_metadata_item, 999, 'test2')
# List metadata on a non-existent server should not succeed
self.assertRaises(exceptions.NotFound,
self.client.list_server_metadata, 999)
# Raise BadRequest if key in uri does not match
# the key passed in body.
meta = {'testkey': 'testvalue'}
self.assertRaises(exceptions.BadRequest,
self.client.set_server_metadata_item,
self.server_id, 'key', meta)
# Set metadata on a non-existent server should not succeed
meta = {'meta1': 'data1'}
self.assertRaises(exceptions.NotFound,
self.client.set_server_metadata, 999, meta)
# An update should not happen for a non-existent image
meta = {'key1': 'value1', 'key2': 'value2'}
self.assertRaises(exceptions.NotFound,
self.client.update_server_metadata, 999, meta)
# Blank key should trigger an error
meta = {'': 'data1'}
self.assertRaises(exceptions.BadRequest,
self.client.update_server_metadata,
self.server_id, meta=meta)
# Should not be able to delete metadata item from a non-existent server
self.assertRaises(exceptions.NotFound,
self.client.delete_server_metadata_item, 999, 'd')
# Raise a 413 OverLimit exception while exceeding metadata items limit
# for tenant.
_, quota_set = self.quotas.get_quota_set(self.tenant_id)
quota_metadata = quota_set['metadata_items']
req_metadata = {}
for num in range(1, quota_metadata + 2):
req_metadata['key' + str(num)] = 'val' + str(num)
self.assertRaises(exceptions.OverLimit,
self.client.set_server_metadata,
self.server_id, req_metadata)
# Raise a 413 OverLimit exception while exceeding metadata items limit
# for tenant (update).
self.assertRaises(exceptions.OverLimit,
self.client.update_server_metadata,
self.server_id, req_metadata)
# Raise a bad request error for blank key.
# set_server_metadata will replace all metadata with new value
meta = {'': 'data1'}
self.assertRaises(exceptions.BadRequest,
self.client.set_server_metadata,
self.server_id, meta=meta)
# Raise a bad request error for a missing metadata field
# set_server_metadata will replace all metadata with new value
meta = {'meta1': 'data1'}
self.assertRaises(exceptions.BadRequest,
self.client.set_server_metadata,
self.server_id, meta=meta, no_metadata_field=True)
class ServerMetadataTestXML(ServerMetadataTestJSON):
_interface = 'xml'
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, _verify_type, \
_infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
def toDF(self, schema=None, sampleRatio=None):
"""
Converts current :class:`RDD` into a :class:`DataFrame`
This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
:param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
:return: a DataFrame
>>> rdd.toDF().collect()
[Row(name=u'Alice', age=1)]
"""
return sparkSession.createDataFrame(self, schema, sampleRatio)
RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
    A SparkSession can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
"""
class Builder(object):
"""Builder for :class:`SparkSession`.
"""
_lock = RLock()
_options = {}
@since(2.0)
def config(self, key=None, value=None, conf=None):
"""Sets a config option. Options set using this method are automatically propagated to
both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
For an existing SparkConf, use `conf` parameter.
>>> from pyspark.conf import SparkConf
>>> SparkSession.builder.config(conf=SparkConf())
<pyspark.sql.session...
For a (key, value) pair, you can omit parameter names.
>>> SparkSession.builder.config("spark.some.config.option", "some-value")
<pyspark.sql.session...
:param key: a key name string for configuration property
:param value: a value for configuration property
:param conf: an instance of :class:`SparkConf`
"""
with self._lock:
if conf is None:
self._options[key] = str(value)
else:
for (k, v) in conf.getAll():
self._options[k] = v
return self
@since(2.0)
def master(self, master):
"""Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
cluster.
:param master: a url for spark master
"""
return self.config("spark.master", master)
@since(2.0)
def appName(self, name):
"""Sets a name for the application, which will be shown in the Spark web UI.
If no application name is set, a randomly generated name will be used.
:param name: an application name
"""
return self.config("spark.app.name", name)
@since(2.0)
def enableHiveSupport(self):
"""Enables Hive support, including connectivity to a persistent Hive metastore, support
for Hive serdes, and Hive user-defined functions.
"""
return self.config("spark.sql.catalogImplementation", "hive")
@since(2.0)
def getOrCreate(self):
"""Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
new one based on the options set in this builder.
This method first checks whether there is a valid global default SparkSession, and if
yes, return that one. If no valid global default SparkSession exists, the method
creates a new SparkSession and assigns the newly created SparkSession as the global
default.
>>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
>>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
True
In case an existing SparkSession is returned, the config options specified
in this builder will be applied to the existing SparkSession.
>>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
>>> s1.conf.get("k1") == s2.conf.get("k1")
True
>>> s1.conf.get("k2") == s2.conf.get("k2")
True
"""
with self._lock:
from pyspark.context import SparkContext
from pyspark.conf import SparkConf
session = SparkSession._instantiatedContext
if session is None:
sparkConf = SparkConf()
for key, value in self._options.items():
sparkConf.set(key, value)
sc = SparkContext.getOrCreate(sparkConf)
# This SparkContext may be an existing one.
for key, value in self._options.items():
# we need to propagate the confs
# before we create the SparkSession. Otherwise, confs like
# warehouse path and metastore url will not be set correctly (
# these confs cannot be changed once the SparkSession is created).
sc._conf.set(key, value)
session = SparkSession(sc)
for key, value in self._options.items():
session.conf.set(key, value)
for key, value in self._options.items():
session.sparkContext._conf.set(key, value)
return session
builder = Builder()
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, jsparkSession=None):
"""Creates a new SparkSession.
>>> from datetime import datetime
>>> spark = SparkSession(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
from pyspark.sql.context import SQLContext
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if jsparkSession is None:
jsparkSession = self._jvm.SparkSession(self._jsc.sc())
self._jsparkSession = jsparkSession
self._jwrapped = self._jsparkSession.sqlContext()
self._wrapped = SQLContext(self._sc, self, self._jwrapped)
_monkey_patch_RDD(self)
install_exception_handler()
if SparkSession._instantiatedContext is None:
SparkSession._instantiatedContext = self
@since(2.0)
def newSession(self):
"""
        Returns a new :class:`SparkSession` as a new session, which has separate SQLConf,
        registered temporary views and UDFs, but shares the underlying
        :class:`SparkContext` and table cache.
"""
return self.__class__(self._sc, self._jsparkSession.newSession())
@property
@since(2.0)
def sparkContext(self):
"""Returns the underlying :class:`SparkContext`."""
return self._sc
@property
@since(2.0)
def version(self):
"""The version of Spark on which this application is running."""
return self._jsparkSession.version()
@property
@since(2.0)
def conf(self):
"""Runtime configuration interface for Spark.
This is the interface through which the user can get and set all Spark and Hadoop
configurations that are relevant to Spark SQL. When getting the value of a config,
this defaults to the value set in the underlying :class:`SparkContext`, if any.
"""
if not hasattr(self, "_conf"):
self._conf = RuntimeConfig(self._jsparkSession.conf())
return self._conf
@property
@since(2.0)
def catalog(self):
"""Interface through which the user may create, drop, alter or query underlying
databases, tables, functions etc.
"""
if not hasattr(self, "_catalog"):
self._catalog = Catalog(self)
return self._catalog
@property
@since(2.0)
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
from pyspark.sql.context import UDFRegistration
return UDFRegistration(self._wrapped)
@since(2.0)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> spark.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> spark.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if end is None:
jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
else:
jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
return DataFrame(jdf, self._wrapped)
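# Note: as implemented above, the single-argument form spark.range(n) is
# equivalent to spark.range(0, n, 1, self._sc.defaultParallelism); both
# forms delegate to the JVM-side SparkSession.range.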
def _inferSchemaFromList(self, data):
"""
Infer schema from list of Row or tuple.
:param data: list of Row or tuple
:return: :class:`pyspark.sql.types.StructType`
"""
if not data:
raise ValueError("can not infer schema from empty dataset")
first = data[0]
if type(first) is dict:
warnings.warn("inferring schema from dict is deprecated,"
"please use pyspark.sql.Row instead")
schema = reduce(_merge_type, map(_infer_schema, data))
if _has_nulltype(schema):
raise ValueError("Some of types cannot be determined after inferring")
return schema
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
first = rdd.first()
if not first:
raise ValueError("The first row in RDD is empty, "
"can not infer schema")
if type(first) is dict:
warnings.warn("Using RDD of dict to inferSchema is deprecated. "
"Use pyspark.sql.Row instead")
if samplingRatio is None:
schema = _infer_schema(first)
if _has_nulltype(schema):
for row in rdd.take(100)[1:]:
schema = _merge_type(schema, _infer_schema(row))
if not _has_nulltype(schema):
break
else:
raise ValueError("Some of types cannot be determined by the "
"first 100 rows, please try again with sampling")
else:
if samplingRatio < 0.99:
rdd = rdd.sample(False, float(samplingRatio))
schema = rdd.map(_infer_schema).reduce(_merge_type)
return schema
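# A minimal sketch of the inference behaviour above (the sample data is
# hypothetical):
#
#   rows = sc.parallelize([Row(a=1, b=None), Row(a=2, b="x")])
#   # samplingRatio=None: the first row gives {a: long, b: null}; rows from
#   # rdd.take(100)[1:] are merged in until no NullType fields remain.
#   schema = spark._inferSchema(rows)
#   # samplingRatio=0.5: roughly half the RDD is sampled and every sampled
#   # row's schema is merged via _merge_type.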
def _createFromRDD(self, rdd, schema, samplingRatio):
"""
Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
"""
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchema(rdd, samplingRatio)
converter = _create_converter(struct)
rdd = rdd.map(converter)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
rdd = rdd.map(schema.toInternal)
return rdd, schema
def _createFromLocal(self, data, schema):
"""
Create an RDD for DataFrame from a list or pandas.DataFrame, returns
the RDD and schema.
"""
# make sure data can be consumed multiple times
if not isinstance(data, list):
data = list(data)
if schema is None or isinstance(schema, (list, tuple)):
struct = self._inferSchemaFromList(data)
converter = _create_converter(struct)
data = map(converter, data)
if isinstance(schema, (list, tuple)):
for i, name in enumerate(schema):
struct.fields[i].name = name
struct.names[i] = name
schema = struct
elif not isinstance(schema, StructType):
raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
# convert python objects to sql data
data = [schema.toInternal(row) for row in data]
return self._sc.parallelize(data), schema
@since(2.0)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or
:class:`pyspark.sql.types.StringType`, it must match the
real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value".
Each record will also be wrapped into a tuple, which can be converted to a row later.
If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation (e.g. row, tuple, int, boolean,
etc.), or :class:`list`, or :class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a
:class:`pyspark.sql.types.StringType` or a list of
column names, default is ``None``. The data type string format equals
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
``int`` as a short name for ``IntegerType``.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> spark.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> spark.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> spark.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> spark.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = spark.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = spark.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = spark.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> spark.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> spark.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
if isinstance(data, DataFrame):
raise TypeError("data is already a DataFrame")
if isinstance(schema, basestring):
schema = _parse_datatype_string(schema)
try:
import pandas
has_pandas = True
except Exception:
has_pandas = False
if has_pandas and isinstance(data, pandas.DataFrame):
if schema is None:
schema = [str(x) for x in data.columns]
data = [r.tolist() for r in data.to_records(index=False)]
verify_func = _verify_type if verifySchema else lambda _, t: True
if isinstance(schema, StructType):
def prepare(obj):
verify_func(obj, schema)
return obj
elif isinstance(schema, DataType):
dataType = schema
schema = StructType().add("value", schema)
def prepare(obj):
verify_func(obj, dataType)
return obj,
else:
if isinstance(schema, list):
schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
prepare = lambda obj: obj
if isinstance(data, RDD):
rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
else:
rdd, schema = self._createFromLocal(map(prepare, data), schema)
jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
df = DataFrame(jdf, self._wrapped)
df._schema = schema
return df
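# Dispatch summary for the method above: a pandas.DataFrame is first
# flattened to a list of records, RDD input goes through _createFromRDD,
# and anything else through _createFromLocal; both paths end in the JVM
# call applySchemaToPythonRDD. A hedged sketch (the data is hypothetical):
#
#   df = spark.createDataFrame([(1, "a"), (2, "b")], "id: int, tag: string")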
@ignore_unicode_prefix
@since(2.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
@since(2.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> df.createOrReplaceTempView("table1")
>>> df2 = spark.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
@property
@since(2.0)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self._wrapped)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
"""
return DataStreamReader(self._wrapped)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
:class:`StreamingQuery` instances active on this context.
.. note:: Experimental.
:return: :class:`StreamingQueryManager`
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._jsparkSession.streams())
@since(2.0)
def stop(self):
"""Stop the underlying :class:`SparkContext`.
"""
self._sc.stop()
SparkSession._instantiatedContext = None
@since(2.0)
def __enter__(self):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
"""
return self
@since(2.0)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
Specifically stop the SparkSession on exit of the with block.
"""
self.stop()
def _test():
import os
import doctest
from pyspark.context import SparkContext
from pyspark.sql import Row
import pyspark.sql.session
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.session.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['sc'] = sc
globs['spark'] = SparkSession(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")])
globs['df'] = rdd.toDF()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.session, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
|
#!/usr/bin/env python
# Copyright (c) 2017,2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
import uuid
from mock import Mock
from mock import patch
import oslo_messaging as messaging
import f5_openstack_agent.lbaasv2.drivers.bigip.constants_v2 as constants
import f5_openstack_agent.lbaasv2.drivers.bigip.plugin_rpc as target_mod
import class_tester_base_class
import mock_builder_base_class
class TestPluginRpcMockBuilder(mock_builder_base_class.MockBuilderBase):
"""Builder class for Mock objects that mock LBaaSv2PluginRPC
This class builds mock-module class objects for isolation of the
LbaasAgentManager. As such, all references to `target` point to
either an instantiated instance of LBaaSv2PluginRPC or a mocked
instance of this class.
Use:
class Tester(object):
my_mock_builder = TestPluginRpcMockBuilder
standalone = TestPluginRpcMockBuilder.standalone
neutron_only = TestPluginRpcMockBuilder.neutron_only
bigip_only = TestPluginRpcMockBuilder.bigip_only
fully_int = TestPluginRpcMockBuilder.fully_int
fixture = my_mock_builder.fixture
def test_foo(fixture):
# this then uses the pytest.fixture fixture from MockBuilder
"""
# non-instantiated original:
_other_builders = dict()
@staticmethod
@patch('f5_openstack_agent.lbaasv2.drivers.bigip.plugin_rpc.'
'LBaaSv2PluginRPC.__init__')
def mocked_target(init):
init.return_value = None
return target_mod.LBaaSv2PluginRPC()
def fully_mocked_target(self, mocked_target):
"""Creates a mocked target that mocks all lower other_builders' targets
This does not mean that the caller's black-box is limited to this
target; the caller can drill further using a system of either mocks
or non-mocks. Please see conftest.MockBuilder for details.
"""
# instantiate other_builders...
self._construct_others()
mocked_target.topic = None
mocked_target.target = Mock() # replace with logic for icontrol mocker
mocked_target._client = Mock() # replace with icontrol mocker later...
mocked_target.context = 'context'
mocked_target.env = 'env'
mocked_target.group = 'group'
mocked_target.host = 'host'
return mocked_target
def mock_get_clusterwide_agent(self, target=None, call_cnt=1,
expected_args=None, static=None, **kwargs):
"""mocks LBaaSv2PluginRPC.get_clusterwide_agent method
This will mock the method in the instantiated production target.
"""
if not target:
target = self.new_fully_mocked_target()
self._mockfactory(target, 'get_clusterwide_agent', static, call_cnt,
expected_args, kwargs)
return target
def mock__call(self, target=None, call_cnt=1, expected_args=None,
static=None, **kwargs):
"""mocks LBaaSv2PluginRPC._call method"""
if not target:
target = self.new_fully_mocked_target()
self._mockfactory(target, '_call', static, call_cnt, expected_args,
kwargs)
return target
def mock_validate_loadbalancers_state(
self, target=None, call_cnt=1, expected_args=None, static=None,
**kwargs):
"""mocks LBaaSv2PluginRPC.validate_loadbalancers_state method"""
if not target:
target = self.new_fully_mocked_target()
self._mockfactory(target, 'validate_loadbalancers_state', static,
call_cnt, expected_args, kwargs)
return target
def mock_validate_listeners_state(
self, target=None, call_cnt=1, expected_args=None, static=None,
**kwargs):
"""mocks LBaaSv2PluginRPC.validate_listeners_state method"""
if not target:
target = self.new_fully_mocked_target()
self._mockfactory(target, 'validate_listeners_state', static,
call_cnt, expected_args, kwargs)
return target
def mock_validate_pools_state(
self, target=None, call_cnt=1, expected_args=None, static=None,
**kwargs):
"""mocks LBaaSv2PluginRPC.validate_pools_state method"""
if not target:
target = self.new_fully_mocked_target()
self._mockfactory(target, 'validate_pools_state', static,
call_cnt, expected_args, kwargs)
return target
def mock_validate_health_monitors_state(
self, target=None, call_cnt=1, expected_args=None, static=None,
**kwargs):
"""mocks LBaaSv2PluginRPC.validate_health_monitors_state method"""
if not target:
target = self.new_fully_mocked_target()
self._mockfactory(target, 'validate_health_monitors_state', static,
call_cnt, expected_args, kwargs)
return target
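# A minimal usage sketch for the builder above (a hypothetical test body,
# assuming the _mockfactory/fixture machinery from mock_builder_base_class):
#
#   builder = TestPluginRpcMockBuilder()
#   target = builder.mock_get_clusterwide_agent(static=True)
#   target.get_clusterwide_agent()  # the mocked method records this call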
class TestPluginRpcConstructor(object):
payload = dict(topic='topic', context='context', env='env', group='group',
host='host')
payload_order = ('topic', 'context', 'env', 'group', 'host')
@classmethod
@patch('oslo_messaging.Target')
@patch('neutron.common.rpc.get_client')
def create_target(cls, payload, m_target, m_get_client):
m_target.return_value = m_target
m_get_client.return_value = m_get_client
return target_mod.LBaaSv2PluginRPC(*payload)
@classmethod
@pytest.fixture
def target(cls):
payload = cls.payload_order
return cls.create_target(payload)
@classmethod
@pytest.fixture
def topic_less_target(cls):
payload = list(cls.payload_order)
payload.remove('topic')
payload.insert(0, None)
return cls.create_target(payload)
@staticmethod
@pytest.fixture
def get_uuid():
return uuid.uuid4()
@pytest.fixture
def mock_logger(self, request):
logger = Mock()
self.ice_log = target_mod.LOG
target_mod.LOG = logger
self.m_logger = logger
request.addfinalizer(self.teardown)
def teardown(self):
log = getattr(self, 'ice_log', None)
target_mod.LOG = log if log else target_mod.LOG
class TestPluginRpc(TestPluginRpcConstructor,
class_tester_base_class.ClassTesterBase):
builder = TestPluginRpcMockBuilder
def test__init__(self, target, topic_less_target):
def positive_case_fully_populated(self, target, expected_args):
assert target.target.called_once_with(
topic=target.topic, version=constants.RPC_API_VERSION)
assert target._client.called_once_with(
target.target, version_cap=None)
for item in expected_args:
assert getattr(target, item) == item
def positive_case_default_topic(self, target):
assert target.topic == constants.TOPIC_PROCESS_ON_HOST_V2
positive_case_fully_populated(self, target, self.payload.keys())
positive_case_default_topic(self, topic_less_target)
def test_get_loadbalancers_by_network(self, target, get_uuid, mock_logger):
populated_payload = self.payload.copy()
map(lambda x: populated_payload.pop(x), ['topic', 'context'])
empty_payload = dict()
target._make_msg = Mock()
target._call = Mock()
def positive_case_no_loadbalancers(target, network_id, payload):
"""Tests scenario where there are no loadbalancers
This simply has _call return with an 'empty' object. This also
tests whether:
* passed tuple matches with kwargs per expected orchestration
"""
expected = 'expected'
target._make_msg.return_value = expected
payload = {x: x + "`" for x in payload}
target._call.return_value = ''
assert target.get_loadbalancers_by_network(
network_id, **payload) == tuple()
payload['network_id'] = network_id
target._make_msg.assert_called_once_with(
'get_loadbalancers_by_network', **payload)
target._call.assert_called_once_with(
target.context, expected, topic=target.topic)
def positive_case_loadbalancers(target, network_id, payload):
"""Tests the scenario of loadbalancers being returned from _call
This test scenario tests:
* a populated loadbalancers list return as a tuple
* the scenario where the target's attributes are used
* When the payload does not have the right kwargs
"""
expected = [1, 2, 3]
target._call.return_value = expected
assert target.get_loadbalancers_by_network(
network_id, **payload) == tuple(expected)
target._make_msg.assert_called_with(
'get_loadbalancers_by_network', network_id=network_id,
group=target.group, host=target.host, env=target.env)
def negative_case(target, network_id, logger):
"""Tests the negative case of the target method
This test method concentrates on the one negative case where a
MessageDeliveryFailure exception is raised while building the
message (``_make_msg``) for the target's ``_call``.
"""
target._make_msg.side_effect = messaging.MessageDeliveryFailure
target.get_loadbalancers_by_network(network_id)
assert logger.error.call_count == 1
positive_case_no_loadbalancers(target, get_uuid, populated_payload)
positive_case_loadbalancers(target, get_uuid, empty_payload)
negative_case(target, get_uuid, self.m_logger)
|
|
#!/usr/bin/env python
# This file is part of tcollector.
# Copyright (C) 2011 StumbleUpon, Inc.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
# General Public License for more details. You should have received a copy
# of the GNU Lesser General Public License along with this program. If not,
# see <http://www.gnu.org/licenses/>.
# Note: I spent many hours reading the Linux kernel's source code to infer the
# exact meaning of some of the obscure but useful metrics it exposes. The
# descriptions of the metrics are correct to the best of my knowledge, but it's
# not always easy to make sense of the Linux kernel's code. Please report any
# inaccuracy you find. -- tsuna.
"""Socket allocation and network statistics for TSDB.
Metrics from /proc/net/sockstat:
- net.sockstat.num_sockets: Number of sockets allocated (only TCP).
- net.sockstat.num_timewait: Number of TCP sockets currently in
TIME_WAIT state.
- net.sockstat.sockets_inuse: Number of sockets in use (TCP/UDP/raw).
- net.sockstat.num_orphans: Number of orphan TCP sockets (not attached
to any file descriptor).
- net.sockstat.memory: Memory allocated for this socket type (in bytes).
- net.sockstat.ipfragqueues: Number of IP flows for which there are
currently fragments queued for reassembly.
Metrics from /proc/net/netstat (`netstat -s' command):
- net.stat.tcp.abort: Number of connections that the kernel had to abort.
type=memory is especially bad, the kernel had to drop a connection due to
having too many orphaned sockets. Other types are normal (e.g. timeout).
- net.stat.tcp.abort.failed: Number of times the kernel failed to abort a
connection because it didn't even have enough memory to reset it (bad).
- net.stat.tcp.congestion.recovery: Number of times the kernel detected
spurious retransmits and was able to recover part or all of the CWND.
- net.stat.tcp.delayedack: Number of delayed ACKs sent of different types.
- net.stat.tcp.failed_accept: Number of times a connection had to be dropped
after the 3WHS. reason=full_acceptq indicates that the application isn't
accepting connections fast enough. You should see SYN cookies too.
- net.stat.tcp.invalid_sack: Number of invalid SACKs we saw of diff types.
(requires Linux v2.6.24-rc1 or newer)
- net.stat.tcp.memory.pressure: Number of times a socket entered the
"memory pressure" mode (not great).
- net.stat.tcp.memory.prune: Number of times a socket had to discard
received data due to low memory conditions (bad).
- net.stat.tcp.packetloss.recovery: Number of times we recovered from packet
loss by type of recovery (e.g. fast retransmit vs SACK).
- net.stat.tcp.receive.queue.full: Number of times a received packet had to
be dropped because the socket's receive queue was full.
(requires Linux v2.6.34-rc2 or newer)
- net.stat.tcp.reording: Number of times we detected re-ordering and how.
- net.stat.tcp.syncookies: SYN cookies (both sent & received).
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import pwd
import re
import resource
import sys
import time
from io import open
# If we're running as root and this user exists, we'll drop privileges.
USER = "nobody"
COLLECTION_INTERVAL = 30 # seconds
# Scalyr edit: Check environment variable for collection interval. TODO: See if we can centralize this code,
# but that is difficult without requiring collectors to include a common module, which goes against the tcollector architecture.
try:
if "TCOLLECTOR_SAMPLE_INTERVAL" in os.environ:
COLLECTION_INTERVAL = float(os.environ["TCOLLECTOR_SAMPLE_INTERVAL"])
except ValueError:
pass
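# For example (hypothetical invocation), running the collector as
#   TCOLLECTOR_SAMPLE_INTERVAL=10 ./netstat.py
# reports every 10 seconds instead of the default 30.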
def drop_privileges():
"""Drops privileges if running as root."""
try:
ent = pwd.getpwnam(USER)
except KeyError:
return
if os.getuid() != 0:
return
os.setgid(ent.pw_gid)
os.setuid(ent.pw_uid)
def main():
"""Main loop"""
drop_privileges()
sys.stdin.close()
interval = COLLECTION_INTERVAL
page_size = resource.getpagesize()
try:
sockstat = open("/proc/net/sockstat", encoding="utf-8")
netstat = open("/proc/net/netstat", encoding="utf-8")
except IOError as e:
print("Failed to open /proc/net/sockstat: %s" % e, file=sys.stderr)
return 13 # Ask tcollector to not re-start us.
# Note: up until v2.6.37-rc2 most of the values were 32 bits.
# The first value is pretty useless since it accounts for some
# socket types but not others. So we don't report it because it's
# more confusing than anything else and it's not well documented
# what type of sockets are or aren't included in this count.
regexp = re.compile(
r"sockets: used \d+\n"
r"TCP: inuse (?P<tcp_inuse>\d+) orphan (?P<orphans>\d+)"
r" tw (?P<tw_count>\d+) alloc (?P<tcp_sockets>\d+)"
r" mem (?P<tcp_pages>\d+)\n"
r"UDP: inuse (?P<udp_inuse>\d+)"
# UDP memory accounting was added in v2.6.25-rc1
r"(?: mem (?P<udp_pages>\d+))?\n"
# UDP-Lite (RFC 3828) was added in v2.6.20-rc2
r"(?:UDPLITE: inuse (?P<udplite_inuse>\d+)\n)?"
r"RAW: inuse (?P<raw_inuse>\d+)\n"
r"FRAG: inuse (?P<ip_frag_nqueues>\d+)"
r" memory (?P<ip_frag_mem>\d+)\n"
)
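# Hypothetical /proc/net/sockstat contents in the shape the regexp above
# expects (values invented):
#
#   sockets: used 290
#   TCP: inuse 28 orphan 0 tw 8 alloc 30 mem 3
#   UDP: inuse 9 mem 2
#   UDPLITE: inuse 0
#   RAW: inuse 0
#   FRAG: inuse 0 memory 0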
def print_sockstat(metric, value, tags=""): # Note: tags must start with ' '
if value is not None:
print("net.sockstat.%s %d %s%s" % (metric, ts, value, tags))
# If a line in /proc/net/netstat doesn't start with a word in this dict,
# we'll ignore it. We use the value to build the metric name.
known_netstatstypes = {
"TcpExt:": "tcp",
"IpExt:": "ip", # We don't collect anything from here for now.
}
# Any stat in /proc/net/netstat that doesn't appear in this dict will be
# ignored. If we find a match, we'll use the (metricname, tags).
known_netstats = {
# An application wasn't able to accept a connection fast enough, so
# the kernel couldn't store an entry in the queue for this connection.
# Instead of dropping it, it sent a cookie to the client.
"SyncookiesSent": ("syncookies", "type=sent"),
# After sending a cookie, it came back to us and passed the check.
"SyncookiesRecv": ("syncookies", "type=received"),
# After sending a cookie, it came back to us but looked invalid.
"SyncookiesFailed": ("syncookies", "type=failed"),
# When a socket is using too much memory (rmem), the kernel will first
# discard any out-of-order packet that has been queued (with SACK).
"OfoPruned": ("memory.prune", "type=drop_ofo_queue"),
# If the kernel is really really desperate and cannot give more memory
# to this socket even after dropping the ofo queue, it will simply
# discard the packet it received. This is Really Bad.
"RcvPruned": ("memory.prune", "type=drop_received"),
# We waited for another packet to send an ACK, but didn't see any, so
# a timer ended up sending a delayed ACK.
"DelayedACKs": ("delayedack", "type=sent"),
# We wanted to send a delayed ACK but failed because the socket was
# locked. So the timer was reset.
"DelayedACKLocked": ("delayedack", "type=locked"),
# We sent a delayed and duplicated ACK because the remote peer
# retransmitted a packet, thinking that it didn't get to us.
"DelayedACKLost": ("delayedack", "type=lost"),
# We completed a 3WHS but couldn't put the socket on the accept queue,
# so we had to discard the connection.
"ListenOverflows": ("failed_accept", "reason=full_acceptq"),
# We couldn't accept a connection because one of: we had no route to
# the destination, we failed to allocate a socket, we failed to
# allocate a new local port bind bucket. Note: this counter
# also includes all the increments made to ListenOverflows...
"ListenDrops": ("failed_accept", "reason=other"),
# A packet was lost and we recovered after a fast retransmit.
"TCPRenoRecovery": ("packetloss.recovery", "type=fast_retransmit"),
# A packet was lost and we recovered by using selective
# acknowledgements.
"TCPSackRecovery": ("packetloss.recovery", "type=sack"),
# We detected re-ordering using FACK (Forward ACK -- the highest
# sequence number known to have been received by the peer when using
# SACK -- FACK is used during congestion control).
"TCPFACKReorder": ("reording", "detectedby=fack"),
# We detected re-ordering using SACK.
"TCPSACKReorder": ("reording", "detectedby=sack"),
# We detected re-ordering using fast retransmit.
"TCPRenoReorder": ("reording", "detectedby=fast_retransmit"),
# We detected re-ordering using the timestamp option.
"TCPTSReorder": ("reording", "detectedby=timestamp"),
# We detected some erroneous retransmits and undid our CWND reduction.
"TCPFullUndo": ("congestion.recovery", "type=full_undo"),
# We detected some erroneous retransmits, a partial ACK arrived while
# we were fast retransmitting, so we were able to partially undo some
# of our CWND reduction.
"TCPPartialUndo": ("congestion.recovery", "type=hoe_heuristic"),
# We detected some erroneous retransmits, a D-SACK arrived and ACK'ed
# all the retransmitted data, so we undid our CWND reduction.
"TCPDSACKUndo": ("congestion.recovery", "type=sack"),
# We detected some erroneous retransmits, a partial ACK arrived, so we
# undid our CWND reduction.
"TCPLossUndo": ("congestion.recovery", "type=ack"),
# We received an unexpected SYN so we sent a RST to the peer.
"TCPAbortOnSyn": ("abort", "type=unexpected_syn"),
# We were in FIN_WAIT1 yet we received a data packet with a sequence
# number that's beyond the last one for this connection, so we RST'ed.
"TCPAbortOnData": ("abort", "type=data_after_fin_wait1"),
# We received data but the user has closed the socket, so we have no
# way of handing it to them, so we RST'ed.
"TCPAbortOnClose": ("abort", "type=data_after_close"),
# This is Really Bad. It happens when there are too many orphaned
# sockets (not attached to a FD) and the kernel has to drop a connection.
# Sometimes it will send a reset to the peer, sometimes it won't.
"TCPAbortOnMemory": ("abort", "type=out_of_memory"),
# The connection timed out really hard.
"TCPAbortOnTimeout": ("abort", "type=timeout"),
# We killed a socket that was closed by the application and lingered
# around for long enough.
"TCPAbortOnLinger": ("abort", "type=linger"),
# We tried to send a reset, probably during one of the TCPAbort*
# situations above, but we failed e.g. because we couldn't allocate
# enough memory (very bad).
"TCPAbortFailed": ("abort.failed", None),
# Number of times a socket was put in "memory pressure" due to a non
# fatal memory allocation failure (reduces the send buffer size etc).
"TCPMemoryPressures": ("memory.pressure", None),
# We got a completely invalid SACK block and discarded it.
"TCPSACKDiscard": ("invalid_sack", "type=invalid"),
# We got a duplicate SACK while retransmitting so we discarded it.
"TCPDSACKIgnoredOld": ("invalid_sack", "type=retransmit"),
# We got a duplicate SACK and discarded it.
"TCPDSACKIgnoredNoUndo": ("invalid_sack", "type=olddup"),
# We received something but had to drop it because the socket's
# receive queue was full.
"TCPBacklogDrop": ("receive.queue.full", None),
}
def print_netstat(statstype, metric, value, tags=""):
if tags:
space = " "
else:
tags = space = ""
print("net.stat.%s.%s %d %s%s%s" % (statstype, metric, ts, value, space, tags))
statsdikt = {}
while True:
# Scalyr edit to add in check for parent. A ppid of 1 means our parent has died.
if os.getppid() == 1:
sys.exit(1)
ts = int(time.time())
sockstat.seek(0)
netstat.seek(0)
data = sockstat.read()
stats = netstat.read()
m = re.match(regexp, data)
if not m:
print("Cannot parse sockstat: %r" % data, file=sys.stderr)
return 13
# The difference between the first two values is the number of
# sockets allocated vs the number of sockets actually in use.
print_sockstat("num_sockets", m.group("tcp_sockets"), " type=tcp")
print_sockstat("num_timewait", m.group("tw_count"))
print_sockstat("sockets_inuse", m.group("tcp_inuse"), " type=tcp")
print_sockstat("sockets_inuse", m.group("udp_inuse"), " type=udp")
print_sockstat("sockets_inuse", m.group("udplite_inuse"), " type=udplite")
print_sockstat("sockets_inuse", m.group("raw_inuse"), " type=raw")
print_sockstat("num_orphans", m.group("orphans"))
print_sockstat("memory", int(m.group("tcp_pages")) * page_size, " type=tcp")
if m.group("udp_pages") is not None:
print_sockstat("memory", int(m.group("udp_pages")) * page_size, " type=udp")
print_sockstat("memory", m.group("ip_frag_mem"), " type=ipfrag")
print_sockstat("ipfragqueues", m.group("ip_frag_nqueues"))
# /proc/net/netstat has an awkward column-oriented format. It looks
# like this:
# Header: SomeMetric OtherMetric
# Header: 1 2
# OtherHeader: ThirdMetric FooBar
# OtherHeader: 42 51
# We first group all the lines for each header together:
# {"Header:": [["SomeMetric", "OtherHeader"], ["1", "2"]],
# "OtherHeader:": [["ThirdMetric", "FooBar"], ["42", "51"]]}
# Then we'll create a dict for each type:
# {"SomeMetric": "1", "OtherHeader": "2"}
for line in stats.splitlines():
line = line.split()
if line[0] not in known_netstatstypes:
print(
"Unrecoginized line in /proc/net/netstat: %r (file=%r)"
% (line, stats),
file=sys.stderr,
)
continue
statstype = line.pop(0)
statsdikt.setdefault(known_netstatstypes[statstype], []).append(line)
for statstype, stats in statsdikt.items():
# stats is now:
# [["SyncookiesSent", "SyncookiesRecv", ...], ["1", "2", ....]]
assert len(stats) == 2, repr(statsdikt)
stats = dict(list(zip(*stats)))
value = stats.get("ListenDrops")
if value is not None: # Undo the kernel's double counting
stats["ListenDrops"] = int(value) - int(stats.get("ListenOverflows", 0))
for stat, (metric, tags) in known_netstats.items():
value = stats.get(stat)
if value is not None:
print_netstat(statstype, metric, value, tags)
stats.clear()
statsdikt.clear()
sys.stdout.flush()
time.sleep(interval)
if __name__ == "__main__":
sys.exit(main())
|
|
import threading, time
from sqlalchemy import pool, interfaces, select, event
import sqlalchemy as tsa
from test.lib import testing
from test.lib.util import gc_collect, lazy_gc
from test.lib.testing import eq_, assert_raises
from test.lib.engines import testing_engine
from test.lib import fixtures
mcid = 1
class MockDBAPI(object):
throw_error = False
def connect(self, *args, **kwargs):
if self.throw_error:
raise Exception("couldnt connect !")
delay = kwargs.pop('delay', 0)
if delay:
time.sleep(delay)
return MockConnection()
class MockConnection(object):
closed = False
def __init__(self):
global mcid
self.id = mcid
mcid += 1
def close(self):
self.closed = True
def rollback(self):
pass
def cursor(self):
return MockCursor()
class MockCursor(object):
def execute(self, *args, **kw):
pass
def close(self):
pass
class PoolTestBase(fixtures.TestBase):
def setup(self):
pool.clear_managers()
@classmethod
def teardown_class(cls):
pool.clear_managers()
def _queuepool_fixture(self, **kw):
dbapi = MockDBAPI()
return pool.QueuePool(creator=lambda: dbapi.connect('foo.db'), **kw)
def _queuepool_dbapi_fixture(self, **kw):
dbapi = MockDBAPI()
return dbapi, pool.QueuePool(creator=lambda: dbapi.connect('foo.db'), **kw)
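# A minimal sketch of the fixtures above in use (hypothetical test body):
#
#   dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=0)
#   c = p.connect()   # checks a MockConnection out of the QueuePool
#   c.close()         # returns it to the pool for reuse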
class PoolTest(PoolTestBase):
def test_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal=True)
c1 = manager.connect('foo.db')
c2 = manager.connect('foo.db')
c3 = manager.connect('bar.db')
c4 = manager.connect("foo.db", bar="bat")
c5 = manager.connect("foo.db", bar="hoho")
c6 = manager.connect("foo.db", bar="bat")
assert c1.cursor() is not None
assert c1 is c2
assert c1 is not c3
assert c4 is c6
assert c4 is not c5
def test_manager_with_key(self):
class NoKws(object):
def connect(self, arg):
return MockConnection()
manager = pool.manage(NoKws(), use_threadlocal=True)
c1 = manager.connect('foo.db', sa_pool_key="a")
c2 = manager.connect('foo.db', sa_pool_key="b")
c3 = manager.connect('bar.db', sa_pool_key="a")
assert c1.cursor() is not None
assert c1 is not c2
assert c1 is c3
def test_bad_args(self):
manager = pool.manage(MockDBAPI())
connection = manager.connect(None)
def test_non_thread_local_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal = False)
connection = manager.connect('foo.db')
connection2 = manager.connect('foo.db')
self.assert_(connection.cursor() is not None)
self.assert_(connection is not connection2)
@testing.fails_on('+pyodbc',
"pyodbc cursor doesn't implement tuple __eq__")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select([1], bind=testing.db)))
expected = [(1, )]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (pool.SingletonThreadPool, pool.StaticPool,
pool.QueuePool, pool.NullPool, pool.AssertionPool):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.throw_error = True
p.dispose()
p.recreate()
def testthreadlocal_del(self):
self._do_testthreadlocal(useclose=False)
def testthreadlocal_close(self):
self._do_testthreadlocal(useclose=True)
def _do_testthreadlocal(self, useclose=False):
dbapi = MockDBAPI()
for p in pool.QueuePool(creator=dbapi.connect,
pool_size=3, max_overflow=-1,
use_threadlocal=True), \
pool.SingletonThreadPool(creator=dbapi.connect,
use_threadlocal=True):
c1 = p.connect()
c2 = p.connect()
self.assert_(c1 is c2)
c3 = p.unique_connection()
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
c2 = p.connect()
self.assert_(c1 is c2)
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
if useclose:
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
c3.close()
c2.close()
self.assert_(c1.connection is not None)
c1.close()
c1 = c2 = c3 = None
# extra tests with QueuePool to ensure connections get
# __del__()ed when dereferenced
if isinstance(p, pool.QueuePool):
lazy_gc()
self.assert_(p.checkedout() == 0)
c1 = p.connect()
c2 = p.connect()
if useclose:
c2.close()
c1.close()
else:
c2 = None
c1 = None
lazy_gc()
self.assert_(p.checkedout() == 0)
def test_properties(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info['foo'] = 'bar'
c.close()
del c
c = p.connect()
self.assert_('foo' in c.info)
c.invalidate()
c = p.connect()
self.assert_('foo' not in c.info)
c.info['foo2'] = 'bar2'
c.detach()
self.assert_('foo2' in c.info)
c2 = p.connect()
self.assert_(c.connection is not c2.connection)
self.assert_(not c2.info)
self.assert_('foo2' in c.info)
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append('first_connect')
event.listen(p, 'first_connect', first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append('connect')
event.listen(p, 'connect', connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append('checkout')
event.listen(p, 'checkout', checkout)
return p, canary
def _checkin_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkin(*arg, **kw):
canary.append('checkin')
event.listen(p, 'checkin', checkin)
return p, canary
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
eq_(canary, ['connect'])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
eq_(canary, ['checkout'])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
def test_checkin_event_gc(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
del c1
lazy_gc()
eq_(canary, ['checkin'])
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
c2.close()
eq_(canary, ['checkin', 'checkin'])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, 'connect', listen_one)
event.listen(engine.pool, 'connect', listen_two)
event.listen(engine, 'connect', listen_three)
event.listen(engine.__class__, 'connect', listen_four)
engine.execute(select([1])).close()
eq_(
canary, ["listen_one","listen_four", "listen_two","listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, 'connect', listen_one)
event.listen(pool.QueuePool, 'connect', listen_two)
event.listen(pool.SingletonThreadPool, 'connect', listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def teardown(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class DeprecatedPoolListenerTest(PoolTestBase):
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners(self):
class InstrumentingListener(object):
def __init__(self):
if hasattr(self, 'connect'):
self.connect = self.inst_connect
if hasattr(self, 'first_connect'):
self.first_connect = self.inst_first_connect
if hasattr(self, 'checkout'):
self.checkout = self.inst_checkout
if hasattr(self, 'checkin'):
self.checkin = self.inst_checkin
self.clear()
def clear(self):
self.connected = []
self.first_connected = []
self.checked_out = []
self.checked_in = []
def assert_total(innerself, conn, fconn, cout, cin):
eq_(len(innerself.connected), conn)
eq_(len(innerself.first_connected), fconn)
eq_(len(innerself.checked_out), cout)
eq_(len(innerself.checked_in), cin)
def assert_in(innerself, item, in_conn, in_fconn,
in_cout, in_cin):
self.assert_((item in innerself.connected) == in_conn)
self.assert_((item in innerself.first_connected) == in_fconn)
self.assert_((item in innerself.checked_out) == in_cout)
self.assert_((item in innerself.checked_in) == in_cin)
def inst_connect(self, con, record):
print "connect(%s, %s)" % (con, record)
assert con is not None
assert record is not None
self.connected.append(con)
def inst_first_connect(self, con, record):
print "first_connect(%s, %s)" % (con, record)
assert con is not None
assert record is not None
self.first_connected.append(con)
def inst_checkout(self, con, record, proxy):
print "checkout(%s, %s, %s)" % (con, record, proxy)
assert con is not None
assert record is not None
assert proxy is not None
self.checked_out.append(con)
def inst_checkin(self, con, record):
print "checkin(%s, %s)" % (con, record)
# con can be None if invalidated
assert record is not None
self.checked_in.append(con)
class ListenAll(tsa.interfaces.PoolListener, InstrumentingListener):
pass
class ListenConnect(InstrumentingListener):
def connect(self, con, record):
pass
class ListenFirstConnect(InstrumentingListener):
def first_connect(self, con, record):
pass
class ListenCheckOut(InstrumentingListener):
def checkout(self, con, record, proxy, num):
pass
class ListenCheckIn(InstrumentingListener):
def checkin(self, con, record):
pass
def assert_listeners(p, total, conn, fconn, cout, cin):
for instance in (p, p.recreate()):
self.assert_(len(instance.dispatch.connect) == conn)
self.assert_(len(instance.dispatch.first_connect) == fconn)
self.assert_(len(instance.dispatch.checkout) == cout)
self.assert_(len(instance.dispatch.checkin) == cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0, 0)
p.add_listener(ListenAll())
assert_listeners(p, 1, 1, 1, 1, 1)
p.add_listener(ListenConnect())
assert_listeners(p, 2, 2, 1, 1, 1)
p.add_listener(ListenFirstConnect())
assert_listeners(p, 3, 2, 2, 1, 1)
p.add_listener(ListenCheckOut())
assert_listeners(p, 4, 2, 2, 2, 1)
p.add_listener(ListenCheckIn())
assert_listeners(p, 5, 2, 2, 2, 2)
del p
snoop = ListenAll()
p = self._queuepool_fixture(listeners=[snoop])
assert_listeners(p, 1, 1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
cc = c.connection
snoop.assert_in(cc, True, True, True, False)
c.close()
snoop.assert_in(cc, True, True, True, True)
del c, cc
snoop.clear()
# this one depends on immediate gc
c = p.connect()
cc = c.connection
snoop.assert_in(cc, False, False, True, False)
snoop.assert_total(0, 0, 1, 0)
del c, cc
lazy_gc()
snoop.assert_total(0, 0, 1, 1)
p.dispose()
snoop.clear()
c = p.connect()
c.close()
c = p.connect()
snoop.assert_total(1, 0, 2, 1)
c.close()
snoop.assert_total(1, 0, 2, 2)
# invalidation
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.invalidate()
snoop.assert_total(1, 0, 1, 1)
c.close()
snoop.assert_total(1, 0, 1, 1)
del c
lazy_gc()
snoop.assert_total(1, 0, 1, 1)
c = p.connect()
snoop.assert_total(2, 0, 2, 1)
c.close()
del c
lazy_gc()
snoop.assert_total(2, 0, 2, 2)
# detached
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.detach()
snoop.assert_total(1, 0, 1, 0)
c.close()
del c
snoop.assert_total(1, 0, 1, 0)
c = p.connect()
snoop.assert_total(2, 0, 2, 0)
c.close()
del c
snoop.assert_total(2, 0, 2, 1)
# recreated
p = p.recreate()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
c.close()
snoop.assert_total(1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 2, 1)
c.close()
snoop.assert_total(1, 1, 2, 2)
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners_callables(self):
def connect(dbapi_con, con_record):
counts[0] += 1
def checkout(dbapi_con, con_record, con_proxy):
counts[1] += 1
def checkin(dbapi_con, con_record):
counts[2] += 1
i_all = dict(connect=connect, checkout=checkout, checkin=checkin)
i_connect = dict(connect=connect)
i_checkout = dict(checkout=checkout)
i_checkin = dict(checkin=checkin)
for cls in (pool.QueuePool, pool.StaticPool):
counts = [0, 0, 0]
def assert_listeners(p, total, conn, cout, cin):
for instance in (p, p.recreate()):
eq_(len(instance.dispatch.connect), conn)
eq_(len(instance.dispatch.checkout), cout)
eq_(len(instance.dispatch.checkin), cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0)
p.add_listener(i_all)
assert_listeners(p, 1, 1, 1, 1)
p.add_listener(i_connect)
assert_listeners(p, 2, 1, 1, 1)
p.add_listener(i_checkout)
assert_listeners(p, 3, 1, 1, 1)
p.add_listener(i_checkin)
assert_listeners(p, 4, 1, 1, 1)
del p
p = self._queuepool_fixture(listeners=[i_all])
assert_listeners(p, 1, 1, 1, 1)
c = p.connect()
assert counts == [1, 1, 0]
c.close()
assert counts == [1, 1, 1]
c = p.connect()
assert counts == [1, 2, 1]
p.add_listener(i_checkin)
c.close()
assert counts == [1, 2, 2]
class QueuePoolTest(PoolTestBase):
def testqueuepool_del(self):
self._do_testqueuepool(useclose=False)
def testqueuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1)
def status(pool):
tup = pool.size(), pool.checkedin(), pool.overflow(), \
pool.checkedout()
print 'Pool size: %d Connections in pool: %d Current '\
'Overflow: %d Current Checked out connections: %d' % tup
return tup
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
lazy_gc()
assert not pool._refs
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=0,
timeout=2)
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
now = time.time()
try:
c4 = p.connect()
assert False
except tsa.exc.TimeoutError, e:
assert int(time.time() - now) == 2
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
# timeout again within the mutex, and if so, unlocking and throwing
# them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator = lambda: dbapi.connect(delay=.05),
pool_size = 2,
max_overflow = 1, use_threadlocal = False, timeout=3)
timeouts = []
def checkout():
for x in xrange(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError, e:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in xrange(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join()
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
# normally, the timeout should be under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 10, "Not all timeouts were < 10 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
gc_collect()
dbapi = MockDBAPI()
def creator():
time.sleep(.05)
return dbapi.connect()
p = pool.QueuePool(creator=creator,
pool_size=3, timeout=2,
max_overflow=max_overflow)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in xrange(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join()
self.assert_(max(peaks) <= max_overflow)
lazy_gc()
assert not pool._refs
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(creator=creator,
pool_size=2, timeout=timeout,
max_overflow=max_overflow)
def waiter(p):
conn = p.connect()
time.sleep(.5)
success.append(True)
conn.close()
time.sleep(.2)
c1 = p.connect()
c2 = p.connect()
for i in range(2):
t = threading.Thread(target=waiter, args=(p, ))
t.setDaemon(True) # so the tests don't hang if this fails
t.start()
c1.invalidate()
c2.invalidate()
p2 = p._replace()
time.sleep(2)
eq_(len(success), 12)
@testing.requires.python26
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator1():
canary.append(1)
return dbapi.connect()
def creator2():
canary.append(2)
return dbapi.connect()
p1 = pool.QueuePool(creator=creator1,
pool_size=1, timeout=None,
max_overflow=0)
p2 = pool.QueuePool(creator=creator2,
pool_size=1, timeout=None,
max_overflow=-1)
def waiter(p):
conn = p.connect()
time.sleep(.5)
conn.close()
c1 = p1.connect()
for i in range(5):
t = threading.Thread(target=waiter, args=(p1, ))
t.setDaemon(True)
t.start()
time.sleep(.5)
eq_(canary, [1])
p1._pool.abort(p2)
time.sleep(1)
eq_(canary, [1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator,
pool_size=2, timeout=None,
max_overflow=0)
c1 = p.connect()
c2 = p.connect()
conns = [c1.connection, c2.connection]
c1.close()
eq_([c.closed for c in conns], [False, False])
p.dispose()
eq_([c.closed for c in conns], [True, False])
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_([c.closed for c in conns], [True, False])
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is conns[1]
def test_no_overflow(self):
self._test_overflow(40, 0)
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_mixed_close(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = None
assert p.checkedout() == 1
c1 = None
lazy_gc()
assert p.checkedout() == 0
lazy_gc()
assert not pool._refs
def test_overflow_no_gc_tlocal(self):
self._test_overflow_no_gc(True)
def test_overflow_no_gc(self):
self._test_overflow_no_gc(False)
def _test_overflow_no_gc(self, threadlocal):
p = self._queuepool_fixture(pool_size=2,
max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in xrange(5):
conns = [_conn() for i in xrange(4)]
for c in conns:
c.close()
still_opened = len([c for c in strong_refs if not c.closed])
eq_(still_opened, 2)
def test_weakref_kaboom(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2 = None
del c1
del c2
gc_collect()
assert p.checkedout() == 0
c3 = p.connect()
assert c3 is not None
def test_trick_the_counter(self):
"""this is a "flaw" in the connection pool; since threadlocal
uses a single ConnectionFairy per thread with an open/close
counter, you can fool the counter into giving you a
ConnectionFairy with an ambiguous counter. i.e. it's not true
reference counting."""
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = p.connect()
c2.close()
self.assert_(p.checkedout() != 0)
c2.close()
self.assert_(p.checkedout() == 0)
def test_recycle(self):
p = self._queuepool_fixture(pool_size=1,
max_overflow=0,
recycle=3)
c1 = p.connect()
c_id = id(c1.connection)
c1.close()
c2 = p.connect()
assert id(c2.connection) == c_id
c2.close()
time.sleep(4)
c3 = p.connect()
assert id(c3.connection) != c_id
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
p2 = p.recreate()
assert p2.size() == 1
assert p2._use_threadlocal is False
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c_id = c1.connection.id
c2 = p.connect()
assert c2.connection.id != c1.connection.id
dbapi.raise_error = True
c2.invalidate()
c2 = None
c2 = p.connect()
assert c2.connection.id != c1.connection.id
con = c1.connection
assert not con.closed
c1.close()
assert con.closed
def test_threadfairy(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c1.close()
c2 = p.connect()
assert c2.connection is not None
class SingletonThreadPoolTest(PoolTestBase):
def test_cleanup(self):
self._test_cleanup(False)
def test_cleanup_no_gc(self):
self._test_cleanup(True)
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
p = pool.SingletonThreadPool(creator=dbapi.connect,
pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in xrange(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(.1)
threads = []
for i in xrange(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join()
assert len(p._all_conns) == 3
if strong_refs:
still_opened = len([c for c in sr if not c.closed])
eq_(still_opened, 3)
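# A small sketch of the SingletonThreadPool semantics exercised above: within
# a single thread, repeated connect() calls hand back the same underlying
# connection, and at most pool_size raw connections are retained. The
# in-memory sqlite3 creator is illustrative only.
def _singletonthreadpool_sketch():
    import sqlite3
    p = pool.SingletonThreadPool(creator=lambda: sqlite3.connect(":memory:"),
                                 pool_size=3)
    c1 = p.connect()
    c2 = p.connect()
    assert c1.connection is c2.connection  # same thread, same raw connection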
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
        p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
        p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect()
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
        p = pool.NullPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c_id = c1.connection.id
        c1.close()
        c1 = None
c1 = p.connect()
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
creator = lambda: dbapi.connect('foo.db')
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
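# A minimal sketch of the QueuePool size/overflow accounting exercised by the
# tests above, using an in-memory sqlite3 creator; illustrative only, not part
# of the test suite.
def _queuepool_overflow_sketch():
    import sqlite3
    p = pool.QueuePool(lambda: sqlite3.connect(":memory:"),
                       pool_size=2, max_overflow=1)
    c1, c2, c3 = p.connect(), p.connect(), p.connect()
    assert p.checkedout() == 3   # two pooled checkouts plus one overflow
    c3.close()                   # the overflow connection is discarded
    c1.close()
    c2.close()                   # pooled connections are returned for reuse
    assert p.checkedout() == 0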
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vpnvserver_authenticationsamlpolicy_binding(base_resource) :
""" Binding class showing the authenticationsamlpolicy that can be bound to vpnvserver.
"""
def __init__(self) :
self._policy = ""
self._priority = 0
self._acttype = 0
self._secondary = False
self._name = ""
self._groupextraction = False
self._gotopriorityexpression = ""
self._bindpoint = ""
self.___count = 0
@property
def priority(self) :
"""The priority, if any, of the VPN virtual server policy.
"""
try :
return self._priority
except Exception as e:
raise e
@priority.setter
def priority(self, priority) :
"""The priority, if any, of the VPN virtual server policy.
"""
try :
self._priority = priority
except Exception as e:
raise e
@property
def gotopriorityexpression(self) :
"""Expression or other value specifying the next policy to evaluate if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax or classic expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a number that is larger than the largest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
return self._gotopriorityexpression
except Exception as e:
raise e
@gotopriorityexpression.setter
def gotopriorityexpression(self, gotopriorityexpression) :
"""Expression or other value specifying the next policy to evaluate if the current policy evaluates to TRUE. Specify one of the following values:
* NEXT - Evaluate the policy with the next higher priority number.
* END - End policy evaluation.
* USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
* A default syntax or classic expression that evaluates to a number.
If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
* If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
* If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
* If the expression evaluates to a number that is larger than the largest numbered priority, policy evaluation ends.
An UNDEF event is triggered if:
* The expression is invalid.
* The expression evaluates to a priority number that is numerically lower than the current policy's priority.
* The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
"""
try :
self._gotopriorityexpression = gotopriorityexpression
except Exception as e:
raise e
@property
def policy(self) :
"""The name of the policy, if any, bound to the VPN virtual server.
"""
try :
return self._policy
except Exception as e:
raise e
@policy.setter
def policy(self, policy) :
"""The name of the policy, if any, bound to the VPN virtual server.
"""
try :
self._policy = policy
except Exception as e:
raise e
@property
def groupextraction(self) :
"""Binds the authentication policy to a tertiary chain which will be used only for group extraction. The user will not authenticate against this server, and this will only be called if primary and/or secondary authentication has succeeded.
"""
try :
return self._groupextraction
except Exception as e:
raise e
@groupextraction.setter
def groupextraction(self, groupextraction) :
"""Binds the authentication policy to a tertiary chain which will be used only for group extraction. The user will not authenticate against this server, and this will only be called if primary and/or secondary authentication has succeeded.
"""
try :
self._groupextraction = groupextraction
except Exception as e:
raise e
@property
def name(self) :
"""Name of the virtual server.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name of the virtual server.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def secondary(self) :
"""Binds the authentication policy as the secondary policy to use in a two-factor configuration. A user must then authenticate not only via a primary authentication method but also via a secondary authentication method. User groups are aggregated across both. The user name must be exactly the same for both authentication methods, but they can require different passwords.
"""
try :
return self._secondary
except Exception as e:
raise e
@secondary.setter
def secondary(self, secondary) :
"""Binds the authentication policy as the secondary policy to use in a two-factor configuration. A user must then authenticate not only via a primary authentication method but also via a secondary authentication method. User groups are aggregated across both. The user name must be exactly the same for both authentication methods, but they can require different passwords.
"""
try :
self._secondary = secondary
except Exception as e:
raise e
@property
def bindpoint(self) :
"""Bind point to which to bind the policy. Applies only to rewrite and cache policies. If you do not set this parameter, the policy is bound to REQ_DEFAULT or RES_DEFAULT, depending on whether the policy rule is a response-time or a request-time expression.
"""
try :
return self._bindpoint
except Exception as e:
raise e
@bindpoint.setter
def bindpoint(self, bindpoint) :
"""Bind point to which to bind the policy. Applies only to rewrite and cache policies. If you do not set this parameter, the policy is bound to REQ_DEFAULT or RES_DEFAULT, depending on whether the policy rule is a response-time or a request-time expression.
"""
try :
self._bindpoint = bindpoint
except Exception as e:
raise e
@property
def acttype(self) :
try :
return self._acttype
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vpnvserver_authenticationsamlpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vpnvserver_authenticationsamlpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = vpnvserver_authenticationsamlpolicy_binding()
updateresource.name = resource.name
updateresource.policy = resource.policy
updateresource.secondary = resource.secondary
updateresource.groupextraction = resource.groupextraction
updateresource.gotopriorityexpression = resource.gotopriorityexpression
updateresource.bindpoint = resource.bindpoint
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [vpnvserver_authenticationsamlpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].policy = resource[i].policy
updateresources[i].secondary = resource[i].secondary
updateresources[i].groupextraction = resource[i].groupextraction
updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
updateresources[i].bindpoint = resource[i].bindpoint
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = vpnvserver_authenticationsamlpolicy_binding()
deleteresource.name = resource.name
deleteresource.policy = resource.policy
deleteresource.secondary = resource.secondary
deleteresource.groupextraction = resource.groupextraction
deleteresource.bindpoint = resource.bindpoint
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [vpnvserver_authenticationsamlpolicy_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
deleteresources[i].policy = resource[i].policy
deleteresources[i].secondary = resource[i].secondary
deleteresources[i].groupextraction = resource[i].groupextraction
deleteresources[i].bindpoint = resource[i].bindpoint
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, name) :
""" Use this API to fetch vpnvserver_authenticationsamlpolicy_binding resources.
"""
try :
obj = vpnvserver_authenticationsamlpolicy_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
""" Use this API to fetch filtered set of vpnvserver_authenticationsamlpolicy_binding resources.
		Filter string should be in JSON format, e.g., "port:80,servicetype:HTTP".
"""
try :
obj = vpnvserver_authenticationsamlpolicy_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
""" Use this API to count vpnvserver_authenticationsamlpolicy_binding resources configued on NetScaler.
"""
try :
obj = vpnvserver_authenticationsamlpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
""" Use this API to count the filtered set of vpnvserver_authenticationsamlpolicy_binding resources.
		Filter string should be in JSON format, e.g., "port:80,servicetype:HTTP".
"""
try :
obj = vpnvserver_authenticationsamlpolicy_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Staaddresstype:
IPV4 = "IPV4"
IPV6 = "IPV6"
class Bindpoint:
REQUEST = "REQUEST"
RESPONSE = "RESPONSE"
ICA_REQUEST = "ICA_REQUEST"
OTHERTCP_REQUEST = "OTHERTCP_REQUEST"
class vpnvserver_authenticationsamlpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.vpnvserver_authenticationsamlpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vpnvserver_authenticationsamlpolicy_binding = [vpnvserver_authenticationsamlpolicy_binding() for _ in range(length)]
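# A hedged usage sketch, not part of the generated SDK: it assumes a reachable
# NetScaler appliance and the nitro_service client class from this package;
# the host, credentials and vserver name below are placeholders.
def example_fetch_saml_bindings():
    from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
    client = nitro_service("10.0.0.1", "http")
    client.set_credential("nsroot", "nsroot")
    client.login()
    try:
        bindings = vpnvserver_authenticationsamlpolicy_binding.get(client, "vpn_vs1")
        for b in bindings or []:
            print(b.policy, b.priority, b.secondary)
        print(vpnvserver_authenticationsamlpolicy_binding.count(client, "vpn_vs1"))
    finally:
        client.logout()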
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class MirroredStrategy implementing DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
from functools import partial
import threading
from tensorflow.contrib.distribute.python import cross_tower_ops as cross_tower_ops_lib
from tensorflow.contrib.distribute.python import shared_variable_creator
from tensorflow.contrib.distribute.python import values
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tf_device
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.training import coordinator
from tensorflow.python.training import device_util
from tensorflow.python.training import distribute as distribute_lib
from tensorflow.python.util import nest
# TODO(josh11b): Replace asserts in this file with if ...: raise ...
@contextlib.contextmanager
def _enter_graph(g):
if context.executing_eagerly():
with g.as_default(), context.eager_mode():
yield
else:
with g.as_default():
yield
def _cpu_device(device):
cpu_device = tf_device.DeviceSpec.from_string(device)
cpu_device.merge_from(tf_device.DeviceSpec(device_type="CPU", device_index=0))
return cpu_device.to_string()
class _RequestedStop(Exception):
pass
# _call_for_each_tower and _reduce_non_distributed_value are not members of
# MirroredStrategy so that they are generally not allowed to use anything
# specific to MirroredStrategy and thus can be shared with other distribution
# strategies.
# TODO(yuefengz): maybe create a common class for those who need to call this
# _call_for_each_tower.
def _call_for_each_tower(distribution, fn, *args, **kwargs):
"""Run `fn` in separate threads, once per tower/worker device.
Args:
distribution: the DistributionStrategy object.
fn: function to run (will be run once per device, each in its own thread).
*args: positional arguments for `fn`
**kwargs: keyword arguments for `fn`.
`"run_concurrently"`: Boolean indicating whether executions of `fn`
can be run concurrently (under eager execution only), defaults to
`True`.
Returns:
Merged return value of `fn` across all towers.
Raises:
RuntimeError: If fn() calls get_tower_context().merge_call() a different
number of times from the available devices.
"""
run_concurrently = kwargs.pop("run_concurrently", True)
if not context.executing_eagerly():
# Lots of TF library code isn't thread-safe in graph mode, and
# there is little to be gained by turning on multithreading when
# constructing a graph.
run_concurrently = False
# Needed for per-thread device, etc. contexts in graph mode.
ops.get_default_graph().switch_to_thread_local()
elif run_concurrently is None:
run_concurrently = True
coord = coordinator.Coordinator(clean_stop_exception_types=(_RequestedStop,))
shared_variable_store = {}
# TODO(isaprykin): Create these threads once instead of during every run()
# call.
threads = []
for index, d in enumerate(distribution.worker_devices):
variable_creator_fn = shared_variable_creator.make_fn(
shared_variable_store, index)
t = MirroredStrategy._MirroredTowerThread( # pylint: disable=protected-access
distribution, coord, d, variable_creator_fn, fn,
*values.select_device(d, args), **values.select_device(d, kwargs))
threads.append(t)
for t in threads:
t.start()
# When `fn` starts `should_run` event is set on _MirroredTowerThread
# (`MTT`) threads. The execution waits until
# `MTT.has_paused` is set, which indicates that either `fn` is
# complete or a `get_tower_context().merge_call()` is called. If `fn` is
# complete, then `MTT.done` is set to True. Otherwise, arguments
# of `get_tower_context().merge_call` from all paused threads are grouped
# and the `merge_fn` is performed. Results of the
# `get_tower_context().merge_call` are then set to `MTT.merge_result`.
# Each such `get_tower_context().merge_call` call returns the
# `MTT.merge_result` for that thread when `MTT.should_run` event
# is reset again. Execution of `fn` resumes.
try:
with coord.stop_on_exception():
all_done = False
while not all_done and not coord.should_stop():
done = []
if run_concurrently:
for t in threads:
t.should_run.set()
for t in threads:
t.has_paused.wait()
t.has_paused.clear()
if coord.should_stop():
return None
done.append(t.done)
else:
for t in threads:
t.should_run.set()
t.has_paused.wait()
t.has_paused.clear()
if coord.should_stop():
return None
done.append(t.done)
if coord.should_stop():
return None
all_done = all(done)
if not all_done:
if any(done):
raise RuntimeError("Some towers made a different number of "
"tower_context().merge_call() calls.")
# get_tower_context().merge_call() case
merge_args = values.regroup({t.device: t.merge_args for t in threads})
merge_kwargs = values.regroup(
{t.device: t.merge_kwargs for t in threads})
# We capture the name_scope of the MTT when we call merge_fn
# to ensure that if we have opened a name scope in the MTT,
# it will be respected when executing the merge function. We only
# capture the name_scope from the first MTT and assume it is
# the same for all other MTTs.
mtt_captured_name_scope = threads[0].captured_name_scope
with ops.name_scope(mtt_captured_name_scope):
merge_result = threads[0].merge_fn(distribution, *merge_args,
**merge_kwargs)
for t in threads:
t.merge_result = values.select_device(t.device, merge_result)
finally:
for t in threads:
t.should_run.set()
coord.join(threads)
return values.regroup({t.device: t.main_result for t in threads})
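# A minimal, standalone sketch of the `should_run`/`has_paused` Event
# handshake described in the comment inside `_call_for_each_tower` above.
# It uses plain `threading` and is illustrative only, not part of the
# TensorFlow implementation.
def _handshake_sketch():
  should_run = threading.Event()
  has_paused = threading.Event()
  result = []

  def tower_thread():
    should_run.wait()        # block until the coordinator signals a step
    should_run.clear()
    result.append("step")    # stand-in for running `fn` on one tower
    has_paused.set()         # hand control back to the coordinator

  t = threading.Thread(target=tower_thread)
  t.start()
  should_run.set()           # coordinator: let the tower run one step
  has_paused.wait()          # coordinator: wait until the tower pauses
  has_paused.clear()
  t.join()
  return result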
def _reduce_non_distributed_value(distribution, aggregation, value,
destinations):
"""Reduce a non-DistributedValue `value` to `destinations`."""
if isinstance(value, values.DistributedValues):
raise ValueError("You are passing a `DistributedValue` to "
"`_reduce_non_distributed_value`, which is not allowed.")
# If the same value is present on all towers then the PerDevice value will
# be a single value. We also handle the case when `value` is a single value
# and equal to 0.
if value == 0:
return 0
# If the aggregation type is MEAN or ONLY_FIRST_TOWER, then this
# essentially means that the same value should be on all destinations.
if aggregation in (
variable_scope.VariableAggregation.MEAN,
variable_scope.VariableAggregation.ONLY_FIRST_TOWER):
return value
cross_tower_ops_lib.validate_destinations(destinations)
  # We do not support an aggregation type of SUM if the value is the same
  # across all towers. This is called as part of assign functions for
  # MirroredVariables, and summing up identical values across towers is not
  # clearly defined.
if (len(distribution.worker_devices) != 1 or
not cross_tower_ops_lib.check_destinations(destinations)):
raise ValueError("A non-DistributedValues value %s cannot be reduced with "
"the given aggregation %s." % (value, aggregation))
# TODO(anjalisridhar): Moves these methods to a device utility file?
devices = cross_tower_ops_lib.get_devices_from(destinations)
if len(devices) == 1:
with ops.device(devices[0]):
return array_ops.identity(value)
else:
value_updates = {}
for d in devices:
with ops.device(d):
value_updates[d] = array_ops.identity(value)
return values.Mirrored(value_updates)
def _create_mirrored_variable(devices, real_mirrored_creator, *args, **kwargs): # pylint: disable=g-missing-docstring
# Figure out what collections this variable should be added to.
# We'll add the MirroredVariable to those collections instead.
collections = kwargs.pop("collections", None)
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
kwargs["collections"] = []
# Get synchronization value
synchronization = kwargs.get("synchronization",
variable_scope.VariableSynchronization.ON_WRITE)
if synchronization == variable_scope.VariableSynchronization.NONE:
raise ValueError("`NONE` variable synchronization mode is not "
"supported with `Mirrored` distribution strategy. Please"
" change the `synchronization` for variable: " +
kwargs["name"])
elif synchronization == variable_scope.VariableSynchronization.ON_READ:
# Variables that are to be synced on read are tower local.
is_tower_local = True
kwargs["trainable"] = False
elif (synchronization == variable_scope.VariableSynchronization.ON_WRITE or
synchronization == variable_scope.VariableSynchronization.AUTO):
# `AUTO` synchronization for `MirroredStrategy` is `ON_WRITE`.
is_tower_local = False
else:
raise ValueError("Invalid variable synchronization mode: " +
synchronization + " for variable: " + kwargs["name"])
# Get aggregation value
aggregation = kwargs.pop("aggregation",
variable_scope.VariableAggregation.NONE)
if aggregation not in (
variable_scope.VariableAggregation.NONE,
variable_scope.VariableAggregation.SUM,
variable_scope.VariableAggregation.MEAN,
variable_scope.VariableAggregation.ONLY_FIRST_TOWER
):
raise ValueError("Invalid variable aggregation mode: " + aggregation +
" for variable: " + kwargs["name"])
# Ignore user-specified caching device, not needed for mirrored variables.
kwargs.pop("caching_device", None)
# TODO(josh11b,apassos): It would be better if variable initialization
# was never recorded on the tape instead of having to do this manually
# here.
with tape.stop_recording():
index = real_mirrored_creator(devices, *args, **kwargs)
if is_tower_local:
result = values.TowerLocalVariable(index, index[devices[0]], aggregation)
else:
result = values.MirroredVariable(index, index[devices[0]], aggregation)
# Add the wrapped variable to the requested collections.
# The handling of eager mode and the global step matches
# ResourceVariable._init_from_args().
if not context.executing_eagerly():
g = ops.get_default_graph()
# If "trainable" is True, next_creator() will add the member variables
# to the TRAINABLE_VARIABLES collection, so we manually remove
# them and replace with the MirroredVariable. We can't set
# "trainable" to False for next_creator() since that causes functions
# like implicit_gradients to skip those variables.
if kwargs.get("trainable", True):
collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)
l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)
for v in index.values():
if v in l:
l.remove(v)
g.add_to_collections(collections, result)
elif ops.GraphKeys.GLOBAL_STEP in collections:
ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, result)
return result
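# A hedged sketch of the creator-interception hook that `_create_variable`
# (below) plugs into: `variable_creator_scope` routes variable creation
# through a custom creator that receives `next_creator`. Illustrative only;
# it records names instead of building mirrored copies.
def _creator_scope_sketch():
  created_names = []

  def logging_creator(next_creator, *args, **kwargs):
    created_names.append(kwargs.get("name"))   # observe the requested name
    return next_creator(*args, **kwargs)       # defer to the default creator

  with variable_scope.variable_creator_scope(logging_creator):
    v = variable_scope.variable(1.0, name="sketch_var")
  return v, created_names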
class MirroredStrategy(distribute_lib.DistributionStrategy):
"""Mirrors vars to distribute across multiple devices and machines.
This strategy uses one tower per device and sync replication for its multi-GPU
version.
  When `cluster_spec` is given by the `configure` method, it turns into the
  multi-worker version that works on multiple workers with in-graph replication.
Note: `configure` will be called by higher-level APIs if running in
distributed environment.
There are several important concepts for distributed TensorFlow, e.g.
  `client`, `job`, `task`, `cluster`, `in-graph replication` and
  `synchronous training`, and they have already been defined in the
[TensorFlow's documentation](https://www.tensorflow.org/deploy/distributed).
The distribution strategy inherits these concepts as well and in addition to
that we also clarify several more concepts:
* **In-graph replication**: the `client` creates a single `tf.Graph` that
specifies tasks for devices on all workers. The `client` then creates a
client session which will talk to the `master` service of a `worker`. Then
the `master` will partition the graph and distribute the work to all
participating workers.
* **Worker**: A `worker` is a TensorFlow `task` that usually maps to one
physical machine. We will have multiple `worker`s with different `task`
  indices. They all do similar things, except that one worker additionally
  checkpoints model variables, writes summaries, etc., on top of its ordinary
  work.
The multi-worker version of this class maps one tower to one device on a
worker. It mirrors all model variables on all towers. For example, if you have
two `worker`s and each `worker` has 4 GPUs, it will create 8 copies of the
  model variables on these 8 GPUs. Then, as in the single-worker case, each
  tower performs its computation with its own copy of variables unless in
  cross-tower mode, where variable or tensor reduction happens.
Args:
devices: a list of device strings.
num_gpus: number of GPUs. For local training, either specify `devices` or
`num_gpus`. In distributed training, this must be specified as number of
GPUs on each worker.
num_gpus_per_worker: number of GPUs per worker. This is the same as
`num_gpus` and only one of `num_gpus` and `num_gpus_per_worker` can be
specified.
    cross_tower_ops: optional, a descendant of `CrossTowerOps`. If this is not
set, the `configure` method will try to find the best one.
prefetch_on_device: optional boolean to specify whether to prefetch input
data to devices.
auto_shard_dataset: whether to auto-shard the dataset when there are
multiple workers.
"""
def __init__(self,
devices=None,
num_gpus=None,
num_gpus_per_worker=None,
cross_tower_ops=None,
prefetch_on_device=None,
auto_shard_dataset=False):
super(MirroredStrategy, self).__init__()
self._cross_tower_ops = cross_tower_ops
self._prefetch_on_device = prefetch_on_device
self._auto_shard_dataset = auto_shard_dataset
# Remember num GPUs which might be needed by `configure` method.
if num_gpus is not None and num_gpus_per_worker is not None:
raise ValueError(
"You cannot specify both `num_gpus` and `num_gpus_per_worker`.")
if num_gpus is not None:
self._num_gpus = num_gpus
else:
self._num_gpus = num_gpus_per_worker
self._initialize_local(self._num_gpus, devices)
def _initialize_local(self, num_gpus, devices):
"""Initializes the object for local training."""
self._cluster_spec = None
# Convert `num_gpus` into `devices`, shouldn't specify both.
if devices is None:
if num_gpus is None:
num_gpus = context.num_gpus()
if num_gpus == 0:
devices = ["/device:CPU:0"]
else:
devices = ["/device:GPU:%d" % d for d in range(num_gpus)]
elif num_gpus is not None:
raise ValueError("Must only specify one of `devices` and `num_gpus`.")
self._num_gpus = num_gpus
# TODO(yuefengz): consider setting the default device.
assert devices, "Must specify at least one device."
assert len(set(devices)) == len(devices), (
"No duplicates allowed in `devices` argument.")
# TODO(josh11b): Require at least 2 devices?
self._devices = [device_util.resolve(d) for d in devices]
self._canonical_device_set = set(self._devices)
self._device_index = values.PerDevice({d: i for i, d in enumerate(devices)})
def _initialize_multi_worker(self, num_gpus, cluster_spec):
"""Initializes the object for multi-worker training."""
cluster_spec = multi_worker_util.normalize_cluster_spec(cluster_spec)
self._cluster_spec = cluster_spec
self._workers = []
for job in ["chief", "worker"]:
for task in range(len(cluster_spec.as_dict().get(job, []))):
self._workers.append("/job:%s/task:%d" % (job, task))
if num_gpus is None:
raise ValueError("`num_gpus` is required if `cluster_spec` is given.")
if num_gpus > 0:
self._worker_device_map = {
worker: [
device_util.canonicalize(worker + "/device:GPU:%d" % gpu)
for gpu in range(num_gpus)
] for worker in self._workers
}
else:
self._worker_device_map = {
worker: [device_util.canonicalize(worker, "/device:CPU:0")]
for worker in self._workers
}
devices = nest.flatten(self._worker_device_map)
# Setting `_default_device` will add a device scope in the
# distribution.scope. We set the default device to the first worker. When
# users specify device under distribution.scope by
# with tf.device("/cpu:0"):
# ...
    # their ops will end up on the CPU device of the first worker, e.g.
# "/job:worker/task:0/device:CPU:0". Note this is not used in tower mode.
self._default_device = self._workers[0]
assert devices, "Must specify at least one device."
assert len(set(devices)) == len(devices), (
"No duplicates allowed in `devices` argument.")
# TODO(josh11b): Require at least 2 devices?
self._devices = [device_util.resolve(d) for d in devices]
self._canonical_device_set = set(self._devices)
self._device_index = values.PerDevice(
{d: i for i, d in enumerate(devices)})
def _create_variable(self, next_creator, *args, **kwargs):
"""Create a mirrored variable. See `DistributionStrategy.scope`."""
colocate_with = kwargs.pop("colocate_with", None)
devices = self._get_devices_from(colocate_with)
def _real_mirrored_creator(devices, *args, **kwargs): # pylint: disable=g-missing-docstring
index = {}
for i, d in enumerate(devices):
with ops.device(d):
if i > 0:
# Give replicas meaningful distinct names:
var0name = index[devices[0]].name.split(":")[0]
# We append a / to variable names created on towers with id > 0 to
# ensure that we ignore the name scope and instead use the given
# name as the absolute name of the variable.
kwargs["name"] = "%s/replica_%d/" % (var0name, i)
# Initialize replicas with the same value:
def initial_value_fn(device=d):
if context.executing_eagerly():
init_value = index[devices[0]].value()
return array_ops.identity(init_value)
else:
with ops.device(device):
init_value = index[devices[0]].initial_value
return array_ops.identity(init_value)
kwargs["initial_value"] = initial_value_fn
with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
# Don't record operations (e.g. other variable reads) during
# variable creation.
with tape.stop_recording():
v = next_creator(*args, **kwargs)
assert not isinstance(v, values.DistributedVariable)
index[d] = v
return index
return _create_mirrored_variable(devices, _real_mirrored_creator, *args,
**kwargs)
def distribute_dataset(self, dataset_fn):
if self._cluster_spec:
return values.MultiWorkerDataset(
partial(self._call_dataset_fn, dataset_fn), self._worker_device_map,
self._prefetch_on_device, self._auto_shard_dataset)
else:
return values.PerDeviceDataset(
self._call_dataset_fn(dataset_fn), self._devices,
self._prefetch_on_device)
# TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
def _run_steps_on_dataset(self, fn, iterator, iterations,
initial_loop_values=None):
if initial_loop_values is None:
initial_loop_values = {}
initial_loop_values = nest.flatten(initial_loop_values)
ctx = values.MultiStepContext()
def body(i, *args):
"""A wrapper around `fn` to create the while loop body."""
del args
fn_inputs = iterator.get_next()
if not isinstance(fn_inputs, tuple):
fn_inputs = (fn_inputs,)
fn_result = fn(ctx, *fn_inputs)
for (name, output) in ctx.last_step_outputs.items():
# Convert all outputs to tensors, potentially from `DistributedValues`.
ctx.last_step_outputs[name] = self.unwrap(output)
flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
with ops.control_dependencies([fn_result]):
return [i + 1] + flat_last_step_outputs
# We capture the control_flow_context at this point, before we run `fn`
# inside a while_loop. This is useful in cases where we might need to exit
# these contexts and get back to the outer context to do some things, for
# e.g. create an op which should be evaluated only once at the end of the
# loop on the host. One such usage is in creating metrics' value op.
self._outer_control_flow_context = (
ops.get_default_graph()._get_control_flow_context()) # pylint: disable=protected-access
cond = lambda i, *args: i < iterations
i = constant_op.constant(0)
loop_result = control_flow_ops.while_loop(
cond, body, [i] + initial_loop_values, name="",
parallel_iterations=1, back_prop=False, swap_memory=False,
return_same_structure=True)
del self._outer_control_flow_context
ctx.run_op = control_flow_ops.group(loop_result)
# Convert the last_step_outputs from a list to the original dict structure
# of last_step_outputs.
last_step_tensor_outputs = loop_result[1:]
last_step_tensor_outputs_dict = nest.pack_sequence_as(
ctx.last_step_outputs, last_step_tensor_outputs)
for (name, aggregation) in ctx._last_step_outputs_aggregations.items(): # pylint: disable=protected-access
output = last_step_tensor_outputs_dict[name]
# For outputs that have already been aggregated, wrap them in a Mirrored
# container, else in a PerDevice container.
if aggregation is variables_lib.VariableAggregation.NONE:
last_step_tensor_outputs_dict[name] = values.regroup(
{d: t for d, t in zip(self._devices, output)}, values.PerDevice)
else:
assert len(output) == 1
last_step_tensor_outputs_dict[name] = output[0]
ctx._set_last_step_outputs(last_step_tensor_outputs_dict) # pylint: disable=protected-access
return ctx
def _broadcast(self, tensor, destinations):
# TODO(josh11b): In eager mode, use one thread per device, or async mode.
return self._get_cross_tower_ops().broadcast(tensor, destinations or
self._devices)
def _call_for_each_tower(self, fn, *args, **kwargs):
return _call_for_each_tower(self, fn, *args, **kwargs)
def map(self, map_over, fn, *args, **kwargs):
# TODO(josh11b): In eager mode, use one thread per device.
index = {}
for i, m in enumerate(map_over):
d = self._devices[i % len(self._devices)]
with ops.device(d):
l = index.get(d, [])
l.append(fn(m,
*values.select_device_mirrored(d, args),
**values.select_device_mirrored(d, kwargs)))
index[d] = l
# TODO(josh11b): Need a values.regroup equivalent that handles MapOutput
# in addition to PerDevice data.
return values.PerDevice({k: values.MapOutput(v) for k, v in index.items()})
def configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
del task_type, task_id
if session_config:
session_config.isolate_session_state = True
if cluster_spec:
self._initialize_multi_worker(self._num_gpus, cluster_spec)
if self._cross_tower_ops is None:
if self._cluster_spec:
        # It currently cannot detect the topology of remote workers. So we
# hard-code the multi-worker all-reduce algorithm for now.
if len(self._workers) == 1:
# The default is "nccl".
self._cross_tower_ops = cross_tower_ops_lib.AllReduceCrossTowerOps()
else:
# The default is hierarchical reduce and broadcast.
self._cross_tower_ops = cross_tower_ops_lib.MultiWorkerAllReduce(
self._workers, self._num_gpus)
else:
self._cross_tower_ops = cross_tower_ops_lib.choose_the_best(
self._devices, session_config=session_config)
def _get_cross_tower_ops(self):
if self._cross_tower_ops is None:
self._cross_tower_ops = (
cross_tower_ops_lib.ReductionToOneDeviceCrossTowerOps())
return self._cross_tower_ops
def _reduce(self, aggregation, value, destinations):
assert not isinstance(value, values.Mirrored)
if not isinstance(value, values.DistributedValues):
# This function handles reducing values that are not PerDevice or Mirrored
# values. For example, the same value could be present on all towers in
# which case `value` would be a single value or value could be 0.
return _reduce_non_distributed_value(self, aggregation, value,
destinations)
if aggregation == variable_scope.VariableAggregation.ONLY_FIRST_TOWER:
value = value.get(self._devices[0])
if isinstance(value, (int, float)):
return value
return self.broadcast(value, destinations)
return self._get_cross_tower_ops().reduce(
aggregation, value, destinations=destinations)
def _batch_reduce(self, aggregation, value_destination_pairs):
if aggregation == variable_scope.VariableAggregation.ONLY_FIRST_TOWER:
return [self.broadcast(v.get(self._devices[0]), d)
for v, d in value_destination_pairs]
return self._get_cross_tower_ops().batch_reduce(aggregation,
value_destination_pairs)
def _update(self, var, options, fn, *args, **kwargs):
# TODO(josh11b): In eager mode, use one thread per device.
assert isinstance(var, values.DistributedVariable)
should_group = options.pop("grouped")
assert not options # Validate that we are processing all of the options.
updates = {}
for d, v in var._index.items(): # pylint: disable=protected-access
name = "update_%d" % self._device_index.get(d)
with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name):
# If args and kwargs are not mirrored, the value is returned as is.
updates[d] = fn(v,
*values.select_device_mirrored(d, args),
**values.select_device_mirrored(d, kwargs))
return values.update_regroup(self, updates, should_group)
def _update_non_slot(self, colocate_with, options, fn, *args, **kwargs):
assert isinstance(colocate_with, list)
should_group = options.pop("grouped")
assert not options # Validate that we are processing all of the options.
# TODO(josh11b): In eager mode, use one thread per device.
updates = {}
for d in colocate_with:
name = "update_%d" % self._device_index.get(d)
with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name):
updates[d] = fn(*values.select_device_mirrored(d, args),
**values.select_device_mirrored(d, kwargs))
return values.update_regroup(self, updates, should_group)
def read_var(self, tower_local_var):
"""Read the aggregate value of a tower-local variable."""
if isinstance(tower_local_var, values.TowerLocalVariable):
return tower_local_var._get_cross_tower() # pylint: disable=protected-access
assert isinstance(tower_local_var, values.Mirrored)
return array_ops.identity(tower_local_var.get())
def _unwrap(self, val):
if isinstance(val, values.DistributedValues):
# Return in a deterministic order.
if set(val.devices) == self._canonical_device_set:
return [val.get(device=d) for d in self._devices]
return [val.get(device=d) for d in sorted(val.devices)]
return [val]
def value_container(self, val):
return values.value_container(val)
@property
def is_single_tower(self):
return len(self._devices) == 1
@property
def num_towers(self):
return len(self._devices)
@property
def num_replicas_in_sync(self):
return len(self._devices)
def _worker_device_index(self):
return self._device_index
@property
def worker_devices(self):
# Make a copy to prevent users from accidentally mutating our copy.
return list(self._devices)
@property
def parameter_devices(self):
return list(self._devices)
@property
def between_graph(self):
return False
@property
def should_init(self):
return True
@property
def should_checkpoint(self):
return True
@property
def should_save_summary(self):
return True
def non_slot_devices(self, var_list):
del var_list
return list(self._devices)
def _get_devices_from(self, colocate_with=None):
if colocate_with is None:
return self._devices
else:
return cross_tower_ops_lib.get_devices_from(colocate_with)
class _MirroredTowerThread(threading.Thread):
"""A thread that runs() a function on a device."""
def __init__(self, dist, coord, device, variable_creator_fn, fn, *args,
**kwargs):
super(MirroredStrategy._MirroredTowerThread, self).__init__() # pylint: disable=protected-access
self.coord = coord
self.distribution = dist
self.device = device
self.tower_id = dist.worker_devices.index(device)
self.variable_creator_fn = variable_creator_fn
# State needed to run and return the results of `fn`.
self.main_fn = fn
self.main_args = args
self.main_kwargs = kwargs
self.main_result = None
self.done = False
# State needed to run the next merge_call() (if any) requested via
# TowerContext.
self.merge_fn = None
self.merge_args = None
self.merge_kwargs = None
self.merge_result = None
self.captured_name_scope = None
# We use a thread.Event for the main thread to signal when this
# thread should start running (`should_run`), and another for
# this thread to transfer control back to the main thread
# (`has_paused`, either when it gets to a
# `get_tower_context().merge_call` or when `fn` returns). In
# either case the event starts cleared, is signaled by calling
# set(). The receiving thread waits for the signal by calling
# wait() and then immediately clearing the event using clear().
self.should_run = threading.Event()
self.has_paused = threading.Event()
# These fields have to do with inheriting various contexts from the
# parent thread:
# pylint: disable=protected-access
self.context_mode = context.context()._eager_context.mode
if not context.context()._context_handle:
context.context()._initialize_handle_and_devices()
self.context_device_policy = (
pywrap_tensorflow.TFE_ContextGetDevicePlacementPolicy(
context.context()._context_handle))
self.graph = ops.get_default_graph()
self._variable_creator_stack = self.graph._variable_creator_stack[:]
self._captured_var_scope = variable_scope.get_variable_scope()
# Adding a "/" at end lets us re-enter this scope later.
self._name_scope = self.graph.get_name_scope()
if self._name_scope:
self._name_scope += "/"
if self.tower_id > 0:
if not self._name_scope:
self._name_scope = ""
self._name_scope += "tower_%d/" % self.tower_id
def run(self):
# pylint: disable=protected-access
self.graph._variable_creator_stack = self._variable_creator_stack
self.should_run.wait()
self.should_run.clear()
try:
if self.coord.should_stop():
return
with self.coord.stop_on_exception(), \
context.context()._mode(self.context_mode), \
context.context().device_policy(self.context_device_policy), \
_enter_graph(self.graph), \
MirroredTowerContext(self.distribution, self.tower_id), \
ops.device(self.device), \
ops.name_scope(self._name_scope), \
variable_scope.variable_scope(
self._captured_var_scope, reuse=self.tower_id > 0), \
variable_scope.variable_creator_scope(self.variable_creator_fn):
self.main_result = self.main_fn(*self.main_args, **self.main_kwargs)
self.done = True
finally:
self.has_paused.set()
class MirroredTowerContext(distribute_lib.TowerContext):
"""TowerContext used in MirroredStrategy.call_for_each_tower().
Opened in `_MirroredTowerThread`, to allow the user to invoke
`MirroredStrategy`'s specific implementation of `merge_call()`,
which works by delegating the function and its arguments to
the main thread (the one that invoked
`MirroredStrategy.call_for_each_tower()`).
"""
def _merge_call(self, fn, *args, **kwargs):
"""Delegate to the main thread to actually perform merge_call()."""
t = threading.current_thread() # a _MirroredTowerThread
t.merge_fn = fn
t.merge_args = args
t.merge_kwargs = kwargs
t.captured_name_scope = t.graph.get_name_scope()
# Adding a "/" at end lets us re-enter this scope later.
if t.captured_name_scope:
t.captured_name_scope += "/"
t.has_paused.set()
t.should_run.wait()
t.should_run.clear()
if t.coord.should_stop():
raise _RequestedStop()
return t.merge_result
@property
def device(self):
distribute_lib.require_tower_context(self)
return self._distribution_strategy.worker_devices[self._tower_id]
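# A hedged usage sketch for this contrib-era API; the device list and the toy
# step function below are placeholders, not part of the module above.
def _mirrored_usage_sketch():
  strategy = MirroredStrategy(devices=["/device:CPU:0"])

  def step():
    # Runs once per tower; with a single CPU device there is one tower.
    return constant_op.constant(1.0)

  with strategy.scope():
    per_tower = strategy.call_for_each_tower(step, run_concurrently=False)
  return strategy.unwrap(per_tower)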
|
|
#!/usr/bin/python
"""Script to poll metrics from MySQL Database and push them to Wavefront."""
from __future__ import print_function
import os
import sys
import re
import time
import datetime
import socket
import signal
import itertools
import mysql.connector
from wavefront_sdk.client_factory import WavefrontClientFactory
# USER CONFIGURATION ###
# All metrics will be prefixed with this text. Please ensure it has a trailing
# dot (.) so metric names remain well-formed.
ZABBIX_PREFIX = "zabbix."
# Frequency at which records will be retrieved from the DB in seconds
POLL_INTERVAL = 60
# Limit the amount of records that will be pulled from the DB
# on each POLL_INTERVAL
LIMIT = 10000
# Set to False to print values rather than sending to Wavefront
SEND_TO_WF = False
WAVEFRONT_PROXY_HOST = "localhost"
WAVEFRONT_PROXY_PORT = 2878
DB_SERVER = "localhost"
DB_DATABASE = "zabbix"
DB_UNAME = "root"
DB_PW = "password"
# Store the latest Epoch time that has already been sent to Wavefront in
# these files. There is a separate file for each Zabbix telemetry table -
# history and history_uint The "history" table stores metrics with
# float/double values and the "history_uint" table stores metrics
# with Integer values.
HISTORY_CLOCK_FILEPATH = "last_history_clock.hist"
HISTORYUINT_CLOCK_FILEPATH = "last_historyuint_clock.hist"
# END USER CONFIGURATION ###
# SQLs. The results of SQL_QUERY_ITEMIDS are used within SQL_QUERY_HISTORY
# to force MySQL to use the history_1 index
# which is created by default by the Zabbix install.
SQL_QUERY_ITEMIDS = "select distinct itemid from items"
HISTORY_TABLE = "history"
HISTORY_TABLE_UINT = "history_uint"
SQL_QUERY_HISTORY = """SELECT hi.clock, hi.value, h.host, i.key_ FROM
%s AS hi
INNER JOIN items AS i ON hi.itemid = i.itemid
INNER JOIN hosts AS h ON i.hostid = h.hostid
WHERE hi.itemid IN (%s) AND hi.clock >= %s LIMIT %s"""
def read_last_clock_file(file):
"""Return the clock (Unixtime) from the specified file or current time
if the file doesn't exist"""
last_clock = int(time.time())
if os.path.isfile(file):
try:
last_clock_file = open(file, 'r')
last_clock = int(last_clock_file.readline())
last_clock_file.close()
except IOError:
sys.stderr.write('Error: failed to read last clock file, ' +
file + '\n')
sys.exit(2)
else:
        print('Warning: ' + file +
              ' file not found; starting from current timestamp')
return last_clock
def write_last_clock_file(file, clock):
"""Write the supplied clock (Unixtime) to file"""
try:
file = open(file, 'w')
file.write(str(clock))
file.close()
except IOError:
sys.stderr.write('Error writing last clock to file: ' +
file + '\n')
sys.exit(2)
def get_db_connection():
"""Get a connection to the DB. We do this per query to ensure we always
have a live connection."""
conn = mysql.connector.connect(user=DB_UNAME, password=DB_PW,
host=DB_SERVER, database=DB_DATABASE,
autocommit=True)
return conn
def get_wavefront_client():
"""Connect to the Wavefront Proxy. We do this per connection to ensure we
always have a live connection."""
client_factory = WavefrontClientFactory()
client_factory.add_client(
url="proxy://{}:{}".format(WAVEFRONT_PROXY_HOST, WAVEFRONT_PROXY_PORT),
max_queue_size=50000,
batch_size=10000,
flush_interval_seconds=5)
wavefront_sender = client_factory.get_client()
return wavefront_sender
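# A hedged example of pushing a single point through the sender built above;
# it assumes a Wavefront proxy is listening on WAVEFRONT_PROXY_HOST/PORT, and
# the metric name and source below are placeholders.
def example_send_one_point():
    """Send one illustrative point and shut the sender down cleanly."""
    sender = get_wavefront_client()
    sender.send_metric("zabbix.example.metric", 42.0, int(time.time()),
                       "example-host", None)
    sender.flush_now()
    sender.close()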
def fetch_next_metrics(history_clock, historyuint_clock, wavefront_sender, tags):
"""Send the next batch of Floating point and Integer metrics to the Wavefront
Proxy and return the last clock time for int and float metrics as a tuple.
Query both the history and history_uint tables."""
conn = get_db_connection()
cursor = conn.cursor()
# Process the history table which contains floating point metrics
float_rows = query_db(HISTORY_TABLE, history_clock, cursor)
float_points_count = len(float_rows)
history_clock = process_and_send_metrics(
float_rows, history_clock, wavefront_sender, tags)
# Process the history_uint table which contains integer metrics
int_rows = query_db(HISTORY_TABLE_UINT, historyuint_clock, cursor)
int_points_count = len(int_rows)
historyuint_clock = process_and_send_metrics(
int_rows,
historyuint_clock,
wavefront_sender,
tags)
cursor.close()
conn.close()
print("Processed {} Float Points and {} Integer Points. "
"Press C-c to terminate."
.format(float_points_count, int_points_count))
return (history_clock, historyuint_clock)
def query_db(history_table_name, clock, cursor):
"""Query the DB for the given table and clock time and return any rows"""
cursor.execute(SQL_QUERY_ITEMIDS)
    # The Python MySQL connector doesn't natively handle binding a list
    # to a SQL IN clause, so that is handled here: build the correct number
    # of %s placeholders and splice them into the SQL string.
itemids = [x[0] for x in cursor.fetchall()]
in_p = ','.join(itertools.repeat('%s', len(itemids)))
sql = SQL_QUERY_HISTORY % (history_table_name, in_p, clock, LIMIT)
cursor.execute(sql, itemids)
return cursor.fetchall()
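# A standalone sketch of the IN-clause placeholder expansion used in
# query_db() above; pure string handling, no database required. The item ids
# are illustrative.
def example_in_clause_sql():
    itemids = [101, 102, 103]
    in_p = ','.join(itertools.repeat('%s', len(itemids)))
    # cursor.execute(sql, itemids) would then bind one itemid per placeholder
    return SQL_QUERY_HISTORY % (HISTORY_TABLE, in_p, 0, LIMIT)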
def process_and_send_metrics(rows, latest_clock, wavefront_sender, tags):
"""Convert each row in rows into the Wavefront format and send to the
Wavefront proxy. Return the latest clock value found (which will be unchanged
if rows was empty)"""
for (clock, value, host, itemkey) in rows:
# These isinstance checks will only return true with Python3.
# See this issue: http://sourceforge.net/p/mysql-python/bugs/289/
if isinstance(host, (bytes, bytearray)):
host = host.decode()
if isinstance(itemkey, (bytes, bytearray)):
itemkey = itemkey.decode()
metric = convert_key_to_wf_metric(itemkey)
host = replace_punctuation_and_whitespace(host)
host = host.replace("_", ".") # Make sure no underscore in host name
if clock > latest_clock:
latest_clock = clock
# Wavefront metric names must include at least one .
if "." not in metric:
warning("Cannot process Zabbix item with key_: {} "
"as it contains no . character"
.format(itemkey))
continue
# wavefront_sender will be None if SEND_TO_WF is False
if wavefront_sender:
wavefront_sender.send_metric(metric, value, clock, host, tags)
else:
di_msg = "{0}{1} {2} host={3}\n".format(metric,
value, clock, host)
print(di_msg)
return latest_clock
def convert_key_to_wf_metric(key):
"""A Wavefront metric name can be: 'Any lowercase, alphanumeric,
dot-separated value. May also include dashes (-) or underscores (_)'
For the purposes of sending data from Zabbix to Wavefront we replace any
whitespace or punctuation other than dash and underscore from the Zabbix key_
with a single . and convert the whole string to lower case. """
metric = replace_punctuation_and_whitespace(key)
metric = remove_trailing_dots(metric)
metric = just_one_dot(metric)
metric = prefix_metric(metric)
metric = metric.lower()
return metric
def replace_punctuation_and_whitespace(text):
"""Replace occurrences of punctuation (other than . - _) and
any consecutive white space with ."""
regex = re.compile(r"[^\w\s\-.]|\s+")
return regex.sub(".", text)
def just_one_dot(text):
"""Some Zabbix metrics can end up with multiple . characters.
Replace with a single one"""
regex = re.compile(r"\.+")
return regex.sub(".", text)
def prefix_metric(metric):
"""Apply metric prefix if the collected metrics does
not start with 'zabbix.'"""
if metric.startswith(ZABBIX_PREFIX):
return metric
else:
return ZABBIX_PREFIX + metric
def remove_trailing_dots(text):
"""Remove any trailing . characters from a metric name"""
regex = re.compile(r"\.+$")
return regex.sub("", text)
def format_clock(clock):
"""Return the clock in a human readable format"""
return datetime.datetime.fromtimestamp(int(clock)).strftime(
'%Y-%m-%d %H:%M:%S')
def warning(*msgs):
"""Print the supplied msgs to stderr as a warning"""
print("WARNING: ", *msgs, file=sys.stderr)
def error(*msgs):
"""Print the supplied msgs to stderr as an Error"""
print("ERROR: ", *msgs, file=sys.stderr)
def signal_handler(signal, frame):
"""Print the final Float Clock Time"""
print("Wrapping up. Final Float Clock Time: {}. "
"Final Int Clock Time: {}."
.format(history_clock, historyuint_clock))
write_last_clock_file(HISTORY_CLOCK_FILEPATH, history_clock)
write_last_clock_file(HISTORYUINT_CLOCK_FILEPATH, historyuint_clock)
sys.exit(0)
if __name__ == "__main__":
# The clocks store the epoch time that has already been sent
# to Wavefront for each Zabbix telemetry table (history and history_uint)
history_clock = read_last_clock_file(HISTORY_CLOCK_FILEPATH)
historyuint_clock = read_last_clock_file(HISTORYUINT_CLOCK_FILEPATH)
# Listen for C-c and exit cleanly
signal.signal(signal.SIGINT, signal_handler)
wavefront_sender = None
tags = None
if SEND_TO_WF:
wavefront_sender = get_wavefront_client()
# Loop forever (unless killed by SIGINT) or exception caught
while True:
try:
history_clock, historyuint_clock = fetch_next_metrics(
history_clock, historyuint_clock, wavefront_sender, tags)
msg = "Latest Float Point: {}. Latest Integer Point: {}."
print(msg.format(
format_clock(history_clock),
format_clock(historyuint_clock)))
write_last_clock_file(HISTORY_CLOCK_FILEPATH, history_clock)
write_last_clock_file(
HISTORYUINT_CLOCK_FILEPATH, historyuint_clock)
time.sleep(POLL_INTERVAL)
except mysql.connector.Error as e:
error("Please check your database configuration: {}".format(e))
sys.exit(1)
except socket.error as e:
error("Please check your Wavefront Proxy configuration: {}".format(e))
if SEND_TO_WF:
wavefront_sender.flush_now()
wavefront_sender.close()
sys.exit(1)
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_config import cfg
from oslo_log import log as logging
from conveyor.clone.resources import common
from conveyor.common import plan_status
from conveyor.conveyoragentclient.v1 import client as birdiegatewayclient
from conveyor import compute
from conveyor import exception
from conveyor import utils
from conveyor import volume
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class VolumeCloneDriver(object):
def __init__(self):
self.cinder_api = volume.API()
self.compute_api = compute.API()
def start_volume_clone(self, context, resource_name, template,
trans_data_wait_fun=None,
volume_wait_fun=None,
set_plan_state=None):
resources = template.get('resources')
volume_res = resources.get(resource_name)
volume_id = volume_res.get('id')
plan_id = template.get('plan_id', None)
ext_properties = volume_res.get('extra_properties', None)
d_copy = ext_properties.get('copy_data', True) if ext_properties \
else True
if not d_copy:
plan_state = 'DATA_TRANS_FINISHED'
set_plan_state(context, plan_id, plan_state,
plan_status.STATE_MAP)
return
resource_names = resource_name.split('.')
        if resource_names[0].startswith('stack'):
            self._copy_stack_volume(context, resource_name, template,
                                    plan_id, trans_data_wait_fun,
                                    volume_wait_fun, set_plan_state)
            return
        # 1. Check whether an instance that depends on this volume exists in
        # the template; if one does, skip the copy-data step.
is_attached = self._check_volume_attach_instance(resource_name,
template)
if is_attached:
LOG.debug('Volume clone driver: volume %(id)s, name %(name)s',
{'id': volume_id, 'name': resource_name})
# update plan status
plan_state = 'DATA_TRANS_FINISHED'
set_plan_state(context, plan_id, plan_state,
plan_status.STATE_MAP)
return
        # 2. If the volume is a system volume that should not be cloned,
        # skip the copy-data step.
if ext_properties:
sys_clone = ext_properties.get('sys_clone', False)
boot_index = ext_properties.get('boot_index', 1)
else:
sys_clone = False
boot_index = 1
if not sys_clone and boot_index in ['0', 0]:
plan_state = 'DATA_TRANS_FINISHED'
set_plan_state(context, plan_id, plan_state,
plan_status.STATE_MAP)
return
# 3. get volume info
try:
volume_info = self.cinder_api.get(context, volume_id)
except Exception as e:
LOG.error("Clone volume driver get volume %(id)s error: %(error)s",
{'id': volume_id, 'error': e})
raise exception.VolumeNotFound()
volume_status = volume_info['status']
v_shareable = volume_info.get('shareable', False)
volume_az = volume_info.get('availability_zone')
need_set_shareable = False
if volume_status == 'in-use' and not v_shareable:
need_set_shareable = True
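        # An in-use, non-shareable volume must be made temporarily shareable
        # so it can be attached to the gateway VM while it is still attached
        # to its original server.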
        # 4. attach volume to gateway vm
vgw_id, vgw_ip = utils.get_next_vgw(volume_az)
LOG.debug('Clone volume driver vgw info: id: %(id)s,ip: %(ip)s',
{'id': vgw_id, 'ip': vgw_ip})
des_dev_name = None
try:
client = birdiegatewayclient.get_birdiegateway_client(
vgw_ip,
str(CONF.v2vgateway_api_listen_port)
)
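            # Record the gateway VM's device names before attaching; the new
            # device is identified afterwards by set difference.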
disks = set(client.vservices.get_disk_name().get('dev_name'))
if CONF.is_provide_device_name:
if need_set_shareable:
self.cinder_api.set_volume_shareable(context, volume_id,
True)
self.compute_api.attach_volume(context, vgw_id,
volume_id, None)
# des_dev_name = attach_resp._info.get('device')
self._wait_for_shareable_volume_status(context, volume_id,
vgw_id, 'in-use')
else:
self.compute_api.attach_volume(context, vgw_id,
volume_id, None)
if volume_wait_fun:
volume_wait_fun(context, volume_id, 'in-use')
# des_dev_name = attach_resp._info.get('device')
n_disks = set(client.vservices.get_disk_name().get('dev_name'))
diff_disk = n_disks - disks
des_dev_name = list(diff_disk)[0] if len(diff_disk) >= 1 \
else None
LOG.debug("dev_name = %s", des_dev_name)
else:
des_dev_name = self._attach_volume_for_device_name(
context, need_set_shareable, vgw_id, volume_id,
volume_wait_fun, client)
LOG.debug("dev_name = %s", des_dev_name)
except Exception as e:
LOG.error('Volume clone error: attach volume failed:%(id)s,%(e)s',
{'id': volume_id, 'e': e})
raise exception.VolumeNotAttach(volume_id=volume_id,
seconds=120,
attempts=5)
# 5. copy data
try:
result = self._copy_volume_data(context, resource_name,
vgw_ip, vgw_id, template,
des_dev_name)
des_gw_ip = result.get('des_ip')
des_port = result.get('des_port')
task_ids = result.get('copy_tasks')
if trans_data_wait_fun:
trans_data_wait_fun(context, des_gw_ip,
des_port, task_ids,
plan_status.STATE_MAP,
plan_id)
except Exception as e:
LOG.error('Volume clone error: copy data failed:%(id)s,%(e)s',
{'id': volume_id, 'e': e})
raise
finally:
try:
                # if the protocol is ftp, unmount the volume in the gateway vm
data_trans_protocol = CONF.data_transformer_procotol
if 'ftp' == data_trans_protocol:
client = birdiegatewayclient.get_birdiegateway_client(
vgw_ip,
str(CONF.v2vgateway_api_listen_port)
)
client.vservices._force_umount_disk("/opt/" + volume_id)
                # if the provider cloud cannot detach a volume in active status
                if not CONF.is_active_detach_volume:
                    resource_common = common.ResourceCommon()
                    self.compute_api.stop_server(context, vgw_id)
                    resource_common._await_instance_status(context,
                                                           vgw_id,
                                                           'SHUTOFF')
                # 6. detach volume
self.compute_api.detach_volume(context, vgw_id, volume_id)
if need_set_shareable:
self._wait_for_shareable_volume_status(context,
volume_id,
vgw_id,
'available')
self.cinder_api.set_volume_shareable(context,
volume_id,
False)
else:
if volume_wait_fun:
volume_wait_fun(context, volume_id, 'available')
                if not CONF.is_active_detach_volume:
                    self.compute_api.start_server(context, vgw_id)
                    resource_common._await_instance_status(context,
                                                           vgw_id,
                                                           'ACTIVE')
except Exception as e:
LOG.error('Volume clone error: detach failed:%(id)s,%(e)s',
{'id': volume_id, 'e': e})
def _copy_stack_volume(self, context, resource_name, template, plan_id,
trans_data_wait_fun=None, volume_wait_fun=None,
set_plan_state=None):
resources = template.get('resources')
volume_res = resources.get(resource_name)
ext_properties = volume_res.get('extra_properties', None)
if ext_properties:
sys_clone = ext_properties.get('sys_clone', False)
boot_index = ext_properties.get('boot_index', 1)
else:
sys_clone = False
boot_index = 1
if not sys_clone and boot_index in ['0', 0]:
plan_state = 'DATA_TRANS_FINISHED'
set_plan_state(context, plan_id, plan_state,
plan_status.STATE_MAP)
return
volume_id = volume_res.get('id')
try:
volume_info = self.cinder_api.get(context, volume_id)
except Exception as e:
LOG.error("Clone volume driver get volume %(id)s error: %(error)s",
{'id': volume_id, 'error': e})
raise exception.VolumeNotFound()
volume_status = volume_info['status']
v_shareable = volume_info.get('shareable', False)
volume_az = volume_info.get('availability_zone')
volume_attachments = volume_info.get('attachments', [])
if volume_status == 'in-use' and not v_shareable:
for attachment in volume_attachments:
server_id = attachment.get('server_id')
                if not CONF.is_active_detach_volume:
                    resource_common = common.ResourceCommon()
                    self.compute_api.stop_server(context, server_id)
                    resource_common._await_instance_status(context,
                                                           server_id,
                                                           'SHUTOFF')
self.compute_api.detach_volume(context, server_id,
volume_id)
if volume_wait_fun:
volume_wait_fun(context, volume_id, 'available')
vgw_id, vgw_ip = utils.get_next_vgw(volume_az)
LOG.debug('Clone volume driver vgw info: id: %(id)s,ip: %(ip)s',
{'id': vgw_id, 'ip': vgw_ip})
des_dev_name = None
try:
client = birdiegatewayclient.get_birdiegateway_client(
vgw_ip,
str(CONF.v2vgateway_api_listen_port)
)
disks = set(client.vservices.get_disk_name().get('dev_name'))
self.compute_api.attach_volume(context, vgw_id,
volume_id, None)
if volume_wait_fun:
volume_wait_fun(context, volume_id, 'in-use')
n_disks = set(client.vservices.get_disk_name().get('dev_name'))
diff_disk = n_disks - disks
des_dev_name = list(diff_disk)[0] if len(diff_disk) >= 1 else None
LOG.debug("dev_name = %s", des_dev_name)
except Exception as e:
LOG.error('Volume clone error: attach volume failed:%(id)s,%(e)s',
{'id': volume_id, 'e': e})
raise exception.VolumeNotAttach(volume_id=volume_id,
seconds=120,
attempts=5)
        # copy data
try:
result = self._copy_volume_data(context, resource_name,
vgw_ip, vgw_id, template,
des_dev_name)
des_gw_ip = result.get('des_ip')
des_port = result.get('des_port')
task_ids = result.get('copy_tasks')
if trans_data_wait_fun:
trans_data_wait_fun(context, des_gw_ip,
des_port, task_ids,
plan_status.STATE_MAP,
plan_id)
except Exception as e:
LOG.error('Volume clone error: copy data failed:%(id)s,%(e)s',
{'id': volume_id, 'e': e})
raise
finally:
try:
                # if the protocol is ftp, unmount the volume in the gateway vm
data_trans_protocol = CONF.data_transformer_procotol
if 'ftp' == data_trans_protocol:
client = birdiegatewayclient.get_birdiegateway_client(
vgw_ip,
str(CONF.v2vgateway_api_listen_port)
)
client.vservices._force_umount_disk("/opt/" + volume_id)
                # if the provider cloud cannot detach a volume in active status
                if not CONF.is_active_detach_volume:
                    resource_common = common.ResourceCommon()
                    self.compute_api.stop_server(context, vgw_id)
                    resource_common._await_instance_status(context,
                                                           vgw_id,
                                                           'SHUTOFF')
                # detach volume
self.compute_api.detach_volume(context, vgw_id, volume_id)
if volume_wait_fun:
volume_wait_fun(context, volume_id, 'available')
                if not CONF.is_active_detach_volume:
                    self.compute_api.start_server(context, vgw_id)
                    resource_common._await_instance_status(context,
                                                           vgw_id,
                                                           'ACTIVE')
for attachment in volume_attachments:
server_id = attachment.get('server_id')
self.compute_api.attach_volume(context, server_id,
volume_id, None)
if volume_wait_fun:
volume_wait_fun(context, volume_id, 'in-use')
except Exception as e:
LOG.error('Volume clone error: detach failed:%(id)s,%(e)s',
{'id': volume_id, 'e': e})
def _attach_volume_for_device_name(self, context, need_set_shareable,
vgw_id, volume_id, volume_wait_fun,
agent_client):
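        # Serialize attachments per gateway VM: the new device is detected by
        # diffing /dev names before and after the attach, which would be
        # ambiguous if two attaches to the same VM interleaved.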
@utils.synchronized(vgw_id)
def _do_attach_volume_for_ok(context, need_set_shareable, vgw_id,
volume_id, volume_wait_fun,
agent_client):
disks = set(agent_client.vservices.get_disk_name().get('dev_name'))
if need_set_shareable:
self.cinder_api.set_volume_shareable(context, volume_id, True)
self.compute_api.attach_volume(context, vgw_id,
volume_id, None)
# des_dev_name = attach_resp._info.get('device')
self._wait_for_shareable_volume_status(context, volume_id,
vgw_id, 'in-use')
else:
self.compute_api.attach_volume(context, vgw_id,
volume_id, None)
if volume_wait_fun:
volume_wait_fun(context, volume_id, 'in-use')
n_disks = set(agent_client.vservices.get_disk_name()
.get('dev_name'))
diff_disk = n_disks - disks
des_dev_name = list(diff_disk)[0] if len(diff_disk) >= 1 else None
return des_dev_name
return _do_attach_volume_for_ok(context, need_set_shareable, vgw_id,
volume_id, volume_wait_fun,
agent_client)
def _copy_volume_data(self, context, resource_name,
des_gw_ip, vgw_id, template, dev_name):
LOG.debug('Clone volume driver copy data start for %s', resource_name)
resources = template.get('resources')
volume_res = resources.get(resource_name)
volume_id = volume_res.get('id')
volume_ext_properties = volume_res.get('extra_properties')
# 1. get gateway vm conveyor agent service ip and port
des_gw_port = str(CONF.v2vgateway_api_listen_port)
des_gw_url = des_gw_ip + ':' + des_gw_port
        # data transfer protocol (ftp/fillp)
data_trans_protocol = CONF.data_transformer_procotol
# data_trans_ports = CONF.trans_ports
# trans_port = data_trans_ports[0]
trans_port = utils.get_next_port_for_vgw(vgw_id)
# 2. get source cloud gateway vm conveyor agent service ip and port
src_gw_url = volume_ext_properties.get('gw_url')
src_urls = src_gw_url.split(':')
if len(src_urls) != 2:
LOG.error("Input source gw url error: %s", src_gw_url)
msg = "Input source gw url error: " + src_gw_url
raise exception.InvalidInput(reason=msg)
src_gw_ip = src_urls[0]
src_gw_port = src_urls[1]
        # 3. get volume mount point and disk format info
boot_index = 1
src_mount_point = "/opt/" + volume_id
if volume_ext_properties:
src_dev_format = volume_ext_properties.get('guest_format')
# volume dev name in system
src_vol_sys_dev = volume_ext_properties.get('sys_dev_name')
boot_index = volume_ext_properties.get('boot_index', 1)
if dev_name:
des_dev_name = dev_name
else:
des_dev_name = src_vol_sys_dev
if not src_dev_format:
client = birdiegatewayclient.get_birdiegateway_client(src_gw_ip,
src_gw_port)
            d_format = client.vservices.get_disk_format(src_vol_sys_dev)
            src_dev_format = d_format.get('disk_format')
# if disk does not format, then no data to copy
if not src_dev_format and data_trans_protocol == 'ftp':
rsp = {'volume_id': volume_id,
'des_ip': None,
'des_port': None,
'copy_tasks': None}
return rsp
mount_point = []
task_ids = []
        if boot_index not in [0, '0'] and data_trans_protocol == 'ftp':
mount_point.append(src_mount_point)
# 4. copy data
client = birdiegatewayclient.get_birdiegateway_client(des_gw_ip,
des_gw_port)
clone_rsp = client.vservices.clone_volume(
src_vol_sys_dev,
des_dev_name,
src_dev_format,
mount_point,
src_gw_url,
des_gw_url,
trans_protocol=data_trans_protocol,
trans_port=trans_port)
task_id = clone_rsp.get('body').get('task_id')
task_ids.append(task_id)
rsp = {'volume_id': volume_id,
'des_ip': des_gw_ip,
'des_port': des_gw_port,
'copy_tasks': task_ids}
LOG.debug('Clone volume driver copy data end for %s', resource_name)
return rsp
def _check_volume_attach_instance(self, resource_name, template):
        # Walk every server resource in the template and check whether any
        # block device mapping (BDM) references this volume.
LOG.debug('Volume clone start: check volume attached vm exist.')
resources = template.get('resources')
for key, res in resources.items():
res_type = res.get('type')
if 'OS::Nova::Server' == res_type:
properties = res.get('properties')
volumes = properties.get('block_device_mapping_v2')
if not volumes:
continue
for v_volume in volumes:
vol_name = v_volume.get('volume_id')
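                    # In the template, volume_id may be a
                    # {'get_resource': <name>} reference rather than a UUID.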
if isinstance(vol_name, dict):
vol_res_name = vol_name.get('get_resource')
if vol_res_name == resource_name:
LOG.debug('Volume clone end: attached vm exist.')
return True
LOG.debug('Volume clone end: volume attached vm not exist.')
return False
def _wait_for_shareable_volume_status(self, context,
volume_id,
server_id,
status):
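        # For a shareable volume the status can remain 'in-use' while other
        # servers stay attached, so poll the attachment list for this server
        # instead of waiting on the volume status itself.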
volume = self.cinder_api.get(context, volume_id)
start = int(time.time())
volume_attachments = volume['attachments']
attach_flag = False
end_flag = False
for vol_att in volume_attachments:
if vol_att.get('server_id') == server_id:
attach_flag = True
if status == 'in-use' and attach_flag:
end_flag = True
elif status == 'available' and not attach_flag:
end_flag = True
while not end_flag:
attach_flag = False
time.sleep(CONF.check_interval)
volume = self.cinder_api.get(context, volume_id)
volume_attachments = volume['attachments']
for vol_att in volume_attachments:
if vol_att.get('server_id') == server_id:
attach_flag = True
if status == 'in-use' and attach_flag:
end_flag = True
elif status == 'available' and not attach_flag:
end_flag = True
if int(time.time()) - start >= CONF.check_timeout:
message = ('Volume %s failed to reach %s status (server %s) '
'within the required time (%s s).' %
(volume_id, status, server_id,
CONF.check_timeout))
raise exception.TimeoutException(msg=message)
|
|
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import mock
import netaddr
from neutron.agent.l3 import agent as neutron_l3_agent
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.common import constants as l3_constants
from neutron.tests.common import l3_test_common
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.l3 import framework
DEVICE_OWNER_COMPUTE = l3_constants.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
class TestDvrRouter(framework.L3AgentTestFramework):
def test_dvr_router_lifecycle_without_ha_without_snat_with_fips(self):
self._dvr_router_lifecycle(enable_ha=False, enable_snat=False)
def test_dvr_router_lifecycle_without_ha_with_snat_with_fips(self):
self._dvr_router_lifecycle(enable_ha=False, enable_snat=True)
def test_dvr_router_lifecycle_ha_with_snat_with_fips(self):
self._dvr_router_lifecycle(enable_ha=True, enable_snat=True)
def _helper_create_dvr_router_fips_for_ext_network(
self, agent_mode, **dvr_router_kwargs):
self.agent.conf.agent_mode = agent_mode
router_info = self.generate_dvr_router_info(**dvr_router_kwargs)
self.mock_plugin_api.get_external_network_id.return_value = (
router_info['_floatingips'][0]['floating_network_id'])
router = self.manage_router(self.agent, router_info)
fip_ns = router.fip_ns.get_name()
return router, fip_ns
def _validate_fips_for_external_network(self, router, fip_ns):
self.assertTrue(self._namespace_exists(router.ns_name))
self.assertTrue(self._namespace_exists(fip_ns))
self._assert_dvr_floating_ips(router)
self._assert_snat_namespace_does_not_exist(router)
def test_dvr_router_fips_for_multiple_ext_networks(self):
agent_mode = 'dvr'
# Create the first router fip with external net1
dvr_router1_kwargs = {'ip_address': '19.4.4.3',
'subnet_cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1',
'gateway_mac': 'ca:fe:de:ab:cd:ef'}
router1, fip1_ns = (
self._helper_create_dvr_router_fips_for_ext_network(
agent_mode, **dvr_router1_kwargs))
# Validate the fip with external net1
self._validate_fips_for_external_network(router1, fip1_ns)
# Create the second router fip with external net2
dvr_router2_kwargs = {'ip_address': '19.4.5.3',
'subnet_cidr': '19.4.5.0/24',
'gateway_ip': '19.4.5.1',
'gateway_mac': 'ca:fe:de:ab:cd:fe'}
router2, fip2_ns = (
self._helper_create_dvr_router_fips_for_ext_network(
agent_mode, **dvr_router2_kwargs))
# Validate the fip with external net2
self._validate_fips_for_external_network(router2, fip2_ns)
def _dvr_router_lifecycle(self, enable_ha=False, enable_snat=False,
custom_mtu=2000,
ip_version=4,
dual_stack=False):
        '''Test the DVR router lifecycle.
        :param enable_ha: sets the ha value for the router.
        :param enable_snat: the value of enable_snat is used
                            to set the agent_mode.
        :param custom_mtu: the MTU configured on the agent devices.
        :param ip_version: the IP version of the router interfaces.
        :param dual_stack: whether both IPv4 and IPv6 are configured.
        '''
# The value of agent_mode can be dvr, dvr_snat, or legacy.
# Since by definition this is a dvr (distributed = true)
# only dvr and dvr_snat are applicable
self.agent.conf.agent_mode = 'dvr_snat' if enable_snat else 'dvr'
self.agent.conf.network_device_mtu = custom_mtu
# We get the router info particular to a dvr router
router_info = self.generate_dvr_router_info(
enable_ha, enable_snat)
# We need to mock the get_agent_gateway_port return value
# because the whole L3PluginApi is mocked and we need the port
# gateway_port information before the l3_agent will create it.
# The port returned needs to have the same information as
# router_info['gw_port']
self.mock_plugin_api.get_agent_gateway_port.return_value = router_info[
'gw_port']
# We also need to mock the get_external_network_id method to
# get the correct fip namespace.
self.mock_plugin_api.get_external_network_id.return_value = (
router_info['_floatingips'][0]['floating_network_id'])
# With all that set we can now ask the l3_agent to
# manage the router (create it, create namespaces,
# attach interfaces, etc...)
router = self.manage_router(self.agent, router_info)
if enable_ha:
port = router.get_ex_gw_port()
interface_name = router.get_external_device_name(port['id'])
self._assert_no_ip_addresses_on_interface(router.ha_namespace,
interface_name)
utils.wait_until_true(lambda: router.ha_state == 'master')
# Keepalived notifies of a state transition when it starts,
# not when it ends. Thus, we have to wait until keepalived finishes
# configuring everything. We verify this by waiting until the last
# device has an IP address.
device = router.router[l3_constants.INTERFACE_KEY][-1]
device_exists = functools.partial(
self.device_exists_with_ips_and_mac,
device,
router.get_internal_device_name,
router.ns_name)
utils.wait_until_true(device_exists)
ext_gateway_port = router_info['gw_port']
self.assertTrue(self._namespace_exists(router.ns_name))
utils.wait_until_true(
lambda: self._metadata_proxy_exists(self.agent.conf, router))
self._assert_internal_devices(router)
self._assert_dvr_external_device(router)
self._assert_dvr_gateway(router)
self._assert_dvr_floating_ips(router)
self._assert_snat_chains(router)
self._assert_floating_ip_chains(router)
self._assert_metadata_chains(router)
self._assert_extra_routes(router)
self._assert_rfp_fpr_mtu(router, custom_mtu)
if enable_snat:
ip_versions = [4, 6] if (ip_version == 6 or dual_stack) else [4]
snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
self._assert_onlink_subnet_routes(
router, ip_versions, snat_ns_name)
self._delete_router(self.agent, router.router_id)
self._assert_fip_namespace_deleted(ext_gateway_port)
self._assert_router_does_not_exist(router)
self._assert_snat_namespace_does_not_exist(router)
def generate_dvr_router_info(self,
enable_ha=False,
enable_snat=False,
agent=None,
**kwargs):
if not agent:
agent = self.agent
router = l3_test_common.prepare_router_data(
enable_snat=enable_snat,
enable_floating_ip=True,
enable_ha=enable_ha,
**kwargs)
internal_ports = router.get(l3_constants.INTERFACE_KEY, [])
router['distributed'] = True
router['gw_port_host'] = agent.conf.host
router['gw_port']['binding:host_id'] = agent.conf.host
floating_ip = router['_floatingips'][0]
floating_ip['floating_network_id'] = router['gw_port']['network_id']
floating_ip['host'] = agent.conf.host
floating_ip['port_id'] = internal_ports[0]['id']
floating_ip['status'] = 'ACTIVE'
self._add_snat_port_info_to_router(router, internal_ports)
# FIP has a dependency on external gateway. So we need to create
# the snat_port info and fip_agent_gw_port_info irrespective of
# the agent type the dvr supports. The namespace creation is
# dependent on the agent_type.
external_gw_port = router['gw_port']
self._add_fip_agent_gw_port_info_to_router(router, external_gw_port)
return router
def _add_fip_agent_gw_port_info_to_router(self, router, external_gw_port):
# Add fip agent gateway port information to the router_info
fip_gw_port_list = router.get(
l3_constants.FLOATINGIP_AGENT_INTF_KEY, [])
if not fip_gw_port_list and external_gw_port:
# Get values from external gateway port
fixed_ip = external_gw_port['fixed_ips'][0]
float_subnet = external_gw_port['subnets'][0]
port_ip = fixed_ip['ip_address']
# Pick an ip address which is not the same as port_ip
fip_gw_port_ip = str(netaddr.IPAddress(port_ip) + 5)
# Add floatingip agent gateway port info to router
prefixlen = netaddr.IPNetwork(float_subnet['cidr']).prefixlen
router[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = [
{'subnets': [
{'cidr': float_subnet['cidr'],
'gateway_ip': float_subnet['gateway_ip'],
'id': fixed_ip['subnet_id']}],
'network_id': external_gw_port['network_id'],
'device_owner': l3_constants.DEVICE_OWNER_AGENT_GW,
'mac_address': 'fa:16:3e:80:8d:89',
'binding:host_id': self.agent.conf.host,
'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'],
'ip_address': fip_gw_port_ip,
'prefixlen': prefixlen}],
'id': framework._uuid(),
'device_id': framework._uuid()}
]
def _add_snat_port_info_to_router(self, router, internal_ports):
# Add snat port information to the router
snat_port_list = router.get(l3_constants.SNAT_ROUTER_INTF_KEY, [])
if not snat_port_list and internal_ports:
# Get values from internal port
port = internal_ports[0]
fixed_ip = port['fixed_ips'][0]
snat_subnet = port['subnets'][0]
port_ip = fixed_ip['ip_address']
# Pick an ip address which is not the same as port_ip
snat_ip = str(netaddr.IPAddress(port_ip) + 5)
# Add the info to router as the first snat port
# in the list of snat ports
prefixlen = netaddr.IPNetwork(snat_subnet['cidr']).prefixlen
router[l3_constants.SNAT_ROUTER_INTF_KEY] = [
{'subnets': [
{'cidr': snat_subnet['cidr'],
'gateway_ip': snat_subnet['gateway_ip'],
'id': fixed_ip['subnet_id']}],
'network_id': port['network_id'],
'device_owner': l3_constants.DEVICE_OWNER_ROUTER_SNAT,
'mac_address': 'fa:16:3e:80:8d:89',
'fixed_ips': [{'subnet_id': fixed_ip['subnet_id'],
'ip_address': snat_ip,
'prefixlen': prefixlen}],
'id': framework._uuid(),
'device_id': framework._uuid()}
]
def _assert_dvr_external_device(self, router):
external_port = router.get_ex_gw_port()
snat_ns_name = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
# if the agent is in dvr_snat mode, then we have to check
# that the correct ports and ip addresses exist in the
# snat_ns_name namespace
if self.agent.conf.agent_mode == 'dvr_snat':
device_exists = functools.partial(
self.device_exists_with_ips_and_mac,
external_port,
router.get_external_device_name,
snat_ns_name)
utils.wait_until_true(device_exists)
# if the agent is in dvr mode then the snat_ns_name namespace
# should not be present at all:
elif self.agent.conf.agent_mode == 'dvr':
self.assertFalse(
self._namespace_exists(snat_ns_name),
"namespace %s was found but agent is in dvr mode not dvr_snat"
% (str(snat_ns_name))
)
        # if the agent is anything else, the test is misconfigured;
        # force a test failure with a message
        else:
            self.fail("agent not configured for dvr or dvr_snat")
def _assert_dvr_gateway(self, router):
gateway_expected_in_snat_namespace = (
self.agent.conf.agent_mode == 'dvr_snat'
)
if gateway_expected_in_snat_namespace:
self._assert_dvr_snat_gateway(router)
self._assert_removal_of_already_deleted_gateway_device(router)
snat_namespace_should_not_exist = (
self.agent.conf.agent_mode == 'dvr'
)
if snat_namespace_should_not_exist:
self._assert_snat_namespace_does_not_exist(router)
def _assert_dvr_snat_gateway(self, router):
namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
external_device = ip_lib.IPDevice(external_device_name,
namespace=namespace)
existing_gateway = (
external_device.route.get_gateway().get('gateway'))
expected_gateway = external_port['subnets'][0]['gateway_ip']
self.assertEqual(expected_gateway, existing_gateway)
def _assert_removal_of_already_deleted_gateway_device(self, router):
namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
device = ip_lib.IPDevice("fakedevice",
namespace=namespace)
# Assert that no exception is thrown for this case
self.assertIsNone(router._delete_gateway_device_if_exists(
device, "192.168.0.1", 0))
def _assert_snat_namespace_does_not_exist(self, router):
namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
self.assertFalse(self._namespace_exists(namespace))
def _assert_dvr_floating_ips(self, router):
# in the fip namespace:
# Check that the fg-<port-id> (floatingip_agent_gateway)
# is created with the ip address of the external gateway port
floating_ips = router.router[l3_constants.FLOATINGIP_KEY]
self.assertTrue(floating_ips)
# We need to fetch the floatingip agent gateway port info
# from the router_info
floating_agent_gw_port = (
router.router[l3_constants.FLOATINGIP_AGENT_INTF_KEY])
self.assertTrue(floating_agent_gw_port)
external_gw_port = floating_agent_gw_port[0]
fip_ns = self.agent.get_fip_ns(floating_ips[0]['floating_network_id'])
fip_ns_name = fip_ns.get_name()
fg_port_created_successfully = ip_lib.device_exists_with_ips_and_mac(
fip_ns.get_ext_device_name(external_gw_port['id']),
[self._port_first_ip_cidr(external_gw_port)],
external_gw_port['mac_address'],
namespace=fip_ns_name)
self.assertTrue(fg_port_created_successfully)
# Check fpr-router device has been created
device_name = fip_ns.get_int_device_name(router.router_id)
fpr_router_device_created_successfully = ip_lib.device_exists(
device_name, namespace=fip_ns_name)
self.assertTrue(fpr_router_device_created_successfully)
# In the router namespace
# Check rfp-<router-id> is created correctly
for fip in floating_ips:
device_name = fip_ns.get_rtr_ext_device_name(router.router_id)
self.assertTrue(ip_lib.device_exists(
device_name, namespace=router.ns_name))
def test_dvr_router_rem_fips_on_restarted_agent(self):
self.agent.conf.agent_mode = 'dvr_snat'
router_info = self.generate_dvr_router_info()
router1 = self.manage_router(self.agent, router_info)
fip_ns = router1.fip_ns.get_name()
self.assertTrue(self._namespace_exists(fip_ns))
restarted_agent = neutron_l3_agent.L3NATAgentWithStateReport(
self.agent.host, self.agent.conf)
router1.router[l3_constants.FLOATINGIP_KEY] = []
self.manage_router(restarted_agent, router1.router)
self._assert_dvr_snat_gateway(router1)
self.assertTrue(self._namespace_exists(fip_ns))
def test_dvr_router_add_fips_on_restarted_agent(self):
self.agent.conf.agent_mode = 'dvr'
router_info = self.generate_dvr_router_info()
router = self.manage_router(self.agent, router_info)
floating_ips = router.router[l3_constants.FLOATINGIP_KEY]
router_ns = router.ns_name
fip_rule_prio_1 = self._get_fixed_ip_rule_priority(
router_ns, floating_ips[0]['fixed_ip_address'])
restarted_agent = neutron_l3_agent.L3NATAgent(
self.agent.host, self.agent.conf)
floating_ips[0]['floating_ip_address'] = '21.4.4.2'
floating_ips[0]['fixed_ip_address'] = '10.0.0.2'
self.manage_router(restarted_agent, router_info)
fip_rule_prio_2 = self._get_fixed_ip_rule_priority(
router_ns, floating_ips[0]['fixed_ip_address'])
self.assertNotEqual(fip_rule_prio_1, fip_rule_prio_2)
def _get_fixed_ip_rule_priority(self, namespace, fip):
iprule = ip_lib.IPRule(namespace)
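        # Parse the output of 'ip -4 rule show', one rule per line.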
lines = iprule.rule._as_root([4], ['show']).splitlines()
for line in lines:
if fip in line:
info = iprule.rule._parse_line(4, line)
return info['priority']
def test_dvr_router_add_internal_network_set_arp_cache(self):
# Check that, when the router is set up and there are
# existing ports on the uplinked subnet, the ARP
# cache is properly populated.
self.agent.conf.agent_mode = 'dvr_snat'
router_info = l3_test_common.prepare_router_data()
router_info['distributed'] = True
expected_neighbor = '35.4.1.10'
port_data = {
'fixed_ips': [{'ip_address': expected_neighbor}],
'mac_address': 'fa:3e:aa:bb:cc:dd',
'device_owner': DEVICE_OWNER_COMPUTE
}
self.agent.plugin_rpc.get_ports_by_subnet.return_value = [port_data]
router1 = self.manage_router(self.agent, router_info)
internal_device = router1.get_internal_device_name(
router_info['_interfaces'][0]['id'])
neighbors = ip_lib.IPDevice(internal_device, router1.ns_name).neigh
self.assertEqual(expected_neighbor,
neighbors.show(ip_version=4).split()[0])
def _assert_rfp_fpr_mtu(self, router, expected_mtu=1500):
dev_mtu = self.get_device_mtu(
router.router_id, router.fip_ns.get_rtr_ext_device_name,
router.ns_name)
self.assertEqual(expected_mtu, dev_mtu)
dev_mtu = self.get_device_mtu(
router.router_id, router.fip_ns.get_int_device_name,
router.fip_ns.get_name())
self.assertEqual(expected_mtu, dev_mtu)
def test_dvr_router_fip_agent_mismatch(self):
"""Test to validate the floatingip agent mismatch.
This test validates the condition where floatingip agent
gateway port host mismatches with the agent and so the
binding will not be there.
"""
self.agent.conf.agent_mode = 'dvr'
router_info = self.generate_dvr_router_info()
floating_ip = router_info['_floatingips'][0]
floating_ip['host'] = 'my_new_host'
        # In this case the floating IP is bound to a different host, so the
        # floating IP namespace should not be created on this agent.
        # This is equivalent to there being no current binding.
router1 = self.manage_router(self.agent, router_info)
fip_ns = router1.fip_ns.get_name()
self.assertTrue(self._namespace_exists(router1.ns_name))
self.assertFalse(self._namespace_exists(fip_ns))
self._assert_snat_namespace_does_not_exist(router1)
def test_dvr_router_fip_late_binding(self):
"""Test to validate the floatingip migration or latebinding.
This test validates the condition where floatingip private
port changes while migration or when the private port host
binding is done later after floatingip association.
"""
self.agent.conf.agent_mode = 'dvr'
router_info = self.generate_dvr_router_info()
fip_agent_gw_port = router_info[l3_constants.FLOATINGIP_AGENT_INTF_KEY]
# Now let us not pass the FLOATINGIP_AGENT_INTF_KEY, to emulate
# that the server did not create the port, since there was no valid
# host binding.
router_info[l3_constants.FLOATINGIP_AGENT_INTF_KEY] = []
self.mock_plugin_api.get_agent_gateway_port.return_value = (
fip_agent_gw_port[0])
router1 = self.manage_router(self.agent, router_info)
fip_ns = router1.fip_ns.get_name()
self.assertTrue(self._namespace_exists(router1.ns_name))
self.assertTrue(self._namespace_exists(fip_ns))
self._assert_snat_namespace_does_not_exist(router1)
def _assert_snat_namespace_exists(self, router):
namespace = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
self.assertTrue(self._namespace_exists(namespace))
def _get_dvr_snat_namespace_device_status(
self, router, internal_dev_name=None):
"""Function returns the internal and external device status."""
snat_ns = dvr_snat_ns.SnatNamespace.get_snat_ns_name(
router.router_id)
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
qg_device_created_successfully = ip_lib.device_exists(
external_device_name, namespace=snat_ns)
sg_device_created_successfully = ip_lib.device_exists(
internal_dev_name, namespace=snat_ns)
return qg_device_created_successfully, sg_device_created_successfully
def test_dvr_router_snat_namespace_with_interface_remove(self):
"""Test to validate the snat namespace with interface remove.
This test validates the snat namespace for all the external
and internal devices. It also validates if the internal
device corresponding to the router interface is removed
when the router interface is deleted.
"""
self.agent.conf.agent_mode = 'dvr_snat'
router_info = self.generate_dvr_router_info()
snat_internal_port = router_info[l3_constants.SNAT_ROUTER_INTF_KEY]
router1 = self.manage_router(self.agent, router_info)
csnat_internal_port = (
router1.router[l3_constants.SNAT_ROUTER_INTF_KEY])
# Now save the internal device name to verify later
internal_device_name = router1._get_snat_int_device_name(
csnat_internal_port[0]['id'])
self._assert_snat_namespace_exists(router1)
qg_device, sg_device = self._get_dvr_snat_namespace_device_status(
router1, internal_dev_name=internal_device_name)
self.assertTrue(qg_device)
self.assertTrue(sg_device)
self.assertEqual(router1.snat_ports, snat_internal_port)
# Now let us not pass INTERFACE_KEY, to emulate
# the interface has been removed.
router1.router[l3_constants.INTERFACE_KEY] = []
# Now let us not pass the SNAT_ROUTER_INTF_KEY, to emulate
# that the server did not send it, since the interface has been
# removed.
router1.router[l3_constants.SNAT_ROUTER_INTF_KEY] = []
self.agent._process_updated_router(router1.router)
router_updated = self.agent.router_info[router_info['id']]
self._assert_snat_namespace_exists(router_updated)
qg_device, sg_device = self._get_dvr_snat_namespace_device_status(
router_updated, internal_dev_name=internal_device_name)
self.assertFalse(sg_device)
self.assertTrue(qg_device)
def _mocked_dvr_ha_router(self, agent):
r_info = self.generate_dvr_router_info(enable_ha=True,
enable_snat=True,
agent=agent)
r_snat_ns_name = namespaces.build_ns_name(dvr_snat_ns.SNAT_NS_PREFIX,
r_info['id'])
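        # Both agents run on the same test machine, so suffix the namespace
        # names with the agent host to keep the two routers' namespaces
        # distinct.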
mocked_r_snat_ns_name = r_snat_ns_name + '@' + agent.host
r_ns_name = namespaces.build_ns_name(namespaces.NS_PREFIX,
r_info['id'])
mocked_r_ns_name = r_ns_name + '@' + agent.host
return r_info, mocked_r_ns_name, mocked_r_snat_ns_name
def _setup_dvr_ha_agents(self):
self.agent.conf.agent_mode = 'dvr_snat'
conf = self._configure_agent('agent2')
self.failover_agent = neutron_l3_agent.L3NATAgentWithStateReport(
'agent2', conf)
self.failover_agent.conf.agent_mode = 'dvr_snat'
def _setup_dvr_ha_bridges(self):
br_int_1 = self._get_agent_ovs_integration_bridge(self.agent)
br_int_2 = self._get_agent_ovs_integration_bridge(self.failover_agent)
veth1, veth2 = self.useFixture(net_helpers.VethFixture()).ports
br_int_1.add_port(veth1.name)
br_int_2.add_port(veth2.name)
def _create_dvr_ha_router(self, agent):
get_ns_name = mock.patch.object(namespaces.RouterNamespace,
'_get_ns_name').start()
get_snat_ns_name = mock.patch.object(dvr_snat_ns.SnatNamespace,
'get_snat_ns_name').start()
(r_info,
mocked_r_ns_name,
mocked_r_snat_ns_name) = self._mocked_dvr_ha_router(agent)
get_ns_name.return_value = mocked_r_ns_name
get_snat_ns_name.return_value = mocked_r_snat_ns_name
router = self.manage_router(agent, r_info)
return router
def _assert_ip_addresses_in_dvr_ha_snat_namespace(self, router):
namespace = router.ha_namespace
ex_gw_port = router.get_ex_gw_port()
snat_port = router.get_snat_interfaces()[0]
ex_gw_port_name = router.get_external_device_name(
ex_gw_port['id'])
snat_port_name = router._get_snat_int_device_name(
snat_port['id'])
ip = ex_gw_port["fixed_ips"][0]['ip_address']
prefix_len = ex_gw_port["fixed_ips"][0]['prefixlen']
ex_gw_port_cidr = ip + "/" + str(prefix_len)
ip = snat_port["fixed_ips"][0]['ip_address']
prefix_len = snat_port["fixed_ips"][0]['prefixlen']
snat_port_cidr = ip + "/" + str(prefix_len)
self._assert_ip_address_on_interface(namespace,
ex_gw_port_name,
ex_gw_port_cidr)
self._assert_ip_address_on_interface(namespace,
snat_port_name,
snat_port_cidr)
def _assert_no_ip_addresses_in_dvr_ha_snat_namespace(self, router):
namespace = router.ha_namespace
ex_gw_port = router.get_ex_gw_port()
snat_port = router.get_snat_interfaces()[0]
ex_gw_port_name = router.get_external_device_name(
ex_gw_port['id'])
snat_port_name = router._get_snat_int_device_name(
snat_port['id'])
self._assert_no_ip_addresses_on_interface(namespace,
snat_port_name)
self._assert_no_ip_addresses_on_interface(namespace,
ex_gw_port_name)
def test_dvr_ha_router_failover(self):
self._setup_dvr_ha_agents()
self._setup_dvr_ha_bridges()
router1 = self._create_dvr_ha_router(self.agent)
router2 = self._create_dvr_ha_router(self.failover_agent)
utils.wait_until_true(lambda: router1.ha_state == 'master')
utils.wait_until_true(lambda: router2.ha_state == 'backup')
self._assert_ip_addresses_in_dvr_ha_snat_namespace(router1)
self._assert_no_ip_addresses_in_dvr_ha_snat_namespace(router2)
self.fail_ha_router(router1)
utils.wait_until_true(lambda: router2.ha_state == 'master')
utils.wait_until_true(lambda: router1.ha_state == 'backup')
self._assert_ip_addresses_in_dvr_ha_snat_namespace(router2)
self._assert_no_ip_addresses_in_dvr_ha_snat_namespace(router1)
def _assert_fip_namespace_deleted(self, ext_gateway_port):
ext_net_id = ext_gateway_port['network_id']
self.agent.fipnamespace_delete_on_ext_net(
self.agent.context, ext_net_id)
self._assert_interfaces_deleted_from_ovs()
|
|
# -*- coding: utf-8 -*-
"""
karnickel
~~~~~~~~~
AST macros for Python.
:copyright: Copyright 2010 by Georg Brandl.
:license: BSD, see LICENSE for details.
"""
import os
import ast
import imp
import new
import sys
from copy import deepcopy
from itertools import izip
def macro(func):
"""Decorator to mark macros."""
def new_func(*args, **kwds):
        raise RuntimeError('%s.%s() is a macro; you should not call it '
                           'directly.' % (func.__module__, func.__name__))
return new_func
class MacroDefError(Exception):
"""Raised when an invalid macro definition is encountered."""
def parse_macros(code):
"""Find and parse all macros in *code*. Return a dictionary mapping macro
names to MacroDefs.
"""
code = ast.parse(code)
macros = {}
for item in code.body:
if not isinstance(item, ast.FunctionDef):
continue
if not (len(item.decorator_list) == 1 and
isinstance(item.decorator_list[0], ast.Name) and
item.decorator_list[0].id == 'macro'):
continue
name = item.name
args = [arg.id for arg in item.args.args]
if item.args.vararg or item.args.kwarg or item.args.defaults:
raise MacroDefError('macro %s has an unsupported signature' % name)
if len(item.body) == 1 and isinstance(item.body[0], ast.Expr):
macro = ExprMacroDef(args, item.body[0].value)
else:
macro = BlockMacroDef(args, item.body)
macros[name] = macro
return macros
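# A minimal sketch of the definitions parse_macros() recognizes (hypothetical
# module contents): a single-expression body yields an ExprMacroDef, anything
# else a BlockMacroDef, and ``__body__`` marks where a "with" block's body is
# substituted.
#
#     @macro
#     def add(x, y):
#         x + y
#
#     @macro
#     def repeat(n):
#         for _ in range(n):
#             __body__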
def import_macros(module, names, dict):
"""Import macros given in *names* from *module*, from a module with the
given globals *dict*.
"""
try:
mod = __import__(module, dict, None, ['*'])
except Exception, err:
raise MacroDefError('macro module %s not found: %s' % (module, err))
filename = mod.__file__
if filename.lower().endswith(('c', 'o')):
filename = filename[:-1]
with open(filename, 'U') as f:
code = f.read()
all_macros = parse_macros(code)
macros = {}
for name, asname in names.iteritems():
if name == '*':
macros.update(all_macros)
break
try:
macros[asname] = all_macros[name]
except KeyError:
raise MacroDefError('macro %s not found in module %s' %
(name, module))
return macros
class MacroCallError(Exception):
"""Raised when an invalid macro call is encountered."""
def __init__(self, node, message):
Exception.__init__(self, '%s: %s' % (node.lineno, message))
def add_filename(self, filename):
self.args = [filename + ':' + self.args[0]]
class ContextChanger(ast.NodeVisitor):
"""
AST visitor that updates the "context" on nodes that can occur on the LHS or
RHS in an assignment. This is needed because on a macro call, arguments
always have Load context, while in the expansion, they can also have Store
or other contexts.
"""
def __init__(self, context):
self.context = context
def visit_Name(self, node):
node.ctx = self.context
self.generic_visit(node) # visit children
visit_Attribute = visit_Subscript = visit_List = visit_Tuple = visit_Name
class CallTransformer(ast.NodeTransformer):
"""
AST visitor that expands uses of macro arguments and __body__ inside a macro
definition.
"""
def __init__(self, args, body=None):
self.args = args
self.body = body
def visit_Name(self, node):
if node.id in self.args:
if not isinstance(node.ctx, ast.Load):
new_node = deepcopy(self.args[node.id])
ContextChanger(node.ctx).visit(new_node)
else:
new_node = self.args[node.id]
return new_node
return node
def visit_Expr(self, node):
node = self.generic_visit(node)
if self.body and isinstance(node.value, ast.Name) and \
node.value.id == '__body__':
new_node = ast.fix_missing_locations(ast.If(ast.Num(1),
self.body, []))
            return new_node
        return node
class BodyVisitor(ast.NodeVisitor):
"""
AST visitor that checks for use of __body__, to determine if a block macro
has a body.
"""
def __init__(self):
self.found_body = False
def visit_Expr(self, node):
if isinstance(node.value, ast.Name) and node.value.id == '__body__':
self.found_body = True
class ExprMacroDef(object):
"""
Definition of an expression macro.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.has_body = False
def expand(self, node, call_args, body=None):
assert not body
if len(call_args) != len(self.args):
raise MacroCallError(node, 'invalid number of arguments')
expr = deepcopy(self.expr)
argdict = dict(izip(self.args, call_args))
return CallTransformer(argdict).visit(expr)
class BlockMacroDef(object):
"""
Definition of a block macro, with or without body.
"""
def __init__(self, args, stmts):
self.args = args
self.stmts = stmts
visitor = BodyVisitor()
visitor.visit(ast.Module(stmts))
self.has_body = visitor.found_body
def expand(self, node, call_args, body=None):
if len(call_args) != len(self.args):
raise MacroCallError(node, 'invalid number of arguments')
stmts = deepcopy(self.stmts)
argdict = dict(izip(self.args, call_args))
new_node = ast.fix_missing_locations(ast.If(ast.Num(1), stmts, []))
return CallTransformer(argdict, body).visit(new_node)
class Expander(ast.NodeTransformer):
"""
AST visitor that expands macros.
"""
def __init__(self, module, macro_definitions=None):
self.module = module
self.defs = macro_definitions or {}
def visit_ImportFrom(self, node):
if node.module and node.module.endswith('.__macros__'):
modname = node.module[:-11]
names = dict((alias.name, alias.asname or alias.name)
for alias in node.names)
self.defs.update(import_macros(
modname, names, self.module and self.module.__dict__))
return None
return node
def visit_With(self, node):
expanded_body = map(self.visit, node.body)
expr = node.context_expr
if isinstance(expr, ast.Call) and \
isinstance(expr.func, ast.Name) and expr.func.id in self.defs:
#if node.optional_vars:
# raise MacroCallError(node, '"with" macro call with "as" clause')
if expr.keywords or expr.starargs or expr.kwargs:
raise MacroCallError(node, 'macro call with kwargs or star syntax')
macro_def = self.defs[expr.func.id]
if not isinstance(macro_def, BlockMacroDef):
raise MacroCallError(node, 'macro is not a block macro')
if not macro_def.has_body:
raise MacroCallError(node, 'macro has no __body__ substitution')
return macro_def.expand(node, expr.args, expanded_body)
return node
def _handle_call(self, node, macrotype):
if node.keywords or node.starargs or node.kwargs:
raise MacroCallError(node, 'macro call with kwargs or star syntax')
macro_def = self.defs[node.func.id]
if not isinstance(macro_def, macrotype):
raise MacroCallError(node, 'macro is not a %s' % macrotype)
if macro_def.has_body:
raise MacroCallError(node, 'macro has a __body__ substitution')
expanded_args = map(self.visit, node.args)
return macro_def.expand(node, expanded_args)
def visit_Expr(self, node):
value = node.value
if isinstance(value, ast.Call) and \
isinstance(value.func, ast.Name) and value.func.id in self.defs:
ret = self._handle_call(value, (ExprMacroDef, BlockMacroDef))
if isinstance(ret, ast.expr):
ret = ast.fix_missing_locations(ast.Expr(ret))
return ret
return node
def visit_Call(self, node):
if isinstance(node.func, ast.Name) and node.func.id in self.defs:
return self._handle_call(node, ExprMacroDef)
return node
class MacroImporter(object):
"""
Import hook for use on `sys.meta_path`, to expand macros on import. Quite a
pain without having importlib.
"""
def __init__(self):
self._cache = {}
def find_module(self, name, path=None):
try:
lastname = name.split('.')[-1]
self._cache[name] = imp.find_module(lastname, path), path
except ImportError:
return None
return self
def load_module(self, name):
try:
(fd, fn, info), path = self._cache[name]
except KeyError:
# can that happen?
raise ImportError(name)
if info[2] == imp.PY_SOURCE:
newpath = None
filename = fn
with fd:
code = fd.read()
elif info[2] == imp.PY_COMPILED:
newpath = None
filename = fn[:-1]
with open(filename, 'U') as f:
code = f.read()
elif info[2] == imp.PKG_DIRECTORY:
filename = os.path.join(fn, '__init__.py')
newpath = [fn]
with open(filename, 'U') as f:
code = f.read()
else:
return imp.load_module(name, fd, fn, info)
try:
module = new.module(name)
module.__file__ = filename
if newpath:
module.__path__ = newpath
tree = ast.parse(code)
try:
transformed = Expander(module).visit(tree)
except MacroCallError, err:
err.add_filename(filename)
raise
code = compile(transformed, filename, 'exec')
sys.modules[name] = module
exec code in module.__dict__
return module
except Exception, err:
raise ImportError('cannot import %s: %s' % (name, err))
def install_hook():
"""Install the import hook that allows to import modules using macros."""
importer = MacroImporter()
sys.meta_path.insert(0, importer)
return importer
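# Typical usage, assuming a hypothetical module ``mymacros`` that defines
# macros with the @macro decorator:
#
#     import karnickel
#     karnickel.install_hook()
#     # client modules are now expanded at import time:
#     from mymacros.__macros__ import add, repeat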
def remove_hook():
"""Remove any MacroImporter from `sys.meta_path`."""
sys.meta_path[:] = [importer for importer in sys.meta_path if
not isinstance(importer, MacroImporter)]
|
|
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py functionality related to TensorFlow 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
import numpy as np
from six.moves import range
from six.moves import zip
import tensorflow as tf
from tensorflow.lite.kernels.hashtable import pywrap_hashtable_ops as hashtable_ops_registerer
from tensorflow.lite.python import lite
from tensorflow.lite.python import lite_v2_test_util
from tensorflow.lite.python.convert import mlir_quantize
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.lite.python.interpreter import InterpreterWithCustomOps
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
from tensorflow.python.saved_model import save_options
from tensorflow.python.saved_model import saved_model
from tensorflow.python.saved_model.loader_impl import parse_saved_model
from tensorflow.python.saved_model.save import save
from tensorflow.python.training.tracking import tracking
class FromConcreteFunctionTest(lite_v2_test_util.ModelTest):
@test_util.run_v2_only
def testTypeInvalid(self):
root = self._getSimpleVariableModel()
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_concrete_functions([root.f])
self.assertIn('call get_concrete_function', str(error.exception))
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
@test_util.run_v2_only
def testFloat(self, enable_mlir_converter):
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_new_converter = enable_mlir_converter
tflite_model = converter.convert()
# Check output value from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@parameterized.named_parameters(('_INT8InputOutput', dtypes.int8),
('_UINT8InputOutput', dtypes.uint8),
('_INT16InputOutput', dtypes.int16))
@test_util.run_v2_only
def testInvalidFloat(self, inference_input_output_type):
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
with self.assertRaises(ValueError) as error:
converter.inference_input_type = inference_input_output_type
converter.inference_output_type = inference_input_output_type
converter.convert()
self.assertEqual(
'The inference_input_type and inference_output_type '
'must be tf.float32.', str(error.exception))
@test_util.run_v2_only
def testScalarInput(self):
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testMultiFunctionModel(self):
"""Convert a single model in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = tf.constant(1., shape=[1])
concrete_func = root.add.get_concrete_function(input_data)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.add(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testConvertMultipleFunctions(self):
"""Convert multiple functions in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = tf.constant(1., shape=[1])
add_func = root.add.get_concrete_function(input_data)
sub_func = root.sub.get_concrete_function(input_data)
# Try converting multiple functions.
converter = lite.TFLiteConverterV2.from_concrete_functions(
[add_func, sub_func])
with self.assertRaises(ValueError) as error:
_ = converter.convert()
self.assertIn('can only convert a single ConcreteFunction',
str(error.exception))
def _getIntegerQuantizeModel(self):
np.random.seed(0)
root = tracking.AutoTrackable()
@tf.function(
input_signature=[tf.TensorSpec(shape=[1, 5, 5, 3], dtype=tf.float32)])
def func(inp):
conv = tf.nn.conv2d(
inp, tf.ones([3, 3, 3, 16]), strides=[1, 1, 1, 1], padding='SAME')
output = tf.nn.relu(conv, name='output')
return output
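    # The representative dataset below feeds a few random inputs through the
    # model so the converter can calibrate activation ranges for
    # post-training quantization.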
def calibration_gen():
for _ in range(5):
yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]
root.f = func
to_save = root.f.get_concrete_function()
return (to_save, calibration_gen)
@parameterized.named_parameters(
('EnableMlirQuantizer', True), # enable mlir quantizer
('DisableMlirQuantizer', False)) # disable mlir quantizer
def testPostTrainingCalibrateAndQuantize(self, mlir_quantizer):
func, calibration_gen = self._getIntegerQuantizeModel()
# Convert float model.
float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
float_tflite_model = float_converter.convert()
self.assertIsNotNone(float_tflite_model)
# Convert quantized model.
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_converter._experimental_new_quantizer = mlir_quantizer
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
@parameterized.named_parameters(('_INT8InputOutput', dtypes.int8),
('_UINT8InputOutput', dtypes.uint8),
('_INT16InputOutput', dtypes.int16))
@test_util.run_v2_only
def testInvalidPostTrainingDynamicRangeQuantization(
self, inference_input_output_type):
func, _ = self._getIntegerQuantizeModel()
# Convert float model.
converter = lite.TFLiteConverterV2.from_concrete_functions([func])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Convert quantized model.
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
with self.assertRaises(ValueError) as error:
quantized_converter.inference_input_type = inference_input_output_type
quantized_converter.inference_output_type = inference_input_output_type
quantized_converter.convert()
self.assertEqual(
'The inference_input_type and inference_output_type '
'must be tf.float32.', str(error.exception))
@parameterized.named_parameters(
('_Default', False, False, dtypes.float32),
('_INT8InputOutput', False, False, dtypes.int8),
('_UINT8InputOutput', False, False, dtypes.uint8),
('_INT16Quantize', False, True, dtypes.float32),
('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),
('_IntOnly', True, False, dtypes.float32),
('_IntOnly_INT8InputOutput', True, False, dtypes.int8),
('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),
('_IntOnly_INT16Quantize', True, True, dtypes.float32),
('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16))
def testIntegerQuantization(self, is_int_only, is_int16_quantize,
inference_input_output_type):
func, calibration_gen = self._getIntegerQuantizeModel()
# Convert float model.
converter = lite.TFLiteConverterV2.from_concrete_functions([func])
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Convert quantized model.
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
if is_int_only:
if is_int16_quantize:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.\
EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
]
else:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
else:
if is_int16_quantize:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.\
EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
lite.OpsSet.TFLITE_BUILTINS
]
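    # With neither flag set, supported_ops is left at the converter default
    # (TFLITE_BUILTINS), which permits float fallback for unsupported ops.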
quantized_converter.inference_input_type = inference_input_output_type
quantized_converter.inference_output_type = inference_input_output_type
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(inference_input_output_type.as_numpy_dtype,
input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(inference_input_output_type.as_numpy_dtype,
output_details[0]['dtype'])
# Ensure that the quantized tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(tflite_model))
@parameterized.named_parameters(
('_INT16Quantize_INT8InputOutput', True, dtypes.int8))
def testInvalidIntegerQuantization(self, is_int16_quantize,
inference_input_output_type):
func, calibration_gen = self._getIntegerQuantizeModel()
# Convert quantized model.
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
if is_int16_quantize:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.\
EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
lite.OpsSet.TFLITE_BUILTINS
]
with self.assertRaises(ValueError) as error:
quantized_converter.inference_input_type = dtypes.int8
quantized_converter.inference_output_type = dtypes.int8
quantized_converter.convert()
self.assertEqual(
'The inference_input_type and inference_output_type '
"must be in ['tf.float32', 'tf.int16'].", str(error.exception))
def testCalibrateAndQuantizeBuiltinInt16(self):
func, calibration_gen = self._getIntegerQuantizeModel()
# Convert float model.
float_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
float_tflite_model = float_converter.convert()
self.assertIsNotNone(float_tflite_model)
converter = lite.TFLiteConverterV2.from_concrete_functions([func])
# TODO(b/156309549): We should add INT16 to the builtin types.
converter.optimizations = [lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
converter.representative_dataset = calibration_gen
converter._experimental_calibrate_only = True
calibrated_tflite = converter.convert()
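    # convert() above only runs calibration (_experimental_calibrate_only);
    # mlir_quantize then quantizes the calibrated flatbuffer to INT16 as a
    # separate step.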
quantized_tflite_model = mlir_quantize(
calibrated_tflite, inference_type=_types_pb2.QUANTIZED_INT16)
self.assertIsNotNone(quantized_tflite_model)
# The default input and output types should be float.
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(np.float32, input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(np.float32, output_details[0]['dtype'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
def _getTrainingTimeQuantizedModel(self):
class QLinear(tf.keras.layers.Layer):
def __init__(self, units=3, **kwargs):
super(QLinear, self).__init__(**kwargs)
self.units = units
def build(self, input_shape):
self.w = self.add_weight(
'weight',
shape=(input_shape[-1], self.units),
initializer='random_normal',
trainable=True)
self.min_var = self.add_weight(
'min',
initializer=tf.keras.initializers.Constant(-6.0),
trainable=False)
self.max_var = self.add_weight(
'max',
initializer=tf.keras.initializers.Constant(6.0),
trainable=False)
def call(self, inputs):
x = tf.quantization.fake_quant_with_min_max_vars(
inputs, self.min_var, self.max_var)
w_fq = tf.quantization.fake_quant_with_min_max_vars(
self.w, self.min_var, self.max_var)
x = tf.matmul(x, w_fq)
x = tf.quantization.fake_quant_with_min_max_vars(
x, self.min_var, self.max_var)
return x
return tf.keras.Sequential(QLinear(3, input_shape=(2,)))
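  # The fake_quant_with_min_max_vars ops in QLinear embed the (min, max)
  # quantization ranges in the graph, so the converter in the test below can
  # quantize without a representative dataset.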
@parameterized.named_parameters(
('_DefaultFLOAT32InputOutput', dtypes.float32),
('_INT8InputOutput', dtypes.int8), ('_UINT8InputOutput', dtypes.uint8))
@test_util.run_v2_only
def testTrainingTimeQuantization(self, inference_input_output_type):
model = self._getTrainingTimeQuantizedModel()
float_converter = lite.TFLiteConverterV2.from_keras_model(model)
float_tflite_model = float_converter.convert()
self.assertIsNotNone(float_tflite_model)
quantized_converter = lite.TFLiteConverterV2.from_keras_model(model)
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.inference_input_type = inference_input_output_type
quantized_converter.inference_output_type = inference_input_output_type
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(inference_input_output_type.as_numpy_dtype,
input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(inference_input_output_type.as_numpy_dtype,
output_details[0]['dtype'])
# Ensure that the quantized tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
@test_util.run_v2_only
def testNewQuantizer(self):
"""Test the model quantized by the new converter."""
func, calibration_gen = self._getIntegerQuantizeModel()
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func])
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8
]
quantized_converter.representative_dataset = calibration_gen
# default quantizer
quantized_converter._experimental_new_quantizer = False
old_tflite = quantized_converter.convert()
# new quantizer
quantized_converter._experimental_new_quantizer = True
new_tflite = quantized_converter.convert()
for _ in range(5):
input_data = tf.constant(
np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32))
old_value = self._evaluateTFLiteModel(old_tflite, [input_data])
new_value = self._evaluateTFLiteModel(new_tflite, [input_data])
self.assertAllClose(old_value, new_value, atol=1e-01)
@parameterized.named_parameters(
('EnableMlirConverter', True), # enable mlir
('DisableMlirConverter', False)) # disable mlir
@test_util.run_v2_only
def testEmbeddings(self, enable_mlir_converter):
"""Test model with embeddings."""
input_data = tf.constant(
np.array(np.random.random_sample((20)), dtype=np.int32))
class EmbeddingModel(tf.keras.Model):
def __init__(self):
super(EmbeddingModel, self).__init__()
self.shared_weights = self.add_weight(
'weights',
shape=(2000, 300),
dtype=tf.float32,
initializer=tf.random_normal_initializer(
mean=0.0, stddev=300**(-0.5)))
@tf.function(input_signature=[tf.TensorSpec(shape=(20), dtype=tf.int32)])
def func(self, x):
return tf.gather(self.shared_weights, x)
# Building the model.
root = EmbeddingModel()
concrete_func = root.func.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_new_converter = enable_mlir_converter
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.func(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertAllClose(expected_value.numpy(), actual_value[0], atol=1e-05)
@test_util.run_v2_only
def testGraphDebugInfo(self):
"""Test a concrete function has debug info captured."""
root = tracking.AutoTrackable()
root.v1 = tf.Variable(3.)
root.f = tf.function(lambda x: root.v1 * x)
input_data = tf.constant(1., shape=[1])
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.convert()
self._assertValidDebugInfo(converter._debug_info)
def _getIntegerQuantizationModelWithFlexOp(self):
np.random.seed(0)
root = tracking.AutoTrackable()
@tf.function(input_signature=[
tf.TensorSpec(shape=[3, 3, 3, 3, 3], dtype=tf.float32)
])
def func(inp):
tanh = tf.math.tanh(inp)
      # The Flex delegate merges the consecutive conv3d and erf ops into a
      # single delegate node.
conv3d = tf.nn.conv3d(
tanh,
tf.ones([3, 3, 3, 3, 3]),
strides=[1, 1, 1, 1, 1],
padding='SAME')
erf = tf.math.erf(conv3d)
output = tf.math.tanh(erf)
return output
def calibration_gen():
for _ in range(5):
yield [
np.random.uniform(-1, 1, size=(3, 3, 3, 3, 3)).astype(np.float32)
]
root.f = func
return (root.f.get_concrete_function(), calibration_gen)
@parameterized.named_parameters(
('_Default', False, False, dtypes.float32),
('_INT8InputOutput', False, False, dtypes.int8),
('_UINT8InputOutput', False, False, dtypes.uint8),
('_INT16Quantize', False, True, dtypes.float32),
('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),
('_IntOnly', True, False, dtypes.float32),
('_IntOnly_INT8InputOutput', True, False, dtypes.int8),
('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),
('_IntOnly_INT16Quantize', True, True, dtypes.float32),
('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16))
@test_util.run_v2_only
def testIntegerQuantizationWithFlexOp(self, is_int_only, is_int16_quantize,
inference_input_output_type):
func, calibration_gen = self._getIntegerQuantizationModelWithFlexOp()
quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(
[func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
if is_int_only:
if is_int16_quantize:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.\
EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
lite.OpsSet.SELECT_TF_OPS
]
else:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.SELECT_TF_OPS
]
else:
if is_int16_quantize:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.\
EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
lite.OpsSet.TFLITE_BUILTINS,
lite.OpsSet.SELECT_TF_OPS
]
else:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS, lite.OpsSet.SELECT_TF_OPS
]
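    # SELECT_TF_OPS keeps ops TFLite has no builtin kernel for (the conv3d
    # and erf above) running through the Flex delegate.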
quantized_converter.inference_input_type = inference_input_output_type
quantized_converter.inference_output_type = inference_input_output_type
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(inference_input_output_type.as_numpy_dtype,
input_details[0]['dtype'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertEqual(inference_input_output_type.as_numpy_dtype,
output_details[0]['dtype'])
def _getIntegerQuantizationModelWithUnsupportedOps(self):
np.random.seed(0)
root = tracking.AutoTrackable()
@tf.function(input_signature=[
tf.TensorSpec(shape=[3], dtype=tf.float32),
tf.TensorSpec(shape=[3], dtype=tf.float32)
])
def func(a, b):
      # The ceil kernel supports neither int8 nor int16.
left = tf.math.ceil(a)
right = tf.nn.tanh(b)
add = tf.math.add(left, right)
      # The ceil kernel supports neither int8 nor int16.
output = tf.math.ceil(add)
return (output, right)
def calibration_gen():
for _ in range(5):
yield [
np.random.uniform(-1, 1, size=(3)).astype(np.float32),
np.random.uniform(-1, 1, size=(3)).astype(np.float32)
]
root.f = func
return (root.f.get_concrete_function(), calibration_gen)
@parameterized.named_parameters(
('_INT8InputOutput', False, False, dtypes.int8),
('_UINT8InputOutput', False, False, dtypes.uint8),
('_INT16Quantize_INT16InputOutput', False, True, dtypes.int16),
('_IntOnly_INT8InputOutput', True, False, dtypes.int8),
('_IntOnly_UINT8InputOutput', True, False, dtypes.uint8),
('_IntOnly_INT16Quantize_INT16InputOutput', True, True, dtypes.int16))
@test_util.run_v2_only
def testIntegerQuantizationWithUnsupportedOps(self, is_int_only,
is_int16_quantize,
inference_input_output_type):
func, calib_gen = self._getIntegerQuantizationModelWithUnsupportedOps()
quantized_converter = tf.lite.TFLiteConverter.from_concrete_functions(
[func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calib_gen
if is_int_only:
if is_int16_quantize:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.\
EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
lite.OpsSet.TFLITE_BUILTINS
]
else:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS_INT8, lite.OpsSet.TFLITE_BUILTINS
]
else:
if is_int16_quantize:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.\
EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8,
lite.OpsSet.TFLITE_BUILTINS
]
else:
quantized_converter.target_spec.supported_ops = [
lite.OpsSet.TFLITE_BUILTINS
]
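    # Keeping TFLITE_BUILTINS in the supported set lets ops without integer
    # kernels (tf.math.ceil here) fall back to the float builtin, which the
    # assertions below verify.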
quantized_converter.inference_input_type = inference_input_output_type
quantized_converter.inference_output_type = inference_input_output_type
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
interpreter = Interpreter(model_content=quantized_tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 2)
# Allow float32 for fallback.
self.assertEqual(input_details[0]['dtype'], dtypes.float32)
self.assertEqual(input_details[1]['dtype'],
inference_input_output_type.as_numpy_dtype)
output_details = interpreter.get_output_details()
self.assertLen(output_details, 2)
# Allow float32 for fallback.
self.assertEqual(output_details[0]['dtype'], dtypes.float32)
self.assertEqual(output_details[1]['dtype'],
inference_input_output_type.as_numpy_dtype)
class FromSavedModelTest(lite_v2_test_util.ModelTest):
def _createV1SavedModel(self, shape):
"""Create a simple SavedModel."""
saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
with tf.Graph().as_default():
with tf.compat.v1.Session() as sess:
in_tensor_1 = tf.compat.v1.placeholder(
shape=shape, dtype=tf.float32, name='inputB')
in_tensor_2 = tf.compat.v1.placeholder(
shape=shape, dtype=tf.float32, name='inputA')
variable_node = tf.Variable(1.0, name='variable_node')
out_tensor = in_tensor_1 + in_tensor_2 * variable_node
inputs = {'x': in_tensor_1, 'y': in_tensor_2}
outputs = {'z': out_tensor}
sess.run(tf.compat.v1.variables_initializer([variable_node]))
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
@test_util.run_v2_only
def testV1SimpleModel(self):
"""Test a SavedModel."""
with tf.Graph().as_default():
saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
interpreter = Interpreter(model_content=tflite_model)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
self.assertLen(input_details, 2)
self.assertStartsWith(input_details[0]['name'], 'inputA')
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([1, 16, 16, 3], input_details[0]['shape'])
self.assertEqual((0., 0.), input_details[0]['quantization'])
    self.assertStartsWith(input_details[1]['name'], 'inputB')
self.assertEqual(np.float32, input_details[1]['dtype'])
    self.assertAllEqual([1, 16, 16, 3], input_details[1]['shape'])
self.assertEqual((0., 0.), input_details[1]['quantization'])
output_details = interpreter.get_output_details()
self.assertLen(output_details, 1)
self.assertStartsWith(output_details[0]['name'], 'add')
self.assertEqual(np.float32, output_details[0]['dtype'])
    self.assertAllEqual([1, 16, 16, 3], output_details[0]['shape'])
self.assertEqual((0., 0.), output_details[0]['quantization'])
@test_util.run_v2_only
def testTF1HubFormattedModel(self):
"""Test a TF1 hub formatted model."""
saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])
    # TF1 Hub models are based on the V1 SavedModel format and omit the
    # saved_model_schema_version setting.
saved_model_proto = parse_saved_model(saved_model_dir)
saved_model_proto.saved_model_schema_version = 0
saved_model_pb_file_path = os.path.join(saved_model_dir, 'saved_model.pb')
with file_io.FileIO(saved_model_pb_file_path, 'wb') as writer:
writer.write(saved_model_proto.SerializeToString())
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
def _createV1ModelWithHashTableInitializer(self):
# Create a v1 saved model with hash table initializers.
tf.compat.v1.disable_eager_execution()
saved_model_dir = os.path.join(self.get_temp_dir(),
'savedmodel_with_hashtable')
table_initializer = tf.lookup.KeyValueTensorInitializer(
keys=['a', 'b', 'c', 'd'],
values=[1, 2, 3, 4],
key_dtype=tf.string,
value_dtype=tf.int64)
table = tf.lookup.StaticHashTable(
table_initializer, default_value=tf.constant(-1, dtype=tf.int64))
x = tf.compat.v1.placeholder(tf.string, shape=(), name='input')
y = table.lookup(x)
tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x)
tensor_info_y = tf.compat.v1.saved_model.utils.build_tensor_info(y)
signature_def_map, init_op, assets_collection = {
'serving_default':
(tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
inputs={'x': tensor_info_x},
outputs={'y': tensor_info_y},
method_name='some_function'))
}, tf.compat.v1.tables_initializer(), None
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.initializers.global_variables())
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(
saved_model_dir)
builder.add_meta_graph_and_variables(
sess, [tf.compat.v1.saved_model.tag_constants.SERVING],
signature_def_map,
main_op=init_op,
assets_collection=assets_collection,
strip_default_attrs=True)
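    # main_op wires the table initializer into the SavedModel so that it is
    # run when the model is loaded.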
builder.save()
# Restore TF v2 behavior.
tf.compat.v1.reset_default_graph()
tf.compat.v1.enable_eager_execution()
return saved_model_dir
@test_util.run_v2_only
def testModelWithHashTableInitializer(self):
"""Test a model with saved_model's session initializer for hash tables."""
saved_model_dir = self._createV1ModelWithHashTableInitializer()
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.allow_custom_ops = True
tflite_model = converter.convert()
# Check values from converted model.
interpreter = InterpreterWithCustomOps(
model_content=tflite_model,
custom_op_registerers=[hashtable_ops_registerer.HashtableOpsRegisterer])
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_data = np.array(['a', 'b', 'c', 'z'], dtype=np.string_)
interpreter.resize_tensor_input(
input_details[0]['index'], [4], strict=False)
interpreter.allocate_tensors()
interpreter.set_tensor(input_details[0]['index'], input_data)
# Invoke multiple times to ensure the initializer graph runs only once.
interpreter.invoke()
actual_value = interpreter.get_tensor(output_details[0]['index'])
self.assertEqual([1, 2, 3, -1], list(actual_value))
interpreter.invoke()
actual_value = interpreter.get_tensor(output_details[0]['index'])
self.assertEqual([1, 2, 3, -1], list(actual_value))
interpreter.invoke()
actual_value = interpreter.get_tensor(output_details[0]['index'])
self.assertEqual([1, 2, 3, -1], list(actual_value))
@test_util.run_v2_only
def testConstModel(self):
"""Test a basic model with functions to make sure functions are inlined."""
input_data = tf.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.f = tf.function(lambda x: 2. * x)
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testVariableModel(self):
"""Test a basic model with Variables with saving/loading the SavedModel."""
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testSignatures(self):
"""Test values for `signature_keys` argument."""
root = self._getSimpleVariableModel()
input_data = tf.constant(1., shape=[1])
to_save = root.f.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save)
# Convert model with invalid `signature_keys`.
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_saved_model(
save_dir, signature_keys=['INVALID'])
self.assertIn("Invalid signature key 'INVALID'", str(error.exception))
# Convert model with empty `signature_keys`.
converter = lite.TFLiteConverterV2.from_saved_model(
save_dir, signature_keys=[])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value.numpy(), actual_value)
@test_util.run_v2_only
def testSignatureDefs(self):
"""Test converting SignatureDef is correct and uses SignatureDef API."""
root = self._getMultiFunctionModel()
input_data_0 = tf.constant(1., shape=[1])
input_data_1 = tf.constant(3., shape=[1])
mul_add_func = root.mul_add.get_concrete_function(input_data_1,
input_data_0)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, {'mul_add': mul_add_func})
converter = lite.TFLiteConverterV2.from_saved_model(
save_dir, signature_keys=['mul_add'])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.mul_add(input_data_1, input_data_0)
interpreter = Interpreter(model_content=tflite_model)
signature_defs = interpreter.get_signature_list()
results = self._evaluateTFLiteModelUsingSignatureDef(
tflite_model, 'mul_add', {
'y': input_data_0,
'x': input_data_1
})
self.assertEqual(list(results.keys()), ['output_0'])
self.assertEqual(expected_value.numpy(), results['output_0'])
# Verify the SignatureDef structure returned is as expected.
self.assertEqual(len(signature_defs), 1)
self.assertEqual(list(signature_defs.keys()), ['mul_add'])
self.assertEqual(len(signature_defs.values()), 1)
self.assertEqual(
list(signature_defs['mul_add'].keys()), ['inputs', 'outputs'])
self.assertEqual(
sorted(signature_defs['mul_add']['inputs']), ['x', 'y'])
self.assertEqual(list(signature_defs['mul_add']['outputs']), ['output_0'])
@test_util.run_v2_only
def testSignatureDefsWithDefaultValue(self):
"""Test converting SignatureDef is correct and uses SignatureDef API.
This test uses None as method_name to test default behavior.
"""
root = self._getMultiFunctionModel()
input_data_0 = tf.constant(1., shape=[1])
input_data_1 = tf.constant(3., shape=[1])
mul_add_func = root.mul_add.get_concrete_function(input_data_1,
input_data_0)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, {'mul_add': mul_add_func})
converter = lite.TFLiteConverterV2.from_saved_model(
save_dir, signature_keys=['mul_add'])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.mul_add(input_data_1, input_data_0)
interpreter = Interpreter(model_content=tflite_model)
signature_defs = interpreter.get_signature_list()
results = self._evaluateTFLiteModelUsingSignatureDef(
tflite_model, None, {
'y': input_data_0,
'x': input_data_1
})
self.assertEqual(list(results.keys()), ['output_0'])
self.assertEqual(expected_value.numpy(), results['output_0'])
# Verify the SignatureDef structure returned is as expected.
self.assertEqual(len(signature_defs), 1)
self.assertEqual(list(signature_defs.keys()), ['mul_add'])
self.assertEqual(len(signature_defs.values()), 1)
self.assertEqual(
list(signature_defs['mul_add'].keys()), ['inputs', 'outputs'])
self.assertEqual(
sorted(signature_defs['mul_add']['inputs']), ['x', 'y'])
self.assertEqual(list(signature_defs['mul_add']['outputs']), ['output_0'])
@test_util.run_v2_only
def testMultipleFunctionModel(self):
"""Convert multiple functions in a multi-functional model."""
root = self._getMultiFunctionModel()
input_data = tf.constant(1., shape=[1])
add_func = root.add.get_concrete_function(input_data)
sub_func = root.sub.get_concrete_function(input_data)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, {'add': add_func, 'sub': sub_func})
# Try converting multiple functions.
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_saved_model(save_dir)
self.assertIn('Only support a single signature key.', str(error.exception))
@test_util.run_v2_only
def testNoConcreteFunctionModel(self):
root = self._getMultiFunctionModel()
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir)
with self.assertRaises(ValueError) as error:
_ = lite.TFLiteConverterV2.from_saved_model(save_dir)
self.assertIn('Only support a single signature key.', str(error.exception))
@test_util.run_v2_only
def testKerasSequentialModel(self):
"""Test a simple sequential tf.Keras model."""
input_data = tf.constant(1., shape=[1, 1])
x = np.array([[1.], [2.]])
y = np.array([[2.], [4.]])
model = tf.keras.models.Sequential([
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(1),
])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(model, save_dir)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value, actual_value)
@test_util.run_v2_only
def testGraphDebugInfo(self):
"""Test a SavedModel has debug info captured."""
input_data = tf.constant(1., shape=[1])
root = tracking.AutoTrackable()
root.f = tf.function(lambda x: 2. * x)
to_save = root.f.get_concrete_function(input_data)
options = save_options.SaveOptions(save_debug_info=True)
save_dir = os.path.join(self.get_temp_dir(), 'saved_model')
save(root, save_dir, to_save, options)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(save_dir)
converter.convert()
self._assertValidDebugInfo(converter._debug_info)
@test_util.run_v2_only
def testFallbackPath(self):
"""Test a SavedModel fallback path using old converter."""
saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.experimental_new_converter = False
tflite_model = converter.convert()
self.assertTrue(tflite_model)
@test_util.run_v2_only
def testNonStatefulConvLSTM2D(self):
"""Test saved model with non stateful ConvLSTM2D keras layer."""
# Create keras model
model = tf.keras.Sequential([
tf.keras.layers.ConvLSTM2D(
32, (3, 3),
padding='same',
return_sequences=True,
stateful=False,
batch_input_shape=(1, 1, 10, 10, 1))
])
model.compile()
# Export the keras model to saved model.
saved_model_dir = os.path.join(self.get_temp_dir(), 'conv_lstm_2d')
model.save(saved_model_dir, save_format='tf', include_optimizer=False)
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
converter.target_spec.supported_ops = [
tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
]
tflite_model = converter.convert()
self.assertTrue(tflite_model)
def _createUnknownInputShapeModel(self):
"""Create a simple SavedModel with unknown input."""
saved_model_dir = os.path.join(self.get_temp_dir(), 'unknown_input_shape')
with tf.Graph().as_default():
with tf.compat.v1.Session() as sess:
unknown_shape = tf.TensorShape(None)
in_tensor = tf.compat.v1.placeholder(
shape=unknown_shape, dtype=tf.float32, name='input')
out_tensor = in_tensor + in_tensor
inputs = {'input': in_tensor}
outputs = {'output': out_tensor}
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
@test_util.run_v2_only
def testUnknownInputShapeModel(self):
"""Test a SavedModel with an unknown input shape."""
saved_model_dir = self._createUnknownInputShapeModel()
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
# Check values from converted model.
interpreter = Interpreter(model_content=tflite_model)
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
input_data = np.array([1., 2., 3.], dtype=np.float32)
interpreter.resize_tensor_input(
input_details[0]['index'], [3], strict=False)
interpreter.allocate_tensors()
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
actual_value = interpreter.get_tensor(output_details[0]['index'])
self.assertEqual([2., 4., 6.], list(actual_value))
class FromKerasModelTest(lite_v2_test_util.ModelTest):
@test_util.run_v2_only
def testSequentialModel(self):
"""Test a simple sequential tf.Keras model."""
input_data = tf.constant(1., shape=[1, 1])
# Create a simple Keras model.
x = np.array([[1.], [2.]])
y = np.array([[2.], [4.]])
model = tf.keras.models.Sequential([
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(units=1, input_shape=[1])
])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
# Check values from converted model.
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
self.assertEqual(expected_value, actual_value)
@test_util.run_v2_only
def testSequentialMultiInputOutputModel(self):
"""Test a tf.Keras model with multiple inputs and outputs."""
left_input_data = tf.constant(1., shape=[1, 3])
right_input_data = tf.constant(1., shape=[1, 3])
# Create a simple Keras model.
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_c_np = np.random.random((10, 3))
output_d_np = np.random.random((10, 2))
input_a = tf.keras.layers.Input(shape=(3,), name='input_a')
input_b = tf.keras.layers.Input(shape=(3,), name='input_b')
dense = tf.keras.layers.Dense(8, name='dense_1')
interm_a = dense(input_a)
interm_b = dense(input_b)
merged = tf.keras.layers.concatenate([interm_a, interm_b], name='merge')
output_c = tf.keras.layers.Dense(
3, activation='softmax', name='dense_2')(
merged)
output_d = tf.keras.layers.Dense(
2, activation='softmax', name='dense_3')(
merged)
model = tf.keras.models.Model(
inputs=[input_a, input_b], outputs=[output_c, output_d])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit([input_a_np, input_b_np], [output_c_np, output_d_np], epochs=1)
# Convert model and ensure model is not None.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
# Check values from converted model.
input_data = [left_input_data, right_input_data]
expected_value = model.predict(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, input_data)
for tf_result, tflite_result in zip(expected_value, actual_value):
self.assertAllClose(tf_result, tflite_result, atol=1e-05)
@test_util.run_v2_only
def testGraphDebugInfo(self):
"""Test a tf.Keras model has debug info captured."""
# Create a simple Keras model.
x = [-1, 0, 1, 2, 3, 4]
y = [-3, -1, 1, 3, 5, 7]
model = tf.keras.models.Sequential(
[tf.keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
converter = lite.TFLiteConverterV2.from_keras_model(model)
converter.convert()
self._assertValidDebugInfo(converter._debug_info)
@test_util.run_v2_only
def testKerasFallbackPath(self):
"""Test keras model which failed when exporting to the saved model."""
input_data = tf.constant(
np.array(np.random.random_sample((20)), dtype=np.float32))
class Model(tf.keras.Model):
def __init__(self):
super(Model, self).__init__()
# A None name will cause a failure in exporting to a saved model.
self.shared_weights = self.add_weight(
name=None,
shape=(20, 1),
dtype=tf.float32,
initializer=tf.random_normal_initializer(
mean=0.0, stddev=300**(-0.5)))
def call(self, x):
return tf.add(self.shared_weights, x)
# Building the model.
model = Model()
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(input_data, input_data, epochs=1)
# Convert model.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
self.assertTrue(tflite_model)
class ControlFlowTest(lite_v2_test_util.ModelTest):
@test_util.run_v2_only
def testCond(self):
input_data = {
'x': tf.constant([1., 2.], shape=[1, 2]),
'b': tf.constant(True)
}
weights = tf.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float32)
def true_fn(x):
return tf.matmul(x, weights)
def false_fn(x):
return tf.add(x, weights)
@tf.function(input_signature=[
tf.TensorSpec(shape=[1, 2], dtype=tf.float32),
tf.TensorSpec(shape=(), dtype=tf.bool)
])
def model(x, b):
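      # tf.cond takes callables for its branches, hence the lambdas closing
      # over x.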
return tf.cond(
b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(**input_data)
actual_value = self._evaluateTFLiteModel(
tflite_model, [input_data['x'], input_data['b']])[0]
self.assertAllClose(expected_value, actual_value)
@test_util.run_v2_only
def testStaticRnn(self):
input_data = tf.constant(
np.array(np.random.random_sample((3, 10)), dtype=np.float32))
cell = tf.compat.v1.nn.rnn_cell.LSTMCell(10)
@tf.function(
input_signature=[tf.TensorSpec(shape=[3, 10], dtype=tf.float32)])
def model(x):
seq = tf.split(x, 3, 0)
return tf.compat.v1.nn.static_rnn(
cell, seq, dtype=tf.float32, sequence_length=[1])
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)[0]
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
for expected, actual in zip(expected_value, actual_value):
self.assertAllClose(expected, actual)
@test_util.run_v2_only
def testWhileLoop(self):
input_data = tf.constant([1., 2., 3., 4.], shape=[2, 2])
weights = tf.Variable([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float32)
def condition(x):
return tf.reduce_sum(x) < 100
def body(x):
return tf.add(x, weights)
@tf.function(
input_signature=[tf.TensorSpec(shape=[2, 2], dtype=tf.float32)])
def model(x):
return tf.while_loop(condition, body, [x])
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)[0]
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
self.assertAllClose(expected_value, actual_value)
@test_util.run_v2_only
def testDynamicRnn(self):
input_data = tf.constant(
np.array(np.random.random_sample((3, 10, 10)), dtype=np.float32))
cell = tf.compat.v1.nn.rnn_cell.LSTMCell(10)
@tf.function(
input_signature=[tf.TensorSpec(shape=[3, 10, 10], dtype=tf.float32)])
def model(x):
return tf.compat.v1.nn.dynamic_rnn(cell, x, dtype=tf.float32)
concrete_func = model.get_concrete_function()
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])
for expected, actual in zip(expected_value, actual_value):
if not isinstance(expected, ops.EagerTensor):
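        # dynamic_rnn returns an LSTMStateTuple for the final state; compare
        # its cell-state tensor.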
expected = expected.c
self.assertAllClose(expected, actual)
@parameterized.named_parameters(('LSTM', tf.keras.layers.LSTM),
('SimpleRNN', tf.keras.layers.SimpleRNN),
('GRU', tf.keras.layers.GRU))
@test_util.run_v2_only
def testKerasRNN(self, rnn_layer):
    # This relies on TFLiteConverter to rewrite the unknown batch size to 1.
    # The model will fail if the input is resized to a batch size other than 1.
input_data = tf.constant(
np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))
rnn_obj = rnn_layer(units=10, input_shape=(10, 10))
model = tf.keras.models.Sequential([
tf.keras.layers.Input(batch_size=1, shape=(10, 10), name='input'),
rnn_obj,
])
# Convert model.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
# Check values from converted model.
expected_value = model.predict(input_data)
self.assertAllClose(expected_value, actual_value, atol=1e-05)
@parameterized.named_parameters(('LSTM', tf.keras.layers.LSTM),
('SimpleRNN', tf.keras.layers.SimpleRNN),
('GRU', tf.keras.layers.GRU))
@test_util.run_v2_only
def testKerasRNNMultiBatches(self, rnn_layer):
input_data = tf.constant(
np.array(np.random.random_sample((4, 10, 10)), dtype=np.float32))
# Specify a fixed batch size(4) for the test model.
x = tf.keras.layers.Input(batch_shape=(4, 10, 10))
y = rnn_layer(units=10, input_shape=(10, 10))(x)
model = tf.keras.Model(inputs=[x], outputs=[y])
# Convert model.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
# Check values from converted model.
expected_value = model.predict(input_data)
self.assertAllClose(expected_value, actual_value, atol=1e-05)
@test_util.run_v2_only
def testKerasBidirectionalRNNReturnSequence(self):
input_data = tf.constant(
np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Input(batch_size=1, shape=(10, 10), name='input'))
model.add(
tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(units=10, return_sequences=True),
input_shape=(10, 10)))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(5))
model.add(tf.keras.layers.Activation('softmax'))
# Convert model.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
# Check values from converted model.
expected_value = model.predict(input_data)
self.assertAllClose(expected_value, actual_value, atol=1e-05)
@test_util.run_v2_only
def testKerasBidirectionalRNN(self):
input_data = tf.constant(
np.array(np.random.random_sample((1, 10, 10)), dtype=np.float32))
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Input(batch_size=1, shape=(10, 10), name='input'))
model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(units=10)))
model.add(tf.keras.layers.Dense(5))
model.add(tf.keras.layers.Activation('softmax'))
# Convert model.
converter = lite.TFLiteConverterV2.from_keras_model(model)
tflite_model = converter.convert()
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
# Check values from converted model.
expected_value = model.predict(input_data)
self.assertAllClose(expected_value, actual_value, atol=1e-05)
class GrapplerTest(lite_v2_test_util.ModelTest):
@test_util.run_v2_only
def testConstantFolding(self):
    # Constant folding handles the tf.broadcast_to operation, which was not
    # supported by TFLite at the time this test was added.
input_data = tf.constant([1., 2., 3., 4., 5., 6., 7., 8., 9.], shape=[3, 3])
@tf.function
def func(x):
y_const = tf.constant([1., 2., 3.])
y_broadcast = tf.broadcast_to(y_const, [3, 3])
return tf.matmul(x, y_broadcast)
root = tracking.AutoTrackable()
root.f = func
concrete_func = root.f.get_concrete_function(input_data)
# Convert model.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = root.f(input_data)
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
self.assertAllClose(expected_value, actual_value)
# Enable hybrid quantization, same result
converter.optimizations = [lite.Optimize.DEFAULT]
tflite_model = converter.convert()
actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
self.assertAllClose(expected_value, actual_value)
class UnknownShapes(lite_v2_test_util.ModelTest):
@test_util.run_v2_only
def testMatMul(self):
input_data = tf.constant(
np.array(np.random.random_sample((10, 4)), dtype=np.float32))
@tf.function(
input_signature=[tf.TensorSpec(shape=[None, 4], dtype=tf.float32)])
def model(in_tensor):
shape = tf.shape(in_tensor)
fill = tf.transpose(tf.fill(shape, 1.))
return tf.matmul(fill, in_tensor)
concrete_func = model.get_concrete_function()
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data)
actual_value = self._evaluateTFLiteModel(
tflite_model, [input_data], input_shapes=[([-1, 4], [10, 4])])[0]
self.assertAllClose(expected_value, actual_value, atol=1e-06)
def _getIntegerQuantizeModelWithUnknownShapes(self):
np.random.seed(0)
@tf.function(
input_signature=[tf.TensorSpec(shape=[None, 33], dtype=tf.float32)])
def model(input_tensor):
"""Define a model with tf.MatMul and unknown shapes."""
# We need the tensor to have more than 1024 elements for quantize_weights
# to kick in. Thus, the [33, 33] shape.
const_tensor = tf.constant(
np.random.uniform(low=-10., high=10., size=[33, 33]),
shape=[33, 33],
dtype=tf.float32,
name='inputB')
shape = tf.shape(input_tensor)
fill = tf.transpose(tf.fill(shape, 1.))
mult = tf.matmul(fill, input_tensor)
return tf.matmul(mult, const_tensor)
root = tracking.AutoTrackable()
root.f = model
concrete_func = root.f.get_concrete_function()
def calibration_gen():
for batch in range(5, 20, 5):
for _ in range(5):
yield [np.random.uniform(-1, 1, size=(batch, 33)).astype(np.float32)]
return concrete_func, calibration_gen
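  # Note: calibration_gen above cycles through batch sizes 5, 10 and 15 so
  # that calibration covers the dynamic (None) batch dimension.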
@test_util.run_v2_only
def testMatMulQuantize(self):
concrete_func, _ = self._getIntegerQuantizeModelWithUnknownShapes()
float_converter = lite.TFLiteConverterV2.from_concrete_functions(
[concrete_func])
float_tflite_model = float_converter.convert()
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(
[concrete_func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_tflite_model = quantized_converter.convert()
# The default input and output types should be float.
quantized_interpreter = Interpreter(model_content=quantized_tflite_model)
quantized_interpreter.allocate_tensors()
input_details = quantized_interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([-1, 33], input_details[0]['shape_signature'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
@test_util.run_v2_only
def testMatMulCalibrateAndQuantize(self):
concrete_func, calibration_gen = \
self._getIntegerQuantizeModelWithUnknownShapes()
float_converter = lite.TFLiteConverterV2.from_concrete_functions(
[concrete_func])
float_tflite_model = float_converter.convert()
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions(
[concrete_func])
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite_model = quantized_converter.convert()
# The default input and output types should be float.
quantized_interpreter = Interpreter(model_content=quantized_tflite_model)
quantized_interpreter.allocate_tensors()
input_details = quantized_interpreter.get_input_details()
self.assertLen(input_details, 1)
self.assertEqual(np.float32, input_details[0]['dtype'])
self.assertAllEqual([-1, 33], input_details[0]['shape_signature'])
# Ensure that the quantized weights tflite model is smaller.
self.assertLess(len(quantized_tflite_model), len(float_tflite_model))
def testBatchMatMul(self):
input_data_1 = tf.constant(
np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32))
input_data_2 = tf.constant(
np.array(np.random.random_sample((1, 256, 256)), dtype=np.float32))
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, 256, 256], dtype=tf.float32),
tf.TensorSpec(shape=[None, 256, 256], dtype=tf.float32)
])
def model(in_tensor_1, in_tensor_2):
return tf.matmul(in_tensor_1, in_tensor_2)
concrete_func = model.get_concrete_function()
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
tflite_model = converter.convert()
# Check values from converted model.
expected_value = concrete_func(input_data_1, input_data_2)
actual_value = self._evaluateTFLiteModel(
tflite_model, [input_data_1, input_data_2],
input_shapes=[([-1, 256, 256], [1, 256, 256])])[0]
self.assertAllClose(expected_value, actual_value, atol=4)
def testSizeInvalid(self):
@tf.function(input_signature=[
tf.TensorSpec(shape=[1, None, 16, 3], dtype=tf.float32)
])
def model(in_tensor):
return in_tensor + in_tensor
concrete_func = model.get_concrete_function()
    # Test an invalid shape: None after the 1st dimension. Run with TOCO (the
    # old converter) to invoke its shape-checking code.
converter = lite.TFLiteConverterV2.from_concrete_functions([concrete_func])
converter.experimental_new_converter = False
with self.assertRaises(ValueError) as error:
converter.convert()
self.assertEqual(
'None is only supported in the 1st dimension. Tensor '
'\'in_tensor\' has invalid shape \'[1, None, 16, 3]\'.',
str(error.exception))
if __name__ == '__main__':
test.main()
|
|
import re
import sys
from django.core.cache import cache
from django.conf import settings
from django.contrib.auth.models import Permission
from django.core.urlresolvers import clear_url_caches
from django.http import Http404
from django.template import Variable
from django.test.utils import override_settings
from cms.api import create_page, create_title, publish_page
from cms.models import PagePermission, UserSettings, Placeholder
from cms.page_rendering import _handle_no_page
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils.conf import get_cms_setting
from cms.views import details
from menus.menu_pool import menu_pool
APP_NAME = 'SampleApp'
APP_MODULE = "cms.test_utils.project.sampleapp.cms_apps"
@override_settings(
CMS_PERMISSION=True,
ROOT_URLCONF='cms.test_utils.project.urls',
)
class ViewTests(CMSTestCase):
def setUp(self):
clear_url_caches()
def test_handle_no_page(self):
"""
        Test that _handle_no_page works correctly with DEBUG=True
"""
request = self.get_request('/')
slug = ''
self.assertRaises(Http404, _handle_no_page, request, slug)
with self.settings(DEBUG=True):
request = self.get_request('/en/')
slug = ''
response = _handle_no_page(request, slug)
self.assertEqual(response.status_code, 200)
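            # With DEBUG=True django-cms renders a debug welcome page instead
            # of raising Http404, hence the 200.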
def test_apphook_not_hooked(self):
"""
Test details view when apphook pool has apphooks, but they're not
actually hooked
"""
if APP_MODULE in sys.modules:
del sys.modules[APP_MODULE]
apphooks = (
'%s.%s' % (APP_MODULE, APP_NAME),
)
create_page("page2", "nav_playground.html", "en", published=True)
with self.settings(CMS_APPHOOKS=apphooks):
self.apphook_clear()
response = self.client.get('/en/')
self.assertEqual(response.status_code, 200)
self.apphook_clear()
def test_external_redirect(self):
# test external redirect
redirect_one = 'https://www.django-cms.org/'
one = create_page("one", "nav_playground.html", "en", published=True,
redirect=redirect_one)
url = one.get_absolute_url()
request = self.get_request(url)
response = details(request, one.get_path("en"))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], redirect_one)
def test_internal_neutral_redirect(self):
# test internal language neutral redirect
redirect_one = 'https://www.django-cms.org/'
redirect_two = '/'
one = create_page("one", "nav_playground.html", "en", published=True,
redirect=redirect_one)
two = create_page("two", "nav_playground.html", "en", parent=one,
published=True, redirect=redirect_two)
url = two.get_absolute_url()
request = self.get_request(url)
response = details(request, two.get_path())
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '/en/')
def test_internal_forced_redirect(self):
# test internal forced language redirect
redirect_one = 'https://www.django-cms.org/'
redirect_three = '/en/'
one = create_page("one", "nav_playground.html", "en", published=True,
redirect=redirect_one)
three = create_page("three", "nav_playground.html", "en", parent=one,
published=True, redirect=redirect_three)
url = three.get_slug()
request = self.get_request(url)
response = details(request, url.strip('/'))
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], redirect_three)
def test_redirect_to_self(self):
one = create_page("one", "nav_playground.html", "en", published=True,
redirect='/')
url = one.get_absolute_url()
request = self.get_request(url)
response = details(request, one.get_path())
self.assertEqual(response.status_code, 200)
def test_redirect_to_self_with_host(self):
one = create_page("one", "nav_playground.html", "en", published=True,
redirect='http://testserver/en/')
url = one.get_absolute_url()
request = self.get_request(url)
response = details(request, one.get_path())
self.assertEqual(response.status_code, 200)
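    # A redirect that resolves to the page's own URL is not followed (this
    # avoids a redirect loop), so both tests above expect a plain 200.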
def test_redirect_with_toolbar(self):
create_page("one", "nav_playground.html", "en", published=True,
redirect='/en/page2')
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'This page has no preview')
self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
self.assertEqual(response.status_code, 302)
self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
            response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__BUILD'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'This page has no preview')
self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
            response = self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__DISABLE'))
self.assertEqual(response.status_code, 302)
def test_login_required(self):
create_page("page", "nav_playground.html", "en", published=True,
login_required=True)
plain_url = '/accounts/'
login_rx = re.compile("%s\?(signin=|next=/en/)&" % plain_url)
with self.settings(LOGIN_URL=plain_url + '?signin'):
request = self.get_request('/en/')
response = details(request, '')
self.assertEqual(response.status_code, 302)
self.assertTrue(login_rx.search(response['Location']))
login_rx = re.compile("%s\?(signin=|next=/)&" % plain_url)
with self.settings(USE_I18N=False, LOGIN_URL=plain_url + '?signin'):
request = self.get_request('/')
response = details(request, '')
self.assertEqual(response.status_code, 302)
self.assertTrue(login_rx.search(response['Location']))
def test_edit_permission(self):
page = create_page("page", "nav_playground.html", "en", published=True)
# Anon user
response = self.client.get("/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertNotContains(response, "cms_toolbar-item-switch-save-edit", 200)
# Superuser
user = self.get_superuser()
with self.login_user_context(user):
response = self.client.get("/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertContains(response, "cms-toolbar-item-switch-save-edit", 1, 200)
# Admin but with no permission
user = self.get_staff_user_with_no_permissions()
user.user_permissions.add(Permission.objects.get(codename='change_page'))
with self.login_user_context(user):
response = self.client.get("/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertNotContains(response, "cms-toolbar-item-switch-save-edit", 200)
PagePermission.objects.create(can_change=True, user=user, page=page)
with self.login_user_context(user):
response = self.client.get("/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertContains(response, "cms-toolbar-item-switch-save-edit", 1, 200)
def test_toolbar_switch_urls(self):
user = self.get_superuser()
user_settings = UserSettings(language="en", user=user)
placeholder = Placeholder(slot="clipboard")
placeholder.save()
user_settings.clipboard = placeholder
user_settings.save()
page = create_page("page", "nav_playground.html", "en", published=True)
create_title("fr", "french home", page)
publish_page(page, user, "fr")
with self.login_user_context(user):
response = self.client.get("/fr/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
self.assertContains(response, "/fr/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'), 1, 200)
response = self.client.get("/fr/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF'))
self.assertContains(response, "/fr/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'), 1, 200)
def test_incorrect_slug_for_language(self):
"""
Test details view when page slug and current language don't match.
In this case we refer to the user's current language and the page slug we have for that language.
"""
create_page("home", "nav_playground.html", "en", published=True)
cms_page = create_page("stevejobs", "nav_playground.html", "en", published=True)
create_title("de", "jobs", cms_page)
cms_page.publish('de')
response = self.client.get('/de/stevejobs/')
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/de/jobs/')
@override_settings(ROOT_URLCONF='cms.test_utils.project.urls')
class ContextTests(CMSTestCase):
def test_context_current_page(self):
"""
Asserts the number of queries triggered by
`cms.context_processors.cms_settings` and `cms.middleware.page`
"""
from django.template import context
page_template = "nav_playground.html"
original_context = {'TEMPLATES': settings.TEMPLATES}
page = create_page("page", page_template, "en", published=True)
page_2 = create_page("page-2", page_template, "en", published=True,
parent=page)
# Tests for standard django applications
# 1 query is executed in get_app_patterns(), not related
# to cms.context_processors.cms_settings.
        # Executing this outside the query-count assertion context ensures
        # repeatability.
self.client.get("/en/plain_view/")
cache.clear()
menu_pool.clear()
context._standard_context_processors = None
# Number of queries when context processor is enabled
with self.settings(**original_context):
with self.assertNumQueries(FuzzyInt(0, 17)):
response = self.client.get("/en/plain_view/")
            # One query when determining the current page
with self.assertNumQueries(FuzzyInt(0, 1)):
self.assertFalse(response.context['request'].current_page)
self.assertFalse(response.context['request']._current_page_cache)
            # No additional queries when determining the current template
with self.assertNumQueries(0):
# Template is the first in the CMS_TEMPLATES list
template = Variable('CMS_TEMPLATE').resolve(response.context)
self.assertEqual(template, get_cms_setting('TEMPLATES')[0][0])
cache.clear()
menu_pool.clear()
        # Number of queries when the context processor is enabled
with self.settings(**original_context):
with self.assertNumQueries(FuzzyInt(13, 28)) as context:
response = self.client.get("/en/page-2/")
template = Variable('CMS_TEMPLATE').resolve(response.context)
self.assertEqual(template, page_template)
num_queries_page = len(context.captured_queries)
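        # NB: `as context` rebinds the name over the django.template.context
        # module imported above; the module is not used after this point.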
cache.clear()
menu_pool.clear()
page_2.template = 'INHERIT'
page_2.save()
page_2.publish('en')
with self.settings(**original_context):
            # One more query is triggered because the page inherits its
            # template from an ancestor
with self.assertNumQueries(num_queries_page + 1):
response = self.client.get("/en/page-2/")
template = Variable('CMS_TEMPLATE').resolve(response.context)
self.assertEqual(template, page_template)
|
|
#!/usr/bin/env python
# encoding: utf-8
#
# MP4.py
# Copyright (c) 2012 Thorsten Philipp <kyrios@kyri0s.de>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import sys
import os
import unittest
import mediafile
import re
import Utility
import time
import shutil
from xml.dom.minidom import Document
class MP4(mediafile.mediafile):
"""MP4 Container Class"""
filetype = "MP4"
def __init__(self):
super(MP4, self).__init__()
self.__tracks = []
        self.__chapters = {} # Dict mapping starttime (milliseconds) to chapter title
self.__images = {} # See chapters
self.__urls = {} # See chapters
self.meta = dict( # iTunes shows:
composer = None, # Composer
title = None, # Name
artist = None, # Artist
album = None, # Album
encodingTool = None, # Encoded with ..
lyrics = None, # Lyrics
albumArtist = None, # Album Artist
genre = None, # Genre
tracknum = None, # Track Number x of
comment = None, # Comment
podcastURL = None, # Podcast URL
)
self.cover = None
def import_File(self,filename,filetype=None):
"""Import an existing file and add all Tracks to the object"""
if filetype is None:
filetype = filename[-3:]
# =================
# = Source is m4a =
# =================
if filetype == "m4a":
cmd = Utility.Cmd("MP4Box -info %s" % filename)
cmd.run()
if cmd.isError:
print cmd.error
else:
track = None
for line in cmd.response:
# Track Information
# Start of a new Track
p = re.compile(u'^Track.+- TrackID (\d+) - ')
m = p.match(line)
if m:
track = Track()
self.add_Track(track)
track.srctype = "mp4" # That's the file! Type of the file!
track.srctrackid = m.group(1)
track.srcfilename = filename
p = re.compile(u'Media Info: Language "(.+)" - Type "(.+)"')
m = p.match(line)
if m:
track.language = m.group(1)
track.type = m.group(2)
p = re.compile(u'Track is disabled')
if p.match(line):
track.enabled = 0
# =================================
# = Source is raw audio (aif,wav) =
# =================================
elif filetype == "aif" or filetype == "wav":
track = Track()
track.srctype = "raw"
track.srcfilename = filename
self.add_Track(track)
# ==================
# = Unknown format =
# ==================
else:
print >> sys.stderr, "No support for files of type %s" % (filetype)
raise NotImplementedError
# ============
# = Chapters =
# ============
def add_Chapterobj(self,chapter):
"""Fill __chapters[], __urls[], __images[]"""
if chapter.title is not None:
self.__chapters[chapter.starttime] = chapter.title
if chapter.url is not None and chapter.url["link"] is not None and chapter.url["href"] is not None:
self.__urls[chapter.starttime] = chapter.url
if chapter.picture is not None:
self.__images[chapter.starttime] = chapter.picture
def write_chaptersToFile(self,filename):
"""Writes the Chapter from self.__chapters to the file using mp4chaps (libmp4v2 Handbrake Edition)"""
# Prepare the file
        chaptertxtname = filename[:-4] + '.chapters.txt'
        chaptertxt = open(chaptertxtname, 'w')
for starttime in sorted(self.__chapters.keys()):
timestamp = time.strftime("%H:%M:%S",time.gmtime(starttime/1000))
line = "%s.%03d %s\n" % (timestamp,int(starttime % 1000),self.__chapters[starttime])
chaptertxt.write(line)
chaptertxt.close()
cmd = Utility.Cmd("mp4chaps",args="--import \"%s\"" % filename)
cmd.run()
if cmd.isError:
raise Exception(cmd.error)
os.unlink(chaptertxtname)
def write_chapterlinksToFile(self,filename):
"""Write Hyperlink Track to the m4a using MP4Box --ttxt feature"""
(ttxtfile,ttxtfilename) = Utility.filetemp()
ttxt = Document()
TextStream = ttxt.createElement("TextStream")
TextStream.setAttribute("version","1.1")
ttxt.appendChild(TextStream)
TextStreamHeader = ttxt.createElement("TextStreamHeader")
TextStreamHeader.setAttribute("width","160")
TextStreamHeader.setAttribute("height","160")
TextStreamHeader.setAttribute("layer","65534")
TextStreamHeader.setAttribute("translation_x","0")
TextStreamHeader.setAttribute("translation_y","0")
TextStream.appendChild(TextStreamHeader)
TextSampleDescription = ttxt.createElement("TextSampleDescription")
TextSampleDescription.setAttribute("horizontalJustification","center")
TextSampleDescription.setAttribute("verticalJustification","bottom")
TextSampleDescription.setAttribute("backColor","0 0 0 0")
TextSampleDescription.setAttribute("verticalText","no")
TextSampleDescription.setAttribute("fillTextRegion","no")
TextSampleDescription.setAttribute("continousKaraoke","no")
TextSampleDescription.setAttribute("scroll","None")
TextStreamHeader.appendChild(TextSampleDescription)
FontTable = ttxt.createElement("FontTable")
TextSampleDescription.appendChild(FontTable)
FontTableEntry = ttxt.createElement("FontTableEntry")
FontTableEntry.setAttribute("fontName","Serif")
FontTableEntry.setAttribute("fontID","1")
FontTable.appendChild(FontTableEntry)
TextBox = ttxt.createElement("TextBox")
TextBox.setAttribute("top","0")
TextBox.setAttribute("left","0")
TextBox.setAttribute("bottom","160")
TextBox.setAttribute("right","160")
TextSampleDescription.appendChild(TextBox)
Style = ttxt.createElement("Style")
Style.setAttribute("styles","Normal")
Style.setAttribute("fontID","1")
Style.setAttribute("fontSize","0")
Style.setAttribute("color","0 0 0 ff")
TextSampleDescription.appendChild(Style)
for starttime in sorted(self.__urls.keys()):
TextSample = ttxt.createElement("TextSample")
timestamp = time.strftime("%H:%M:%S",time.gmtime(starttime/1000))
timestamp = "%s.%03d" % (timestamp,int(starttime % 1000))
TextSample.setAttribute("sampleTime",str(timestamp))
TextSample.setAttribute("xml:space","preserve")
TextSampletxt = ttxt.createTextNode(str(self.__urls[starttime]["link"]))
TextSample.appendChild(TextSampletxt)
HyperLink = ttxt.createElement("HyperLink")
HyperLink.setAttribute("fromChar","0")
HyperLink.setAttribute("toChar","4")
HyperLink.setAttribute("URL",str(self.__urls[starttime]["href"]))
HyperLink.setAttribute("URLToolTip","")
TextSample.appendChild(HyperLink)
TextStream.appendChild(TextSample)
ttxtfile.write(ttxt.toprettyxml(encoding="UTF-8"))
ttxtfile.close()
        # we need to rename the file to .ttxt for MP4Box to recognize it
shutil.move(ttxtfilename, ttxtfilename + ".ttxt")
ttxtfilename = ttxtfilename + ".ttxt"
print "Filename: %s" % ttxtfilename
cmd = Utility.Cmd("MP4Box",args="-add \'%s\' -ttxt :lang=eng \'%s\'" % (ttxtfilename,filename))
cmd.run()
print cmd.command
print cmd.arguments
if cmd.isError:
raise Exception(cmd.error)
# Unlink files
#os.unlink(ttxtfilename)
def write_chapterimagesToFile(self,filename):
"""Writes Images found in self.__images to the m4a using MP4Box.
"""
        # This is somewhat involved. The following things have to happen:
        # Read all images. Remember their sizes.
        # Write all images to a single temp file.
        # Create an NHML file containing the file offset/size and starttime.
        # Import the images with MP4Box. The specification is in the NHML file; the JPEGs are in the tempfile.
(media,mediafile) = Utility.filetemp()
(nhmlfile,nhmlfilename) = Utility.filetemp()
# Prepare NHML File
nhml = Document()
NHNTStream = nhml.createElement("NHNTStream")
nhml.appendChild(NHNTStream)
NHNTStream.setAttribute("baseMediaFile",str(mediafile))
NHNTStream.setAttribute("version","1.0")
NHNTStream.setAttribute("timeScale","1000")
NHNTStream.setAttribute("mediaType","vide")
NHNTStream.setAttribute("mediaSubType","jpeg")
NHNTStream.setAttribute("codecVendor",".....")
NHNTStream.setAttribute("codecVersion","0")
NHNTStream.setAttribute("codecRevision","0")
NHNTStream.setAttribute("width","300")
NHNTStream.setAttribute("height","300")
NHNTStream.setAttribute("compressorName","")
NHNTStream.setAttribute("temporalQuality","0")
NHNTStream.setAttribute("spatialQuality","0")
#NHNTStream.setAttribute("horizontalResolution","4718592")
#NHNTStream.setAttribute("verticalResolution","4718592")
NHNTStream.setAttribute("bitDepth","24")
for starttime in sorted(self.__images.keys()):
            imagefile = open(self.__images[starttime], "rb")
NHNTSample = nhml.createElement("NHNTSample")
NHNTStream.appendChild(NHNTSample)
NHNTSample.setAttribute("DTS",str(starttime))
            media.write(imagefile.read()) # Append current image to the tempfile
NHNTSample.setAttribute("dataLength",str(imagefile.tell())) # Size
NHNTSample.setAttribute("isRAP","yes")
imagefile.close()
# Write image to NHML File
media.close()
nhmlfile.write(nhml.toprettyxml(encoding="UTF-8"))
nhmlfile.close()
# we need to rename the file to nhml for MP4Box to recognize it
shutil.move(nhmlfilename, nhmlfilename + ".nhml")
cmd = Utility.Cmd("MP4Box",args="-add %s \"%s\"" % (nhmlfilename + ".nhml",filename))
cmd.run()
if cmd.isError:
raise Exception(cmd.error)
# Unlink files
os.unlink(mediafile)
os.unlink(nhmlfilename + ".nhml")
# ==================
# = Write out file =
# ==================
def write(self,filename):
"""Write the file to disk (craft it..:-)"""
firsttrack = 1
try:
os.unlink(filename)
except OSError:
pass
# ==========================================
# = Add each track to the destination file =
# ==========================================
if len(self.get_Tracks()) < 1 :
raise Exception("No Tracks found")
for track in self.get_Tracks():
print "Adding Track %s (%s)" % (track.srctrackid,track.type)
# ========================
# = # Source is MP4 File =
# ========================
if track.srctype == "mp4" :
# =============
# = soun:mp4a =
# =============
if track.type == "soun:mp4a":
if firsttrack:
cmd = Utility.Cmd("MP4Box",args="-new -add %s#%s:lang=%s \"%s\"" % (track.srcfilename,track.srctrackid,track.language,filename))
firsttrack = 0
else:
cmd = Utility.Cmd("MP4Box",args="-add %s#%s:lang=%s \"%s\"" % (track.srcfilename,track.srctrackid,track.language,filename))
cmd.run()
if cmd.isError:
raise Exception(cmd.error)
else:
print "Don't know how to handle Track of Type %s" % (track.type)
# =======================
# = Raw Track (aif,wav) =
# =======================
elif track.srctype == "raw":
raise NotImplementedError("aif and wav are not implemented yet")
if firsttrack:
cmd = Utility.Cmd("MP4Box",args="-new -add %s \"%s\"" % (track.srcfilename,filename))
else:
cmd = Utility.Cmd("MP4Box",args="-add %s \"%s\"" % (track.srcfilename,filename))
cmd.run()
if cmd.isError:
raise Exception(cmd.error)
# =======================
# = Unknown Source Type =
# =======================
else:
print "Don't know how to handle Track from Source of type %s" % (track.srctype)
self.write_chaptersToFile(filename)
self.write_chapterlinksToFile(filename)
#self.write_chapterimagesToFile(filename)
self.write_meta(filename)
def write_meta(self,filename):
"""Write the meta Atoms to the m4a"""
for atom in self.meta.keys():
if self.meta[atom] is not None:
cmd = Utility.Cmd("Atomicparsley",args="\"%s\" --overWrite --%s \'%s\'" % (filename,atom,self.meta[atom]))
cmd.run()
if cmd.isError:
raise Exception(cmd.error)
# ==========
# = Tracks =
# ==========
def add_Track(self,Trackobj):
"""Add a Track Object to this MP4"""
self.__tracks.append(Trackobj)
def get_Tracks(self):
"""Return all Track Objects currently registered with this MP4"""
return self.__tracks
    def delete_Track(self,trackid):
        """Delete a Track from __tracks[]. Trackid is the index. You may find out the index by looking at get_Tracks()"""
        del self.__tracks[trackid]
# ================
# = Atom Parsing =
# ================
class Track(object):
"""MP4 Track. Can contain anything a MP4 Container Track can contain (well.. in theory)"""
def __init__(self):
super(Track, self).__init__()
self.type = None
self.language = None
self.enabled = 1
self.srctype = "MP4"
self.srcfilename = None
self.srctrackid = None
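# ---------------------------------------------------------------------------
# Example usage (a minimal sketch, not part of the original module). The call
# sequence is inferred from the methods above; the filenames and metadata
# values are hypothetical, and MP4Box, mp4chaps and AtomicParsley must be on
# the PATH for write() to succeed.
# ---------------------------------------------------------------------------
def _example_usage():
    mp4 = MP4()
    mp4.import_File("input.m4a")        # parse the tracks via MP4Box -info
    mp4.meta["title"] = "My Podcast"    # written out later by AtomicParsley
    mp4.meta["artist"] = "Some Artist"
    mp4.write("output.m4a")             # re-mux tracks, then chapters + meta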
class MP4Tests(unittest.TestCase):
def setUp(self):
pass
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
====================================
rsfMRI: ANTS, FS, FSL, SPM, aCompCor
====================================
A preprocessing workflow for Siemens resting state data.
This workflow makes use of:
- ANTS
- FreeSurfer
- FSL
- SPM
- CompCor
For example::
python rsfmri_preprocessing.py -d /data/12345-34-1.dcm -f /data/Resting.nii
-s subj001 -o output -p PBS --plugin_args "dict(qsub_args='-q many')"
or
python rsfmri_vol_surface_preprocessing.py -f SUB_1024011/E?/func/rest.nii
-t OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz --TR 2 -s SUB_1024011
--subjects_dir fsdata --slice_times 0 17 1 18 2 19 3 20 4 21 5 22 6 23
7 24 8 25 9 26 10 27 11 28 12 29 13 30 14 31 15 32 16 -o .
This workflow takes resting timeseries and a Siemens dicom file corresponding
to it and preprocesses it to produce timeseries coordinates or grayordinates.
This workflow also requires 2mm subcortical atlas and templates that are
available from:
http://mindboggle.info/data.html
specifically the 2mm versions of:
- `Joint Fusion Atlas <http://mindboggle.info/data/atlases/jointfusion/OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_2mm_v2.nii.gz>`_
- `MNI template <http://mindboggle.info/data/templates/ants/OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz>`_
"""
import os
from nipype.interfaces.base import CommandLine
CommandLine.set_default_terminal_output('allatonce')
from dicom import read_file
from nipype.interfaces import (spm, fsl, Function, ants, freesurfer)
from nipype.interfaces.c3 import C3dAffineTool
fsl.FSLCommand.set_default_output_type('NIFTI')
from nipype import Workflow, Node, MapNode
from nipype.interfaces import matlab as mlab
mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodisplay")
# If SPM is not in your MATLAB path you should add it here
# mlab.MatlabCommand.set_default_paths('/software/matlab/spm12')
from nipype.algorithms.rapidart import ArtifactDetect
from nipype.algorithms.misc import TSNR
from nipype.interfaces.utility import Rename, Merge, IdentityInterface
from nipype.utils.filemanip import filename_to_list
from nipype.interfaces.io import DataSink, FreeSurferSource
import numpy as np
import scipy as sp
import nibabel as nb
imports = ['import os',
'import nibabel as nb',
'import numpy as np',
'import scipy as sp',
'from nipype.utils.filemanip import filename_to_list, list_to_filename, split_filename',
'from scipy.special import legendre'
]
def get_info(dicom_files):
    """Given a Siemens dicom file, return metadata

    Returns
    -------
    RepetitionTime
    Slice Acquisition Times
    Spacing between slices
    """
    from dcmstack.extract import default_extractor
meta = default_extractor(read_file(filename_to_list(dicom_files)[0],
stop_before_pixels=True,
force=True))
return (meta['RepetitionTime']/1000., meta['CsaImage.MosaicRefAcqTimes'],
meta['SpacingBetweenSlices'])
def median(in_files):
"""Computes an average of the median of each realigned timeseries
Parameters
----------
in_files: one or more realigned Nifti 4D time series
Returns
-------
out_file: a 3D Nifti file
"""
import numpy as np
import nibabel as nb
average = None
for idx, filename in enumerate(filename_to_list(in_files)):
img = nb.load(filename)
data = np.median(img.get_data(), axis=3)
if average is None:
average = data
else:
average = average + data
median_img = nb.Nifti1Image(average/float(idx + 1),
img.get_affine(), img.get_header())
filename = os.path.join(os.getcwd(), 'median.nii.gz')
median_img.to_filename(filename)
return filename
def bandpass_filter(files, lowpass_freq, highpass_freq, fs):
"""Bandpass filter the input files
Parameters
----------
files: list of 4d nifti files
lowpass_freq: cutoff frequency for the low pass filter (in Hz)
highpass_freq: cutoff frequency for the high pass filter (in Hz)
fs: sampling rate (in Hz)
"""
from nipype.utils.filemanip import split_filename, list_to_filename
import numpy as np
import nibabel as nb
out_files = []
for filename in filename_to_list(files):
path, name, ext = split_filename(filename)
out_file = os.path.join(os.getcwd(), name + '_bp' + ext)
img = nb.load(filename)
timepoints = img.shape[-1]
F = np.zeros((timepoints))
lowidx = timepoints/2 + 1
if lowpass_freq > 0:
lowidx = np.round(lowpass_freq / fs * timepoints)
highidx = 0
if highpass_freq > 0:
highidx = np.round(highpass_freq / fs * timepoints)
F[highidx:lowidx] = 1
F = ((F + F[::-1]) > 0).astype(int)
data = img.get_data()
if np.all(F == 1):
filtered_data = data
else:
filtered_data = np.real(np.fft.ifftn(np.fft.fftn(data) * F))
img_out = nb.Nifti1Image(filtered_data, img.get_affine(),
img.get_header())
img_out.to_filename(out_file)
out_files.append(out_file)
return list_to_filename(out_files)
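# Worked sketch of the frequency mask built above (illustrative only, not part
# of the original script). A cutoff f (Hz) maps to FFT bin
# round(f / fs * timepoints); the mask is then mirrored so the
# negative-frequency half of the spectrum stays symmetric before the inverse
# FFT.
def _bandpass_mask_demo():
    import numpy as np
    timepoints = 100
    fs = 1. / 2.0                                             # TR = 2s -> fs = 0.5 Hz
    lowpass_freq, highpass_freq = 0.1, 0.01
    F = np.zeros(timepoints)
    lowidx = int(np.round(lowpass_freq / fs * timepoints))    # -> bin 20
    highidx = int(np.round(highpass_freq / fs * timepoints))  # -> bin 2
    F[highidx:lowidx] = 1
    F = ((F + F[::-1]) > 0).astype(int)
    assert F.sum() == 2 * (lowidx - highidx)                  # 36 bins pass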
def motion_regressors(motion_params, order=0, derivatives=1):
"""Compute motion regressors upto given order and derivative
motion + d(motion)/dt + d2(motion)/dt2 (linear + quadratic)
"""
import numpy as np
out_files = []
for idx, filename in enumerate(filename_to_list(motion_params)):
params = np.genfromtxt(filename)
out_params = params
for d in range(1, derivatives + 1):
cparams = np.vstack((np.repeat(params[0, :][None, :], d, axis=0),
params))
out_params = np.hstack((out_params, np.diff(cparams, d, axis=0)))
out_params2 = out_params
for i in range(2, order + 1):
out_params2 = np.hstack((out_params2, np.power(out_params, i)))
filename = os.path.join(os.getcwd(), "motion_regressor%02d.txt" % idx)
np.savetxt(filename, out_params2, fmt="%.10f")
out_files.append(filename)
return out_files
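# Shape sanity-check for motion_regressors (a minimal sketch, not part of the
# original script; the file names are temporary). Six motion parameters with
# derivatives=1 and order=2 expand to 6 * 2 * 2 = 24 regressor columns.
def _motion_regressors_demo():
    import os
    import numpy as np
    fname = os.path.join(os.getcwd(), '_demo_motion.par')
    np.savetxt(fname, np.random.rand(10, 6))       # 10 timepoints, 6 params
    out = motion_regressors(fname, order=2, derivatives=1)
    assert np.genfromtxt(out[0]).shape == (10, 24)
    os.unlink(fname)
    os.unlink(out[0])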
def build_filter1(motion_params, comp_norm, outliers, detrend_poly=None):
"""Builds a regressor set comprisong motion parameters, composite norm and
outliers
The outliers are added as a single time point column for each outlier
Parameters
----------
motion_params: a text file containing motion parameters and its derivatives
comp_norm: a text file containing the composite norm
outliers: a text file containing 0-based outlier indices
detrend_poly: number of polynomials to add to detrend
Returns
-------
components_file: a text file containing all the regressors
"""
import numpy as np
import nibabel as nb
from scipy.special import legendre
out_files = []
for idx, filename in enumerate(filename_to_list(motion_params)):
params = np.genfromtxt(filename)
norm_val = np.genfromtxt(filename_to_list(comp_norm)[idx])
out_params = np.hstack((params, norm_val[:, None]))
try:
outlier_val = np.genfromtxt(filename_to_list(outliers)[idx])
except IOError:
outlier_val = np.empty((0))
        for index in np.atleast_1d(outlier_val):
            outlier_vector = np.zeros((out_params.shape[0], 1))
            outlier_vector[int(index)] = 1  # genfromtxt yields floats; cast for indexing
out_params = np.hstack((out_params, outlier_vector))
if detrend_poly:
timepoints = out_params.shape[0]
X = np.empty((timepoints, 0))
for i in range(detrend_poly):
X = np.hstack((X, legendre(
i + 1)(np.linspace(-1, 1, timepoints))[:, None]))
out_params = np.hstack((out_params, X))
filename = os.path.join(os.getcwd(), "filter_regressor%02d.txt" % idx)
np.savetxt(filename, out_params, fmt="%.10f")
out_files.append(filename)
return out_files
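# Column-count sketch for build_filter1 (illustrative, not part of the
# original script; the file names are temporary): 6 parameter columns + 1
# composite-norm column + one indicator column per outlier + detrend_poly
# Legendre columns.
def _build_filter1_demo():
    import os
    import numpy as np
    np.savetxt('_demo_params.txt', np.random.rand(10, 6))
    np.savetxt('_demo_norm.txt', np.random.rand(10))
    np.savetxt('_demo_outliers.txt', np.array([3, 7]))
    out = build_filter1('_demo_params.txt', '_demo_norm.txt',
                        '_demo_outliers.txt', detrend_poly=2)
    assert np.genfromtxt(out[0]).shape == (10, 11)  # 6 + 1 + 2 + 2
    for f in ('_demo_params.txt', '_demo_norm.txt',
              '_demo_outliers.txt', out[0]):
        os.unlink(f)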
def extract_noise_components(realigned_file, mask_file, num_components=5,
extra_regressors=None):
"""Derive components most reflective of physiological noise
Parameters
----------
realigned_file: a 4D Nifti file containing realigned volumes
mask_file: a 3D Nifti file containing white matter + ventricular masks
num_components: number of components to use for noise decomposition
extra_regressors: additional regressors to add
Returns
-------
components_file: a text file containing the noise components
"""
from scipy.linalg.decomp_svd import svd
import numpy as np
import nibabel as nb
import os
imgseries = nb.load(realigned_file)
components = None
for filename in filename_to_list(mask_file):
mask = nb.load(filename).get_data()
if len(np.nonzero(mask > 0)[0]) == 0:
continue
voxel_timecourses = imgseries.get_data()[mask > 0]
voxel_timecourses[np.isnan(np.sum(voxel_timecourses, axis=1)), :] = 0
# remove mean and normalize by variance
# voxel_timecourses.shape == [nvoxels, time]
X = voxel_timecourses.T
stdX = np.std(X, axis=0)
stdX[stdX == 0] = 1.
stdX[np.isnan(stdX)] = 1.
stdX[np.isinf(stdX)] = 1.
X = (X - np.mean(X, axis=0))/stdX
u, _, _ = svd(X, full_matrices=False)
if components is None:
components = u[:, :num_components]
else:
components = np.hstack((components, u[:, :num_components]))
if extra_regressors:
regressors = np.genfromtxt(extra_regressors)
components = np.hstack((components, regressors))
components_file = os.path.join(os.getcwd(), 'noise_components.txt')
np.savetxt(components_file, components, fmt="%.10f")
return components_file
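# aCompCor sketch with synthetic data (an illustration, not part of the
# original script; the file names are temporary). Voxel timecourses inside
# the mask are variance-normalized and the first num_components left singular
# vectors are kept as noise regressors.
def _compcor_demo():
    import os
    import numpy as np
    import nibabel as nb
    data = np.random.rand(4, 4, 4, 20)              # 64 voxels, 20 volumes
    mask = np.zeros((4, 4, 4))
    mask[1:3, 1:3, 1:3] = 1                         # 8-voxel "tissue" mask
    nb.Nifti1Image(data, np.eye(4)).to_filename('_demo_func.nii.gz')
    nb.Nifti1Image(mask, np.eye(4)).to_filename('_demo_mask.nii.gz')
    out = extract_noise_components('_demo_func.nii.gz', '_demo_mask.nii.gz',
                                   num_components=5)
    assert np.genfromtxt(out).shape == (20, 5)      # one row per volume
    for f in ('_demo_func.nii.gz', '_demo_mask.nii.gz', out):
        os.unlink(f)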
def rename(in_files, suffix=None):
from nipype.utils.filemanip import (filename_to_list, split_filename,
list_to_filename)
out_files = []
for idx, filename in enumerate(filename_to_list(in_files)):
_, name, ext = split_filename(filename)
if suffix is None:
out_files.append(name + ('_%03d' % idx) + ext)
else:
out_files.append(name + suffix + ext)
return list_to_filename(out_files)
def get_aparc_aseg(files):
"""Return the aparc+aseg.mgz file"""
for name in files:
if 'aparc+aseg.mgz' in name:
return name
raise ValueError('aparc+aseg.mgz not found')
def extract_subrois(timeseries_file, label_file, indices):
"""Extract voxel time courses for each subcortical roi index
Parameters
----------
timeseries_file: a 4D Nifti file
label_file: a 3D file containing rois in the same space/size of the 4D file
indices: a list of indices for ROIs to extract.
Returns
-------
out_file: a text file containing time courses for each voxel of each roi
The first four columns are: freesurfer index, i, j, k positions in the
label file
"""
from nipype.utils.filemanip import split_filename
import nibabel as nb
import os
img = nb.load(timeseries_file)
data = img.get_data()
roiimg = nb.load(label_file)
rois = roiimg.get_data()
prefix = split_filename(timeseries_file)[1]
out_ts_file = os.path.join(os.getcwd(), '%s_subcortical_ts.txt' % prefix)
with open(out_ts_file, 'wt') as fp:
for fsindex in indices:
ijk = np.nonzero(rois == fsindex)
ts = data[ijk]
for i0, row in enumerate(ts):
fp.write('%d,%d,%d,%d,' % (fsindex, ijk[0][i0],
ijk[1][i0], ijk[2][i0]) +
','.join(['%.10f' % val for val in row]) + '\n')
return out_ts_file
def combine_hemi(left, right):
"""Combine left and right hemisphere time series into a single text file
"""
    import os
    import numpy as np
    import nibabel as nb
lh_data = nb.load(left).get_data()
rh_data = nb.load(right).get_data()
indices = np.vstack((1000000 + np.arange(0, lh_data.shape[0])[:, None],
2000000 + np.arange(0, rh_data.shape[0])[:, None]))
all_data = np.hstack((indices, np.vstack((lh_data.squeeze(),
rh_data.squeeze()))))
filename = left.split('.')[1] + '_combined.txt'
np.savetxt(filename, all_data,
fmt=','.join(['%d'] + ['%.10f'] * (all_data.shape[1] - 1)))
return os.path.abspath(filename)
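# Worked illustration of the vertex-index bookkeeping above (a standalone
# numpy sketch, not part of the original): left-hemisphere rows are tagged
# with 1000000 + vertex index and right-hemisphere rows with 2000000 + vertex
# index, so both hemispheres can share one table without index collisions.
def _combine_hemi_demo():
    import numpy as np
    lh = np.random.rand(3, 5)                       # 3 lh vertices, 5 TRs
    rh = np.random.rand(2, 5)                       # 2 rh vertices
    indices = np.vstack((1000000 + np.arange(3)[:, None],
                         2000000 + np.arange(2)[:, None]))
    table = np.hstack((indices, np.vstack((lh, rh))))
    assert table.shape == (5, 6)                    # index column + 5 TRs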
def create_reg_workflow(name='registration'):
"""Create a FEAT preprocessing workflow together with freesurfer
Parameters
----------
::
name : name of workflow (default: 'registration')
Inputs::
inputspec.source_files : files (filename or list of filenames to register)
inputspec.mean_image : reference image to use
inputspec.anatomical_image : anatomical image to coregister to
inputspec.target_image : registration target
Outputs::
outputspec.func2anat_transform : FLIRT transform
outputspec.anat2target_transform : FLIRT+FNIRT transform
outputspec.transformed_files : transformed files in target space
outputspec.transformed_mean : mean image in target space
"""
register = Workflow(name=name)
inputnode = Node(interface=IdentityInterface(fields=['source_files',
'mean_image',
'subject_id',
'subjects_dir',
'target_image']),
name='inputspec')
outputnode = Node(interface=IdentityInterface(fields=['func2anat_transform',
'out_reg_file',
'anat2target_transform',
'transforms',
'transformed_mean',
'segmentation_files',
'anat2target',
'aparc'
]),
name='outputspec')
# Get the subject's freesurfer source directory
fssource = Node(FreeSurferSource(),
name='fssource')
fssource.run_without_submitting = True
register.connect(inputnode, 'subject_id', fssource, 'subject_id')
register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir')
convert = Node(freesurfer.MRIConvert(out_type='nii'),
name="convert")
register.connect(fssource, 'T1', convert, 'in_file')
# Coregister the median to the surface
bbregister = Node(freesurfer.BBRegister(),
name='bbregister')
bbregister.inputs.init = 'fsl'
bbregister.inputs.contrast_type = 't2'
bbregister.inputs.out_fsl_file = True
bbregister.inputs.epi_mask = True
register.connect(inputnode, 'subject_id', bbregister, 'subject_id')
register.connect(inputnode, 'mean_image', bbregister, 'source_file')
register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir')
"""
Estimate the tissue classes from the anatomical image. But use spm's segment
as FSL appears to be breaking.
"""
stripper = Node(fsl.BET(), name='stripper')
register.connect(convert, 'out_file', stripper, 'in_file')
fast = Node(fsl.FAST(), name='fast')
register.connect(stripper, 'out_file', fast, 'in_files')
"""
Binarize the segmentation
"""
binarize = MapNode(fsl.ImageMaths(op_string='-nan -thr 0.9 -ero -bin'),
iterfield=['in_file'],
name='binarize')
register.connect(fast, 'partial_volume_files', binarize, 'in_file')
"""
Apply inverse transform to take segmentations to functional space
"""
applyxfm = MapNode(freesurfer.ApplyVolTransform(inverse=True,
interp='nearest'),
iterfield=['target_file'],
name='inverse_transform')
register.connect(inputnode, 'subjects_dir', applyxfm, 'subjects_dir')
register.connect(bbregister, 'out_reg_file', applyxfm, 'reg_file')
register.connect(binarize, 'out_file', applyxfm, 'target_file')
register.connect(inputnode, 'mean_image', applyxfm, 'source_file')
"""
Apply inverse transform to aparc file
"""
aparcxfm = Node(freesurfer.ApplyVolTransform(inverse=True,
interp='nearest'),
name='aparc_inverse_transform')
register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir')
register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file')
register.connect(fssource, ('aparc_aseg', get_aparc_aseg),
aparcxfm, 'target_file')
register.connect(inputnode, 'mean_image', aparcxfm, 'source_file')
"""
Convert the BBRegister transformation to ANTS ITK format
"""
convert2itk = Node(C3dAffineTool(), name='convert2itk')
convert2itk.inputs.fsl2ras = True
convert2itk.inputs.itk_transform = True
register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file')
register.connect(inputnode, 'mean_image',convert2itk, 'source_file')
register.connect(stripper, 'out_file', convert2itk, 'reference_file')
"""
Compute registration between the subject's structural and MNI template
This is currently set to perform a very quick registration. However, the
registration can be made significantly more accurate for cortical
structures by increasing the number of iterations
    All parameters are set using the example from:
    https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
"""
reg = Node(ants.Registration(), name='antsRegister')
reg.inputs.output_transform_prefix = "output_"
reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
reg.inputs.transform_parameters = [(0.1,), (0.1,), (0.2, 3.0, 0.0)]
reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[100, 30, 20]]
reg.inputs.dimension = 3
reg.inputs.write_composite_transform = True
reg.inputs.collapse_output_transforms = True
reg.inputs.initial_moving_transform_com = True
reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
reg.inputs.convergence_window_size = [20] * 2 + [5]
reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
reg.inputs.sigma_units = ['vox'] * 3
reg.inputs.shrink_factors = [[3, 2, 1]]*2 + [[4, 2, 1]]
reg.inputs.use_estimate_learning_rate_once = [True] * 3
reg.inputs.use_histogram_matching = [False] * 2 + [True]
reg.inputs.winsorize_lower_quantile = 0.005
reg.inputs.winsorize_upper_quantile = 0.995
reg.inputs.args = '--float'
reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
reg.inputs.num_threads = 4
reg.plugin_args = {'qsub_args': '-l nodes=1:ppn=4'}
register.connect(stripper, 'out_file', reg, 'moving_image')
register.connect(inputnode,'target_image', reg,'fixed_image')
"""
Concatenate the affine and ants transforms into a list
"""
pickfirst = lambda x: x[0]
merge = Node(Merge(2), iterfield=['in2'], name='mergexfm')
register.connect(convert2itk, 'itk_transform', merge, 'in2')
register.connect(reg, ('composite_transform', pickfirst), merge, 'in1')
"""
Transform the mean image. First to anatomical and then to target
"""
warpmean = Node(ants.ApplyTransforms(), name='warpmean')
warpmean.inputs.input_image_type = 3
warpmean.inputs.interpolation = 'Linear'
warpmean.inputs.invert_transform_flags = [False, False]
warpmean.inputs.terminal_output = 'file'
warpmean.inputs.args = '--float'
warpmean.inputs.num_threads = 4
register.connect(inputnode,'target_image', warpmean,'reference_image')
register.connect(inputnode, 'mean_image', warpmean, 'input_image')
register.connect(merge, 'out', warpmean, 'transforms')
"""
Assign all the output files
"""
register.connect(reg, 'warped_image', outputnode, 'anat2target')
register.connect(warpmean, 'output_image', outputnode, 'transformed_mean')
register.connect(applyxfm, 'transformed_file',
outputnode, 'segmentation_files')
register.connect(aparcxfm, 'transformed_file',
outputnode, 'aparc')
register.connect(bbregister, 'out_fsl_file',
outputnode, 'func2anat_transform')
register.connect(bbregister, 'out_reg_file',
outputnode, 'out_reg_file')
register.connect(reg, 'composite_transform',
outputnode, 'anat2target_transform')
register.connect(merge, 'out', outputnode, 'transforms')
return register
"""
Creates the main preprocessing workflow
"""
def create_workflow(files,
target_file,
subject_id,
TR,
slice_times,
norm_threshold=1,
num_components=5,
vol_fwhm=None,
surf_fwhm=None,
lowpass_freq=-1,
highpass_freq=-1,
subjects_dir=None,
sink_directory=os.getcwd(),
target_subject=['fsaverage3', 'fsaverage4'],
name='resting'):
wf = Workflow(name=name)
# Rename files in case they are named identically
name_unique = MapNode(Rename(format_string='rest_%(run)02d'),
iterfield=['in_file', 'run'],
name='rename')
name_unique.inputs.keep_ext = True
name_unique.inputs.run = range(1, len(files) + 1)
name_unique.inputs.in_file = files
realign = Node(interface=spm.Realign(), name="realign")
realign.inputs.jobtype = 'estwrite'
num_slices = len(slice_times)
slice_timing = Node(interface=spm.SliceTiming(), name="slice_timing")
slice_timing.inputs.num_slices = num_slices
slice_timing.inputs.time_repetition = TR
slice_timing.inputs.time_acquisition = TR - TR/float(num_slices)
slice_timing.inputs.slice_order = (np.argsort(slice_times) + 1).tolist()
slice_timing.inputs.ref_slice = int(num_slices/2)
    # Compute TSNR on realigned data, regressing out polynomials up to order 2
tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
wf.connect(slice_timing, 'timecorrected_files', tsnr, 'in_file')
# Compute the median image across runs
calc_median = Node(Function(input_names=['in_files'],
output_names=['median_file'],
function=median,
imports=imports),
name='median')
wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')
"""Segment and Register
"""
registration = create_reg_workflow(name='registration')
wf.connect(calc_median, 'median_file', registration, 'inputspec.mean_image')
registration.inputs.inputspec.subject_id = subject_id
registration.inputs.inputspec.subjects_dir = subjects_dir
registration.inputs.inputspec.target_image = target_file
"""Use :class:`nipype.algorithms.rapidart` to determine which of the
images in the functional series are outliers based on deviations in
intensity or movement.
"""
art = Node(interface=ArtifactDetect(), name="art")
art.inputs.use_differences = [True, True]
art.inputs.use_norm = True
art.inputs.norm_threshold = norm_threshold
art.inputs.zintensity_threshold = 9
art.inputs.mask_type = 'spm_global'
art.inputs.parameter_source = 'SPM'
"""Here we are connecting all the nodes together. Notice that we add the merge node only if you choose
to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal
voxel sizes.
"""
wf.connect([(name_unique, realign, [('out_file', 'in_files')]),
(realign, slice_timing, [('realigned_files', 'in_files')]),
(slice_timing, art, [('timecorrected_files', 'realigned_files')]),
(realign, art, [('realignment_parameters', 'realignment_parameters')]),
])
def selectindex(files, idx):
import numpy as np
from nipype.utils.filemanip import filename_to_list, list_to_filename
return list_to_filename(np.array(filename_to_list(files))[idx].tolist())
mask = Node(fsl.BET(), name='getmask')
mask.inputs.mask = True
wf.connect(calc_median, 'median_file', mask, 'in_file')
# get segmentation in normalized functional space
def merge_files(in1, in2):
out_files = filename_to_list(in1)
out_files.extend(filename_to_list(in2))
return out_files
# filter some noise
# Compute motion regressors
motreg = Node(Function(input_names=['motion_params', 'order',
'derivatives'],
output_names=['out_files'],
function=motion_regressors,
imports=imports),
name='getmotionregress')
wf.connect(realign, 'realignment_parameters', motreg, 'motion_params')
# Create a filter to remove motion and art confounds
createfilter1 = Node(Function(input_names=['motion_params', 'comp_norm',
'outliers', 'detrend_poly'],
output_names=['out_files'],
function=build_filter1,
imports=imports),
name='makemotionbasedfilter')
createfilter1.inputs.detrend_poly = 2
wf.connect(motreg, 'out_files', createfilter1, 'motion_params')
wf.connect(art, 'norm_files', createfilter1, 'comp_norm')
wf.connect(art, 'outlier_files', createfilter1, 'outliers')
filter1 = MapNode(fsl.GLM(out_f_name='F_mcart.nii',
out_pf_name='pF_mcart.nii',
demean=True),
iterfield=['in_file', 'design', 'out_res_name'],
name='filtermotion')
wf.connect(slice_timing, 'timecorrected_files', filter1, 'in_file')
wf.connect(slice_timing, ('timecorrected_files', rename, '_filtermotart'),
filter1, 'out_res_name')
wf.connect(createfilter1, 'out_files', filter1, 'design')
createfilter2 = MapNode(Function(input_names=['realigned_file', 'mask_file',
'num_components',
'extra_regressors'],
output_names=['out_files'],
function=extract_noise_components,
imports=imports),
iterfield=['realigned_file', 'extra_regressors'],
name='makecompcorrfilter')
createfilter2.inputs.num_components = num_components
wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors')
wf.connect(filter1, 'out_res', createfilter2, 'realigned_file')
wf.connect(registration, ('outputspec.segmentation_files', selectindex, [0, 2]),
createfilter2, 'mask_file')
filter2 = MapNode(fsl.GLM(out_f_name='F.nii',
out_pf_name='pF.nii',
demean=True),
iterfield=['in_file', 'design', 'out_res_name'],
name='filter_noise_nosmooth')
wf.connect(filter1, 'out_res', filter2, 'in_file')
wf.connect(filter1, ('out_res', rename, '_cleaned'),
filter2, 'out_res_name')
wf.connect(createfilter2, 'out_files', filter2, 'design')
wf.connect(mask, 'mask_file', filter2, 'mask')
bandpass = Node(Function(input_names=['files', 'lowpass_freq',
'highpass_freq', 'fs'],
output_names=['out_files'],
function=bandpass_filter,
imports=imports),
name='bandpass_unsmooth')
bandpass.inputs.fs = 1./TR
bandpass.inputs.highpass_freq = highpass_freq
bandpass.inputs.lowpass_freq = lowpass_freq
wf.connect(filter2, 'out_res', bandpass, 'files')
"""Smooth the functional data using
:class:`nipype.interfaces.spm.Smooth`.
"""
smooth = Node(interface=spm.Smooth(), name="smooth")
smooth.inputs.fwhm = vol_fwhm
wf.connect(bandpass, 'out_files', smooth, 'in_files')
collector = Node(Merge(2), name='collect_streams')
wf.connect(smooth, 'smoothed_files', collector, 'in1')
wf.connect(bandpass, 'out_files', collector, 'in2')
"""
Transform the remaining images. First to anatomical and then to target
"""
warpall = MapNode(ants.ApplyTransforms(), iterfield=['input_image'],
name='warpall')
warpall.inputs.input_image_type = 3
warpall.inputs.interpolation = 'Linear'
warpall.inputs.invert_transform_flags = [False, False]
warpall.inputs.terminal_output = 'file'
warpall.inputs.reference_image = target_file
warpall.inputs.args = '--float'
warpall.inputs.num_threads = 1
# transform to target
wf.connect(collector, 'out', warpall, 'input_image')
wf.connect(registration, 'outputspec.transforms', warpall, 'transforms')
mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask')
wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file')
maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker')
wf.connect(warpall, 'output_image', maskts, 'in_file')
wf.connect(mask_target, 'out_file', maskts, 'mask_file')
# map to surface
# extract aparc+aseg ROIs
# extract subcortical ROIs
# extract target space ROIs
# combine subcortical and cortical rois into a single cifti file
#######
# Convert aparc to subject functional space
# Sample the average time series in aparc ROIs
sampleaparc = MapNode(freesurfer.SegStats(default_color_table=True),
iterfield=['in_file', 'summary_file',
'avgwf_txt_file'],
name='aparc_ts')
sampleaparc.inputs.segment_id = ([8] + range(10, 14) + [17, 18, 26, 47] +
range(49, 55) + [58] + range(1001, 1036) +
range(2001, 2036))
wf.connect(registration, 'outputspec.aparc',
sampleaparc, 'segmentation_file')
wf.connect(collector, 'out', sampleaparc, 'in_file')
def get_names(files, suffix):
"""Generate appropriate names for output files
"""
from nipype.utils.filemanip import (split_filename, filename_to_list,
list_to_filename)
out_names = []
for filename in files:
_, name, _ = split_filename(filename)
out_names.append(name + suffix)
return list_to_filename(out_names)
wf.connect(collector, ('out', get_names, '_avgwf.txt'),
sampleaparc, 'avgwf_txt_file')
wf.connect(collector, ('out', get_names, '_summary.stats'),
sampleaparc, 'summary_file')
# Sample the time series onto the surface of the target surface. Performs
# sampling into left and right hemisphere
target = Node(IdentityInterface(fields=['target_subject']), name='target')
target.iterables = ('target_subject', filename_to_list(target_subject))
samplerlh = MapNode(freesurfer.SampleToSurface(),
iterfield=['source_file'],
name='sampler_lh')
samplerlh.inputs.sampling_method = "average"
samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
samplerlh.inputs.sampling_units = "frac"
samplerlh.inputs.interp_method = "trilinear"
samplerlh.inputs.smooth_surf = surf_fwhm
#samplerlh.inputs.cortex_mask = True
samplerlh.inputs.out_type = 'niigz'
samplerlh.inputs.subjects_dir = subjects_dir
samplerrh = samplerlh.clone('sampler_rh')
samplerlh.inputs.hemi = 'lh'
wf.connect(collector, 'out', samplerlh, 'source_file')
wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file')
wf.connect(target, 'target_subject', samplerlh, 'target_subject')
samplerrh.set_input('hemi', 'rh')
wf.connect(collector, 'out', samplerrh, 'source_file')
wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file')
wf.connect(target, 'target_subject', samplerrh, 'target_subject')
# Combine left and right hemisphere to text file
combiner = MapNode(Function(input_names=['left', 'right'],
output_names=['out_file'],
function=combine_hemi,
imports=imports),
iterfield=['left', 'right'],
name="combiner")
wf.connect(samplerlh, 'out_file', combiner, 'left')
wf.connect(samplerrh, 'out_file', combiner, 'right')
# Sample the time series file for each subcortical roi
ts2txt = MapNode(Function(input_names=['timeseries_file', 'label_file',
'indices'],
output_names=['out_file'],
function=extract_subrois,
imports=imports),
iterfield=['timeseries_file'],
name='getsubcortts')
ts2txt.inputs.indices = [8] + range(10, 14) + [17, 18, 26, 47] +\
range(49, 55) + [58]
ts2txt.inputs.label_file = \
os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_'
'2mm_v2.nii.gz'))
wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file')
######
substitutions = [('_target_subject_', ''),
('_filtermotart_cleaned_bp_trans_masked', ''),
('_filtermotart_cleaned_bp', '')
]
regex_subs = [('_ts_masker.*/sar', '/smooth/'),
('_ts_masker.*/ar', '/unsmooth/'),
('_combiner.*/sar', '/smooth/'),
('_combiner.*/ar', '/unsmooth/'),
('_aparc_ts.*/sar', '/smooth/'),
('_aparc_ts.*/ar', '/unsmooth/'),
('_getsubcortts.*/sar', '/smooth/'),
('_getsubcortts.*/ar', '/unsmooth/'),
('series/sar', 'series/smooth/'),
('series/ar', 'series/unsmooth/'),
('_inverse_transform./', ''),
]
# Save the relevant data into an output directory
datasink = Node(interface=DataSink(), name="datasink")
datasink.inputs.base_directory = sink_directory
datasink.inputs.container = subject_id
datasink.inputs.substitutions = substitutions
datasink.inputs.regexp_substitutions = regex_subs #(r'(/_.*(\d+/))', r'/run\2')
wf.connect(realign, 'realignment_parameters', datasink, 'resting.qa.motion')
wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm')
wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity')
wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files')
wf.connect(registration, 'outputspec.segmentation_files', datasink, 'resting.mask_files')
wf.connect(registration, 'outputspec.anat2target', datasink, 'resting.qa.ants')
wf.connect(mask, 'mask_file', datasink, 'resting.mask_files.@brainmask')
wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target')
wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F')
wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF')
wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps')
wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p')
wf.connect(bandpass, 'out_files', datasink, 'resting.timeseries.@bandpassed')
wf.connect(smooth, 'smoothed_files', datasink, 'resting.timeseries.@smoothed')
wf.connect(createfilter1, 'out_files',
datasink, 'resting.regress.@regressors')
wf.connect(createfilter2, 'out_files',
datasink, 'resting.regress.@compcorr')
wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target')
wf.connect(sampleaparc, 'summary_file',
datasink, 'resting.parcellations.aparc')
wf.connect(sampleaparc, 'avgwf_txt_file',
datasink, 'resting.parcellations.aparc.@avgwf')
wf.connect(ts2txt, 'out_file',
datasink, 'resting.parcellations.grayo.@subcortical')
datasink2 = Node(interface=DataSink(), name="datasink2")
datasink2.inputs.base_directory = sink_directory
datasink2.inputs.container = subject_id
datasink2.inputs.substitutions = substitutions
datasink2.inputs.regexp_substitutions = regex_subs #(r'(/_.*(\d+/))', r'/run\2')
wf.connect(combiner, 'out_file',
datasink2, 'resting.parcellations.grayo.@surface')
return wf
"""
Creates the full workflow including getting information from dicom files
"""
def create_resting_workflow(args, name=None):
TR = args.TR
slice_times = args.slice_times
if args.dicom_file:
TR, slice_times, slice_thickness = get_info(args.dicom_file)
slice_times = (np.array(slice_times)/1000.).tolist()
if name is None:
name = 'resting_' + args.subject_id
kwargs = dict(files=[os.path.abspath(filename) for filename in args.files],
target_file=os.path.abspath(args.target_file),
subject_id=args.subject_id,
TR=TR,
slice_times=slice_times,
vol_fwhm=args.vol_fwhm,
surf_fwhm=args.surf_fwhm,
norm_threshold=2.,
subjects_dir=os.path.abspath(args.fsdir),
target_subject=args.target_surfs,
lowpass_freq=args.lowpass_freq,
highpass_freq=args.highpass_freq,
sink_directory=os.path.abspath(args.sink),
name=name)
wf = create_workflow(**kwargs)
return wf
if __name__ == "__main__":
from argparse import ArgumentParser, RawTextHelpFormatter
defstr = ' (default %(default)s)'
parser = ArgumentParser(description=__doc__,
formatter_class=RawTextHelpFormatter)
parser.add_argument("-d", "--dicom_file", dest="dicom_file",
help="an example dicom file from the resting series")
parser.add_argument("-f", "--files", dest="files", nargs="+",
help="4d nifti files for resting state",
required=True)
parser.add_argument("-t", "--target", dest="target_file",
help=("Target in MNI space. Best to use the MindBoggle "
"template - "
"OASIS-30_Atropos_template_in_MNI152_2mm.nii.gz"),
required=True)
parser.add_argument("-s", "--subject_id", dest="subject_id",
help="FreeSurfer subject id", required=True)
parser.add_argument("--subjects_dir", dest="fsdir",
help="FreeSurfer subject directory", required=True)
parser.add_argument("--target_surfaces", dest="target_surfs", nargs="+",
default=['fsaverage5'],
help="FreeSurfer target surfaces" + defstr)
parser.add_argument("--TR", dest="TR", default=None, type=float,
help="TR if dicom not provided in seconds")
parser.add_argument("--slice_times", dest="slice_times", nargs="+",
type=float, help="Slice onset times in seconds")
parser.add_argument('--vol_fwhm', default=6., dest='vol_fwhm',
type=float, help="Spatial FWHM" + defstr)
parser.add_argument('--surf_fwhm', default=15., dest='surf_fwhm',
type=float, help="Spatial FWHM" + defstr)
parser.add_argument("-l", "--lowpass_freq", dest="lowpass_freq",
default=0.1, type=float,
help="Low pass frequency (Hz)" + defstr)
parser.add_argument("-u", "--highpass_freq", dest="highpass_freq",
default=0.01, type=float,
help="High pass frequency (Hz)" + defstr)
parser.add_argument("-o", "--output_dir", dest="sink",
help="Output directory base", required=True)
parser.add_argument("-w", "--work_dir", dest="work_dir",
help="Output directory base")
parser.add_argument("-p", "--plugin", dest="plugin",
default='Linear',
help="Plugin to use")
parser.add_argument("--plugin_args", dest="plugin_args",
help="Plugin arguments")
args = parser.parse_args()
wf = create_resting_workflow(args)
if args.work_dir:
work_dir = os.path.abspath(args.work_dir)
else:
work_dir = os.getcwd()
wf.base_dir = work_dir
if args.plugin_args:
wf.run(args.plugin, plugin_args=eval(args.plugin_args))
else:
wf.run(args.plugin)
|
|
#
# Copyright (C) 2000 greg Landrum
#
""" handles doing cross validation with decision trees
This is, perhaps, a little misleading. For the purposes of this module,
cross validation == evaluating the accuracy of a tree.
"""
from __future__ import print_function
from rdkit.ML.DecTree import ID3
from rdkit.ML.Data import SplitData
import numpy
from rdkit.six.moves import xrange
def ChooseOptimalRoot(examples,trainExamples,testExamples,attrs,
nPossibleVals,treeBuilder,nQuantBounds=[],
**kwargs):
""" loops through all possible tree roots and chooses the one which produces the best tree
**Arguments**
- examples: the full set of examples
- trainExamples: the training examples
- testExamples: the testing examples
- attrs: a list of attributes to consider in the tree building
- nPossibleVals: a list of the number of possible values each variable can adopt
- treeBuilder: the function to be used to actually build the tree
- nQuantBounds: an optional list. If present, it's assumed that the builder
algorithm takes this argument as well (for building QuantTrees)
**Returns**
The best tree found
**Notes**
1) Trees are built using _trainExamples_
2) Testing of each tree (to determine which is best) is done using _CrossValidate_ and
the entire set of data (i.e. all of _examples_)
    3) _testExamples_ is not used at all, which immediately raises the question of
       why it's even being passed in
"""
attrs = attrs[:]
if nQuantBounds:
for i in range(len(nQuantBounds)):
if nQuantBounds[i]==-1 and i in attrs:
attrs.remove(i)
nAttrs = len(attrs)
trees = [None]*nAttrs
errs = [0]*nAttrs
errs[0] = 1e6
for i in xrange(1,nAttrs):
argD = {'initialVar':attrs[i]}
argD.update(kwargs)
if nQuantBounds is None or nQuantBounds == []:
      trees[i] = treeBuilder(trainExamples,attrs,nPossibleVals,**argD)
else:
trees[i] = treeBuilder(trainExamples,attrs,nPossibleVals,nQuantBounds,**argD)
if trees[i]:
errs[i],foo = CrossValidate(trees[i],examples,appendExamples=0)
else:
errs[i] = 1e6
best = numpy.argmin(errs)
# FIX: this used to say 'trees[i]', could that possibly have been right?
return trees[best]
def CrossValidate(tree,testExamples,appendExamples=0):
""" Determines the classification error for the testExamples
**Arguments**
- tree: a decision tree (or anything supporting a _ClassifyExample()_ method)
- testExamples: a list of examples to be used for testing
- appendExamples: a toggle which is passed along to the tree as it does
the classification. The trees can use this to store the examples they
classify locally.
**Returns**
a 2-tuple consisting of:
1) the percent error of the tree
2) a list of misclassified examples
"""
nTest = len(testExamples)
nBad = 0
badExamples = []
for i in xrange(nTest):
testEx = testExamples[i]
trueRes = testEx[-1]
res = tree.ClassifyExample(testEx,appendExamples)
if (trueRes != res).any():
badExamples.append(testEx)
nBad += 1
return float(nBad)/nTest,badExamples
def CrossValidationDriver(examples,attrs,nPossibleVals,holdOutFrac=.3,silent=0,
calcTotalError=0,treeBuilder=ID3.ID3Boot,lessGreedy=0,
startAt=None,
nQuantBounds=[],
maxDepth=-1,
**kwargs):
""" Driver function for building trees and doing cross validation
**Arguments**
- examples: the full set of examples
- attrs: a list of attributes to consider in the tree building
- nPossibleVals: a list of the number of possible values each variable can adopt
- holdOutFrac: the fraction of the data which should be reserved for the hold-out set
(used to calculate the error)
- silent: a toggle used to control how much visual noise this makes as it goes.
- calcTotalError: a toggle used to indicate whether the classification error
of the tree should be calculated using the entire data set (when true) or just
the training hold out set (when false)
- treeBuilder: the function to call to build the tree
- lessGreedy: toggles use of the less greedy tree growth algorithm (see
_ChooseOptimalRoot_).
- startAt: forces the tree to be rooted at this descriptor
- nQuantBounds: an optional list. If present, it's assumed that the builder
algorithm takes this argument as well (for building QuantTrees)
- maxDepth: an optional integer. If present, it's assumed that the builder
algorithm takes this argument as well
**Returns**
a 2-tuple containing:
1) the tree
2) the cross-validation error of the tree
"""
nTot = len(examples)
if not kwargs.get('replacementSelection',0):
testIndices,trainIndices = SplitData.SplitIndices(nTot,holdOutFrac,
silent=1,legacy=1,
replacement=0)
else:
testIndices,trainIndices = SplitData.SplitIndices(nTot,holdOutFrac,
silent=1,legacy=0,
replacement=1)
trainExamples = [examples[x] for x in trainIndices]
testExamples = [examples[x] for x in testIndices]
nTrain = len(trainExamples)
if not silent:
print('Training with %d examples'%(nTrain))
if not lessGreedy:
if nQuantBounds is None or nQuantBounds == []:
tree = treeBuilder(trainExamples,attrs,nPossibleVals,
initialVar=startAt,maxDepth=maxDepth,**kwargs)
else:
tree = treeBuilder(trainExamples,attrs,nPossibleVals,nQuantBounds,
initialVar=startAt,maxDepth=maxDepth,**kwargs)
else:
tree = ChooseOptimalRoot(examples,trainExamples,testExamples,
attrs,nPossibleVals,treeBuilder,nQuantBounds,
maxDepth=maxDepth,**kwargs)
nTest = len(testExamples)
if not silent:
print('Testing with %d examples'%nTest)
if not calcTotalError:
xValError,badExamples = CrossValidate(tree,testExamples,appendExamples=1)
else:
xValError,badExamples = CrossValidate(tree,examples,appendExamples=0)
if not silent:
print('Validation error was %%%4.2f'%(100*xValError))
tree.SetBadExamples(badExamples)
tree.SetTrainingExamples(trainExamples)
tree.SetTestExamples(testExamples)
tree._trainIndices = trainIndices
return tree,xValError
def TestRun():
""" testing code
"""
from rdkit.ML.DecTree import randomtest
examples,attrs,nPossibleVals = randomtest.GenRandomExamples(nExamples = 200)
tree,frac = CrossValidationDriver(examples,attrs,
nPossibleVals)
tree.Pickle('save.pkl')
import copy
t2 = copy.deepcopy(tree)
print('t1 == t2',tree==t2)
l = [tree]
print('t2 in [tree]', t2 in l, l.index(t2))
if __name__ == '__main__':
TestRun()
|
|
from troposphere import Ref, Template, Parameter, GetAZs, Output, Join, GetAtt
from troposphere.autoscaling import LaunchConfiguration, AutoScalingGroup, Tag
from troposphere.ec2 import Instance, SecurityGroup, SecurityGroupRule, EIP
from troposphere.elasticloadbalancing import LoadBalancer, AccessLoggingPolicy
from troposphere.rds import DBInstance, DBParameterGroup, DBSecurityGroup, DBSecurityGroupIngress, RDSSecurityGroup
from troposphere.s3 import Bucket, PublicRead, CorsConfiguration, CorsRules
t = Template()
t.add_description("Create a FireCARES Instance")
base_ami = "ami-d05e75b8"
key_name = t.add_parameter(Parameter(
"KeyName",
Description="Name of an existing EC2 KeyPair to enable SSH access to the instances",
Type="AWS::EC2::KeyPair::KeyName",
ConstraintDescription="Must be the name of an existing EC2 KeyPair."
))
s3_static_allowed_cors_origin = t.add_parameter(Parameter(
"S3StaticAllowedCORSOrigin",
Description="Name of the allowed origins for accessing the static FireCARES S3 bucket",
Type="CommaDelimitedList",
ConstraintDescription="Must be a set of origins (including scheme://host)"
))
webserver_sg = t.add_parameter(Parameter(
"WebServerSG",
Description="The GroupID of the Webserver Security Group",
Type="String",
ConstraintDescription="Must be the name of an existing security group",
Default="sg-ee029092"
))
vpc_id = t.add_parameter(Parameter(
"VpcId",
Description="Name of an existing vpc",
Type="String",
Default="vpc-fc94c499",
ConstraintDescription="must be an existing VPC name."
))
db_user = t.add_parameter(Parameter(
"DBUser",
NoEcho=True,
Description="Username for PostgreSQL database access",
Type="String",
ConstraintDescription="Must begin with a letter and contain only alphanumeric characters.",
MinLength=1,
MaxLength=16,
AllowedPattern="[a-zA-Z][a-zA-Z0-9]*",
))
db_password = t.add_parameter(Parameter(
"DBPassword",
NoEcho=True,
Description="Password for PostgreSQL database access",
Type="String",
ConstraintDescription="must contain only alphanumeric characters.",
MinLength=8,
MaxLength=41,
AllowedPattern="[a-zA-Z0-9]*",
))
db_storage = t.add_parameter(Parameter(
"DBAllocatedStorage",
Description="The size of the database (Gb)",
Type="Number",
ConstraintDescription="must be between 10 and 1024Gb.",
MinValue="10",
MaxValue="1024",
Default="10"
))
db_instance_class = t.add_parameter(Parameter(
"DBInstanceClass",
Description="The database instance type",
Type="String",
ConstraintDescription="must select a valid database instance type.",
Default="db.t2.small",
AllowedValues=[
"db.t1.micro",
"db.m1.small",
"db.m1.medium",
"db.m1.large",
"db.m1.xlarge",
"db.m2.xlarge",
"db.m2.2xlarge",
"db.m2.4xlarge",
"db.m3.medium",
"db.m3.large",
"db.m3.xlarge",
"db.m3.2xlarge",
"db.m4.large",
"db.m4.xlarge",
"db.m4.2xlarge",
"db.m4.4xlarge",
"db.m4.10xlarge",
"db.r3.large",
"db.r3.xlarge",
"db.r3.2xlarge",
"db.r3.4xlarge",
"db.r3.8xlarge",
"db.m2.xlarge",
"db.m2.2xlarge",
"db.m2.4xlarge",
"db.cr1.8xlarge",
"db.t2.micro",
"db.t2.small",
"db.t2.medium",
"db.t2.large"
]
))
db_multi_az = t.add_parameter(Parameter(
"MultiAZDatabase",
Description="Create a Multi-AZ PostgreSQL Amazon RDS database instance",
Type="String",
ConstraintDescription="must be either true or false.",
Default="false",
AllowedValues=[
"true",
"false"
]
))
rabbit_instance_class = t.add_parameter(Parameter(
"RabbitInstanceClass",
Default="t2.small",
Description="RabbitMQ EC2 instance type",
Type="String",
ConstraintDescription="must be a valid EC2 instance type.",
AllowedValues=[
"t1.micro",
"t2.nano",
"t2.micro",
"t2.small",
"t2.medium",
"t2.large",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge",
"m2.xlarge",
"m2.2xlarge",
"m2.4xlarge",
"m3.medium",
"m3.large",
"m3.xlarge",
"m3.2xlarge",
"m4.large",
"m4.xlarge",
"m4.2xlarge",
"m4.4xlarge",
"m4.10xlarge",
"c1.medium",
"c1.xlarge",
"c3.large",
"c3.xlarge",
"c3.2xlarge",
"c3.4xlarge",
"c3.8xlarge",
"c4.large",
"c4.xlarge",
"c4.2xlarge",
"c4.4xlarge",
"c4.8xlarge",
"g2.2xlarge",
"g2.8xlarge",
"r3.large",
"r3.xlarge",
"r3.2xlarge",
"r3.4xlarge",
"r3.8xlarge",
"i2.xlarge",
"i2.2xlarge",
"i2.4xlarge",
"i2.8xlarge",
"d2.xlarge",
"d2.2xlarge",
"d2.4xlarge",
"d2.8xlarge",
"hi1.4xlarge",
"hs1.8xlarge",
"cr1.8xlarge",
"cc2.8xlarge",
"cg1.4xlarge"
]
))
environment = t.add_parameter(Parameter(
"Environment",
Description="Stack environment (e.g. prod, dev, int)",
Type="String",
MinLength="1",
MaxLength="12",
Default="dev",
))
db_sg = t.add_resource(SecurityGroup(
"DBSecurityGroup",
GroupDescription="Client access.",
VpcId=Ref(vpc_id),
SecurityGroupIngress=[
SecurityGroupRule("WebAppAccess",
SourceSecurityGroupId=Ref(webserver_sg),
ToPort="5432",
FromPort="5432",
IpProtocol="tcp"),
SecurityGroupRule("TylerAccess", IpProtocol="tcp", FromPort="5432", ToPort="5432", CidrIp="73.173.214.176/32"),
SecurityGroupRule("JoeAccess", IpProtocol="tcp", FromPort="5432", ToPort="5432", CidrIp="65.254.97.100/32"),
SecurityGroupRule("JoeAccess2", IpProtocol="tcp", FromPort="5432", ToPort="5432", CidrIp="108.66.75.162/32"),
SecurityGroupRule("JoeAccess3", IpProtocol="tcp", FromPort="5432", ToPort="5432", CidrIp="71.86.4.190/32"),
SecurityGroupRule("JoeAccess4", IpProtocol="tcp", FromPort="5432", ToPort="5432", CidrIp="75.133.14.178/32"),
SecurityGroupRule("JoeAccess4", IpProtocol="tcp", FromPort="5432", ToPort="5432", CidrIp="54.87.125.141/32"),
SecurityGroupRule("JoeAccess4", IpProtocol="tcp", FromPort="5432", ToPort="5432", CidrIp="54.167.99.192/32"),
SecurityGroupRule("JoeAccess4", IpProtocol="tcp", FromPort="5432", ToPort="5432", CidrIp="52.205.224.226/32"),
SecurityGroupRule("JoeAccess4", IpProtocol="tcp", FromPort="5432", ToPort="5432", CidrIp="52.206.122.170/32"),
SecurityGroupRule("JoeAccess4", IpProtocol="tcp", FromPort="5432", ToPort="5432", CidrIp="52.202.117.147/32")
]
))
db = t.add_resource(DBInstance(
"db",
AllocatedStorage=Ref(db_storage),
DBInstanceClass=Ref(db_instance_class),
Engine="postgres",
MasterUsername=Ref(db_user),
MasterUserPassword=Ref(db_password),
VPCSecurityGroups=[Ref(db_sg)]
))
rabbit_mq_sg = t.add_resource(SecurityGroup(
"RabbitMQ",
GroupDescription="rabbitmq-sg-ingress",
SecurityGroupIngress=[
SecurityGroupRule("JenkinsAccess", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="54.173.150.226/32"),
SecurityGroupRule("TylerAccess", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="73.173.214.176/32"),
SecurityGroupRule("JoeAccess", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="65.254.97.100/32"),
SecurityGroupRule("JoeAccess2", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="108.66.75.162/32"),
SecurityGroupRule("JoeAccess3", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="71.86.4.190/32"),
SecurityGroupRule("JoeAccess4", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="75.133.14.178/32"),
SecurityGroupRule("SontagAccess", IpProtocol="tcp", FromPort="22", ToPort="22", CidrIp="47.215.167.239/32"),
SecurityGroupRule("RabbitMQWeb", IpProtocol="tcp", FromPort="15672", ToPort="15672", CidrIp="69.255.184.149/32"),
SecurityGroupRule("RabbitMQ", IpProtocol="tcp", FromPort="5672", ToPort="5672", CidrIp="69.255.184.149/32"),
SecurityGroupRule("ClientAccess", IpProtocol="tcp", FromPort="5672", ToPort="5672", SourceSecurityGroupId=Ref(webserver_sg))
],
))
ec2_instance = t.add_resource(Instance(
"Ec2Instance",
ImageId=base_ami,
InstanceType=Ref(rabbit_instance_class),
KeyName=Ref(key_name),
SecurityGroups=[Ref(rabbit_mq_sg)],
Tags=[{'Key': 'Name', 'Value': Join('-', ['rabbitmq', Ref(environment)])}]
))
eip = t.add_resource(EIP(
"RabbitMQEIP",
InstanceId=Ref(ec2_instance),
Domain="vpc"
))
static_bucket = t.add_resource(Bucket("StaticBucket",
BucketName=Join('-', ['firecares', Ref(environment), 'static']),
AccessControl=PublicRead,
CorsConfiguration=CorsConfiguration(CorsRules=[CorsRules(AllowedOrigins=Ref(s3_static_allowed_cors_origin), AllowedMethods=['GET', 'HEAD'])])
))
document_upload_bucket = t.add_resource(Bucket("DocumentUploadBucket",
BucketName=Join('-', ['firecares', Ref(environment), 'uploads']),
AccessControl=PublicRead))
t.add_output([
Output(
"RabbitMQIP",
Description="RabbitMQ's Elastic IP",
Value=Ref(eip),
)
])
t.add_output([
Output(
"WebServerSecurityGroup",
Description="WebserverSecurityGroup",
Value=Ref(webserver_sg),
)
])
if __name__ == '__main__':
    print(t.to_json())
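# Deployment sketch (an assumption, not part of the original script): the JSON
# emitted above can be pushed to CloudFormation with boto3. The stack name,
# key pair, credentials and CORS origin below are hypothetical placeholders;
# parameters without defaults (KeyName, DBUser, DBPassword,
# S3StaticAllowedCORSOrigin) must be supplied at stack creation.
def deploy_stack(stack_name="firecares-dev", key_name="my-keypair"):
    import boto3
    cfn = boto3.client("cloudformation")
    return cfn.create_stack(
        StackName=stack_name,
        TemplateBody=t.to_json(),
        Parameters=[
            {"ParameterKey": "KeyName", "ParameterValue": key_name},
            {"ParameterKey": "DBUser", "ParameterValue": "firecares"},
            {"ParameterKey": "DBPassword", "ParameterValue": "changeme123"},
            {"ParameterKey": "S3StaticAllowedCORSOrigin",
             "ParameterValue": "https://example.org"},
        ])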
|
|
################################################################################
# Copyright (c) 2011-2021, National Research Foundation (SARAO)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Two-stage deferred indexer for objects with expensive __getitem__ calls."""
import copy
import threading
from functools import partial, reduce
from numbers import Integral
import dask.array as da
import dask.highlevelgraph
import dask.optimization
import numpy as np
# TODO support advanced integer indexing with non-strictly increasing indices (i.e. out-of-order and duplicates)
def _range_to_slice(index):
"""Convert sequence of evenly spaced non-negative ints to equivalent slice.
If the returned slice object is `s = slice(start, stop, step)`, the
following holds:
list(range(*s.indices(length))) == list(index)
where `length = max(start, 0 if stop is None else stop) + 1` (a proxy for
`max(index) + 1`). If the spacing between elements of `index` is zero
or uneven, raise ValueError.
"""
if not len(index):
return slice(None, 0, None)
if any(i < 0 for i in index):
raise ValueError(f'Could not convert {index} to a slice '
'(contains negative elements)')
increments_left = set(np.diff(index))
step = increments_left.pop() if increments_left else 1
if step == 0 or increments_left:
raise ValueError(f'Could not convert {index} to a slice '
'(unevenly spaced or zero increments)')
start = index[0]
stop = index[-1] + step
# Avoid descending below 0 and thereby wrapping back to the top
return slice(start, stop if stop >= 0 else None, step)
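# Minimal illustration (not part of the original module): the slice returned
# by _range_to_slice reproduces an evenly spaced index exactly, as promised by
# the docstring above.
def _demo_range_to_slice():
    index = [2, 4, 6]
    s = _range_to_slice(index)                   # slice(2, 8, 2)
    length = max(index) + 1
    assert list(range(*s.indices(length))) == list(index)
    return s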
def _simplify_index(indices, shape):
"""Generate an equivalent index expression that is cheaper to evaluate.
Advanced ("fancy") indexing using arrays/lists of booleans or ints is much
slower than basic indexing using slices and scalar ints in dask. If the
fancy index on a specific axis/dimension selects a range with a fixed
(non-zero) step size between indices, however, it can be converted into an
equivalent slice to get a simple index instead.
Note that when indexing along multiple axes with arrays, this may change
the semantics of the indexing (see NumPy's `NEP 21`_ for details). This
simplification is only guaranteed to be safe when used with outer indexing.
.. _NEP 21: http://www.numpy.org/neps/nep-0021-advanced-indexing.html
"""
# First clean up and check indices, unpacking ellipsis and boolean arrays
indices = da.slicing.normalize_index(indices, shape)
out = []
axis = 0
for index in indices:
if index is not np.newaxis:
length = shape[axis]
axis += 1
# If there is 1-D fancy index on this axis, try to convert to slice
if isinstance(index, np.ndarray) and index.ndim == 1:
try:
index = _range_to_slice(index)
except ValueError:
pass
else:
index = da.slicing.normalize_slice(index, length)
out.append(index)
return tuple(out)
def _dask_oindex(x, indices):
"""Perform outer indexing on dask array `x`, one dimension at a time.
It is assumed that `indices` is suitably normalised (no ellipsis, etc.)
"""
axis = 0
for index in indices:
x = da.take(x, index, axis=axis)
# If axis wasn't dropped by a scalar index:
if not isinstance(index, Integral):
axis += 1
return x
def dask_getitem(x, indices):
"""Index a dask array, with N-D fancy index support and better performance.
This is a drop-in replacement for ``x[indices]`` that goes one further
by implementing "N-D fancy indexing" which is still unsupported in dask.
If `indices` contains multiple fancy indices, perform outer (`oindex`)
indexing. This behaviour deviates from NumPy, which performs the more
general (but also more obtuse) vectorized (`vindex`) indexing in this case.
See NumPy `NEP 21`_, `dask #433`_ and `h5py #652`_ for more
details.
.. _NEP 21: http://www.numpy.org/neps/nep-0021-advanced-indexing.html
.. _dask #433: https://github.com/dask/dask/issues/433
.. _h5py #652: https://github.com/h5py/h5py/issues/652
In addition, this optimises performance by culling unnecessary nodes from
the dask graph after indexing, which makes it cheaper to compute if only a
small piece of the graph is needed, and by collapsing fancy indices in
`indices` to slices where possible (which also implies oindex semantics).
"""
indices = _simplify_index(indices, x.shape)
try:
out = x[indices]
except NotImplementedError:
out = _dask_oindex(x, indices)
# dask does culling anyway as part of optimization, but it first calls
# ensure_dict, which copies all the keys, presumably to speed up the
# case where most keys are retained. A lazy indexer is normally used to
# fetch a small part of the data.
    if np.prod(out.numblocks) < 0.5 * np.prod(x.numblocks):
dsk = dask.optimization.cull(out.dask, out.__dask_keys__())[0]
out.dask = dask.highlevelgraph.HighLevelGraph.from_collections(out.name, dsk)
return out
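# Usage sketch (an assumption, not part of the original module): two unevenly
# spaced fancy indices cannot be simplified to slices, so plain dask indexing
# raises NotImplementedError and dask_getitem falls back to outer (oindex)
# indexing, one dimension at a time.
def _demo_dask_getitem():
    x = da.arange(20, chunks=5).reshape(4, 5)
    rows = np.array([0, 1, 3])
    cols = np.array([0, 2, 3])
    out = dask_getitem(x, (rows, cols))   # oindex semantics: shape (3, 3)
    return out.compute()                  # [[0, 2, 3], [5, 7, 8], [15, 17, 18]]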
def _callable_name(f):
"""Determine appropriate name for callable `f` (akin to function name)."""
try:
return f.__name__
except AttributeError:
if isinstance(f, partial):
return f.func.__name__
return f.__class__.__name__
# -------------------------------------------------------------------------------------------------
# -- CLASS : LazyTransform
# -------------------------------------------------------------------------------------------------
class InvalidTransform(Exception):
"""Transform changes data shape in unallowed way."""
class LazyTransform:
"""Transformation to be applied by LazyIndexer after final indexing.
A :class:`LazyIndexer` potentially applies a chain of transforms to the
data after the final second-stage indexing is done. These transforms are
restricted in their capabilities to simplify the indexing process.
Specifically, when it comes to the data shape, transforms may only::
- add dimensions at the end of the data shape, or
- drop dimensions at the end of the data shape.
The preserved dimensions are not allowed to change their shape or
interpretation so that the second-stage indexing matches the first-stage
indexing on these dimensions. The data type (aka `dtype`) is allowed to
change.
Parameters
----------
name : string or None, optional
Name of transform
transform : function, signature ``data = f(data, keep)``, optional
Transform to apply to data (`keep` is user-specified second-stage index)
new_shape : function, signature ``new_shape = f(old_shape)``, optional
Function that predicts data array shape tuple after first-stage indexing
and transformation, given its original shape tuple as input.
Restrictions apply as described above.
dtype : :class:`numpy.dtype` object or equivalent or None, optional
Type of output array after transformation (None if same as input array)
"""
def __init__(self, name=None, transform=lambda d, k: d, new_shape=lambda s: tuple(s), dtype=None):
self.name = 'unnamed' if name is None else name
self.transform = transform
self.new_shape = new_shape
self.dtype = np.dtype(dtype) if dtype is not None else None
def __repr__(self):
"""Short human-friendly string representation of lazy transform object."""
class_name = self.__class__.__name__
dtype = 'unchanged' if self.dtype is None else self.dtype
return f"<katdal.{class_name} '{self.name}': type '{dtype}' at {id(self):#x}>"
def __call__(self, data, keep):
"""Transform data (`keep` is user-specified second-stage index)."""
return self.transform(data, keep)
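# Illustrative sketch (an assumption, not from the katdal code base): a
# transform that doubles the data and casts it to float32, leaving the shape
# intact so that it satisfies the restrictions described above.
def _demo_lazy_transform():
    tf = LazyTransform(name='scale_to_float32',
                       transform=lambda data, keep: (2.0 * data).astype(np.float32),
                       new_shape=lambda shape: tuple(shape),  # shape preserved
                       dtype=np.float32)
    return tf(np.arange(4), keep=())   # array([0., 2., 4., 6.], dtype=float32)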
# -------------------------------------------------------------------------------------------------
# -- CLASS : LazyIndexer
# -------------------------------------------------------------------------------------------------
class LazyIndexer:
"""Two-stage deferred indexer for objects with expensive __getitem__ calls.
This class was originally designed to extend and speed up the indexing
functionality of HDF5 datasets as found in :mod:`h5py`, but works on any
equivalent object (defined as any object with `shape`, `dtype` and
`__getitem__` members) where a call to __getitem__ may be very expensive.
The following discussion focuses on the HDF5 use case as the main example.
Direct extraction of a subset of an HDF5 dataset via the __getitem__
interface (i.e. `dataset[index]`) has a few issues:
1. Data access can be very slow (or impossible) if a very large dataset is
fully loaded into memory and then indexed again at a later stage
2. Advanced indexing (via boolean masks or sequences of integer indices) is
only supported on a single dimension in the current version of h5py (2.0)
3. Even though advanced indexing has limited support, simple indexing (via
single integer indices or slices) is frequently much faster.
This class wraps an :class:`h5py.Dataset` or equivalent object and exposes a
new __getitem__ interface on it. It efficiently composes two stages of
indexing: a first stage specified at object instantiation time and a second
stage that applies on top of the first stage when __getitem__ is called on
this object. The data are only loaded after the combined index is determined,
addressing issue 1.
Furthermore, advanced indexing is allowed on any dimension by decomposing
the selection as a series of slice selections covering contiguous segments
of the dimension to alleviate issue 2. Finally, this also allows faster
data retrieval by extracting a large slice from the HDF5 dataset and then
performing advanced indexing on the resulting :class:`numpy.ndarray` object
instead, in response to issue 3.
The `keep` parameter of the :meth:`__init__` and :meth:`__getitem__` methods
accepts a generic index or slice specification, i.e. anything that would be
accepted by the :meth:`__getitem__` method of a :class:`numpy.ndarray` of
the same shape as the dataset. This could be a single integer index, a
sequence of integer indices, a slice object (representing the colon operator
commonly used with __getitem__, e.g. representing `x[1:10:2]` as
`x[slice(1,10,2)]`), a sequence of booleans as a mask, or a tuple containing
any number of these (typically one index item per dataset dimension). Any
missing dimensions will be fully selected, and any extra dimensions will
be ignored.
Parameters
----------
dataset : :class:`h5py.Dataset` object or equivalent
Underlying dataset or array object on which lazy indexing will be done.
This can be any object with shape, dtype and __getitem__ members.
keep : NumPy index expression, optional
First-stage index as a valid index or slice specification
(supports arbitrary slicing or advanced indexing on any dimension)
transforms : list of :class:`LazyTransform` objects or None, optional
Chain of transforms to be applied to data after final indexing. The
chain as a whole may only add or drop dimensions at the end of data
shape without changing the preserved dimensions.
Attributes
----------
name : string
Name of HDF5 dataset (or empty string for unnamed ndarrays, etc.)
Raises
------
InvalidTransform
If transform chain does not obey restrictions on changing the data shape
"""
def __init__(self, dataset, keep=slice(None), transforms=None):
self.dataset = dataset
self.transforms = [] if transforms is None else transforms
self.name = getattr(self.dataset, 'name', '')
# Ensure that keep is a tuple (then turn it into a list to simplify further processing)
keep = list(keep) if isinstance(keep, tuple) else [keep]
# Ensure that keep is same length as data shape (truncate or pad with blanket slices as necessary)
keep = keep[:len(dataset.shape)] + [slice(None)] * (len(dataset.shape) - len(keep))
# Ensure that each index in lookup is an array of integer indices, or None
self._lookup = []
for dim_keep, dim_len in zip(keep, dataset.shape):
if isinstance(dim_keep, slice):
# Turn slice into array of integer indices (or None if it contains all indices)
dim_keep = dim_keep.indices(dim_len)
dim_keep = np.arange(*dim_keep) if dim_keep != slice(None).indices(dim_len) else None
else:
dim_keep = np.atleast_1d(dim_keep)
# Turn boolean mask into integer indices (True means keep that index), or None if all is True
                if dim_keep.dtype == bool and len(dim_keep) == dim_len:
dim_keep = np.nonzero(dim_keep)[0] if not dim_keep.all() else None
self._lookup.append(dim_keep)
# Shape of data array after first-stage indexing and before transformation
self._initial_shape = tuple([(len(dim_keep) if dim_keep is not None else dim_len)
for dim_keep, dim_len in zip(self._lookup, self.dataset.shape)])
# Type of data array before transformation
self._initial_dtype = self.dataset.dtype
# Test validity of shape and dtype
self.shape, self.dtype
def __repr__(self):
"""Short human-friendly string representation of lazy indexer object."""
return "<katdal.{} '{}': shape {}, type {} at {:#x}>".format(
self.__class__.__name__, self.name, self.shape, self.dtype, id(self))
def _name_shape_dtype(self, name, shape, dtype):
"""Helper function to create strings for display (limits dtype length)."""
dtype_str = (str(dtype)[:50] + '...') if len(str(dtype)) > 50 else str(dtype)
return f"{name} -> {shape} {dtype_str}"
def __str__(self):
"""Verbose human-friendly string representation of lazy indexer object."""
shape, dtype = self._initial_shape, self._initial_dtype
descr = [self._name_shape_dtype(self.name, shape, dtype)]
for transform in self.transforms:
shape, dtype = transform.new_shape(shape), transform.dtype if transform.dtype is not None else dtype
descr += ['-> ' + self._name_shape_dtype(transform.name, shape, dtype)]
return '\n'.join(descr)
def __len__(self):
"""Length operator."""
return self.shape[0]
def __iter__(self):
"""Iterator."""
for index in range(len(self)):
yield self[index]
def __getitem__(self, keep):
"""Extract a selected array from the underlying dataset.
This applies the given second-stage index on top of the first-stage index
and retrieves the relevant data from the dataset as an array, optionally
transforming it afterwards.
Parameters
----------
keep : NumPy index expression
Second-stage index as a valid index or slice specification
(supports arbitrary slicing or advanced indexing on any dimension)
Returns
-------
data : array
Extracted output array
"""
ndim = len(self.dataset.shape)
# Ensure that keep is a tuple (then turn it into a list to simplify further processing)
keep = list(keep) if isinstance(keep, tuple) else [keep]
# The original keep tuple will be passed to data transform chain
original_keep = tuple(keep)
# Ensure that keep is same length as data dimension (truncate or pad with blanket slices as necessary)
keep = keep[:ndim] + [slice(None)] * (ndim - len(keep))
# Map current selection to original data indices based on any existing initial selection, per data dimension
keep = [(dkeep if dlookup is None else dlookup[dkeep]) for dkeep, dlookup in zip(keep, self._lookup)]
# Iterate over dimensions of dataset, storing information on selection on each dimension:
# `selection` is a list with one element per dimension; each element is a list of contiguous segments along
# the dimension, and each segment is represented by a tuple of 3 elements:
# (dataset selection, post-selection, output array selection)
# Similarly, `segment_sizes` is a list of lists of segment lengths (empty lists for scalar-selected dimensions)
selection, segment_sizes = [], []
for dim_keep, dim_len in zip(keep, self.dataset.shape):
if np.isscalar(dim_keep):
# If selection is a scalar, pass directly to dataset selector and remove dimension from output
selection.append([(dim_keep, None, None)])
segment_sizes.append([])
elif isinstance(dim_keep, slice):
# If selection is a slice, pass directly to dataset selector without post-selection
start, stop, stride = dim_keep.indices(dim_len)
segm_size = len(range(start, stop, stride))
selection.append([(slice(start, stop, stride), slice(None), slice(0, segm_size, 1))])
segment_sizes.append([segm_size])
elif len(dim_keep) == 0:
# If selection is empty, pass to post-selector, as HDF5 datasets do not support zero-length selection
selection.append([(slice(0, 1, 1), slice(0, 0, 1), slice(0, 0, 1))])
segment_sizes.append([0])
else:
# Anything else is advanced indexing via bool or integer sequences
dim_keep = np.atleast_1d(dim_keep)
# Turn boolean mask into integer indices (True means keep that index)
                if dim_keep.dtype == bool and len(dim_keep) == dim_len:
dim_keep = np.nonzero(dim_keep)[0]
elif not np.all(dim_keep == np.unique(dim_keep)):
raise TypeError('LazyIndexer cannot handle duplicate or unsorted advanced integer indices')
# Split indices into multiple contiguous segments (specified by first and one-past-last data indices)
jumps = np.nonzero(np.diff(dim_keep) > 1)[0]
first = [dim_keep[0]] + dim_keep[jumps + 1].tolist()
last = dim_keep[jumps].tolist() + [dim_keep[-1]]
segments = np.c_[first, np.array(last) + 1]
if len(dim_keep) > 0.2 * dim_len and len(segments) > 1:
# If more than 20% of data are selected in 2 or more separate segments (the Ratcliffian benchmark),
# select data at dataset level with a single slice spanning segments and then postselect the ndarray
selection.append([(slice(segments[0, 0], segments[-1, 1], 1),
dim_keep - dim_keep[0], slice(0, len(dim_keep), 1))])
segment_sizes.append([len(dim_keep)])
else:
# Turn each segment into a separate slice at dataset level without post-selection,
# and construct contiguous output slices of the same segment sizes
segm_sizes = [end - start for start, end in segments]
segm_starts = np.cumsum([0] + segm_sizes)
selection.append([(slice(start, end, 1), slice(None), slice(segm_starts[n], segm_starts[n + 1], 1))
for n, (start, end) in enumerate(segments)])
segment_sizes.append(segm_sizes)
# Short-circuit the selection if all dimensions are selected with scalars (resulting in a scalar output)
if segment_sizes == [[]] * ndim:
out_data = self.dataset[tuple([select[0][0] for select in selection])]
else:
# Use dense N-dimensional meshgrid to slice data set into chunks, based on segments along each dimension
chunk_indices = np.mgrid[[slice(0, len(select), 1) for select in selection]]
# Pre-allocate output ndarray to have the correct shape and dtype (will be at least 1-dimensional)
out_data = np.empty([np.sum(segments) for segments in segment_sizes if segments], dtype=self.dataset.dtype)
# Iterate over chunks, extracting them from dataset and inserting them into the right spot in output array
for chunk_index in chunk_indices.reshape(ndim, -1).T:
# Extract chunk from dataset (don't use any advanced indexing here, only scalars and slices)
dataset_select = tuple([select[segment][0] for select, segment in zip(selection, chunk_index)])
chunk = self.dataset[dataset_select]
# Perform post-selection on chunk (can be fancier / advanced indexing because chunk is now an ndarray)
post_select = [select[segment][1] for select, segment in zip(selection, chunk_index)]
# If any dimensions were dropped due to scalar indexing, drop them from post_select/out_select tuples
post_select = tuple([select for select in post_select if select is not None])
# Do post-selection one dimension at a time, as ndarray does not allow simultaneous advanced indexing
# on more than one dimension. This caters for the scenario where more than one dimension satisfies
# the Ratcliffian benchmark (the only way to get advanced post-selection).
for dim in range(len(chunk.shape)):
# Only do post-selection on this dimension if non-trivial (otherwise an unnecessary copy happens)
if not (isinstance(post_select[dim], slice) and post_select[dim] == slice(None)):
# Prepend the appropriate number of colons to the selection to place it at correct dimension
                        chunk = chunk[tuple([slice(None)] * dim + [post_select[dim]])]
# Determine appropriate output selection and insert chunk into output array
out_select = [select[segment][2] for select, segment in zip(selection, chunk_index)]
out_select = tuple([select for select in out_select if select is not None])
out_data[out_select] = chunk
# Apply transform chain to output data, if any
return reduce(lambda data, transform: transform(data, original_keep), self.transforms, out_data)
@property
def shape(self):
"""Shape of data array after first-stage indexing and transformation, i.e. ``self[:].shape``."""
new_shape = reduce(lambda shape, transform: transform.new_shape(shape), self.transforms, self._initial_shape)
# Do a quick test of shape transformation as verification of the transform chain
allowed_shapes = [self._initial_shape[:(n + 1)] for n in range(len(self._initial_shape))]
if new_shape[:len(self._initial_shape)] not in allowed_shapes:
raise InvalidTransform('Transform chain may only add or drop dimensions at the end of data shape: '
f'final shape is {new_shape}, expected one of {allowed_shapes}')
return new_shape
@property
def dtype(self):
"""Type of data array after transformation, i.e. ``self[:].dtype``."""
return reduce(lambda dtype, transform: transform.dtype if transform.dtype is not None else dtype,
self.transforms, self._initial_dtype)
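# Usage sketch (an assumption, not from katdal): a plain ndarray already has
# the required shape/dtype/__getitem__ members, so it can stand in for an HDF5
# dataset to show how the two indexing stages compose.
def _demo_lazy_indexer():
    data = np.arange(100).reshape(10, 10)
    indexer = LazyIndexer(data, keep=(slice(0, 10, 2), slice(None)))
    # Second stage selects within the first-stage result: rows 0 and 2 of the
    # selection are rows 0 and 4 of `data`, so this returns array([3, 43]).
    return indexer[np.array([0, 2]), 3]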
class DaskLazyIndexer:
"""Turn a dask Array into a LazyIndexer by computing it upon indexing.
The LazyIndexer wraps an underlying `dataset` in the form of a dask Array.
Upon first use, it applies a stored first-stage selection (`keep`) to the
array, followed by a series of `transforms`. All of these actions are lazy
and only update the dask graph of the `dataset`. Since these updates are
computed only on first use, there is minimal cost in constructing an
instance and immediately throwing it away again.
Second-stage selection occurs via a :meth:`__getitem__` call on this
object, which also triggers dask computation to return the final
:class:`numpy.ndarray` output. Both selection steps follow outer indexing
("oindex") semantics, by indexing each dimension / axis separately.
DaskLazyIndexers can also index other DaskLazyIndexers, which allows them
to share first-stage selections and/or transforms, and to construct nested
or hierarchical indexers.
Parameters
----------
dataset : :class:`dask.Array` or :class:`DaskLazyIndexer`
The full dataset, from which a subset is chosen by `keep`
keep : NumPy index expression, optional
Index expression describing first-stage selection (e.g. as applied by
:meth:`katdal.DataSet.select`), with oindex semantics
transforms : sequence of function, signature ``array = f(array)``, optional
Transformations that are applied after indexing by `keep` but
before indexing on this object. Each transformation is a callable
that takes a dask array and returns another dask array.
Attributes
----------
name : str
The name of the (full) underlying dataset, useful for reporting
dataset : :class:`dask.Array`
The dask array that is accessed by indexing (after applying `keep` and
`transforms`). It can be used directly to perform dask computations.
"""
def __init__(self, dataset, keep=(), transforms=()):
self.name = getattr(dataset, 'name', '')
# Fancy indices can be mutable arrays, so take a copy to protect
# against the caller mutating the array before we apply it.
self.keep = copy.deepcopy(keep)
self._transforms = list(transforms)
self._orig_dataset = dataset
self._dataset = None
self._lock = threading.Lock()
@property
def transforms(self):
"""Transformations that are applied after first-stage indexing."""
return self._transforms
@property
def dataset(self):
"""Array after first-stage indexing and transformation."""
with self._lock:
if self._dataset is None:
if isinstance(self._orig_dataset, DaskLazyIndexer):
self._orig_dataset = self._orig_dataset.dataset
dataset = dask_getitem(self._orig_dataset, self.keep)
for transform in self.transforms:
dataset = transform(dataset)
self._dataset = dataset
self._orig_dataset = None
return self._dataset
def __getitem__(self, keep):
"""Extract a selected array from the underlying dataset.
This applies the given second-stage index on top of the current
dataset, which already has a first-stage index and optional transforms
applied to it. The indexer also finally stops being lazy and triggers
dask computation to arrive at the output array.
Both indexing stages perform "outer" indexing (aka oindex), which
indexes each dimension independently. This is especially relevant for
advanced or fancy indexing.
Parameters
----------
keep : NumPy index expression
Second-stage index as a valid index or slice specification
(supports arbitrary slicing or advanced indexing on any dimension)
Returns
-------
out : :class:`numpy.ndarray`
Extracted output array (computed from the final dask version)
"""
return self.get([self], keep)[0]
@classmethod
def get(cls, arrays, keep, out=None):
"""Extract several arrays from the underlying dataset.
This is a variant of :meth:`__getitem__` that pulls from several arrays
jointly. This can be significantly more efficient if intermediate dask
nodes can be shared.
Parameters
----------
arrays : list of :class:`DaskLazyIndexer`
Arrays to index
keep : NumPy index expression
Second-stage index as a valid index or slice specification
(supports arbitrary slicing or advanced indexing on any dimension)
out : list of :class:`np.ndarray`
If specified, output arrays in which to store results. It must be
the same length as `arrays` and each array must have the
appropriate shape and dtype.
Returns
-------
out : sequence of :class:`numpy.ndarray`
Extracted output array (computed from the final dask version)
"""
kept = [dask_getitem(array.dataset, keep) for array in arrays]
# Workaround for https://github.com/dask/dask/issues/7187
# This is equivalent to da.compute(kept), but does not allocate
# excessive memory and is potentially faster.
if out is None:
out = [np.empty(array.shape, array.dtype) for array in kept]
da.store(kept, out, lock=False)
return out
def __len__(self):
"""Length operator."""
return self.shape[0]
def __iter__(self):
"""Iterator."""
for index in range(len(self)):
yield self[index]
def __repr__(self):
"""Short human-friendly string representation of indexer object."""
return "<katdal.{} '{}': shape {}, type {} at {:#x}>".format(
self.__class__.__name__, self.name, self.shape, self.dtype, id(self))
def __str__(self):
"""Verbose human-friendly string representation of indexer object."""
names = [self.name]
names += [_callable_name(transform) for transform in self.transforms]
return ' | '.join(names) + f' -> {self.shape} {self.dtype}'
@property
def shape(self):
"""Shape of array after first-stage indexing and transformation."""
return self.dataset.shape
@property
def dtype(self):
"""Data type of array after first-stage indexing and transformation."""
return self.dataset.dtype
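# Usage sketch (an assumption, not from katdal): both selection stages use
# outer indexing, and the transforms run between the two stages.
def _demo_dask_lazy_indexer():
    x = da.arange(24, chunks=6).reshape(4, 6)
    indexer = DaskLazyIndexer(x, keep=np.s_[1:4], transforms=[lambda a: 10 * a])
    # First stage keeps rows 1-3; the second stage then picks rows 0 and 2 of
    # those (original rows 1 and 3) and every second column:
    return indexer[np.array([0, 2]), ::2]   # [[60, 80, 100], [180, 200, 220]]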
|
|
"""
This module implements the class `truncated_gaussian` which
performs (conditional) UMPU tests for Gaussians
restricted to a set of intervals.
"""
import numpy as np
from ..distributions.pvalue import (norm_pdf,
truncnorm_cdf,
norm_q,
norm_interval,
mp)
from scipy.stats import norm as ndist
from .base import truncated, find_root
class truncated_gaussian(truncated):
"""
>>> from intervals import intervals
>>> I = intervals.intersection(intervals((-1, 6)), \
intervals(( 0, 7)), \
~intervals((1, 4)))
#### THIS TEST SHOULD BE FIXED
>>> distr = trunc_gaussian(I, 3.1, 2.)
>>> print distr.cdf(0)
0.0
>>> z = distr.quantile(distr.cdf(5.))
>>> np.abs(z - 5) < 1e-2
True
"""
def __init__(self, I, mu=0, scale = 1.):
"""
Create a new object for a truncated_gaussian distribution
Parameters
----------
I : intervals
The intervals the distribution is truncated to.
        mu : float
Mean of Gaussian that is truncated.
scale : float
SD of Gaussian that is truncated.
"""
        self._mu = mu
        self._scale = scale
truncated.__init__(self, I)
def _cdf_notTruncated(self, a, b, dps):
"""
Compute the probability of being in the interval (a, b)
for a variable with a Gaussian distribution (not truncated)
Parameters
----------
a, b : float
Bounds of the interval. Can be infinite.
dps : int
Decimal precision (decimal places). Used in mpmath
Returns
-------
p : float
The probability of being in the intervals (a, b)
P( a < X < b)
for a non truncated variable
"""
scale = self._scale
mu = self._mu
dps_temp = mp.mp.dps
mp.mp.dps = dps
        val = norm_interval((a-mu)/scale,
                            (b-mu)/scale)
mp.mp.dps = dps_temp
return val
def _pdf_notTruncated(self, z, dps):
scale = self._scale
mu = self._mu
dps_temp = mp.mp.dps
mp.mp.dps = dps
        val = norm_pdf((z - mu) / scale)
mp.mp.dps = dps_temp
return val
def _quantile_notTruncated(self, q, tol=1.e-6):
"""
Compute the quantile for the non truncated distribution
Parameters
----------
q : float
quantile you want to compute. Between 0 and 1
tol : float
precision for the output
Returns
-------
x : float
x such that P(X < x) = q
"""
        scale = self._scale
        mu = self._mu
        dps_temp = mp.mp.dps
        mp.mp.dps = 15  # default working precision; `dps` was previously undefined here
        val = mu + scale * norm_q(q)
        mp.mp.dps = dps_temp
        return val
class truncated_gaussian_old(object):
"""
    A Gaussian distribution, truncated to a union of intervals.
"""
def __init__(self, intervals, mu=0, sigma=1):
intervals = np.unique(intervals)
intervals = np.asarray(intervals).reshape(-1)
# makes assumption intervals are disjoint
# and the sorted endpoints give the correct
# set of intervals...
self._cutoff_array = np.sort(intervals)
D = self.intervals[:,1]-self.intervals[:,0]
I = self.intervals[D != 0]
self._cutoff_array = I.reshape(-1)
self._mu = mu
self._sigma = sigma
self._mu_or_sigma_changed()
def __array__(self):
return self.intervals
@property
def intervals(self):
return self._cutoff_array.reshape((-1,2))
@property
def negated(self):
if not hasattr(self,"_negated"):
            self._negated = truncated_gaussian_old(np.asarray(-self._cutoff_array[::-1]),
                                                   mu=-self.mu,
                                                   sigma=self.sigma)
return self._negated
# private method to update P and D after a change of parameters
def _mu_or_sigma_changed(self):
mu, sigma = self.mu, self.sigma
self.P = np.array([norm_interval((a-mu)/sigma,
(b-mu)/sigma)
for a, b in self.intervals])
self.D = np.array([(norm_pdf((a-mu)/sigma),
norm_pdf((b-mu)/sigma))
for a, b in self.intervals])
# mean parameter : mu
def set_mu(self, mu):
self._mu = mu
self._mu_or_sigma_changed()
def get_mu(self):
return self._mu
mu = property(get_mu, set_mu)
# variance parameter : sigma
def set_sigma(self, sigma):
self._sigma = sigma
self._mu_or_sigma_changed()
def get_sigma(self):
return self._sigma
sigma = property(get_sigma, set_sigma)
@property
def delta(self):
r"""
.. math::
\begin{align}
\delta_\mu(a,b) &\triangleq \int_a^b x\phi(x-\mu)\,dx \\
&= - \phi(b-\mu) + \phi(a-\mu) +
\mu\left(\Phi(b-\mu)-\Phi(a-\mu)\right),
\end{align}
"""
mu, P, D = self.mu, self.P, self.D
return D[:,0] - D[:,1] + mu * P
# End of properties
@staticmethod
def twosided(thresh, mu=0, sigma=1):
thresh = np.fabs(thresh)
        return truncated_gaussian_old([(-np.inf,-thresh),(thresh,np.inf)],
                                      mu=mu, sigma=sigma)
def __repr__(self):
return '''%s(%s, mu=%0.3e, sigma=%0.3e)''' % (self.__class__.__name__,
self.intervals,
self.mu,
self.sigma)
def cdf(self, observed):
P, mu, sigma = self.P, self.mu, self.sigma
z = observed
k = int(np.floor((self.intervals <= observed).sum() / 2))
if k < self.intervals.shape[0]:
if observed > self.intervals[k,0]:
return (P[:k].sum() +
(norm_interval((self.intervals[k,0] - mu) / sigma,
(observed - mu) / sigma))
) / P.sum()
else:
return P[:k].sum() / P.sum()
else:
return 1.
def quantile(self, q):
P, mu, sigma = self.P, self.mu, self.sigma
Psum = P.sum()
Csum = np.cumsum(np.array([0]+list(P)))
        try:
            k = max(np.nonzero(Csum < Psum*q)[0])
        except ValueError:
            if np.isnan(q):
                raise TruncatedGaussianError('invalid quantile')
            raise
pnorm_increment = Psum*q - Csum[k]
if np.mean(self.intervals[k]) < 0:
return mu + norm_q(norm_interval(-np.inf,(self.intervals[k,0]-mu)/sigma) + pnorm_increment) * sigma
else:
return mu - norm_q(norm_interval((self.intervals[k,0]-mu)/sigma, np.inf) - pnorm_increment) * sigma
# make a function for vector version?
def right_endpoint(self, left_endpoint, alpha):
c1 = left_endpoint # shorthand from Will's code
mu, P = self.mu, self.P
alpha1 = self.cdf(left_endpoint)
if (alpha1 > alpha):
return np.nan
alpha2 = np.array(alpha - alpha1)
return self.quantile(1-alpha2)
def G(self, left_endpoint, alpha):
"""
$g_{\mu}$ from Will's code
"""
c1 = left_endpoint # shorthand from Will's code
mu, P, D = self.mu, self.P, self.D
const = np.array(1-alpha)*(np.sum(D[:,0]-D[:,1]) + mu*P.sum())
right_endpoint = float(self.right_endpoint(left_endpoint, alpha))
if np.isnan(right_endpoint):
return np.inf
valid_intervals = []
for a, b in self.intervals:
intersection = (max(left_endpoint, a),
min(right_endpoint, b))
if intersection[1] > intersection[0]:
valid_intervals.append(intersection)
if valid_intervals:
            return truncated_gaussian_old(valid_intervals, mu=self.mu, sigma=self.sigma).delta.sum() - const
return 0
def dG(self, left_endpoint, alpha):
"""
$gg_{\mu}$ from Will's code
"""
c1 = left_endpoint # shorthand from Will's code
D = self.D
return (self.right_endpoint(left_endpoint, alpha) -
left_endpoint) * norm_pdf((left_endpoint - self.mu) /
self.sigma)
def equal_tailed_interval(self, observed, alpha):
old_mu = self.mu
lb = self.mu - 20. * self.sigma
ub = self.mu + 20. * self.sigma
def F(param):
self.mu = param
return self.cdf(observed)
L = find_root(F, 1.0 - 0.5 * alpha, lb, ub)
U = find_root(F, 0.5 * alpha, lb, ub)
self.mu = old_mu
return np.array([L, U])
def UMAU_interval(self, observed, alpha,
mu_lo=None,
mu_hi=None,
tol=1.e-8):
old_mu = self.mu
try:
upper = _UMAU(observed,
alpha, self,
mu_lo=mu_lo,
mu_hi=mu_hi,
tol=tol)
except TruncatedGaussianError:
upper = np.inf
tg_neg = self.negated
try:
lower = -_UMAU(-observed,
alpha, tg_neg,
mu_lo=mu_hi,
mu_hi=mu_lo,
tol=tol)
except:
lower = -np.inf
self.mu, self.negated.mu = old_mu, old_mu
return np.array([lower, upper])
def G(left_endpoints, mus, alpha, tg):
"""
Compute the $G$ function of `tg(intervals)` over
`zip(left_endpoints, mus)`.
A copy is made of `tg` and its $(\mu,\sigma)$ are not modified.
"""
    tg = truncated_gaussian_old(tg.intervals, sigma=tg.sigma)
results = []
for left_endpoint, mu in zip(left_endpoints, mus):
tg.mu = mu
results.append(tg.G(left_endpoint, alpha))
return np.array(results)
def dG(left_endpoints, mus, alpha, tg):
"""
    Compute the $dG$ function (derivative of $G$) of `tg(intervals)` over
`zip(left_endpoints, mus)`.
A copy is made of `tg` and its $(\mu,\sigma)$ are not modified.
"""
    tg = truncated_gaussian_old(tg.intervals, sigma=tg.sigma)
results = []
for left_endpoint, mu in zip(left_endpoints, mus):
tg.mu = mu
results.append(tg.dG(left_endpoint, alpha))
return np.array(results)
class TruncatedGaussianError(ValueError):
pass
def _UMAU(observed, alpha, tg,
mu_lo=None,
mu_hi=None,
tol=1.e-8):
    tg = truncated_gaussian_old(tg.intervals, sigma=tg.sigma)
X = observed # shorthand
if mu_lo is None:
mu_lo = X
if mu_hi is None:
mu_hi = X + 2
# find upper and lower points for bisection
tg.mu = mu_lo
while tg.G(X, alpha) < 0: # mu_too_high
mu_lo, mu_hi = mu_lo - 2, mu_lo
tg.mu = mu_lo
tg.mu = mu_hi
while tg.G(X, alpha) > 0: # mu_too_low
mu_lo, mu_hi = mu_hi, mu_hi + 2
tg.mu = mu_hi
# bisection
while mu_hi - mu_lo > tol:
mu_bar = 0.5 * (mu_lo + mu_hi)
tg.mu = mu_bar
if tg.G(X, alpha) < 0:
mu_hi = mu_bar
else:
mu_lo = mu_bar
return mu_bar
def find_root(f, y, lb, ub, tol=1e-6):
"""
searches for solution to f(x) = y in (lb, ub), where
f is a monotone decreasing function
"""
# make sure solution is in range
a, b = lb, ub
fa, fb = f(a), f(b)
# assume a < b
if fa > y and fb > y:
while fb > y :
b, fb = b + (b-a), f(b + (b-a))
elif fa < y and fb < y:
while fa < y :
a, fa = a - (b-a), f(a - (b-a))
# determine the necessary number of iterations
max_iter = int( np.ceil( ( np.log(tol) - np.log(b-a) ) / np.log(0.5) ) )
# bisect (slow but sure) until solution is obtained
    for _ in range(max_iter):
c, fc = (a+b)/2, f((a+b)/2)
if fc > y: a = c
elif fc < y: b = c
return c
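# Usage sketch (an assumption, not part of the original module): bisection
# search for a monotone decreasing function.
def _demo_find_root():
    f = lambda x: np.exp(-x)            # monotone decreasing on (0, 5)
    x = find_root(f, 0.5, 0., 5.)       # solve exp(-x) = 0.5
    assert abs(x - np.log(2)) < 1e-4
    return x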
|
|
#!/usr/bin/env python
"""Client utilities common to all platforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import hashlib
import logging
import os
import platform
import subprocess
import threading
import time
from future.utils import itervalues
from grr_response_client.local import binary_whitelist
from grr_response_core import config
from grr_response_core.lib import constants
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
def HandleAlarm(process):
try:
logging.info("Killing child process due to timeout")
process.kill()
# There is a race condition here where the process terminates
# just before it would be killed. We ignore the exception
# in that case as the process is already gone.
except OSError:
pass
def Execute(cmd,
args,
time_limit=-1,
bypass_whitelist=False,
daemon=False,
use_client_context=False,
cwd=None):
"""Executes commands on the client.
This function is the only place where commands will be executed
by the GRR client. This makes sure that all issued commands are compared to a
white list and no malicious commands are issued on the client machine.
Args:
cmd: The command to be executed.
args: List of arguments.
time_limit: Time in seconds the process is allowed to run.
bypass_whitelist: Allow execution of things that are not in the whitelist.
Note that this should only ever be called on a binary that passes the
VerifySignedBlob check.
daemon: Start the new process in the background.
use_client_context: Run this script in the client's context. Defaults to
system context.
cwd: Current working directory for the command.
Returns:
A tuple of stdout, stderr, return value and time taken.
"""
if not bypass_whitelist and not IsExecutionWhitelisted(cmd, args):
# Whitelist doesn't contain this cmd/arg pair
logging.info("Execution disallowed by whitelist: %s %s.", cmd,
" ".join(args))
return (b"", b"Execution disallowed by whitelist.", -1, -1)
if daemon:
pid = os.fork()
if pid == 0:
# This is the child, it will run the daemon process. We call os.setsid
# here to become the session leader of this new session and the process
# group leader of the new process group so we don't get killed when the
# main process exits.
try:
os.setsid()
except OSError:
# This only works if the process is running as root.
pass
_Execute(
cmd, args, time_limit, use_client_context=use_client_context, cwd=cwd)
os._exit(0) # pylint: disable=protected-access
else:
return _Execute(
cmd, args, time_limit, use_client_context=use_client_context, cwd=cwd)
def _Execute(cmd, args, time_limit=-1, use_client_context=False, cwd=None):
"""Executes cmd."""
run = [cmd]
run.extend(args)
env = os.environ.copy()
if use_client_context:
env.pop("LD_LIBRARY_PATH", None)
env.pop("PYTHON_PATH", None)
context = "client"
else:
context = "system"
logging.info("Executing %s in %s context.", " ".join(run), context)
p = subprocess.Popen(
run,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd)
alarm = None
if time_limit > 0:
alarm = threading.Timer(time_limit, HandleAlarm, (p,))
alarm.setDaemon(True)
alarm.start()
stdout, stderr, exit_status = b"", b"", -1
start_time = time.time()
try:
stdout, stderr = p.communicate()
exit_status = p.returncode
except IOError:
# If we end up here, the time limit was exceeded
pass
finally:
if alarm:
alarm.cancel()
alarm.join()
return (stdout, stderr, exit_status, time.time() - start_time)
def IsExecutionWhitelisted(cmd, args):
"""Check if a binary and args is whitelisted.
Args:
cmd: Canonical path to the binary.
args: List of arguments to be passed to the binary.
Returns:
Bool, True if it is whitelisted.
These whitelists could also go in the platform specific client files
client_utils_<platform>.py. We chose to leave them here instead of putting
them in global arrays to discourage people coding other modules from adding
new commands to the whitelist before running them.
The idea is to have a single place that lists every command we can run during
normal operation (obviously doesn't catch the special cases where we bypass
the list).
A deployment-specific list is also checked (see local/binary_whitelist.py).
"""
if platform.system() == "Windows":
whitelist = [
("arp.exe", ["-a"]),
("driverquery.exe", ["/v"]),
("ipconfig.exe", ["/all"]),
("netsh.exe", ["advfirewall", "firewall", "show", "rule", "name=all"]),
("netsh.exe",
["advfirewall", "monitor", "show", "firewall", "rule", "name=all"]),
("tasklist.exe", ["/SVC"]),
("tasklist.exe", ["/v"]),
]
elif platform.system() == "Linux":
whitelist = [
("/bin/df", []),
("/bin/echo", ["1"]),
("/bin/rpm", ["-qa"]),
("/bin/sleep", ["10"]),
("/sbin/auditctl", ["-l"]),
("/sbin/ifconfig", ["-a"]),
("/sbin/iptables", ["-L", "-n", "-v"]),
("/sbin/lsmod", []),
("/usr/bin/dpkg", ["--list"]),
("/usr/bin/last", []),
("/usr/bin/yum", ["list", "installed", "-q"]),
("/usr/bin/yum", ["repolist", "-v", "-q"]),
("/usr/bin/who", []),
("/usr/sbin/arp", ["-a"]),
("/usr/sbin/dmidecode", ["-q"]),
("/usr/sbin/sshd", ["-T"]),
]
elif platform.system() == "Darwin":
whitelist = [
("/bin/echo", ["1"]),
("/bin/launchctl", ["unload", config.CONFIG["Client.plist_path"]]),
("/usr/bin/hdiutil", ["info"]),
("/usr/bin/last", []),
("/usr/bin/who", []),
("/usr/sbin/arp", ["-a"]),
("/usr/sbin/kextstat", []),
("/usr/sbin/system_profiler", ["-xml", "SPHardwareDataType"]),
("/usr/libexec/firmwarecheckers/ethcheck/ethcheck", ["--show-hashes"]),
]
else:
whitelist = []
for (allowed_cmd, allowed_args) in whitelist:
if cmd == allowed_cmd and args == allowed_args:
return True
# Check if this is whitelisted locally.
if binary_whitelist.IsExecutionWhitelisted(cmd, args):
return True
return False
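# Usage sketch (an assumption, not part of GRR): only exact (cmd, args) pairs
# from the platform whitelist pass; the outcome depends on platform.system().
def _DemoWhitelistCheck():
  allowed = IsExecutionWhitelisted("/bin/echo", ["1"])      # True on Linux
  blocked = IsExecutionWhitelisted("/bin/echo", ["pwned"])  # False everywhere
  return allowed, blocked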
class MultiHasher(object):
"""An utility class that is able to applies multiple hash algorithms.
Objects that need to construct `Hash` object with multiple hash values need
to apply multiple hash algorithms to the given data. This class removes some
boilerplate associated with it and provides a readable API similar to the one
exposed by Python's `hashlib` module.
Args:
algorithms: List of names of the algorithms from the `hashlib` module that
need to be applied.
progress: An (optional) progress callback called when hashing functions are
applied to the data.
"""
def __init__(self, algorithms=None, progress=None):
if not algorithms:
algorithms = ["md5", "sha1", "sha256"]
self._hashers = {}
for algorithm in algorithms:
self._hashers[algorithm] = hashlib.new(algorithm)
self._bytes_read = 0
self._progress = progress
def HashFilePath(self, path, byte_count):
"""Updates underlying hashers with file on a given path.
Args:
path: A path to the file that is going to be fed to the hashers.
byte_count: A maximum numbers of bytes that are going to be processed.
"""
with open(path, "rb") as fd:
self.HashFile(fd, byte_count)
def HashFile(self, fd, byte_count):
"""Updates underlying hashers with a given file.
Args:
fd: A file object that is going to be fed to the hashers.
byte_count: A maximum number of bytes that are going to be processed.
"""
while byte_count > 0:
buf_size = min(byte_count, constants.CLIENT_MAX_BUFFER_SIZE)
buf = fd.read(buf_size)
if not buf:
break
self.HashBuffer(buf)
byte_count -= buf_size
def HashBuffer(self, buf):
"""Updates underlying hashers with a given buffer.
Args:
buf: A byte buffer (string object) that is going to be fed to the hashers.
"""
for hasher in itervalues(self._hashers):
hasher.update(buf)
if self._progress:
self._progress()
self._bytes_read += len(buf)
def GetHashObject(self):
"""Returns a `Hash` object with appropriate fields filled-in."""
hash_object = rdf_crypto.Hash()
hash_object.num_bytes = self._bytes_read
for algorithm in self._hashers:
setattr(hash_object, algorithm, self._hashers[algorithm].digest())
return hash_object
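# Usage sketch (an assumption, not part of GRR): feed a buffer to several hash
# algorithms at once and collect the digests in a single `Hash` object.
def _DemoMultiHasher():
  hasher = MultiHasher(algorithms=["md5", "sha256"])
  hasher.HashBuffer(b"example data")
  return hasher.GetHashObject()  # rdf_crypto.Hash with md5/sha256 digests set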
|
|
#-------------------------------------------------------------------------------
# Name: opan_utils_vector
# Purpose: Test objects for opan.utils.vector
#
# Author: Brian Skinn
# bskinn@alum.mit.edu
#
# Created: 19 Apr 2016
# Copyright: (c) Brian Skinn 2016
# License: The MIT License; see "license.txt" for full license terms
# and contributor agreement.
#
# This file is part of opan (Open Anharmonic), a system for automated
# computation of anharmonic properties of molecular systems via wrapper
# calls to computational/quantum chemical software packages.
#
# http://www.github.com/bskinn/opan
#
#-------------------------------------------------------------------------------
import unittest
from opan.test.utils import inject_tests
class TestOpanUtilsVectorParallelCheck(unittest.TestCase):
def test_Utils_Vector_ParallelCheck_Good(self):
from opan.utils.vector import parallel_check as pc
import numpy as np
# Parallel vectors
self.assertTrue(pc(np.array([1, 2, 3]),
np.array([1.5, 3, 4.5])))
# Anti-parallel vectors
self.assertTrue(pc(np.array([-1, 5.3, 3.1]),
np.array([3, -15.9, -9.3])))
# Non-(anti-)parallel vectors
self.assertFalse(pc(np.array([4.8, 0.35, -1.822]),
np.array([-1.3, 3.77, 19.14])))
def test_Utils_Vector_ParallelCheck_BadShape(self):
from opan.utils.vector import parallel_check as pc
import numpy as np
self.assertRaises(ValueError, pc,
np.array([[1, 2, 3], [3, 2, 1]]),
np.array([2, 4, 9]))
def test_Utils_Vector_ParallelCheck_LenMismatch(self):
from opan.utils.vector import parallel_check as pc
import numpy as np
self.assertRaises(ValueError, pc,
np.array(range(3)),
np.array(range(4)))
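# Reference sketch (an assumption, not part of opan): the PROJ/REJ/ANG values
# tabulated in the class below can be reproduced with plain numpy.
def _demo_proj_rej_ang(v1, v2):
    import numpy as np
    proj = (np.dot(v1, v2) / np.dot(v2, v2)) * v2   # projection of v1 onto v2
    rej = v1 - proj                                 # rejection (orthogonal part)
    cos_ang = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
    ang = np.degrees(np.arccos(np.clip(cos_ang, -1.0, 1.0)))
    return proj, rej, ang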
class TestOpanUtilsVectorProjRejAngle(unittest.TestCase):
import numpy as np
from opan.const import OpanEnum
# Enum for the types of data stored for calculations
class DType(OpanEnum):
V1 = 'V1'
V2 = 'V2'
PROJ = 'PROJ'
REJ = 'REJ'
ANG = 'ANG'
# Enum for the types of vector pairs being tested
class VecType(OpanEnum): # Types of vectors
O1 = 'O1' # Both order-one
LOL = 'LOL' # Both large (large on large)
SOS = 'SOS' # Both small (small on small)
LOS = 'LOS' # Large onto small
SOL = 'SOL' # Small onto large
BS = 'BS' # Badly-scaled
# Enum for the relationship between the vectors being tested
class RelType(OpanEnum): # Type of vector relationship
NS = 'NS' # Nonspecific
PAR = 'PAR' # Nearly parallel
NORM = 'NORM' # Nearly normal
AP = 'AP' # Nearly anti-parallel
# Substitution string for naming the vector datasets
namestr = "{0}_{1}"
# Dict of dicts of data
data = {
# Unremarkable vectors with ~order-one components
namestr.format(RelType.NS, VecType.O1):
{DType.V1: np.array([1, 2, 3]),
DType.V2: np.array([-1, 3, 8]),
DType.PROJ: np.array([-0.391892, 1.175676, 3.135135]),
DType.REJ: np.array([1.391892, 0.824324, -0.135135]),
DType.ANG: np.float_(25.712002)},
# Nearly-normal vectors with ~order-one components
namestr.format(RelType.NORM, VecType.O1):
{DType.V1: np.array([2, 8, -4, 2.5, -1.4]),
DType.V2: np.array([-1, 3, 6.2, 5, 7]),
DType.PROJ: np.array([0.000817, -0.002450, -0.005064,
-0.004084, -0.005717]),
DType.REJ: np.array([1.999183, 8.002450, -3.994936,
2.504084, -1.394283]),
DType.ANG: np.float_(90.053923)},
# Nearly-parallel vectors with ~order-one components
namestr.format(RelType.PAR, VecType.O1):
{DType.V1: np.array([1, 2, 2.9999, 4]),
DType.V2: np.array([1.0001, 2, 3, 4]),
DType.PROJ: np.array([1.000087, 1.999973, 2.999960, 3.999947]),
DType.REJ: np.array([-0.000087, 0.000027,
-0.000060, 0.000053]),
DType.ANG: np.float_(0.001267)},
# Nearly-antiparallel vectors with ~order-one components
namestr.format(RelType.AP, VecType.O1):
{DType.V1: np.array([-0.5, 2.3, 1.4, -3.1]),
DType.V2: np.array([0.50001, -2.29999, -1.4, 3.1]),
DType.PROJ: np.array([-0.500011, 2.299992,
1.400001, -3.100003]),
DType.REJ: np.array([0.000011, 0.000008,
-0.000001, 0.000003]),
DType.ANG: np.float_(179.999814)},
# Two large vectors far from parallel/normal
namestr.format(RelType.NS, VecType.LOL):
{DType.V1: np.array([376328., 384874.,
992834., 182873.]),
DType.V2: np.array([538344., 283747.,
1838447., 929292.]),
DType.PROJ: np.array([269185.658799, 141880.699195,
919270.144855, 464669.577884]),
DType.REJ: np.array([107142.341201, 242993.300805,
73563.855145, -281796.577884]),
DType.ANG: np.float_(20.151554)},
# Two small vectors far from parallel/normal
namestr.format(RelType.NS, VecType.SOS):
{DType.V1: np.array([0.000045, -0.00031,
-0.000915, 0.000002]),
DType.V2: np.array([0.0002874, -0.0003987,
0.0000034, 0.000719]),
DType.PROJ: np.array([0.000051, -0.000071,
0.000001, 0.000128]),
DType.REJ: np.array([-0.000006, -0.000239,
-0.000916, -0.000126]),
DType.ANG: np.float_(80.787151)},
# Large onto small, far from parallel/normal
namestr.format(RelType.NS, VecType.LOS):
{DType.V1: np.array([238973., 239884.,
-1092938., -893983.]),
DType.V2: np.array([0.0002874, -0.0003987,
0.0000034, 0.000719]),
DType.PROJ: np.array([-255163.218951, 353979.037564,
-3018.632375, -638351.963904]),
DType.REJ: np.array([494136.218951, -114095.037564,
-1089919.367625, -255631.036096]),
DType.ANG: np.float_(122.176632)},
# Small onto large, far from parallel/normal
namestr.format(RelType.NS, VecType.SOL):
{DType.V1: np.array([0.00000374, -0.0000233,
0.0002837, 0.0000026]),
DType.V2: np.array([538344., 283747., 1838447., 929292.]),
DType.PROJ: np.array([0.000061, 0.000032,
0.000207, 0.000105]),
DType.REJ: np.array([-0.000057, -0.000055,
0.000077, -0.000102]),
DType.ANG: np.float_(31.859107)},
# Two badly scaled vectors far from parallel/normal
namestr.format(RelType.NS, VecType.BS):
{DType.V1: np.array([0.000015, 6214., -0.000235, 12374.]),
DType.V2: np.array([-0.00005, 38184., 0.000045, 21669.]),
DType.PROJ: np.array([-0.000013, 10011.853795,
0.000012, 5681.616904]),
DType.REJ: np.array([0.000028, -3797.853795,
-0.000247, 6692.383096]),
DType.ANG: np.float_(33.760587)},
# Two large vectors nearly parallel
namestr.format(RelType.PAR, VecType.LOL):
{DType.V1: np.array([554387., 38185., -532247., 12374.]),
DType.V2: np.array([554389., 38184., -532248., 12375.]),
DType.PROJ: np.array([554387.488030, 38183.895862,
-532246.548415, 12374.966250]),
DType.REJ: np.array([-0.488030, 1.104138,
-0.451585, -0.966250]),
DType.ANG: np.float_(0.000120)},
# Two small vectors nearly parallel
namestr.format(RelType.PAR, VecType.SOS):
{DType.V1: np.array([0.000015, 0.000016, -0.000042,
-0.000034, 0.000065, -0.000033]),
DType.V2: np.array([0.000014, 0.000017, -0.000041,
-0.000033, 0.000066, -0.000032]),
DType.PROJ: np.array([0.000014, 0.000017, -0.000041,
-0.000033, 0.000066, -0.000032]),
DType.REJ: np.array([0.000001, -0.000001, -0.000001,
-0.000001, -0.000001, -0.000001]),
DType.ANG: np.float_(1.483536)},
# Large onto small, nearly parallel
namestr.format(RelType.PAR, VecType.LOS):
{DType.V1: np.array([14001., 17002., -41003., -33001.,
66004., -32005., 75008.]),
DType.V2: np.array([0.000014, 0.000017, -0.000041,
-0.000033, 0.000066, -0.000032, 0.000075]),
DType.PROJ: np.array([14001.205610, 17001.463955,
-41003.530715, -33002.841795,
66005.683590, -32002.755680,
75006.458626]),
DType.REJ: np.array([-0.205610, 0.536045, 0.530715,
1.841795, -1.683590, -2.244320,
1.541374]),
DType.ANG: np.float_(0.001811)},
# Small onto large, nearly parallel
namestr.format(RelType.PAR, VecType.SOL):
{DType.V1: np.array([1.20E-05, -1.30E-05, 3.80E-05,
-7.90E-05, -8.50E-05, 2.20E-05,
4.50E-05]),
DType.V2: np.array([12006., -13009., 38007.,
-79008., -85006., 22001.,
45003.]),
DType.PROJ: np.array([0.000012, -0.000013, 0.000038,
-0.000079, -0.000085, 0.000022,
0.000045]),
DType.REJ: np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),
DType.ANG: np.float_(0.004356)},
# Two badly scaled vectors nearly parallel
namestr.format(RelType.PAR, VecType.BS):
{DType.V1: np.array([0.00053, -2135., 0.00015, 65548.,
-0.0085, 0.00022, -17815., -0.00035]),
DType.V2: np.array([0.00054, -2136., 0.00014, 65549.,
-0.0084, 0.00023, -17816., -0.00036]),
DType.PROJ: np.array([0.000540, -2135.960458, 0.000140,
65547.786547, -0.008400, 0.000230,
-17815.670188, -0.000360]),
DType.REJ: np.array([-0.000010, 0.960458, 0.000010,
0.213453, -0.000100, -0.000010,
0.670188,0.000010]),
DType.ANG: np.float_(0.001004)},
# Two large vectors nearly normal
namestr.format(RelType.NORM, VecType.LOL):
{DType.V1: np.array([654564., 48249., -248896.,
-54789., 24444.]),
DType.V2: np.array([-6048., 68589., -34061.,
95549., -108100.]),
DType.PROJ: np.array([11.146008, -126.404358, 62.771856,
-176.089606, 199.220153]),
DType.REJ: np.array([654552.853992, 48375.404358,
-248958.771856, -54612.910394,
24244.779847]),
DType.ANG: np.float_(90.024498)},
# Two small vectors nearly normal
namestr.format(RelType.NORM, VecType.SOS):
{DType.V1: np.array([0.000048, 0.000017, -0.000032,
0.000091, -0.000016]),
DType.V2: np.array([0.000015, 0.000007, 0.00001,
0.000001, 0.000035]),
DType.PROJ: np.array([0.000000, 0.000000, 0.000000,
0.000000, 0.000001]),
DType.REJ: np.array([0.000048, 0.000017, -0.000032,
0.000091, -0.000017]),
DType.ANG: np.float_(89.350346)},
# Large onto small, nearly normal
namestr.format(RelType.NORM, VecType.LOS):
{DType.V1: np.array([32185., 27265., -30185.,
108115., -13755.]),
DType.V2: np.array([0.000015, 0.000007, 0.00001,
0.000001, 0.000035]),
DType.PROJ: np.array([-14.343750, -6.693750, -9.562500,
-0.956250, -33.468750]),
DType.REJ: np.array([32199.343750, 27271.693750,
-30175.437500, 108115.956250,
-13721.531250]),
DType.ANG: np.float_(90.018157)},
# Small onto large, nearly normal
namestr.format(RelType.NORM, VecType.SOL):
{DType.V1: np.array([0.000028, -0.000035, 0.000017,
0.000098, 0.000072, -0.000055]),
DType.V2: np.array([16589., 22852., 44185.,
14273., 24599., 65489.]),
DType.PROJ: np.array([0., 0., 0., 0., 0., 0.]),
DType.REJ: np.array([0.000028, -0.000035, 0.000017,
0.000098, 0.000072, -0.000055]),
DType.ANG: np.float_(90.073867)},
# Two badly scaled vectors nearly normal
namestr.format(RelType.NORM, VecType.BS):
{DType.V1: np.array([0.0015, 6214., 2319., 145.]),
DType.V2: np.array([8285., -0.0004, 0.0034, 2166.]),
DType.PROJ: np.array([35.485053, -0.000002,
0.000015, 9.277082]),
DType.REJ: np.array([-35.483553, 6214.000002,
2318.999985, 135.722918]),
DType.ANG: np.float_(89.683234)},
# Two large vectors nearly anti-parallel
namestr.format(RelType.AP, VecType.LOL):
{DType.V1: np.array([215484., 665452., -654587.,
541887., 64657., -6546347.,
-687887., -1137889.]),
DType.V2: np.array([-215485., -665453., 654589.,
-541882., -64659., 6546378.,
687889., 1137888.]),
DType.PROJ: np.array([215484.046715, 665450.056100,
-654586.104161, 541879.602766,
64658.713955, -6546349.039449,
-687885.956845, -1137882.966092]),
DType.REJ: np.array([-0.046715, 1.943900, -0.895839,
7.397234, -1.713955, 2.039449,
-1.043155, -6.033908]),
DType.ANG: np.float_(179.999914)},
# Two small vectors nearly anti-parallel
namestr.format(RelType.AP, VecType.SOS):
{DType.V1: np.array([0.000041, -0.000038, 0.000091,
-0.000019, 0.000037, -0.000068,
-0.000071, -0.000055]),
DType.V2: np.array([-0.000041, 0.000039, -0.00009,
0.000019, -0.000036, 0.000068,
0.00007, 0.000055]),
DType.PROJ: np.array([0.000041, -0.000039, 0.000091,
-0.000019, 0.000036, -0.000068,
-0.000070, -0.000055]),
DType.REJ: np.array([0.000000, 0.000001, 0.000000,
0.000000, 0.000001, 0.000000,
-0.000001, 0.000000]),
DType.ANG: np.float_(179.379006)},
# Large onto small, nearly anti-parallel
namestr.format(RelType.AP, VecType.LOS):
{DType.V1: np.array([41002., -38997., 90004., -18997.,
36001., -68002., -70003., -54988.]),
DType.V2: np.array([-0.000041, 0.000039, -0.00009,
0.000019, -0.000036, 0.000068,
0.00007, 0.000055]),
DType.PROJ: np.array([40999.983927, -38999.984711,
89999.964717, -18999.992551,
35999.985887, -67999.973342,
-69999.972558, -54999.978438]),
DType.REJ: np.array([2.016073, 2.984711, 4.035283,
2.992551, 1.014113, -2.026658,
-3.027442, 11.978438]),
DType.ANG: np.float_(179.994978)},
# Small onto large, nearly anti-parallel
namestr.format(RelType.AP, VecType.SOL):
{DType.V1: np.array([0.000038, -0.000044, 0.000057,
0.000098, 0.000089, -0.000072,
-0.000022, 0.000025, -0.000017]),
DType.V2: np.array([-38007., 44009., -56995.,
-98004., -88999., 71995.,
22002., -25001., 16996.]),
DType.PROJ: np.array([0.000038, -0.000044, 0.000057,
0.000098, 0.000089, -0.000072,
-0.000022, 0.000025, -0.000017]),
DType.REJ: np.array([0., 0., 0., 0., 0., 0., 0., 0., 0.]),
DType.ANG: np.float_(179.995212)},
# Two badly scaled vectors nearly anti-parallel
namestr.format(RelType.AP, VecType.BS):
{DType.V1: np.array([25778., -35778., 0.000032, -47789.,
-0.000038, 0.000041, 24448., -35779.,
-0.000017]),
DType.V2: np.array([-25779., 35772., -0.000031, 47788.,
0.000038, -0.000041, -24444., 35777.,
0.000016]),
DType.PROJ: np.array([25780.714146, -35774.378619, 0.000031,
-47791.177610, -0.000038, 0.000041,
24445.625376, -35779.378952, -0.000016]),
DType.REJ: np.array([-2.714146, -3.621381, 0.000001,
2.177610, 0.000000, 0.000000,
2.374624, 0.378952, -0.000001]),
DType.ANG: np.float_(179.995917)}
}
# Template functions
# Vector projection template
def template_proj(self, name, data):
from opan.utils.vector import proj
v1 = data[self.DType.V1]
v2 = data[self.DType.V2]
p = proj(v1, v2)
for i, t in enumerate(zip(p, data[self.DType.PROJ])):
self.assertAlmostEqual(*t, delta=1e-6,
msg="Test {0}: Index {1}; V1 = {2}; V2 = {3}"
.format(name, i, v1, v2))
# Vector rejection template
def template_rej(self, name, data):
from opan.utils.vector import rej
v1 = data[self.DType.V1]
v2 = data[self.DType.V2]
r = rej(v1, v2)
for i, t in enumerate(zip(r, data[self.DType.REJ])):
self.assertAlmostEqual(*t, delta=1e-6,
msg="Test {0}: Index {1}; V1 = {2}; V2 = {3}"
.format(name, i, v1, v2))
# Vector angle template
def template_angle(self, name, data):
from opan.utils.vector import vec_angle
v1 = data[self.DType.V1]
v2 = data[self.DType.V2]
a = vec_angle(v1, v2)
self.assertAlmostEqual(a, data[self.DType.ANG], delta=1e-6,
msg="Test {0}: V1 = {1}; V2 = {2}"
.format(name, v1, v2))
# Populate the local namespace with the auto-generated
# test methods
_locals = locals()
inject_tests(_locals, data, "test_Vector_Proj_Good_{0}", template_proj)
inject_tests(_locals, data, "test_Vector_Rej_Good_{0}", template_rej)
inject_tests(_locals, data, "test_Vector_Angle_Good_{0}", template_angle)
def setUp(self):
self.longMessage = True
def test_Utils_Vector_Proj_BadVec_NotVector(self):
import numpy as np
from opan.utils.vector import proj
self.assertRaises(ValueError, proj,
np.array(range(16)).reshape((4, 4)),
np.array(range(16)))
def test_Utils_Vector_Proj_BadVecOnto_NotVector(self):
import numpy as np
from opan.utils.vector import proj
self.assertRaises(ValueError, proj,
np.array(range(16)),
np.array(range(16)).reshape((4, 4)))
def test_Utils_Vector_Proj_BadVecsShapeMismatch(self):
import numpy as np
from opan.utils.vector import proj
self.assertRaises(ValueError, proj,
np.array(range(5)), np.array(range(6)))
def test_Utils_Vector_Angle_BadVecShapes(self):
import numpy as np
from opan.utils.vector import vec_angle as ang
self.assertRaises(ValueError, ang,
np.array(range(6)).reshape((2,3)),
np.array(range(6)))
self.assertRaises(ValueError, ang,
np.array(range(6)),
np.array(range(6)).reshape((3,2)))
def test_Utils_Vector_Angle_VecLenMismatch(self):
import numpy as np
from opan.utils.vector import vec_angle as ang
self.assertRaises(ValueError, ang,
np.array(range(4)), np.array(range(12)))
def test_Utils_Vector_Angle_VecNormTooSmall(self):
import numpy as np
from opan.utils.vector import vec_angle as ang
self.assertRaises(ValueError, ang,
1e-12* np.array(range(4)),
np.array(range(-3,1)))
self.assertRaises(ValueError, ang,
np.array(range(4)),
1e-12 * np.array(range(-3,1)))
class TestOpanUtilsVectorOrthoBasis(unittest.TestCase):
def setUp(self):
self.longMessage = True
def test_Utils_Vector_OrthoBasis_GoodRandomRefVec(self):
import numpy as np
from opan.utils.vector import ortho_basis as ob
from opan.utils.vector import vec_angle as ang
nv = [8.5, -3.15, -2.1884]
on1, on2 = ob(nv)
self.assertAlmostEqual(np.dot(nv, on1), 0.0,
delta=1e-10,
msg="Normal vector not normal to first basis vector")
self.assertAlmostEqual(np.dot(nv, on2), 0.0,
delta=1e-10,
msg="Normal vector not normal to second basis vector")
self.assertAlmostEqual(np.dot(on1, on2), 0.0,
delta=1e-10,
msg="Basis vectors not normal")
self.assertAlmostEqual(np.sum(np.power(on1, 2.)), 1.0,
delta=1e-10,
msg="First basis vector not normalized")
self.assertAlmostEqual(np.sum(np.power(on2, 2.)), 1.0,
delta=1e-10,
msg="Second basis vector not normalized")
self.assertAlmostEqual(ang(nv, np.cross(on1, on2)), 0.0,
delta=1e-5,
msg="Incorrect handedness of basis vectors")
def test_Utils_Vector_OrthoBasis_GoodDefinedRefVec(self):
import numpy as np
from opan.utils.vector import ortho_basis as ob
from opan.utils.vector import vec_angle as ang
from opan.utils.vector import rej
nv = [2.5, 31.15, -25.1884]
rv = [-12.15, 0.0034, 35.18]
on1, on2 = ob(nv, rv)
self.assertAlmostEqual(np.dot(nv, on1), 0.0,
delta=1e-10,
msg="Normal vector not normal to first basis vector")
self.assertAlmostEqual(np.dot(nv, on2), 0.0,
delta=1e-10,
msg="Normal vector not normal to second basis vector")
self.assertAlmostEqual(np.dot(on1, on2), 0.0,
delta=1e-10,
msg="Basis vectors not normal")
self.assertAlmostEqual(np.sum(np.power(on1, 2.)), 1.0,
delta=1e-10,
msg="First basis vector not normalized")
self.assertAlmostEqual(np.sum(np.power(on2, 2.)), 1.0,
delta=1e-10,
msg="Second basis vector not normalized")
self.assertAlmostEqual(ang(nv, np.cross(on1, on2)), 0.0,
delta=1e-5,
msg="Incorrect handedness of basis vectors")
self.assertAlmostEqual(ang(rej(rv, nv), on1), 0.0,
delta=1e-5,
msg="First basis vector not aligned with reference")
def test_Utils_Vector_OrthoBasis_TooParallelRefVec(self):
import numpy as np
from opan.utils.vector import ortho_basis as ob
from opan.test.utils import assertErrorAndTypecode
from opan.error import VectorError as VErr
nv = np.array([2.81, -3.855, -12.384])
assertErrorAndTypecode(self, VErr, ob, VErr.NONPRL,
nv, nv*1.1111)
def test_Utils_Vector_OrthoBasis_BadNormArrayShape(self):
import numpy as np
from opan.utils.vector import ortho_basis as ob
with self.assertRaises(ValueError):
ob(np.array(range(9)).reshape((3,3)))
def test_Utils_Vector_OrthoBasis_BadNormArrayLen(self):
import numpy as np
from opan.utils.vector import ortho_basis as ob
with self.assertRaises(ValueError):
ob(np.array(range(6)))
def test_Utils_Vector_OrthoBasis_BadRefArrayShape(self):
import numpy as np
from opan.utils.vector import ortho_basis as ob
with self.assertRaises(ValueError):
ob(np.array(range(1,4)), np.array(range(9)).reshape((3,3)))
def test_Utils_Vector_OrthoBasis_BadRefArrayLen(self):
import numpy as np
from opan.utils.vector import ortho_basis as ob
with self.assertRaises(ValueError):
ob(np.array(range(1,4)), np.array(range(6)))
class TestOpanUtilsVectorOrthonormCheck(unittest.TestCase):
def setUp(self):
self.longMessage = True
def test_Utils_Vector_ONCheck_Trivial1D(self):
import numpy as np
from scipy import linalg as spla
from opan.utils.vector import orthonorm_check as onchk
vec = np.array(range(15))
vec = vec / spla.norm(vec)
c, nf, of = onchk(vec)
self.assertTrue(c)
self.assertIsNone(nf)
self.assertIsNone(of)
c, nf, of = onchk(vec, report=True)
self.assertTrue(c)
self.assertEqual(len(nf), 0)
self.assertEqual(len(of), 0)
def test_Utils_Vector_ONCheck_Trivial2D(self):
import numpy as np
from scipy import linalg as spla
from opan.utils.vector import orthonorm_check as onchk
from opan.utils.vector import ortho_basis as ob
vec = np.array([3.18, -2.25, 1.0005])
on1, on2 = ob(vec)
m = np.column_stack((vec / spla.norm(vec), on1, on2))
c, nf, of = onchk(m)
self.assertTrue(c)
self.assertIsNone(nf)
self.assertIsNone(of)
c, nf, of = onchk(m, report=True)
self.assertTrue(c)
self.assertEqual(len(nf), 0)
self.assertEqual(len(of), 0)
def test_Utils_Vector_ONCheck_NonNormedVec(self):
import numpy as np
from opan.utils.vector import orthonorm_check as onchk
from opan.utils.vector import ortho_basis as ob
vec = np.array([-2.112, 2923.3, -0.2323])
on1, on2 = ob(vec)
m = np.column_stack((vec, on1, on2))
c, nf, of = onchk(m)
self.assertFalse(c)
self.assertIsNone(nf)
self.assertIsNone(of)
c, nf, of = onchk(m, report=True)
self.assertFalse(c)
self.assertEqual(len(of), 0)
self.assertEqual(len(nf), 1)
self.assertEqual(nf[0], 0)
def test_Utils_Vector_ONCheck_SkewedVec(self):
import numpy as np
from scipy import linalg as spla
from opan.utils.vector import orthonorm_check as onchk
from opan.utils.vector import ortho_basis as ob
vec = np.array([-6.277, 1.345, -23.8734])
offvec = np.subtract(vec, np.array([-3, 5, -12]))
offvec = offvec / spla.norm(offvec)
on1, on2 = ob(vec)
m = np.column_stack((offvec, on1, on2))
c, nf, of = onchk(m, report=True)
self.assertFalse(c)
self.assertEqual(len(nf), 0)
self.assertEqual(len(of), 2)
self.assertEqual(of[0], (0,1))
self.assertEqual(of[1], (0,2))
def suite():
s = unittest.TestSuite()
tl = unittest.TestLoader()
s.addTests([tl.loadTestsFromTestCase(TestOpanUtilsVectorParallelCheck),
tl.loadTestsFromTestCase(TestOpanUtilsVectorProjRejAngle),
tl.loadTestsFromTestCase(TestOpanUtilsVectorOrthoBasis),
tl.loadTestsFromTestCase(TestOpanUtilsVectorOrthonormCheck)
])
return s
if __name__ == '__main__': # pragma: no cover
print("Module not executable.")
|
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or bowscoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
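    # ConfigParser requires at least one [section] header, which
    # bitcoin.conf does not have; FakeSecHead wraps the file object and
    # injects a fake [all] header (and strips trailing # comments) so
    # SafeConfigParser can parse it.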
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    if 'rpcport' not in config:
config['rpcport'] = 18145 if testnet else 8145
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
    except Exception:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
        except Exception:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
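# Greedy selection example: with unspent inputs worth 0.5, 0.3 and 0.4 BTC
# and needed = 0.6 BTC, the first two outputs are taken and the returned
# change is 0.5 + 0.3 - 0.6 = 0.2 BTC.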
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
        while not unlock_wallet(bitcoind):
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
|
# basic dependencies
import os
import sys
import subprocess
from glob import glob
import math
# main dependencies: numpy, nibabel
import numpy
import nibabel
# nighresjava and nighres functions
import nighresjava
from ..io import load_volume, save_volume
from ..utils import _output_dir_4saving, _fname_4saving, \
_check_topology_lut_dir
# convenience labels
X=0
Y=1
Z=2
T=3
def surface_antsreg(source_surface, target_surface,
max_dist=10.0,
run_rigid=True,
rigid_iterations=1000,
run_affine=True,
affine_iterations=1000,
run_syn=True,
coarse_iterations=100,
medium_iterations=70, fine_iterations=20,
cost_function='Demons',
interpolation='Linear',
regularization='Low',
convergence=1e-6,
mask_zero=False,
crop=True,
ignore_affine=False, ignore_header=False,
save_data=False, overwrite=False, output_dir=None,
file_name=None):
""" Embedded ANTS Registration for surfaces
Runs the rigid and/or Symmetric Normalization (SyN) algorithm of ANTs and
formats the output deformations into voxel coordinate mappings as used in
CBSTools registration and transformation routines. Uses all input contrasts
with equal weights.
Parameters
----------
source_surface: niimg
Levelset surface image to register
target_surface: niimg
Reference levelset surface image to match
    max_dist: float
        Maximum distance to the levelset surface kept in the clipped
        distance images used for registration (default is 10.0)
    run_rigid: bool
        Whether or not to run a rigid registration first (default is True)
    rigid_iterations: int
        Number of iterations in the rigid step (default is 1000)
    run_affine: bool
        Whether or not to run an affine registration first (default is True)
    affine_iterations: int
        Number of iterations in the affine step (default is 1000)
    run_syn: bool
        Whether or not to run a SyN registration (default is True)
    coarse_iterations: int
        Number of iterations at the coarse level (default is 100)
    medium_iterations: int
        Number of iterations at the medium level (default is 70)
    fine_iterations: int
        Number of iterations at the fine level (default is 20)
    cost_function: {'LeastSquares', 'Demons'}
        Cost function for the registration (default is 'Demons')
    interpolation: {'NearestNeighbor', 'Linear'}
        Interpolation for the registration result (default is 'Linear')
    regularization: {'Low', 'Medium', 'High'}
        Regularization preset for the SyN deformation (default is 'Low')
    convergence: float
        Threshold for convergence; very small values can make the
        algorithm very slow (default is 1e-6)
mask_zero: bool
Mask regions with zero value
(default is False)
ignore_affine: bool
Ignore the affine matrix information extracted from the image header
(default is False)
ignore_header: bool
Ignore the orientation information and affine matrix information
extracted from the image header (default is False)
save_data: bool
Save output data to file (default is False)
overwrite: bool
Overwrite existing results (default is False)
output_dir: str, optional
Path to desired output directory, will be created if it doesn't exist
file_name: str, optional
Desired base name for output files with file extension
(suffixes will be added)
Returns
----------
dict
Dictionary collecting outputs under the following keys
(suffix of output files in brackets)
* mapping (niimg): Coordinate mapping from source to target (_ants_map)
* inverse (niimg): Inverse coordinate mapping from target to source (_ants_invmap)
Notes
----------
Port of the CBSTools Java module by Pierre-Louis Bazin. The main algorithm
is part of the ANTs software by Brian Avants and colleagues [1]_. Parameters
have been set to values commonly found in neuroimaging scripts online, but
not necessarily optimal.
References
----------
.. [1] Avants et al (2008), Symmetric diffeomorphic
image registration with cross-correlation: evaluating automated labeling
of elderly and neurodegenerative brain, Med Image Anal. 12(1):26-41
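    Examples
    ----------
    A minimal usage sketch (the file names here are illustrative only)::

        res = surface_antsreg('source_levelset.nii.gz',
                              'target_levelset.nii.gz',
                              run_rigid=True, run_syn=True,
                              save_data=True, output_dir='/tmp/reg')
        mapping = res['mapping']    # source-to-target coordinate map
        inverse = res['inverse']    # target-to-source coordinate map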
"""
print('\nEmbedded ANTs Registration Surfaces')
# check if ants is installed to raise sensible error
try:
subprocess.run('antsRegistration', stdout=subprocess.DEVNULL)
except FileNotFoundError:
sys.exit("\nCould not find command 'antsRegistration'. Make sure ANTs is"
" installed and can be accessed from the command line.")
try:
subprocess.run('antsApplyTransforms', stdout=subprocess.DEVNULL)
except FileNotFoundError:
sys.exit("\nCould not find command 'antsApplyTransforms'. Make sure ANTs"
" is installed and can be accessed from the command line.")
# make sure that saving related parameters are correct
# output files needed for intermediate results
output_dir = _output_dir_4saving(output_dir, source_surface)
mapping_file = os.path.join(output_dir,
_fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='ants-map'))
inverse_mapping_file = os.path.join(output_dir,
_fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='ants-invmap'))
if save_data:
if overwrite is False \
and os.path.isfile(mapping_file) \
and os.path.isfile(inverse_mapping_file) :
print("skip computation (use existing results)")
output = {'mapping': mapping_file,
'inverse': inverse_mapping_file}
return output
# cropping and masking do not work well together?
if crop: mask_zero=False
# load and get dimensions and resolution from input images
source = load_volume(source_surface)
# flip the data around, threshold
source_ls = numpy.minimum(numpy.maximum(max_dist - source.get_data(),0.0),2.0*max_dist)
if crop:
# crop images for speed?
src_xmin, src_xmax = numpy.where(numpy.any(source_ls>0.1, axis=(1,2)))[0][[0, -1]]
src_ymin, src_ymax = numpy.where(numpy.any(source_ls>0.1, axis=(0,2)))[0][[0, -1]]
src_zmin, src_zmax = numpy.where(numpy.any(source_ls>0.1, axis=(0,1)))[0][[0, -1]]
source_ls = source_ls[src_xmin:src_xmax+1, src_ymin:src_ymax+1, src_zmin:src_zmax+1]
src_img = nibabel.Nifti1Image(source_ls, source.affine, source.header)
src_img.update_header()
src_img_file = os.path.join(output_dir, _fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='tmp_srcimg'))
save_volume(src_img_file, src_img)
source = load_volume(src_img_file)
src_affine = source.affine
src_header = source.header
nsx = source.header.get_data_shape()[X]
nsy = source.header.get_data_shape()[Y]
nsz = source.header.get_data_shape()[Z]
rsx = source.header.get_zooms()[X]
rsy = source.header.get_zooms()[Y]
rsz = source.header.get_zooms()[Z]
orig_src_aff = source.affine
orig_src_hdr = source.header
target = load_volume(target_surface)
# flip the data around
target_ls = numpy.minimum(numpy.maximum(max_dist - target.get_data(),0.0),2.0*max_dist)
if crop:
# crop images for speed?
trg_xmin, trg_xmax = numpy.where(numpy.any(target_ls>0.1, axis=(1,2)))[0][[0, -1]]
trg_ymin, trg_ymax = numpy.where(numpy.any(target_ls>0.1, axis=(0,2)))[0][[0, -1]]
trg_zmin, trg_zmax = numpy.where(numpy.any(target_ls>0.1, axis=(0,1)))[0][[0, -1]]
target_ls = target_ls[trg_xmin:trg_xmax+1, trg_ymin:trg_ymax+1, trg_zmin:trg_zmax+1]
trg_img = nibabel.Nifti1Image(target_ls, target.affine, target.header)
trg_img.update_header()
trg_img_file = os.path.join(output_dir, _fname_4saving(module=__name__,file_name=file_name,
rootfile=target_surface,
suffix='tmp_trgimg'))
save_volume(trg_img_file, trg_img)
target = load_volume(trg_img_file)
trg_affine = target.affine
trg_header = target.header
ntx = target.header.get_data_shape()[X]
nty = target.header.get_data_shape()[Y]
ntz = target.header.get_data_shape()[Z]
rtx = target.header.get_zooms()[X]
rty = target.header.get_zooms()[Y]
rtz = target.header.get_zooms()[Z]
orig_trg_aff = target.affine
orig_trg_hdr = target.header
# in case the affine transformations are not to be trusted: make them equal
if ignore_affine or ignore_header:
# create generic affine aligned with the orientation for the source
new_affine = numpy.zeros((4,4))
if ignore_header:
new_affine[0][0] = rsx
new_affine[1][1] = rsy
new_affine[2][2] = rsz
new_affine[0][3] = -rsx*nsx/2.0
new_affine[1][3] = -rsy*nsy/2.0
new_affine[2][3] = -rsz*nsz/2.0
else:
mx = numpy.argmax(numpy.abs([src_affine[0][0],src_affine[1][0],src_affine[2][0]]))
my = numpy.argmax(numpy.abs([src_affine[0][1],src_affine[1][1],src_affine[2][1]]))
mz = numpy.argmax(numpy.abs([src_affine[0][2],src_affine[1][2],src_affine[2][2]]))
new_affine[mx][0] = rsx*numpy.sign(src_affine[mx][0])
new_affine[my][1] = rsy*numpy.sign(src_affine[my][1])
new_affine[mz][2] = rsz*numpy.sign(src_affine[mz][2])
if (numpy.sign(src_affine[mx][0])<0):
new_affine[mx][3] = rsx*nsx/2.0
else:
new_affine[mx][3] = -rsx*nsx/2.0
if (numpy.sign(src_affine[my][1])<0):
new_affine[my][3] = rsy*nsy/2.0
else:
new_affine[my][3] = -rsy*nsy/2.0
if (numpy.sign(src_affine[mz][2])<0):
new_affine[mz][3] = rsz*nsz/2.0
else:
new_affine[mz][3] = -rsz*nsz/2.0
new_affine[3][3] = 1.0
src_img = nibabel.Nifti1Image(source.get_data(), new_affine, source.header)
src_img.update_header()
src_img_file = os.path.join(output_dir, _fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='tmp_srcimg'))
save_volume(src_img_file, src_img)
source = load_volume(src_img_file)
src_affine = source.affine
src_header = source.header
# create generic affine aligned with the orientation for the target
new_affine = numpy.zeros((4,4))
if ignore_header:
new_affine[0][0] = rtx
new_affine[1][1] = rty
new_affine[2][2] = rtz
new_affine[0][3] = -rtx*ntx/2.0
new_affine[1][3] = -rty*nty/2.0
new_affine[2][3] = -rtz*ntz/2.0
else:
#mx = numpy.argmax(numpy.abs(trg_affine[0][0:3]))
#my = numpy.argmax(numpy.abs(trg_affine[1][0:3]))
#mz = numpy.argmax(numpy.abs(trg_affine[2][0:3]))
#new_affine[0][mx] = rtx*numpy.sign(trg_affine[0][mx])
#new_affine[1][my] = rty*numpy.sign(trg_affine[1][my])
#new_affine[2][mz] = rtz*numpy.sign(trg_affine[2][mz])
#if (numpy.sign(trg_affine[0][mx])<0):
# new_affine[0][3] = rtx*ntx/2.0
#else:
# new_affine[0][3] = -rtx*ntx/2.0
#
#if (numpy.sign(trg_affine[1][my])<0):
# new_affine[1][3] = rty*nty/2.0
#else:
# new_affine[1][3] = -rty*nty/2.0
#
#if (numpy.sign(trg_affine[2][mz])<0):
# new_affine[2][3] = rtz*ntz/2.0
#else:
# new_affine[2][3] = -rtz*ntz/2.0
mx = numpy.argmax(numpy.abs([trg_affine[0][0],trg_affine[1][0],trg_affine[2][0]]))
my = numpy.argmax(numpy.abs([trg_affine[0][1],trg_affine[1][1],trg_affine[2][1]]))
mz = numpy.argmax(numpy.abs([trg_affine[0][2],trg_affine[1][2],trg_affine[2][2]]))
#print('mx: '+str(mx)+', my: '+str(my)+', mz: '+str(mz))
#print('rx: '+str(rtx)+', ry: '+str(rty)+', rz: '+str(rtz))
new_affine[mx][0] = rtx*numpy.sign(trg_affine[mx][0])
new_affine[my][1] = rty*numpy.sign(trg_affine[my][1])
new_affine[mz][2] = rtz*numpy.sign(trg_affine[mz][2])
if (numpy.sign(trg_affine[mx][0])<0):
new_affine[mx][3] = rtx*ntx/2.0
else:
new_affine[mx][3] = -rtx*ntx/2.0
if (numpy.sign(trg_affine[my][1])<0):
new_affine[my][3] = rty*nty/2.0
else:
new_affine[my][3] = -rty*nty/2.0
if (numpy.sign(trg_affine[mz][2])<0):
new_affine[mz][3] = rtz*ntz/2.0
else:
new_affine[mz][3] = -rtz*ntz/2.0
#if (numpy.sign(trg_affine[0][mx])<0): new_affine[mx][3] = rtx*ntx
#if (numpy.sign(trg_affine[1][my])<0): new_affine[my][3] = rty*nty
#if (numpy.sign(trg_affine[2][mz])<0): new_affine[mz][3] = rtz*ntz
#new_affine[0][3] = ntx/2.0
#new_affine[1][3] = nty/2.0
#new_affine[2][3] = ntz/2.0
new_affine[3][3] = 1.0
#print("\nbefore: "+str(trg_affine))
#print("\nafter: "+str(new_affine))
trg_img = nibabel.Nifti1Image(target.get_data(), new_affine, target.header)
trg_img.update_header()
trg_img_file = os.path.join(output_dir, _fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='tmp_trgimg'))
save_volume(trg_img_file, trg_img)
target = load_volume(trg_img_file)
trg_affine = target.affine
trg_header = target.header
if mask_zero:
# create and save temporary masks
trg_mask_data = (target.get_data()!=0)*(target.get_data()!=2.0*max_dist)
trg_mask = nibabel.Nifti1Image(trg_mask_data, target.affine, target.header)
trg_mask_file = os.path.join(output_dir, _fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='tmp_trgmask'))
save_volume(trg_mask_file, trg_mask)
src_mask_data = (source.get_data()!=0)*(source.get_data()!=2.0*max_dist)
src_mask = nibabel.Nifti1Image(src_mask_data, source.affine, source.header)
src_mask_file = os.path.join(output_dir, _fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='tmp_srcmask'))
save_volume(src_mask_file, src_mask)
# run the main ANTS software: here we directly build the command line call
reg = 'antsRegistration --collapse-output-transforms 1 --dimensionality 3' \
+' --initialize-transforms-per-stage 0 --interpolation Linear'
# add a prefix to avoid multiple names?
prefix = _fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='tmp_syn')
prefix = os.path.basename(prefix)
prefix = prefix.split(".")[0]
#reg.inputs.output_transform_prefix = prefix
reg = reg+' --output '+prefix
if mask_zero:
reg = reg+' --masks ['+trg_mask_file+', '+src_mask_file+']'
srcfiles = []
trgfiles = []
print("registering "+source.get_filename()+"\n to "+target.get_filename())
srcfiles.append(source.get_filename())
trgfiles.append(target.get_filename())
weight = 1.0/len(srcfiles)
# set parameters for all the different types of transformations
if run_rigid is True:
reg = reg + ' --transform Rigid[0.1]'
for idx,img in enumerate(srcfiles):
reg = reg + ' --metric MeanSquares['+trgfiles[idx]+', '+srcfiles[idx] \
+', '+'{:.3f}'.format(weight)+', 0, Random, 0.3 ]'
reg = reg + ' --convergence ['+str(rigid_iterations)+'x' \
+str(rigid_iterations)+'x'+str(rigid_iterations) \
+', '+str(convergence)+', 5 ]'
reg = reg + ' --smoothing-sigmas 4.0x2.0x0.0'
reg = reg + ' --shrink-factors 16x4x1'
reg = reg + ' --use-histogram-matching 0'
#reg = reg + ' --winsorize-image-intensities [ 0.001, 0.999 ]'
if run_affine is True:
reg = reg + ' --transform Affine[0.1]'
for idx,img in enumerate(srcfiles):
reg = reg + ' --metric MeanSquares['+trgfiles[idx]+', '+srcfiles[idx] \
+', '+'{:.3f}'.format(weight)+', 0, Random, 0.3 ]'
reg = reg + ' --convergence ['+str(affine_iterations)+'x' \
+str(affine_iterations)+'x'+str(affine_iterations) \
+', '+str(convergence)+', 5 ]'
reg = reg + ' --smoothing-sigmas 4.0x2.0x0.0'
reg = reg + ' --shrink-factors 16x4x1'
reg = reg + ' --use-histogram-matching 0'
#reg = reg + ' --winsorize-image-intensities [ 0.001, 0.999 ]'
if run_syn is True:
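        # SyN parameters below are [gradient step, update field smoothing,
        # total field smoothing] (smoothing in voxels), passed as SyN[...]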
if regularization == 'Low': syn_param = [0.1, 1.0, 0.0]
elif regularization == 'Medium': syn_param = [0.1, 3.0, 0.0]
elif regularization == 'High': syn_param = [0.2, 4.0, 3.0]
else: syn_param = [0.1, 3.0, 0.0]
reg = reg + ' --transform SyN'+str(syn_param)
if (cost_function=='Demons'):
for idx,img in enumerate(srcfiles):
reg = reg + ' --metric Demons['+trgfiles[idx]+', '+srcfiles[idx] \
+', '+'{:.3f}'.format(weight)+', 4, Random, 0.3 ]'
else:
for idx,img in enumerate(srcfiles):
reg = reg + ' --metric MeanSquares['+trgfiles[idx]+', '+srcfiles[idx] \
+', '+'{:.3f}'.format(weight)+', 0, Random, 0.3 ]'
reg = reg + ' --convergence ['+str(coarse_iterations)+'x' \
+str(coarse_iterations)+'x'+str(medium_iterations)+'x' \
+str(medium_iterations)+'x' \
+str(fine_iterations)+', '+str(convergence)+', 5 ]'
reg = reg + ' --smoothing-sigmas 9.0x6.0x3.0x1.0x0.0'
reg = reg + ' --shrink-factors 16x8x4x2x1'
reg = reg + ' --use-histogram-matching 0'
#reg = reg + ' --winsorize-image-intensities [ 0.001, 0.999 ]'
if run_rigid is False and run_affine is False and run_syn is False:
reg = reg + ' --transform Rigid[0.1]'
for idx,img in enumerate(srcfiles):
reg = reg + ' --metric CC['+trgfiles[idx]+', '+srcfiles[idx] \
+', '+'{:.3f}'.format(weight)+', 5, Random, 0.3 ]'
reg = reg + ' --convergence [ 0x0x0, 1.0, 2 ]'
reg = reg + ' --smoothing-sigmas 3.0x2.0x1.0'
reg = reg + ' --shrink-factors 4x2x1'
reg = reg + ' --use-histogram-matching 0'
#reg = reg + ' --winsorize-image-intensities [ 0.001, 0.999 ]'
reg = reg + ' --write-composite-transform 0'
# run the ANTs command directly
print(reg)
try:
subprocess.check_output(reg, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
msg = 'execution failed (error code '+str(e.returncode)+')\n Output: '+str(e.output)
        raise RuntimeError(msg)
# output file names
results = sorted(glob(prefix+'*'))
forward = []
flag = []
for res in results:
if res.endswith('GenericAffine.mat'):
forward.append(res)
flag.append(False)
elif res.endswith('Warp.nii.gz') and not res.endswith('InverseWarp.nii.gz'):
forward.append(res)
flag.append(False)
#print('forward transforms: '+str(forward))
inverse = []
linear = []
for res in results[::-1]:
if res.endswith('GenericAffine.mat'):
inverse.append(res)
linear.append(True)
elif res.endswith('InverseWarp.nii.gz'):
inverse.append(res)
linear.append(False)
#print('inverse transforms: '+str(inverse))
#transform input (for checking)
# src_at = 'antsApplyTransforms --dimensionality 3 --input-image-type 3'
# src_at = src_at+' --input '+source.get_filename()
# src_at = src_at+' --reference-image '+target.get_filename()
# src_at = src_at+' --interpolation Linear'
# for idx,transform in enumerate(forward):
# if flag[idx]:
# src_at = src_at+' --transform ['+transform+', 1]'
# else:
# src_at = src_at+' --transform ['+transform+', 0]'
# src_at = src_at+' --output '+mapping_file
#
# print(src_at)
# try:
# subprocess.check_output(src_at, shell=True, stderr=subprocess.STDOUT)
# except subprocess.CalledProcessError as e:
# msg = 'execution failed (error code '+e.returncode+')\n Output: '+e.output
# raise subprocess.CalledProcessError(msg)
# Create forward coordinate mapping
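    # Build an identity map: every voxel stores its own (x,y,z) index.
    # Pushed through the forward transforms, this yields, at each target
    # voxel, the source voxel coordinate it maps from. The broadcasting
    # below is equivalent to stacking numpy.indices((nsx, nsy, nsz)).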
src_coord = numpy.zeros((nsx,nsy,nsz,3))
src_coord[:,:,:,0] = numpy.expand_dims(numpy.expand_dims(numpy.array(range(nsx)),1),2) \
*numpy.ones((1,nsy,1))*numpy.ones((1,1,nsz))
src_coord[:,:,:,1] = numpy.ones((nsx,1,1))*numpy.expand_dims(numpy.expand_dims(numpy.array(range(nsy)),0),2) \
*numpy.ones((1,1,nsz))
src_coord[:,:,:,2] = numpy.ones((nsx,1,1))*numpy.ones((1,nsy,1)) \
*numpy.expand_dims(numpy.expand_dims(numpy.array(range(nsz)),0),1)
src_map = nibabel.Nifti1Image(src_coord, source.affine, source.header)
src_map_file = os.path.join(output_dir, _fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='tmp_srccoord'))
save_volume(src_map_file, src_map)
src_at = 'antsApplyTransforms --dimensionality 3 --input-image-type 3'
src_at = src_at+' --input '+src_map.get_filename()
src_at = src_at+' --reference-image '+target.get_filename()
src_at = src_at+' --interpolation Linear'
for idx,transform in enumerate(forward):
if flag[idx]:
src_at = src_at+' --transform ['+transform+', 1]'
else:
src_at = src_at+' --transform ['+transform+', 0]'
src_at = src_at+' --output '+mapping_file
print(src_at)
try:
subprocess.check_output(src_at, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
        msg = 'execution failed (error code '+str(e.returncode)+')\n Output: '+str(e.output)
        raise RuntimeError(msg)
# uncrop if needed
if crop:
orig = load_volume(target_surface)
nx = orig.header.get_data_shape()[X]
ny = orig.header.get_data_shape()[Y]
nz = orig.header.get_data_shape()[Z]
coord = -numpy.ones((nx,ny,nz,3))
mapping = load_volume(mapping_file).get_data()
coord[trg_xmin:trg_xmax+1, trg_ymin:trg_ymax+1, trg_zmin:trg_zmax+1, 0] = mapping[:,:,:,0] + src_xmin
coord[trg_xmin:trg_xmax+1, trg_ymin:trg_ymax+1, trg_zmin:trg_zmax+1, 1] = mapping[:,:,:,1] + src_ymin
coord[trg_xmin:trg_xmax+1, trg_ymin:trg_ymax+1, trg_zmin:trg_zmax+1, 2] = mapping[:,:,:,2] + src_zmin
coord_img = nibabel.Nifti1Image(coord, orig.affine, orig.header)
save_volume(mapping_file, coord_img)
# Create backward coordinate mapping
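    # Same identity-map construction as above, this time over the target
    # grid and warped through the inverse transforms.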
trg_coord = numpy.zeros((ntx,nty,ntz,3))
trg_coord[:,:,:,0] = numpy.expand_dims(numpy.expand_dims(numpy.array(range(ntx)),1),2) \
*numpy.ones((1,nty,1))*numpy.ones((1,1,ntz))
trg_coord[:,:,:,1] = numpy.ones((ntx,1,1))*numpy.expand_dims(numpy.expand_dims(numpy.array(range(nty)),0),2) \
*numpy.ones((1,1,ntz))
trg_coord[:,:,:,2] = numpy.ones((ntx,1,1))*numpy.ones((1,nty,1)) \
*numpy.expand_dims(numpy.expand_dims(numpy.array(range(ntz)),0),1)
trg_map = nibabel.Nifti1Image(trg_coord, target.affine, target.header)
trg_map_file = os.path.join(output_dir, _fname_4saving(module=__name__,file_name=file_name,
rootfile=source_surface,
suffix='tmp_trgcoord'))
save_volume(trg_map_file, trg_map)
trg_at = 'antsApplyTransforms --dimensionality 3 --input-image-type 3'
trg_at = trg_at+' --input '+trg_map.get_filename()
trg_at = trg_at+' --reference-image '+source.get_filename()
trg_at = trg_at+' --interpolation Linear'
for idx,transform in enumerate(inverse):
if linear[idx]:
trg_at = trg_at+' --transform ['+transform+', 1]'
else:
trg_at = trg_at+' --transform ['+transform+', 0]'
trg_at = trg_at+' --output '+inverse_mapping_file
print(trg_at)
try:
subprocess.check_output(trg_at, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
        msg = 'execution failed (error code '+str(e.returncode)+')\n Output: '+str(e.output)
        raise RuntimeError(msg)
# uncrop if needed
if crop:
orig = load_volume(source_surface)
nx = orig.header.get_data_shape()[X]
ny = orig.header.get_data_shape()[Y]
nz = orig.header.get_data_shape()[Z]
coord = -numpy.ones((nx,ny,nz,3))
mapping = load_volume(inverse_mapping_file).get_data()
coord[src_xmin:src_xmax+1, src_ymin:src_ymax+1, src_zmin:src_zmax+1, 0] = mapping[:,:,:,0] + trg_xmin
coord[src_xmin:src_xmax+1, src_ymin:src_ymax+1, src_zmin:src_zmax+1, 1] = mapping[:,:,:,1] + trg_ymin
coord[src_xmin:src_xmax+1, src_ymin:src_ymax+1, src_zmin:src_zmax+1, 2] = mapping[:,:,:,2] + trg_zmin
coord_img = nibabel.Nifti1Image(coord, orig.affine, orig.header)
save_volume(inverse_mapping_file, coord_img)
# clean-up intermediate files
# if os.path.exists(src_map_file): os.remove(src_map_file)
# if os.path.exists(trg_map_file): os.remove(trg_map_file)
# if os.path.exists(src_img_file): os.remove(src_img_file)
# if os.path.exists(trg_img_file): os.remove(trg_img_file)
# if mask_zero:
# if os.path.exists(src_mask_file): os.remove(src_mask_file)
# if os.path.exists(trg_mask_file): os.remove(trg_mask_file)
for name in forward:
if os.path.exists(name): os.remove(name)
for name in inverse:
if os.path.exists(name): os.remove(name)
# if ignoring header and/or affine, must paste back the correct headers
if ignore_affine or ignore_header:
mapping = load_volume(mapping_file)
save_volume(mapping_file, nibabel.Nifti1Image(mapping.get_data(), orig_trg_aff, orig_trg_hdr))
inverse = load_volume(inverse_mapping_file)
save_volume(inverse_mapping_file, nibabel.Nifti1Image(inverse.get_data(), orig_src_aff, orig_src_hdr))
if not save_data:
# collect saved outputs
output = {'mapping': load_volume(mapping_file),
'inverse': load_volume(inverse_mapping_file)}
# remove output files if *not* saved
if os.path.exists(mapping_file): os.remove(mapping_file)
if os.path.exists(inverse_mapping_file): os.remove(inverse_mapping_file)
return output
else:
# collect saved outputs
output = {'mapping': mapping_file,
'inverse': inverse_mapping_file}
return output
|
|
#
# topology_writer_json - writes a topology in JSON format
#
# Copyright (c) 2014 Benocs GmbH
#
# author: Robert Wuttke <robert@benocs.com>
#
# See the LICENSE file included in this distribution.
#
import json
from core.misc.netid import NetIDSubnetMap
from coretopogen.topology_readwriter import TopologyWriter
class TopologyWriterJson(TopologyWriter):
FILE_EXTENSION = 'json'
default_options_global = {
# show interface names in core-gui
'interface_names': 'no',
# show ipv4 addresses in core-gui
'ip_addresses': 'no',
# show ipv6 addresses in core-gui
'ipv6_addresses': 'no',
# show node labels in core-gui
'node_labels': 'no',
# show link labels in core-gui
'link_labels': 'no',
# show api-messages in core-gui
'show_api': 'no',
# show background images in core-gui
'background_images': 'no',
# show annotations in core-gui
'annotations': 'no',
# show grid in core-gui
'grid': 'no',
# not used. we have our own traffic generators
'traffic_start': 0,
}
default_options_session = {
'controlnet': '192.168.128.0/17',
'enablerj45': 1,
'enablesdt': 0,
'preservedir': 0,
}
default_canvas = {'name': '{Canvas1}'}
# key: topology key - value: json key
link_properties_map = {
'bw': 'bandwidth',
'delay': 'delay',
}
link_properties_factor_map = {
'bw': 1,
'delay': 1,
}
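    # e.g. a link carrying {'bw': 1000} is emitted as {'bandwidth': '1000'}
    # after the name translation and (here trivial) scaling defined above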
@staticmethod
def __get_option_global__(options=None):
if options is None:
options = TopologyWriterJson.default_options_global
cfg = {'option_global': {}}
for k, v in options.items():
cfg['option_global'][str(k)] = str(v)
return cfg
@staticmethod
def __get_option_session__(options=None):
if options is None:
options = TopologyWriterJson.default_options_session
cfg = {'option_session': {}}
for k, v in options.items():
cfg['option_session'][str(k)] = str(v)
return cfg
@staticmethod
def __get_canvas__(canvas=None):
if canvas is None:
canvas = TopologyWriterJson.default_canvas
cfg = {'c1': {}}
for k, v in canvas.items():
cfg['c1'][str(k)] = str(v)
return cfg
@staticmethod
def __get_links__(links=None):
if links is None:
raise ValueError('refusing to write a topology without any links')
if not isinstance(links, list):
raise ValueError('links needs to be a list')
if len(links) == 0:
raise ValueError('refusing to write a topology without any links')
cfgdict = {}
link_count = 1
for link in links:
cfgdict[link_count] = {}
for k, v in link.properties.items():
# do needed translations and only write known properties
if k in TopologyWriterJson.link_properties_map:
k = TopologyWriterJson.link_properties_map[k]
if k in TopologyWriterJson.link_properties_factor_map:
v = v * TopologyWriterJson.link_properties_factor_map[k]
cfgdict[link_count][str(k)] = str(v)
cfgdict[link_count]['nodes'] = [node[0].nodeid for node in link.nodes]
link_count += 1
return cfgdict
@staticmethod
def __get_node_networkconfig__(node):
cfgdict = {}
cfgdict['hostname'] = node.get_name()
cfgdict['interfaces'] = {}
for intf in node.get_interfaces():
cfgdict['interfaces'][intf.get_name()] = {}
cfgdict['interfaces'][intf.get_name()]['ipv4'] = []
for ipv4_addr in intf.get_ipv4_addresses():
cfgdict['interfaces'][intf.get_name()]['ipv4'].append(
'%s/%s' % (str(ipv4_addr),
str(ipv4_addr.get_prefixlen())))
cfgdict['interfaces'][intf.get_name()]['ipv6'] = []
for ipv6_addr in intf.get_ipv6_addresses():
cfgdict['interfaces'][intf.get_name()]['ipv6'].append(
'%s/%s' % (str(ipv6_addr),
str(ipv6_addr.get_prefixlen())))
return cfgdict
@staticmethod
def __get_node_links__(localnode, global_links):
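        # collect, for the given node, one (local interface name,
        # remote node id) pair per remote endpoint of every touching link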
cfglist = []
for link in global_links:
# check whether this node is part of the link
            # if this node is not part of the link, skip to the next link
if not localnode in [linknode for linknode, linkinterface in link.nodes]:
continue
# find local interface
localinterface = None
for linknode, linkinterface in link.nodes:
if linknode == localnode:
localinterface = linkinterface
break
if localinterface is None:
raise ValueError(('something is seriously wrong. in this link, '
'i found our node but not our interface'))
for remotenode, remoteinterface in link.nodes:
# don't create a link to ourself
if localnode == remotenode:
continue
cfglist.append((localinterface.get_name(), remotenode.nodeid))
return cfglist
@staticmethod
def __get_netid_subnet_map__(topology=None):
if topology is None:
raise ValueError('refusing to write a topology without any topology')
sessionid = -1
        if sessionid not in NetIDSubnetMap.__mapping__:
raise ValueError(('sessionid: "%s" not found in NetIDSubnetMap' %
str(sessionid)))
subnetmap = {}
for ipfam in 4, 6:
subnetmap[ipfam] = []
for subnet, netid in NetIDSubnetMap.__mapping__[sessionid][ipfam].items():
subnetmap[ipfam].append((netid, subnet))
return subnetmap
@staticmethod
def __get_nodes__(nodes=None, links=None):
if nodes is None:
raise ValueError('refusing to write a topology without any nodes')
if not isinstance(nodes, dict):
raise ValueError('nodes needs to be a dict')
if len(nodes) == 0:
raise ValueError('refusing to write a topology without any nodes')
if links is None:
raise ValueError('refusing to write a topology without any links')
if not isinstance(links, list):
raise ValueError('links needs to be a list')
if len(links) == 0:
raise ValueError('refusing to write a topology without any links')
nodesdict = {}
for node in nodes.values():
nodesdict[node.nodeid] = {}
if node.get_state():
nodesdict[node.nodeid]['state'] = 'on'
else:
nodesdict[node.nodeid]['state'] = 'off'
nodesdict[node.nodeid]['network_cfg'] = TopologyWriterJson.__get_node_networkconfig__(node)
nodesdict[node.nodeid]['netid'] = node.get_asn()
nodesdict[node.nodeid]['type'] = node.get_type()
nodesdict[node.nodeid]['model'] = node.get_model()
nodesdict[node.nodeid]['canvas'] = 'canvas c1'
node_pos = node.get_position()
nodesdict[node.nodeid]['pos'] = '%d.0 %d.0' % node_pos
# put the label 40px below the node
nodesdict[node.nodeid]['label_pos'] = '%d.0 %d.0' % (node_pos[0], node_pos[1] + 40)
nodesdict[node.nodeid]['links'] = TopologyWriterJson.__get_node_links__(node, links)
return nodesdict
@staticmethod
def write(filename=None, topology=None):
if topology is None:
raise ValueError('topology is None')
if filename is None:
raise ValueError('filename is None')
filename = '%s.%s' % (filename, TopologyWriterJson.FILE_EXTENSION)
topologydict = {}
with open(filename, 'w') as fd:
topologydict['netid_subnet_map'] = TopologyWriterJson.__get_netid_subnet_map__(topology)
topologydict['nodes'] = TopologyWriterJson.__get_nodes__(topology.get_nodes(),
topology.get_links())
topologydict['links'] = TopologyWriterJson.__get_links__(topology.get_links())
topologydict['canvas'] = TopologyWriterJson.__get_canvas__()
topologydict['global'] = TopologyWriterJson.__get_option_global__()
topologydict['session'] = TopologyWriterJson.__get_option_session__()
fd.write(json.dumps(topologydict, sort_keys=True, indent=4))
return True
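# A minimal usage sketch (assuming a coretopogen topology object `topo`):
#
#   TopologyWriterJson.write(filename='out/mytopo', topology=topo)
#
# writes 'out/mytopo.json' containing the netid/subnet map, nodes, links,
# canvas and the global/session option sections assembled above.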
|
|
import os
import xml.dom.minidom
import pycmark.cmarkgfm as cmark
import io
from ..ast.DocumentTree import DocumentTree
from .CodeHighlighter import highlight, HIGHLIGHT_LANGUAGES, highlight_css
from .Katex import processLatex, escapeUnicode
from .AssetLoader import loadAsset, loadBinaryAsset, ROOT
################################################################################
def toStyledHTML(txt, withIndex=False):
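    """Render markdown text into a single self-contained styled HTML page.

    Pipeline sketch: parse with cmarkgfm, syntax-highlight fenced code
    blocks, build a table of contents from the heading tree, render
    <latex> elements via KaTeX, then inline all CSS/JS/font assets so the
    output requires no external files.
    """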
    # parse the markdown, converting [toc] markers to a <toc/> element
    # that won't get wrapped in a paragraph
doc = cmark.parse(txt.replace('[TOC]', '<toc/>').replace('[toc]', '<toc/>'))
# hierarchical document tree
dt = DocumentTree.fromAst(doc.toAST())
# generate html and wrap in a dom object
dom = xml.dom.minidom.parseString('<body>' + doc.toHTML() + '</body>')
# identify <pre><code> elements
pre_code = []
for pre in dom.getElementsByTagName('pre'):
if pre.hasChildNodes() and pre.firstChild.tagName == 'code':
if 'class' in pre.firstChild.attributes.keys():
language = pre.firstChild.attributes['class'].value.replace('language-', '').lower()
if language == 'text':
continue
language = {'cpp': 'c++', 'docker': 'dockerfile'}.get(language, language)
assert language in HIGHLIGHT_LANGUAGES
else:
language = None
pre_code.append((pre, language, pre.firstChild.firstChild.nodeValue))
# perform syntax highlighting on <pre><code> elements
syn = highlight([(lang, src) for _, lang, src in pre_code])
for (pre, lang, _), src in zip(pre_code, syn):
code = xml.dom.minidom.parseString(src)
pre.replaceChild(code.firstChild, pre.firstChild)
# escape special characters in html
def escapeHTML(txt):
        return txt.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
# generate a table of contents
def toOL(entries, ordered=True):
out = ['<ol>' if ordered else '<ul>']
for entry in entries:
if len(entry.Children) == 0:
out.append('<li><a href="#%s">%s</a></li>' % (entry.ID, escapeHTML(entry.title)))
else:
out += ['<li><a href="#%s">%s</a>' % (entry.ID, escapeHTML(entry.title))] \
+ toOL(entry.Children, ordered) + ['</li>']
return out + ['</ol>' if ordered else '</ul>']
toc_list = xml.dom.minidom.parseString(TOC.format(toc='\n'.join(toOL(dt.Children)))).firstChild
# replace <toc/> entries with generated table of contents
for t in dom.getElementsByTagName('toc'):
t.parentNode.replaceChild(toc_list, t)
# add anchors to headings
headings = [el for el in dom.firstChild.childNodes
if isinstance(el, xml.dom.minidom.Element) and el.tagName in {'h2', 'h3', 'h4', 'h5', 'h6'}]
elements = [el for el in dt.walk() if el.isHeading()]
assert len(headings) == len(elements)
for h, e in zip(headings, elements):
h.attributes['id'] = e.ID
# style tables
for t in dom.getElementsByTagName('table'):
t.attributes['class'] = 'table table-striped table-hover table-condensed'
for t in dom.getElementsByTagName('thead'):
t.attributes['class'] = 'btn-primary'
# open hyperlinks in a new tab
for a in dom.getElementsByTagName('a'):
if 'href' in a.attributes.keys() and not a.attributes['href'].value.startswith('#'):
a.attributes['target'] = '_blank'
# create bootstrap alert boxes
for p in dom.getElementsByTagName('p'):
if isinstance(p.firstChild, xml.dom.minidom.Text):
if p.firstChild.nodeValue.startswith('NOTE:'):
p.attributes['class'] = 'alert alert-info'
elif p.firstChild.nodeValue.startswith('WARNING:'):
p.attributes['class'] = 'alert alert-warning'
# identify <latex> elements
latex = [(el, el.firstChild.nodeValue, el.attributes['class'].nodeValue == 'block')
for el in dom.getElementsByTagName('latex')]
# render latex equations
latexMap = {}
if len(latex) > 0:
rendered = processLatex([(src, blk) for _, src, blk in latex])
for (el, oldsrc, _), newsrc in zip(latex, rendered):
key = 'latex_{}'.format(len(latexMap))
el.replaceChild(dom.createTextNode(key), el.firstChild)
latexMap[key] = newsrc
# convert DOM back into text. escape unicode characters.
body = escapeUnicode(dom.firstChild.toxml().replace('<body>', '').replace('</body>', ''))
# replace any latex placeholders with rendered latex
if '<latex' in body:
buf = io.StringIO() # output buffer
a = 0 # initialize processed data pointer
b = body.find('<latex') # find next latex tag
while b >= 0: # while there is a latex tag to process...
b = body.find('>', b) + 1 # jump ahead to end of latex tag
buf.write(body[a:b]) # write unprocessed data up to current location
a = b # advance processed data pointer
b = body.find('</latex', b) # look ahead for closing tag
key = body[a:b] # <latex...>key</latex>
a = b # advance processed data pointer
buf.write(latexMap.get(key, key)) # write latex data in lookup table corresponding to key
b = body.find('<latex', b) # find next latex tag
buf.write(body[a:]) # write remaining html
body = buf.getvalue() # extract data from buffer
buf.close() # close buffer
# add any content-specific assets
jslib = []
if len(pre_code) > 0: # highlighted code needs stylesheet
jslib.append(highlight_css)
if len(latex) > 0: # rendered latex needs stylesheet and fonts
jslib.append("<style type='text/css'>")
for line in loadAsset('node_modules/katex/dist/katex.css', escape=False, indent=4).rstrip().split('\n'):
if 'src: url' in line:
line = line.split(',')[0].strip()
a = line.find('(') + 1
b = line.find(')', a)
encoded = loadBinaryAsset(os.path.join(ROOT, 'node_modules', 'katex', 'dist', line[a:b]))
jslib.append(' '*6 + "src: url(data:font/woff2;base64,%s) format('woff2');" % encoded)
else:
jslib.append(line)
jslib.append("</style>")
# assemble html
return HTML.format(
navlinks='<li class="nav-item"><a class="nav-link" href="index.html">Page Index</a></li>' if withIndex else '',
toc='\n'.join(toOL(dt.Children, ordered=False)).replace('href="', 'href="#" data-href="'),
content=body,
jslib=' \n'.join(jslib)
)
TOC = '''
<details class="toc">
<summary>Table of Contents</summary>
<toc>
{toc}
</toc>
</details>
'''
################################################################################
CSS = loadAsset('style.css') + loadAsset('bootswatch-cosmo-4.3.1.min.css')
JSCORE = loadAsset('jquery-3.3.1.slim.min.js') + loadAsset('bootstrap-4.3.1.min.js')
POSTPROCESS = loadAsset('process-rendered.js')
FAVICON = loadBinaryAsset('favicon.ico')
HTML = '''<!DOCTYPE html>
<html lang="en">
<head>
<style type='text/css'>
{css}
</style>
<link rel="shortcut icon"type="image/x-icon" href="data:image/x-icon;base64,{favicon}"/>
</head>
<body>
<!-- MODAL HELP DIALOG -->
<div class="modal fade" id="helpDialog" tabindex="-1" role="dialog" aria-labelledby="helpDialogLabel" aria-hidden="true">
<div class="modal-dialog modal-dialog-centered" role="document">
<div class="modal-content">
<div class="modal-header bg-primary">
<h5 class="modal-title" id="helpDialogLabel" style="color: #fff">Usage</h5>
<button type="button" class="close" data-dismiss="modal" aria-label="Close">
<span aria-hidden="true" style="color: #fff">×</span>
</button>
</div>
<div class="modal-body">
<h5>Navigation Bar</h5>
<ul>
<li>Click on hamburger menu (or slide from right on iPhone) to toggle navigation bar</li>
<li>Current location is highlighted red in navigation</li>
<li>Click on a node's bullet to expand/collapse the node</li>
<li>Double click on a node's bullet to expand entire tree</li>
<li>Click on a node's text to jump to the associated section</li>
</ul>
<h5>Map Mode</h5>
<ul>
<li>Click on "Map Mode" in toolbar to toggle map mode</li>
<li>Clicking on node text in the navigation bar filters text to that section</li>
<li>Clicking on "Show All" in toolbar shows entire document</li>
</ul>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-dismiss="modal">Close</button>
</div>
</div>
</div>
</div>
<!-- CONTENT -->
<div id="container" class="container">
<!-- NAVIGATION BAR -->
<nav class="navbar navbar-expand-md navbar-dark bg-primary">
<a id="navbar-title" class='navbar-brand' href='#'>☰ <span>Page Title</span></a>
<div id="navbar" class="navbar-collapse collapse">
<ul class="navbar-nav mr-auto">
<li class="nav-item"><a class="nav-link" id="map-mode-toggle" href="#">Map Mode <span class="sr-only">(current)</span></a></li>
<li class="nav-item"><a class="nav-link" id="map-show-all" href="#">Show All</a></li>
{{navlinks}}
<li class="nav-item"><a class="nav-link" id="map-mode-help" data-toggle="modal" data-target="#helpDialog" href="#">Help</a></li>
</ul>
</div>
</nav>
<div id="row" class="row">
<div id="markdown-toc" class="hidden-print col-md-auto">
{{toc}}
</div> <!-- markdown-toc -->
<div id="markdown-container" class="col">
{{content}}
</div> <!-- markdown-container -->
</div></div>
<!-- JAVASCRIPT LIBRARIES -->
<script type="text/javascript">
{jscore}
</script>
{{jslib}}
<!-- PROCESS RENDERED MARKDOWN -->
<script type="text/javascript">
{postprocess}
</script>
</body>
</html>
'''.format(css=CSS, favicon=FAVICON, jscore=JSCORE, postprocess=POSTPROCESS)
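# Note: HTML is formatted in two passes. The .format() call above fills in the
# static assets (css, favicon, jscore, postprocess); doubled braces such as
# {{toc}} survive as single braces for the later per-page .format() call that
# supplies navlinks, toc, content and jslib.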
################################################################################
|
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Video(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, id_product=None, id_video_group=None, id_product_image=None, id_language=None, language_filter=None, id_media_source=None, name=None, description=None, duration=None, filename=None, position=None, subscription=None, free=None, download=None, active=None, date_add=None, date_upd=None, can_watch=None, cover=None, thumbnail=None, geoloc_enabled=None, behavior_detected_countries=None, behavior_non_detected_countries=None, has_free_access=None, advertising_url=None):
"""
Video - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'id_product': 'int',
'id_video_group': 'int',
'id_product_image': 'int',
'id_language': 'int',
'language_filter': 'int',
'id_media_source': 'int',
'name': 'list[I18nField]',
'description': 'list[I18nField]',
'duration': 'int',
'filename': 'str',
'position': 'int',
'subscription': 'int',
'free': 'int',
'download': 'int',
'active': 'bool',
'date_add': 'str',
'date_upd': 'str',
'can_watch': 'bool',
'cover': 'str',
'thumbnail': 'str',
'geoloc_enabled': 'bool',
'behavior_detected_countries': 'str',
'behavior_non_detected_countries': 'str',
'has_free_access': 'VideoFreeAccess',
'advertising_url': 'str'
}
self.attribute_map = {
'id': 'id',
'id_product': 'id_product',
'id_video_group': 'id_video_group',
'id_product_image': 'id_product_image',
'id_language': 'id_language',
'language_filter': 'language_filter',
'id_media_source': 'id_media_source',
'name': 'name',
'description': 'description',
'duration': 'duration',
'filename': 'filename',
'position': 'position',
'subscription': 'subscription',
'free': 'free',
'download': 'download',
'active': 'active',
'date_add': 'date_add',
'date_upd': 'date_upd',
'can_watch': 'can_watch',
'cover': 'cover',
'thumbnail': 'thumbnail',
'geoloc_enabled': 'geoloc_enabled',
'behavior_detected_countries': 'behavior_detected_countries',
'behavior_non_detected_countries': 'behavior_non_detected_countries',
'has_free_access': 'has_free_access',
'advertising_url': 'advertising_url'
}
self._id = id
self._id_product = id_product
self._id_video_group = id_video_group
self._id_product_image = id_product_image
self._id_language = id_language
self._language_filter = language_filter
self._id_media_source = id_media_source
self._name = name
self._description = description
self._duration = duration
self._filename = filename
self._position = position
self._subscription = subscription
self._free = free
self._download = download
self._active = active
self._date_add = date_add
self._date_upd = date_upd
self._can_watch = can_watch
self._cover = cover
self._thumbnail = thumbnail
self._geoloc_enabled = geoloc_enabled
self._behavior_detected_countries = behavior_detected_countries
self._behavior_non_detected_countries = behavior_non_detected_countries
self._has_free_access = has_free_access
self._advertising_url = advertising_url
@property
def id(self):
"""
Gets the id of this Video.
:return: The id of this Video.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Video.
:param id: The id of this Video.
:type: int
"""
self._id = id
@property
def id_product(self):
"""
Gets the id_product of this Video.
:return: The id_product of this Video.
:rtype: int
"""
return self._id_product
@id_product.setter
def id_product(self, id_product):
"""
Sets the id_product of this Video.
:param id_product: The id_product of this Video.
:type: int
"""
self._id_product = id_product
@property
def id_video_group(self):
"""
Gets the id_video_group of this Video.
:return: The id_video_group of this Video.
:rtype: int
"""
return self._id_video_group
@id_video_group.setter
def id_video_group(self, id_video_group):
"""
Sets the id_video_group of this Video.
:param id_video_group: The id_video_group of this Video.
:type: int
"""
self._id_video_group = id_video_group
@property
def id_product_image(self):
"""
Gets the id_product_image of this Video.
:return: The id_product_image of this Video.
:rtype: int
"""
return self._id_product_image
@id_product_image.setter
def id_product_image(self, id_product_image):
"""
Sets the id_product_image of this Video.
:param id_product_image: The id_product_image of this Video.
:type: int
"""
self._id_product_image = id_product_image
@property
def id_language(self):
"""
Gets the id_language of this Video.
:return: The id_language of this Video.
:rtype: int
"""
return self._id_language
@id_language.setter
def id_language(self, id_language):
"""
Sets the id_language of this Video.
:param id_language: The id_language of this Video.
:type: int
"""
self._id_language = id_language
@property
def language_filter(self):
"""
Gets the language_filter of this Video.
:return: The language_filter of this Video.
:rtype: int
"""
return self._language_filter
@language_filter.setter
def language_filter(self, language_filter):
"""
Sets the language_filter of this Video.
:param language_filter: The language_filter of this Video.
:type: int
"""
self._language_filter = language_filter
@property
def id_media_source(self):
"""
Gets the id_media_source of this Video.
:return: The id_media_source of this Video.
:rtype: int
"""
return self._id_media_source
@id_media_source.setter
def id_media_source(self, id_media_source):
"""
Sets the id_media_source of this Video.
:param id_media_source: The id_media_source of this Video.
:type: int
"""
self._id_media_source = id_media_source
@property
def name(self):
"""
Gets the name of this Video.
:return: The name of this Video.
:rtype: list[I18nField]
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Video.
:param name: The name of this Video.
:type: list[I18nField]
"""
self._name = name
@property
def description(self):
"""
Gets the description of this Video.
:return: The description of this Video.
:rtype: list[I18nField]
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this Video.
:param description: The description of this Video.
:type: list[I18nField]
"""
self._description = description
@property
def duration(self):
"""
Gets the duration of this Video.
:return: The duration of this Video.
:rtype: int
"""
return self._duration
@duration.setter
def duration(self, duration):
"""
Sets the duration of this Video.
:param duration: The duration of this Video.
:type: int
"""
self._duration = duration
@property
def filename(self):
"""
Gets the filename of this Video.
:return: The filename of this Video.
:rtype: str
"""
return self._filename
@filename.setter
def filename(self, filename):
"""
Sets the filename of this Video.
:param filename: The filename of this Video.
:type: str
"""
self._filename = filename
@property
def position(self):
"""
Gets the position of this Video.
:return: The position of this Video.
:rtype: int
"""
return self._position
@position.setter
def position(self, position):
"""
Sets the position of this Video.
:param position: The position of this Video.
:type: int
"""
self._position = position
@property
def subscription(self):
"""
Gets the subscription of this Video.
:return: The subscription of this Video.
:rtype: int
"""
return self._subscription
@subscription.setter
def subscription(self, subscription):
"""
Sets the subscription of this Video.
:param subscription: The subscription of this Video.
:type: int
"""
self._subscription = subscription
@property
def free(self):
"""
Gets the free of this Video.
:return: The free of this Video.
:rtype: int
"""
return self._free
@free.setter
def free(self, free):
"""
Sets the free of this Video.
:param free: The free of this Video.
:type: int
"""
self._free = free
@property
def download(self):
"""
Gets the download of this Video.
:return: The download of this Video.
:rtype: int
"""
return self._download
@download.setter
def download(self, download):
"""
Sets the download of this Video.
:param download: The download of this Video.
:type: int
"""
self._download = download
@property
def active(self):
"""
Gets the active of this Video.
:return: The active of this Video.
:rtype: bool
"""
return self._active
@active.setter
def active(self, active):
"""
Sets the active of this Video.
:param active: The active of this Video.
:type: bool
"""
self._active = active
@property
def date_add(self):
"""
Gets the date_add of this Video.
:return: The date_add of this Video.
:rtype: str
"""
return self._date_add
@date_add.setter
def date_add(self, date_add):
"""
Sets the date_add of this Video.
:param date_add: The date_add of this Video.
:type: str
"""
self._date_add = date_add
@property
def date_upd(self):
"""
Gets the date_upd of this Video.
:return: The date_upd of this Video.
:rtype: str
"""
return self._date_upd
@date_upd.setter
def date_upd(self, date_upd):
"""
Sets the date_upd of this Video.
:param date_upd: The date_upd of this Video.
:type: str
"""
self._date_upd = date_upd
@property
def can_watch(self):
"""
Gets the can_watch of this Video.
:return: The can_watch of this Video.
:rtype: bool
"""
return self._can_watch
@can_watch.setter
def can_watch(self, can_watch):
"""
Sets the can_watch of this Video.
:param can_watch: The can_watch of this Video.
:type: bool
"""
self._can_watch = can_watch
@property
def cover(self):
"""
Gets the cover of this Video.
:return: The cover of this Video.
:rtype: str
"""
return self._cover
@cover.setter
def cover(self, cover):
"""
Sets the cover of this Video.
:param cover: The cover of this Video.
:type: str
"""
self._cover = cover
@property
def thumbnail(self):
"""
Gets the thumbnail of this Video.
:return: The thumbnail of this Video.
:rtype: str
"""
return self._thumbnail
@thumbnail.setter
def thumbnail(self, thumbnail):
"""
Sets the thumbnail of this Video.
:param thumbnail: The thumbnail of this Video.
:type: str
"""
self._thumbnail = thumbnail
@property
def geoloc_enabled(self):
"""
Gets the geoloc_enabled of this Video.
:return: The geoloc_enabled of this Video.
:rtype: bool
"""
return self._geoloc_enabled
@geoloc_enabled.setter
def geoloc_enabled(self, geoloc_enabled):
"""
Sets the geoloc_enabled of this Video.
:param geoloc_enabled: The geoloc_enabled of this Video.
:type: bool
"""
self._geoloc_enabled = geoloc_enabled
@property
def behavior_detected_countries(self):
"""
Gets the behavior_detected_countries of this Video.
:return: The behavior_detected_countries of this Video.
:rtype: str
"""
return self._behavior_detected_countries
@behavior_detected_countries.setter
def behavior_detected_countries(self, behavior_detected_countries):
"""
Sets the behavior_detected_countries of this Video.
:param behavior_detected_countries: The behavior_detected_countries of this Video.
:type: str
"""
self._behavior_detected_countries = behavior_detected_countries
@property
def behavior_non_detected_countries(self):
"""
Gets the behavior_non_detected_countries of this Video.
:return: The behavior_non_detected_countries of this Video.
:rtype: str
"""
return self._behavior_non_detected_countries
@behavior_non_detected_countries.setter
def behavior_non_detected_countries(self, behavior_non_detected_countries):
"""
Sets the behavior_non_detected_countries of this Video.
:param behavior_non_detected_countries: The behavior_non_detected_countries of this Video.
:type: str
"""
self._behavior_non_detected_countries = behavior_non_detected_countries
@property
def has_free_access(self):
"""
Gets the has_free_access of this Video.
:return: The has_free_access of this Video.
:rtype: VideoFreeAccess
"""
return self._has_free_access
@has_free_access.setter
def has_free_access(self, has_free_access):
"""
Sets the has_free_access of this Video.
:param has_free_access: The has_free_access of this Video.
:type: VideoFreeAccess
"""
self._has_free_access = has_free_access
@property
def advertising_url(self):
"""
Gets the advertising_url of this Video.
:return: The advertising_url of this Video.
:rtype: str
"""
return self._advertising_url
@advertising_url.setter
def advertising_url(self, advertising_url):
"""
Sets the advertising_url of this Video.
:param advertising_url: The advertising_url of this Video.
:type: str
"""
self._advertising_url = advertising_url
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
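# Illustrative use of the generated model (field values are hypothetical):
#
#   video = Video(id=1, duration=120, active=True)
#   video.to_dict()   # -> {'id': 1, ..., 'duration': 120, 'active': True, ...}
#   print(video)      # pretty-printed dict via __repr__ -> to_str()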
|
|
"""Support for a Hue API to control Home Assistant."""
import logging
from aiohttp import web
from homeassistant import core
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_TEMPERATURE, SERVICE_TURN_OFF, SERVICE_TURN_ON,
SERVICE_VOLUME_SET, SERVICE_OPEN_COVER, SERVICE_CLOSE_COVER, STATE_ON,
STATE_OFF, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, ATTR_SUPPORTED_FEATURES
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS, SUPPORT_BRIGHTNESS
)
from homeassistant.components.climate.const import (
SERVICE_SET_TEMPERATURE, SUPPORT_TARGET_TEMPERATURE
)
from homeassistant.components.media_player.const import (
ATTR_MEDIA_VOLUME_LEVEL, SUPPORT_VOLUME_SET,
)
from homeassistant.components.fan import (
ATTR_SPEED, SUPPORT_SET_SPEED, SPEED_OFF, SPEED_LOW,
SPEED_MEDIUM, SPEED_HIGH
)
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION, ATTR_POSITION, SERVICE_SET_COVER_POSITION,
SUPPORT_SET_POSITION
)
from homeassistant.components import (
climate, cover, fan, media_player, light, script, scene
)
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.const import KEY_REAL_IP
from homeassistant.util.network import is_local
_LOGGER = logging.getLogger(__name__)
HUE_API_STATE_ON = 'on'
HUE_API_STATE_BRI = 'bri'
class HueUsernameView(HomeAssistantView):
"""Handle requests to create a username for the emulated hue bridge."""
url = '/api'
name = 'emulated_hue:api:create_username'
extra_urls = ['/api/']
requires_auth = False
async def post(self, request):
"""Handle a POST request."""
try:
data = await request.json()
except ValueError:
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
if 'devicetype' not in data:
return self.json_message('devicetype not specified',
HTTP_BAD_REQUEST)
if not is_local(request[KEY_REAL_IP]):
return self.json_message('only local IPs allowed',
HTTP_BAD_REQUEST)
return self.json([{'success': {'username': '12345678901234567890'}}])
class HueAllGroupsStateView(HomeAssistantView):
"""Group handler."""
url = '/api/{username}/groups'
name = 'emulated_hue:all_groups:state'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
@core.callback
def get(self, request, username):
"""Process a request to make the Brilliant Lightpad work."""
if not is_local(request[KEY_REAL_IP]):
return self.json_message('only local IPs allowed',
HTTP_BAD_REQUEST)
return self.json({
})
class HueGroupView(HomeAssistantView):
"""Group handler to get Logitech Pop working."""
url = '/api/{username}/groups/0/action'
name = 'emulated_hue:groups:state'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
@core.callback
def put(self, request, username):
"""Process a request to make the Logitech Pop working."""
if not is_local(request[KEY_REAL_IP]):
return self.json_message('only local IPs allowed',
HTTP_BAD_REQUEST)
return self.json([{
'error': {
'address': '/groups/0/action/scene',
'type': 7,
'description': 'invalid value, dummy for parameter, scene'
}
}])
class HueAllLightsStateView(HomeAssistantView):
"""Handle requests for getting and setting info about entities."""
url = '/api/{username}/lights'
name = 'emulated_hue:lights:state'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
@core.callback
def get(self, request, username):
"""Process a request to get the list of available lights."""
if not is_local(request[KEY_REAL_IP]):
return self.json_message('only local IPs allowed',
HTTP_BAD_REQUEST)
hass = request.app['hass']
json_response = {}
for entity in hass.states.async_all():
if self.config.is_entity_exposed(entity):
state, brightness = get_entity_state(self.config, entity)
number = self.config.entity_id_to_number(entity.entity_id)
json_response[number] = entity_to_json(
self.config, entity, state, brightness)
return self.json(json_response)
class HueOneLightStateView(HomeAssistantView):
"""Handle requests for getting and setting info about entities."""
url = '/api/{username}/lights/{entity_id}'
name = 'emulated_hue:light:state'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
@core.callback
def get(self, request, username, entity_id):
"""Process a request to get the state of an individual light."""
if not is_local(request[KEY_REAL_IP]):
return self.json_message('only local IPs allowed',
HTTP_BAD_REQUEST)
hass = request.app['hass']
entity_id = self.config.number_to_entity_id(entity_id)
entity = hass.states.get(entity_id)
if entity is None:
_LOGGER.error('Entity not found: %s', entity_id)
return web.Response(text="Entity not found", status=404)
if not self.config.is_entity_exposed(entity):
_LOGGER.error('Entity not exposed: %s', entity_id)
return web.Response(text="Entity not exposed", status=404)
state, brightness = get_entity_state(self.config, entity)
json_response = entity_to_json(self.config, entity, state, brightness)
return self.json(json_response)
class HueOneLightChangeView(HomeAssistantView):
"""Handle requests for getting and setting info about entities."""
url = '/api/{username}/lights/{entity_number}/state'
name = 'emulated_hue:light:state'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
async def put(self, request, username, entity_number):
"""Process a request to set the state of an individual light."""
if not is_local(request[KEY_REAL_IP]):
return self.json_message('only local IPs allowed',
HTTP_BAD_REQUEST)
config = self.config
hass = request.app['hass']
entity_id = config.number_to_entity_id(entity_number)
if entity_id is None:
_LOGGER.error('Unknown entity number: %s', entity_number)
return self.json_message('Entity not found', HTTP_NOT_FOUND)
entity = hass.states.get(entity_id)
if entity is None:
_LOGGER.error('Entity not found: %s', entity_id)
return self.json_message('Entity not found', HTTP_NOT_FOUND)
if not config.is_entity_exposed(entity):
_LOGGER.error('Entity not exposed: %s', entity_id)
return web.Response(text="Entity not exposed", status=404)
try:
request_json = await request.json()
except ValueError:
_LOGGER.error('Received invalid json')
return self.json_message('Invalid JSON', HTTP_BAD_REQUEST)
# Parse the request into requested "on" status and brightness
parsed = parse_hue_api_put_light_body(request_json, entity)
if parsed is None:
_LOGGER.error('Unable to parse data: %s', request_json)
return web.Response(text="Bad request", status=400)
result, brightness = parsed
# Choose general HA domain
domain = core.DOMAIN
# Entity needs separate call to turn on
turn_on_needed = False
# Convert the resulting "on" status into the service we need to call
service = SERVICE_TURN_ON if result else SERVICE_TURN_OFF
# Construct what we need to send to the service
data = {ATTR_ENTITY_ID: entity_id}
# Make sure the entity actually supports brightness
entity_features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if entity.domain == light.DOMAIN:
if entity_features & SUPPORT_BRIGHTNESS:
if brightness is not None:
data[ATTR_BRIGHTNESS] = brightness
# If the requested entity is a script add some variables
elif entity.domain == script.DOMAIN:
data['variables'] = {
'requested_state': STATE_ON if result else STATE_OFF
}
if brightness is not None:
data['variables']['requested_level'] = brightness
# If the requested entity is a climate, set the temperature
elif entity.domain == climate.DOMAIN:
# We don't support turning climate devices on or off,
# only setting the temperature
service = None
if entity_features & SUPPORT_TARGET_TEMPERATURE:
if brightness is not None:
domain = entity.domain
service = SERVICE_SET_TEMPERATURE
data[ATTR_TEMPERATURE] = brightness
# If the requested entity is a media player, convert to volume
elif entity.domain == media_player.DOMAIN:
if entity_features & SUPPORT_VOLUME_SET:
if brightness is not None:
turn_on_needed = True
domain = entity.domain
service = SERVICE_VOLUME_SET
# Convert 0-100 to 0.0-1.0
data[ATTR_MEDIA_VOLUME_LEVEL] = brightness / 100.0
# If the requested entity is a cover, convert to open_cover/close_cover
elif entity.domain == cover.DOMAIN:
domain = entity.domain
if service == SERVICE_TURN_ON:
service = SERVICE_OPEN_COVER
else:
service = SERVICE_CLOSE_COVER
if entity_features & SUPPORT_SET_POSITION:
if brightness is not None:
domain = entity.domain
service = SERVICE_SET_COVER_POSITION
data[ATTR_POSITION] = brightness
# If the requested entity is a fan, convert to speed
elif entity.domain == fan.DOMAIN:
if entity_features & SUPPORT_SET_SPEED:
if brightness is not None:
domain = entity.domain
# Convert 0-100 to a fan speed
if brightness == 0:
data[ATTR_SPEED] = SPEED_OFF
elif 0 < brightness <= 33.3:
data[ATTR_SPEED] = SPEED_LOW
elif 33.3 < brightness <= 66.6:
data[ATTR_SPEED] = SPEED_MEDIUM
elif 66.6 < brightness <= 100:
data[ATTR_SPEED] = SPEED_HIGH
if entity.domain in config.off_maps_to_on_domains:
# Map the off command to on
service = SERVICE_TURN_ON
# Caching is required because things like scripts and scenes won't
# report as "off" to Alexa if an "off" command is received, because
# they'll map to "on". Thus, instead of reporting its actual
# status, we report what Alexa will want to see, which is the same
# as the actual requested command.
config.cached_states[entity_id] = (result, brightness)
# Separate call to turn on needed
if turn_on_needed:
hass.async_create_task(hass.services.async_call(
core.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: entity_id},
blocking=True))
if service is not None:
hass.async_create_task(hass.services.async_call(
domain, service, data, blocking=True))
json_response = \
[create_hue_success_response(entity_id, HUE_API_STATE_ON, result)]
if brightness is not None:
json_response.append(create_hue_success_response(
entity_id, HUE_API_STATE_BRI, brightness))
return self.json(json_response)
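# A typical Alexa-originated request handled above looks like
#   PUT /api/<username>/lights/5/state   body: {"on": true, "bri": 200}
# which, for a brightness-capable light, becomes a turn_on service call
# carrying brightness 200 (the entity number 5 is hypothetical).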
def parse_hue_api_put_light_body(request_json, entity):
"""Parse the body of a request to change the state of a light."""
if HUE_API_STATE_ON in request_json:
if not isinstance(request_json[HUE_API_STATE_ON], bool):
return None
        if request_json[HUE_API_STATE_ON]:
# Echo requested device be turned on
brightness = None
report_brightness = False
result = True
else:
# Echo requested device be turned off
brightness = None
report_brightness = False
result = False
if HUE_API_STATE_BRI in request_json:
try:
# Clamp brightness from 0 to 255
brightness = \
max(0, min(int(request_json[HUE_API_STATE_BRI]), 255))
except ValueError:
return None
# Make sure the entity actually supports brightness
entity_features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if entity.domain == light.DOMAIN:
if entity_features & SUPPORT_BRIGHTNESS:
report_brightness = True
result = (brightness > 0)
elif entity.domain == scene.DOMAIN:
brightness = None
report_brightness = False
result = True
elif entity.domain in [
script.DOMAIN, media_player.DOMAIN,
fan.DOMAIN, cover.DOMAIN, climate.DOMAIN]:
# Convert 0-255 to 0-100
level = brightness / 255 * 100
brightness = round(level)
report_brightness = True
result = True
return (result, brightness) if report_brightness else (result, None)
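# Minimal sketch of the parser's contract (the stub entity below is
# hypothetical, standing in for a brightness-capable light):
#
#   class _StubLight:
#       domain = light.DOMAIN
#       attributes = {ATTR_SUPPORTED_FEATURES: SUPPORT_BRIGHTNESS}
#
#   parse_hue_api_put_light_body({'on': True, 'bri': 128}, _StubLight())
#   # -> (True, 128): turn on and report brightness 128
#   parse_hue_api_put_light_body({'on': 'yes'}, _StubLight())
#   # -> None: the Hue API requires 'on' to be a boolean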
def get_entity_state(config, entity):
"""Retrieve and convert state and brightness values for an entity."""
cached_state = config.cached_states.get(entity.entity_id, None)
if cached_state is None:
final_state = entity.state != STATE_OFF
final_brightness = entity.attributes.get(
ATTR_BRIGHTNESS, 255 if final_state else 0)
# Make sure the entity actually supports brightness
entity_features = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if entity.domain == light.DOMAIN:
if entity_features & SUPPORT_BRIGHTNESS:
pass
elif entity.domain == climate.DOMAIN:
temperature = entity.attributes.get(ATTR_TEMPERATURE, 0)
# Convert 0-100 to 0-255
final_brightness = round(temperature * 255 / 100)
elif entity.domain == media_player.DOMAIN:
level = entity.attributes.get(
ATTR_MEDIA_VOLUME_LEVEL, 1.0 if final_state else 0.0)
# Convert 0.0-1.0 to 0-255
final_brightness = round(min(1.0, level) * 255)
elif entity.domain == fan.DOMAIN:
speed = entity.attributes.get(ATTR_SPEED, 0)
# Convert 0.0-1.0 to 0-255
final_brightness = 0
if speed == SPEED_LOW:
final_brightness = 85
elif speed == SPEED_MEDIUM:
final_brightness = 170
elif speed == SPEED_HIGH:
final_brightness = 255
elif entity.domain == cover.DOMAIN:
level = entity.attributes.get(ATTR_CURRENT_POSITION, 0)
final_brightness = round(level / 100 * 255)
else:
final_state, final_brightness = cached_state
# Make sure brightness is valid
if final_brightness is None:
final_brightness = 255 if final_state else 0
return (final_state, final_brightness)
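# For example, a playing media_player at 50% volume reports as (True, 128):
# the 0.0-1.0 volume level is scaled onto the Hue 0-255 brightness range
# (round(0.5 * 255) = 128).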
def entity_to_json(config, entity, is_on=None, brightness=None):
"""Convert an entity to its Hue bridge JSON representation."""
return {
'state':
{
HUE_API_STATE_ON: is_on,
HUE_API_STATE_BRI: brightness,
'reachable': True
},
'type': 'Dimmable light',
'name': config.get_entity_name(entity),
'modelid': 'HASS123',
'uniqueid': entity.entity_id,
'swversion': '123'
}
def create_hue_success_response(entity_id, attr, value):
"""Create a success response for an attribute set on a light."""
success_key = '/lights/{}/state/{}'.format(entity_id, attr)
return {'success': {success_key: value}}
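# For reference, a successful brightness change produces Hue-style JSON such
# as (the entity id is hypothetical):
#   [{'success': {'/lights/light.kitchen/state/on': True}},
#    {'success': {'/lights/light.kitchen/state/bri': 128}}]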
|
|
"""evaluate.py
Script to create a system response for a given gold standard and then compare
the system response to that gold standard.
USAGE:
$ python evaluate.py --run --gold DIR1 --system DIR2 [OPTIONS]
$ python evaluate.py --comp --gold DIR1 --system DIR2 [OPTIONS]
$ python evaluate.py --diff --gold DIR1 --system DIR2 --out DIR3 [OPTIONS]
In the first invocation, the script takes the gold standard files in DIR1 and
for each file creates a system file in DIR2 that does not have the gold standard
tags but the tags generated by the system. In the second invocation, the script
compares the system results to the gold standard and writes precision, recall
and f-score results to the standard output. In the third invocation, html files
showing the difference between files will be written to DIR3.
All files in the gold standard are expected to be TTK files. See the code in
utilities.convert for how to convert to the TTK format.
OPTIONS:
--limit INT
Caps the number of files processed from the directory. If no limit is
given all files will be processed.
--display=CHOICE1,CHOICE2,...
    This determines what entity pairs are displayed. By default all entity
pairs from the gold and system tags are displayed: matches, partial
matches, false positives and false negatives. But if the --display option
is used then only the ones listed are displayed. Available choices are:
EXACT_MATCH, PARTIAL_MATCH, NO_MATCH_FP and NO_MATCH_FN. This option is
only relevant for the third invocation above. Example:
--display=PARTIAL_MATCH,NO_MATCH_FN
With this value only partial matches and false negatives are displayed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, sys, shutil, copy, getopt
from io import StringIO
from six.moves import range
sys.path.insert(0, '..')
sys.path.insert(0, '.')
import tarsqi
from library.main import LIBRARY
# Keep the directory this script was called from for later use (Tarsqi will
# change current directories while processing), also keep the directory of this
# script around.
EXEC_DIR = os.getcwd()
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
EVENT = LIBRARY.timeml.EVENT
TIMEX = LIBRARY.timeml.TIMEX
ALINK = LIBRARY.timeml.ALINK
SLINK = LIBRARY.timeml.SLINK
TLINK = LIBRARY.timeml.TLINK
LINK_TAGS = (ALINK, SLINK, TLINK)
TIMEML_TAGS = (EVENT, TIMEX, ALINK, SLINK, TLINK)
TID = LIBRARY.timeml.TID
EIID = LIBRARY.timeml.EIID
RELTYPE = LIBRARY.timeml.RELTYPE
TIME_ID = LIBRARY.timeml.TIME_ID
EVENT_INSTANCE_ID = LIBRARY.timeml.EVENT_INSTANCE_ID
RELATED_TO_TIME = LIBRARY.timeml.RELATED_TO_TIME
RELATED_TO_EVENT_INSTANCE = LIBRARY.timeml.RELATED_TO_EVENT_INSTANCE
SUBORDINATED_EVENT_INSTANCE = LIBRARY.timeml.SUBORDINATED_EVENT_INSTANCE
# the four kinds of aligned entities
EXACT_MATCH = 'EXACT_MATCH'
PARTIAL_MATCH = 'PARTIAL_MATCH'
NO_MATCH_FP = 'NO_MATCH_FP'
NO_MATCH_FN = 'NO_MATCH_FN'
DISPLAY_CHOICES = [EXACT_MATCH, PARTIAL_MATCH, NO_MATCH_FP, NO_MATCH_FN]
# style file used for the html display of differences
CSS = """
<style>
div { display: block }
p { margin: 5pt; margin-bottom: 15pt; padding: 5pt; }
table { margin-bottom: 25px; width: 100%; }
table.scores { margin: 10px; margin-bottom: 25px; width: auto; }
.bordered { border: thin dotted black; }
sup.s { color: darkred; font-weight: bold; }
sup.chunk { color: darkblue; font-weight: bold; }
sup.pos { color: darkblue; font-weight: bold; }
sup.lex { color: darkgreen; font-weight: bold; font-size: 60%; }
.bracket { color: darkblue; font-weight: bold; }
.sbracket { color: darkred; font-weight: bold; }
entity { color: darkred; text-decoration: underline; }
</style>
"""
def create_system_files_from_gold_standard(gold_dir, system_dir, limit):
"""Take the TTK files in gold_dir and create TTK files in system_dir that have
the same text and docelement tags, do not have the other tarsqi tags from
the gold standard and have tags as added by the current system."""
print(system_dir)
if os.path.exists(system_dir):
exit("Error: directory %s already exists" % system_dir)
else:
os.makedirs(system_dir)
# get the absolute paths now because components may change the current directory
gold_dir = os.path.abspath(gold_dir)
system_dir = os.path.abspath(system_dir)
count = 0
for fname in os.listdir(gold_dir):
count += 1
if count > limit:
break
print(fname)
gold_file = os.path.join(gold_dir, fname)
system_file = os.path.join(system_dir, fname)
create_system_file_from_gold_standard(gold_file, system_file)
def create_system_file_from_gold_standard(gold_file, system_file):
"""Take gold_file, a TTK file, and create the TTK file system_file that has
the same text and docelement tags, does not have the other tarsqi tags from
the gold standard and has tags as added by the current system."""
# TODO: need to deal with the fact that with THYME we have a ttk version and
# we use source=ttk, but there really needs to be a metadata parser that
# does works for THYME documents. One option is to have the conversion find
# the DCT.
tarsqi_inst, tarsqidoc = tarsqi.load_ttk_document(gold_file)
# before you reset, keep the docelement tags so that we do not have to rerun
# the document parser
docelement_tags = [t for t in tarsqidoc.tags.all_tags() if t.name == 'docelement']
tarsqidoc.tags.reset()
for tag in docelement_tags:
tarsqidoc.tags.append(tag)
tarsqidoc.tags.index()
for (name, wrapper) in tarsqi_inst.pipeline:
tarsqi_inst._apply_component(name, wrapper, tarsqidoc)
tarsqidoc.print_all(system_file)
def compare_dirs(gold_dir, system_dir, limit=sys.maxsize):
"""Generate the precision, recall and f-score numbers for the directories."""
fstats = []
fnames = _collect_files(gold_dir, system_dir, limit)
for fname in fnames:
print(fname)
fstats.append(
FileStatistics(os.path.join(gold_dir, fname),
os.path.join(system_dir, fname)))
dstats = DirectoryStatistics(system_dir, fstats)
dstats.pp()
def view_differences(gold_dir, system_dir, display_dir, display_choices,
limit=sys.maxsize):
"""Create HTML files that view the differences."""
display_dir = _create_display_dir(display_dir)
fnames = _collect_files(gold_dir, system_dir, limit)
for fname in fnames:
print(fname)
FileStatistics(os.path.join(gold_dir, fname),
os.path.join(system_dir, fname),
display_dir, display_choices)
def _collect_files(gold_dir, system_dir, limit):
"""Return the list of files to run the comparison on."""
gold_files = os.listdir(gold_dir)
system_files = os.listdir(system_dir)
# don't assume the directory content is the same, take the intersection
fnames = sorted(list(set(gold_files).intersection(set(system_files))))
# TODO: includes a hack to avoid a file, get rid of it
fnames = [f for f in fnames[:limit] if not f.endswith('wsj_0907.tml')]
return fnames
def _create_display_dir(display_dir):
"""Create the display directory and initialize it with the icons needed for
the display."""
if display_dir is not None:
if not os.path.isabs(display_dir):
display_dir = os.path.abspath(os.path.join(EXEC_DIR, display_dir))
if os.path.exists(display_dir):
exit("ERROR: directory '%s' already exists" % display_dir)
else:
# setup the output directory
os.makedirs(display_dir)
os.makedirs(os.path.join(display_dir, 'icons'))
icons = ('check-green.png', 'check-orange.png', 'cross-red.png')
for icon in icons:
shutil.copyfile(os.path.join(SCRIPT_DIR, 'icons', icon),
os.path.join(display_dir, 'icons', icon))
return display_dir
def _get_annotations(tag_repository):
"""Return a dictionary of the TimeML annotations in the tag repository."""
# TODO: is there solid motivation to use this instead of TagRepository
# itself?
timeml_tags = (EVENT, TIMEX, ALINK, SLINK, TLINK)
annotations = { tagname: {} for tagname in timeml_tags }
event_idx = {}
timex_idx = {}
for tag in tag_repository.all_tags():
if tag.name == EVENT:
event_idx[tag.attrs[EIID]] = tag
elif tag.name == TIMEX:
timex_idx[tag.attrs[TID]] = tag
for tag in tag_repository.all_tags():
if tag.name in timeml_tags:
offsets = _get_offsets(tag, event_idx, timex_idx)
if offsets is not None:
annotations[tag.name][offsets] = tag.attrs
return annotations
def _get_offsets(tag, event_idx, timex_idx):
"""Get begin and end offsets for the tag. For an event or time, this is a pair
of offsets, for example (13,16). For a link, this is pair of the offsets of
the source and target of the link, for example ((13,16),(24,29))."""
if tag.name in LINK_TAGS:
        id1, id1_type = tag.attrs.get(TIME_ID), TIMEX
        if id1 is None:
            id1, id1_type = tag.attrs.get(EVENT_INSTANCE_ID), EVENT
id2, id2_type = tag.attrs.get(RELATED_TO_TIME), TIMEX
if id2 is None:
id2, id2_type = tag.attrs.get(RELATED_TO_EVENT_INSTANCE), EVENT
if id2 is None:
id2, id2_type = tag.attrs.get(SUBORDINATED_EVENT_INSTANCE), EVENT
offsets = [_retrieve_from_index(id1, id1_type, event_idx, timex_idx),
_retrieve_from_index(id2, id2_type, event_idx, timex_idx)]
if len(offsets) != 2:
_offset_warning("unexpected offsets", tag, offsets)
return None
elif offsets[0][0] is None or offsets[1][0] is None:
_offset_warning("cannot find source and/or target", tag, offsets)
return None
else:
return tuple(offsets)
else:
return (tag.begin, tag.end)
def _retrieve_from_index(identifier, tagtype, event_idx, timex_idx):
idx = event_idx if tagtype == EVENT else timex_idx
try:
return (idx[identifier].begin, idx[identifier].end)
except KeyError:
return (None, None)
def precision(tp, fp):
try:
return (tp / (tp + fp))
except ZeroDivisionError:
return None
def recall(tp, fn):
try:
return tp / (tp + fn)
except ZeroDivisionError:
return None
def fscore(tp, fp, fn):
p = precision(tp, fp)
r = recall(tp, fn)
if p is None or r is None:
return None
try:
return (2 * p * r) / (p + r)
except ZeroDivisionError:
return None
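# Worked example: 8 true positives, 2 false positives and 4 false negatives
# give precision 8/10 = 0.80, recall 8/12 = 0.67 and
# f-score 2 * 0.80 * 0.67 / (0.80 + 0.67) = 0.73 (rounded).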
def _as_float_string(f):
"""Takes a floating point number and returns it as a formatted string"""
return "%.2f" % f if f is not None else 'nil'
def _offset_warning(message, tag, offsets):
print("WARNING: %s" % message)
print(" %s" % offsets)
print(" %s" % tag.as_ttk_tag())
def print_annotations(annotations, tag=None):
for tagname in sorted(annotations):
if tag is not None and tag != tagname:
continue
print("\n", tagname)
for offsets in sorted(annotations[tagname]):
attrs = annotations[tagname][offsets].items()
attrs_str = ' '.join(["%s=%s" % (a,v) for a,v in attrs])
print(" %s %s" % (offsets, attrs_str))
class FileStatistics(object):
def __init__(self, gold_file, system_file,
display_dir=None, display_choices=None):
tarsqi_instance, tarsqi_doc = tarsqi.load_ttk_document(gold_file)
self.tarsqidoc_gold = tarsqi_doc
tarsqi_instance, tarsqi_doc = tarsqi.load_ttk_document(system_file)
self.tarsqidoc_system = tarsqi_doc
self.filename = system_file
self.gold = _get_annotations(self.tarsqidoc_gold.tags)
self.system = _get_annotations(self.tarsqidoc_system.tags)
self.events = EntityStatistics(self, EVENT, display_dir, display_choices)
self.timexes = EntityStatistics(self, TIMEX, display_dir, display_choices)
self.alinks = LinkStatistics(self.filename, ALINK, self.gold, self.system)
self.slinks = LinkStatistics(self.filename, SLINK, self.gold, self.system)
self.tlinks = LinkStatistics(self.filename, TLINK, self.gold, self.system)
def __str__(self):
return "%s\n%s\n%s\n%s\n%s" % (self.events, self.timexes,
self.alinks, self.slinks, self.tlinks)
class DirectoryStatistics(FileStatistics):
def __init__(self, directory, statslist):
self.filename = directory
self.statistics = statslist
self.events = AggregateEntityStatistics(directory, [s.events for s in statslist])
self.timexes = AggregateEntityStatistics(directory, [s.timexes for s in statslist])
self.alinks = AggregateLinkStatistics(directory, [s.alinks for s in statslist])
self.slinks = AggregateLinkStatistics(directory, [s.slinks for s in statslist])
self.tlinks = AggregateLinkStatistics(directory, [s.tlinks for s in statslist])
def __str__(self):
return "%s\n%s\n%s\n%s\n%s" % (
self.events, self.timexes, self.alinks, self.slinks, self.tlinks)
def pp(self):
print("\n%s\n" % self)
class EntityStatistics(object):
def __init__(self, file_statistics, tagname, display_dir, display_choices):
self.filename = file_statistics.filename
self.tagname = tagname
self.tarsqidoc_gold = file_statistics.tarsqidoc_gold
self.tarsqidoc_system = file_statistics.tarsqidoc_system
self.gold_tags = file_statistics.gold[self.tagname]
self.system_tags = file_statistics.system[self.tagname]
self.tp = 0
self.fp = 0
self.fn = 0
self._collect_counts()
        # The following code presents the differences between the gold and the
        # system; its underlying counting should probably be used for the P&R
        # as well (allowing strict versus relaxed matching, whereas the above
        # only has strict matching).
if display_dir is not None:
Viewer(self, display_dir, display_choices)
def __str__(self):
return "<Statistics %s %s tp:%s fp:%s fn:%s precision=%s recall=%s f-score=%s>" % \
(self.tagname, self.filename, self.tp, self.fp, self.fn,
_as_float_string(self.precision()),
_as_float_string(self.recall()),
_as_float_string(self.fscore()))
def precision(self):
return precision(self.tp, self.fp)
def recall(self):
return recall(self.tp, self.fn)
def fscore(self):
return fscore(self.tp, self.fp, self.fn)
def _collect_counts(self):
"""Collect the counts for true positives, false positives and false
negatives."""
# TODO. This does not take the full-range into account and therefore
# gives much lower numbers for cases where multi-token events were
# imported. It also does not allow for relaxed matching.
for t in self.system_tags.keys():
if t in self.gold_tags:
self.tp += 1
else:
self.fp += 1
for t in self.gold_tags.keys():
if t not in self.system_tags:
self.fn += 1
class LinkStatistics(object):
def __init__(self, filename, tagname, gold_annotations, system_annotations):
self.filename = filename
self.tagname = tagname
self.gold_tags = gold_annotations[tagname]
self.system_tags = system_annotations[tagname]
self.overlap = self._overlap(self.gold_tags, self.system_tags)
self.correct = 0
self.incorrect = 0
for offset in self.overlap:
if self.gold_tags[offset][RELTYPE] == self.system_tags[offset][RELTYPE]:
self.correct += 1
else:
self.incorrect += 1
def __str__(self):
accuracy = self.accuracy()
astring = "nil" if accuracy is None else "%.2f" % accuracy
return "<Statistics %s %s correct:%s incorrect:%s accuracy:%s>" % \
(self.tagname, self.filename, self.correct, self.incorrect, astring)
@staticmethod
def _overlap(annotations1, annotations2):
"""Now just gets the keys that both have in common, should include links where
source and target are reversed."""
return [val for val in annotations1 if val in annotations2]
def accuracy(self):
try:
return self.correct / (self.correct + self.incorrect)
except ZeroDivisionError:
return None
class AggregateEntityStatistics(EntityStatistics):
def __init__(self, directory, statistics_list):
self.tagname = statistics_list[0].tagname
self.filename = directory
self.statistics = statistics_list
self.tp = sum([stats.tp for stats in statistics_list])
self.fp = sum([stats.fp for stats in statistics_list])
self.fn = sum([stats.fn for stats in statistics_list])
class AggregateLinkStatistics(LinkStatistics):
def __init__(self, directory, statistics_list):
self.tagname = statistics_list[0].tagname
self.filename = directory
self.statistics = statistics_list
self.correct = sum([stats.correct for stats in statistics_list])
self.incorrect = sum([stats.incorrect for stats in statistics_list])
class Viewer(object):
"""Creates the HTML files that show the differences between the entities in
two files."""
def __init__(self, entity_statistics, display_dir, display_choices):
"""Take the data from the EntityStatistics instance (which got most of those
from the FileStatistics instance)."""
self.entity_stats = entity_statistics
self.filename = entity_statistics.filename
self.tagname = entity_statistics.tagname
self.tarsqidoc_gold = entity_statistics.tarsqidoc_gold
self.tarsqidoc_system = entity_statistics.tarsqidoc_system
self.gold_tags = entity_statistics.gold_tags
self.system_tags = entity_statistics.system_tags
self.display_dir = display_dir
self.display_choices = display_choices
self._build_idxs()
self._align_tags()
self._display_aligned_tags()
def _build_idxs(self):
"""Builds indexes that store the begin and end offset of s, ng and vg
tags. In addition, it stores the end offset of a lex tag and the lex
tag's associated pos."""
self.open_idx = { 's': set(), 'ng': set(), 'vg': set() }
self.close_idx = { 's': set(), 'ng': set(), 'vg': set(), 'lex': {} }
s_tags = self.tarsqidoc_system.tags.find_tags('s')
vg_tags = self.tarsqidoc_system.tags.find_tags('vg')
ng_tags = self.tarsqidoc_system.tags.find_tags('ng')
lex_tags = self.tarsqidoc_system.tags.find_tags('lex')
self._update_idxs(s_tags, 's')
self._update_idxs(ng_tags, 'ng')
self._update_idxs(vg_tags, 'vg')
for lex in lex_tags:
self.close_idx['lex'][lex.end] = lex.attrs['pos']
def _update_idxs(self, tags, tagname):
for t in tags:
self.open_idx[tagname].add(t.begin)
self.close_idx[tagname].add(t.end)
def _align_tags(self):
"""Takes two lists of annotations ordered on text position and returns
them as lists of paired up annotations. Annotations will only pair up if
they overlap, if a gold or system annotation does not overlap with a
counterpart on the other side then it will be in a pair with None."""
gold = [EntityAnnotation(k, v) for k, v in self.gold_tags.items()]
system = [EntityAnnotation(k, v) for k, v in self.system_tags.items()]
# Removing duplicates also sorts the annotations
gold = self._remove_duplicates(gold)
system = self._remove_duplicates(system)
self.alignments = []
while gold or system:
if not gold:
self.alignments.append(Alignment(self, None, system.pop(0)))
elif not system:
self.alignments.append(Alignment(self, gold.pop(0), None))
elif gold[0].overlaps_with(system[0]):
self.alignments.append(Alignment(self, gold.pop(0), system.pop(0)))
elif gold[0].end < system[0].begin:
self.alignments.append(Alignment(self, gold.pop(0), None))
elif gold[0].begin > system[0].end:
self.alignments.append(Alignment(self, None, system.pop(0)))
else:
exit("ERROR: no option available, infinite loop starting...")
@staticmethod
def _remove_duplicates(annotations):
"""This is to remove duplicates from the annotations. The reason why
this was put in is that with tag import there are cases when an imported
tag spans two chunks and it will be imported into each chunk. This needs
        to be fixed in the tag import of course, but in the meantime we do not
        want it to dilute results here. The result is sorted on text position."""
tmp = {}
for annotation in sorted(annotations):
tmp[annotation.offsets()] = annotation
return sorted(tmp.values())
def _display_aligned_tags(self):
# NOTE: when we run this we are in the ttk directory, even though we
# started in the testing subdirectory, adjust paths as needed
fname = os.path.join(self.display_dir, os.path.basename(self.filename))
fh = open("%s.%s.html" % (fname, self.tagname), 'w')
fh.write("<html>\n<head>%s</head>\n\n" % CSS)
fh.write("<body class=scores>\n\n")
fh.write("<h2>Precision and recall on this file</h2>\n\n")
self._display_p_and_r(fh)
fh.write("<h2>Aligning the key and response %s tags</h2>\n\n" % self.tagname)
self._display_legend(fh)
for alignment in self.alignments:
if self.display_choices[alignment.status]:
alignment.html(fh)
fh.write("</body>\n</html>\n")
def _display_p_and_r(self, fh):
stats = self.entity_stats
# P&R as calculated on the EntityStatistics
p1, r1, f1 = stats.precision(), stats.recall(), stats.fscore()
# P&R as calculated here, which uses the alignments array which takes
# into account the full-range attribute, so it gets much higher results
# for cases when we impoerted tags.
tp, fp, fn = self._count_matches(strict=True)
p2, r2, f2 = precision(tp, fp), recall(tp, fn), fscore(tp, fp, fn)
tp, fp, fn = self._count_matches(strict=False)
p3, r3, f3 = precision(tp, fp), recall(tp, fn), fscore(tp, fp, fn)
self._p_and_r_table(fh, ('strict', 'relaxed'), (p2, p3), (r2, r3), (f2, f3))
def _count_matches(self, strict=True):
tp, fp, fn = 0, 0, 0
for alignment in self.alignments:
if alignment.status == NO_MATCH_FP:
fp += 1
elif alignment.status == NO_MATCH_FN:
fn += 1
elif alignment.status == PARTIAL_MATCH:
if strict:
fp += 1
fn += 1
else:
tp += 1
elif alignment.status == EXACT_MATCH:
tp += 1
return (tp, fp, fn)
def _p_and_r_table(self, fh, headers, p_scores, r_scores, f_scores):
fh.write("<table class=scores cellpadding=8 cellspacing=0 border=1>\n")
nbsp, p_str, r_str, f_str = ' ', 'precision', 'recall', 'f-score'
HTML.row(fh, [nbsp] + list(headers))
HTML.row(fh, [p_str] + [ _as_float_string(p) for p in p_scores])
HTML.row(fh, [r_str] + [ _as_float_string(r) for r in r_scores])
HTML.row(fh, [f_str] + [ _as_float_string(f) for f in f_scores])
fh.write("</table>\n\n")
def _display_legend(self, fh):
def img(src): return '<img src="icons/%s.png" height=20>' % src
fh.write("<table class=scores cellpadding=8 cellspacing=0 border=1>\n")
em = len([a for a in self.alignments if a.status == EXACT_MATCH])
pm = len([a for a in self.alignments if a.status == PARTIAL_MATCH])
fp = len([a for a in self.alignments if a.status == NO_MATCH_FP])
fn = len([a for a in self.alignments if a.status == NO_MATCH_FN])
HTML.row(fh, [img("check-green"), 'exact match', em])
HTML.row(fh, [img("check-orange"), 'partial match', pm])
HTML.row(fh, [img('cross-red') + 'p',
'mismatch, false positive (precision error)', fp])
HTML.row(fh, [img('cross-red') + 'r',
'mismatch, false negative (recall error)', fn])
fh.write("</table>\n")
icons = { EXACT_MATCH: img('check-green'),
PARTIAL_MATCH: img('check-orange'),
NO_MATCH_FP: img('cross-red') + 'p',
NO_MATCH_FN: img('cross-red') + 'r' }
showing = [icons[choice]
for choice in DISPLAY_CHOICES
if self.display_choices[choice] is True]
fh.write("<p class=bordered>Showing: %s</p>\n"
% ' '.join(showing))
class EntityAnnotation(object):
"""Simple interface for an entity annotation."""
def __init__(self, offsets, attrs):
self.begin = offsets[0]
self.end = offsets[1]
# we keep these around so we can use them for sorting
self.begin_head = self.begin
self.end_head = self.end
self.attrs = attrs
full_range = self.attrs.get('full-range')
if full_range is not None:
begin, end = full_range.split('-')
self.begin = int(begin)
self.end = int(end)
self.tarsqidoc = None # filled in later by the Alignment instance
def __str__(self):
return "<EntityAnnotation %s:%s %s>" % (self.begin, self.end, self.attrs)
def __eq__(self, other):
return (self.begin == other.begin) \
and (self.end == other.end) \
and (self.begin_head == other.begin_head)
def __ne__(self, other):
return (self.begin != other.begin) \
or (self.end != other.end) \
or (self.begin_head != other.begin_head)
def __lt__(self, other):
return self._compare(other) < 0
def __le__(self, other):
return self._compare(other) <= 0
def __gt__(self, other):
return self._compare(other) > 0
def __ge__(self, other):
return self._compare(other) >= 0
def _compare(self, other):
# TODO: revisit this later, it is Python3 compliant, but that's about
# the best you can say
def comp(x, y):
return (x > y) - (x < y)
begin_comp = comp(self.begin, other.begin)
if begin_comp != 0:
return begin_comp
end_comp = comp(self.end, other.end)
if end_comp != 0:
return end_comp
return comp(self.begin_head, other.begin_head)
def overlaps_with(self, other):
return not (self.end <= other.begin or other.end <= self.begin)
def has_same_span(self, other):
return self.begin == other.begin and self.end == other.end
def offsets(self):
return (self.begin, self.end)
class Alignment(object):
def __init__(self, entitystats, gold_annotation, system_annotation):
self.tarsqidoc_gold = entitystats.tarsqidoc_gold
self.tarsqidoc_system = entitystats.tarsqidoc_system
self.gold_annotation = gold_annotation
self.system_annotation = system_annotation
self.open_idx = entitystats.open_idx
self.close_idx = entitystats.close_idx
if gold_annotation is not None:
self.gold_annotation.tarsqidoc = self.tarsqidoc_gold
if system_annotation is not None:
self.system_annotation.tarsqidoc = self.tarsqidoc_system
if self.gold_annotation is None:
self.status = NO_MATCH_FP
elif self.system_annotation is None:
self.status = NO_MATCH_FN
elif self.gold_annotation.has_same_span(self.system_annotation):
self.status = EXACT_MATCH
else:
self.status = PARTIAL_MATCH
def html(self, fh):
def oneliner(text):
return ' '.join(text.strip().split())
image = self._get_status_image()
p1, p2, text_span = self._get_span()
span1 = self._get_span_with_entity(p1, text_span, self.gold_annotation)
span2 = self._get_span_with_entity(p1, text_span, self.system_annotation)
text = text_span.replace("\n", "<br/>")
tagged_fragment = self._get_tagged_fragment(p1, p2, text_span)
fh.write("<table cellpadding=5 cellspacing=4>\n\n")
fh.write("<tr>\n")
fh.write(" <td valign=top width=40>%s</td>\n" % image)
fh.write(" <td class=bordered>\n")
fh.write(" <span class=entity_span><i>%s:%s</i></span><br/>\n" % (p1, p2))
fh.write(" <span class=entity_span>%s</span><br/>\n" % oneliner(span1))
fh.write(" <span class=entity_span>%s</span>\n" % oneliner(span2))
fh.write(" </td>\n")
fh.write("</tr>\n\n")
fh.write("<tr>\n")
fh.write(" <td valign=top> </td>\n")
fh.write(" <td class=bordered>%s</td>\n" % text)
fh.write("</tr>\n\n")
fh.write("<tr>\n")
fh.write(" <td valign=top> </td>\n")
fh.write(" <td class=bordered>%s</td>\n" % tagged_fragment)
fh.write("</tr>\n\n")
fh.write("</table>\n\n")
def _get_status_image(self):
if self.status == EXACT_MATCH:
return '<img src="icons/check-green.png" height=20>'
elif self.status == PARTIAL_MATCH:
return '<img src="icons/check-orange.png" height=20>'
elif self.status == NO_MATCH_FP:
return '<img src="icons/cross-red.png" height=20>p'
elif self.status == NO_MATCH_FN:
return '<img src="icons/cross-red.png" height=20>r'
def _get_span(self):
offsets = []
for annotation in self.gold_annotation, self.system_annotation:
if annotation is not None:
offsets.extend([annotation.begin, annotation.end])
offsets.sort()
span_begin = offsets[0] - 50
span_end = offsets[-1] + 50
if span_begin < 0:
span_begin = 0
if span_end > len(self.tarsqidoc_gold.sourcedoc.text):
            span_end = len(self.tarsqidoc_gold.sourcedoc.text) - 1
return (span_begin, span_end,
self.tarsqidoc_gold.sourcedoc[span_begin:span_end])
def _get_span_with_entity(self, p1, text_span, annotation):
if annotation is None:
return text_span
else:
a1 = annotation.begin - p1
a2 = annotation.end - p1
return "%s<entity>%s</entity>%s" \
% (text_span[:a1], text_span[a1:a2], text_span[a2:])
def _get_tagged_fragment(self, p1, p2, text):
def tag(cl, text): return "<sup class=%s>%s</sup>" % (cl, text)
def brc(cl, bracket): return "<span class=%s>%s</span>" % (cl, bracket)
output = StringIO()
for i in range(0, p2-p1):
i_adjusted = i + p1
if i_adjusted in self.open_idx['s']:
output.write('%s%s' % (tag('s', 's'), brc('sbracket', '[')))
if i_adjusted in self.open_idx['ng']:
output.write('%s%s' % (tag('chunk', 'ng'), brc('bracket', '[')))
if i_adjusted in self.open_idx['vg']:
output.write('%s%s' % (tag('chunk', 'vg'), brc('bracket', '[')))
output.write(text[i])
if i_adjusted + 1 in self.close_idx['lex']:
output.write(tag('lex', self.close_idx['lex'][i_adjusted + 1]))
if i_adjusted + 1 in self.close_idx['ng']:
output.write('%s%s' % (brc('bracket', ']'), tag('chunk', 'ng')))
if i_adjusted + 1 in self.close_idx['vg']:
output.write('%s%s' % (brc('bracket', ']'), tag('chunk', 'vg')))
if i_adjusted + 1 in self.close_idx['s']:
output.write('%s%s' % (brc('sbracket', ']'), tag('s', 's')))
return output.getvalue()
class HTML(object):
"""Utility class for printing HTML to a file handle."""
    @classmethod
    def row(cls, fh, elements):
fh.write("<tr>\n")
for e in elements:
align = ' align=right' if isinstance(e, int) else ''
fh.write(" <td%s>%s\n" % (align, e))
fh.write("</tr>\n")
if __name__ == '__main__':
options = ['run', 'comp' , 'diff',
'gold=', 'system=', 'out=', 'display=', 'limit=']
(opts, args) = getopt.getopt(sys.argv[1:], '', options)
opts = { k:v for k,v in opts }
gold = os.path.abspath(opts.get('--gold'))
system = os.path.abspath(opts.get('--system'))
limit = int(opts.get('--limit', sys.maxsize))
out = opts.get('--out')
display = opts.get('--display')
display_categories = [EXACT_MATCH, PARTIAL_MATCH, NO_MATCH_FP, NO_MATCH_FN]
if display is None:
display_choices = { c:True for c in display_categories }
else:
display_choices = { c:False for c in display_categories }
for choice in display.split(','):
display_choices[choice] = True
if '--run' in opts:
create_system_files_from_gold_standard(gold, system, limit)
elif '--comp' in opts:
compare_dirs(gold, system, limit)
elif '--diff' in opts:
view_differences(gold, system, out, display_choices, limit)
|
|
__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
'stack', 'vstack']
import functools
import itertools
import operator
import warnings
from . import numeric as _nx
from . import overrides
from ._asarray import array, asanyarray
from .multiarray import normalize_axis_index
from . import fromnumeric as _from_nx
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
def _atleast_1d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_1d_dispatcher)
def atleast_1d(*arys):
"""
Convert inputs to arrays with at least one dimension.
Scalar inputs are converted to 1-dimensional arrays, whilst
higher-dimensional inputs are preserved.
Parameters
----------
arys1, arys2, ... : array_like
One or more input arrays.
Returns
-------
ret : ndarray
An array, or list of arrays, each with ``a.ndim >= 1``.
Copies are made only if necessary.
See Also
--------
atleast_2d, atleast_3d
Examples
--------
>>> np.atleast_1d(1.0)
array([1.])
>>> x = np.arange(9.0).reshape(3,3)
>>> np.atleast_1d(x)
array([[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]])
>>> np.atleast_1d(x) is x
True
>>> np.atleast_1d(1, [3, 4])
[array([1]), array([3, 4])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1)
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _atleast_2d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_2d_dispatcher)
def atleast_2d(*arys):
"""
View inputs as arrays with at least two dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted
to arrays. Arrays that already have two or more dimensions are
preserved.
Returns
-------
res, res2, ... : ndarray
An array, or list of arrays, each with ``a.ndim >= 2``.
Copies are avoided where possible, and views with two or more
dimensions are returned.
See Also
--------
atleast_1d, atleast_3d
Examples
--------
>>> np.atleast_2d(3.0)
array([[3.]])
>>> x = np.arange(3.0)
>>> np.atleast_2d(x)
array([[0., 1., 2.]])
>>> np.atleast_2d(x).base is x
True
>>> np.atleast_2d(1, [1, 2], [[1, 2]])
[array([[1]]), array([[1, 2]]), array([[1, 2]])]
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1, 1)
elif ary.ndim == 1:
result = ary[_nx.newaxis, :]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _atleast_3d_dispatcher(*arys):
return arys
@array_function_dispatch(_atleast_3d_dispatcher)
def atleast_3d(*arys):
"""
View inputs as arrays with at least three dimensions.
Parameters
----------
arys1, arys2, ... : array_like
One or more array-like sequences. Non-array inputs are converted to
arrays. Arrays that already have three or more dimensions are
preserved.
Returns
-------
res1, res2, ... : ndarray
An array, or list of arrays, each with ``a.ndim >= 3``. Copies are
avoided where possible, and views with three or more dimensions are
returned. For example, a 1-D array of shape ``(N,)`` becomes a view
of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
view of shape ``(M, N, 1)``.
See Also
--------
atleast_1d, atleast_2d
Examples
--------
>>> np.atleast_3d(3.0)
array([[[3.]]])
>>> x = np.arange(3.0)
>>> np.atleast_3d(x).shape
(1, 3, 1)
>>> x = np.arange(12.0).reshape(4,3)
>>> np.atleast_3d(x).shape
(4, 3, 1)
>>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself
True
>>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
... print(arr, arr.shape) # doctest: +SKIP
...
[[[1]
[2]]] (1, 2, 1)
[[[1]
[2]]] (1, 2, 1)
[[[1 2]]] (1, 1, 2)
"""
res = []
for ary in arys:
ary = asanyarray(ary)
if ary.ndim == 0:
result = ary.reshape(1, 1, 1)
elif ary.ndim == 1:
result = ary[_nx.newaxis, :, _nx.newaxis]
elif ary.ndim == 2:
result = ary[:, :, _nx.newaxis]
else:
result = ary
res.append(result)
if len(res) == 1:
return res[0]
else:
return res
def _arrays_for_stack_dispatcher(arrays, stacklevel=4):
if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
warnings.warn('arrays to stack must be passed as a "sequence" type '
'such as list or tuple. Support for non-sequence '
'iterables such as generators is deprecated as of '
'NumPy 1.16 and will raise an error in the future.',
FutureWarning, stacklevel=stacklevel)
return ()
return arrays
def _vhstack_dispatcher(tup):
return _arrays_for_stack_dispatcher(tup)
@array_function_dispatch(_vhstack_dispatcher)
def vstack(tup):
"""
Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
        The array formed by stacking the given arrays; it will be at least 2-D.
See Also
--------
stack : Join a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
concatenate : Join a sequence of arrays along an existing axis.
vsplit : Split array into a list of multiple sub-arrays vertically.
block : Assemble arrays from blocks.
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.vstack((a,b))
array([[1, 2, 3],
[2, 3, 4]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.vstack((a,b))
array([[1],
[2],
[3],
[2],
[3],
[4]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(tup, stacklevel=2)
arrs = atleast_2d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
return _nx.concatenate(arrs, 0)
@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup):
"""
Stack arrays in sequence horizontally (column wise).
This is equivalent to concatenation along the second axis, except for 1-D
arrays where it concatenates along the first axis. Rebuilds arrays divided
by `hsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
tup : sequence of ndarrays
The arrays must have the same shape along all but the second axis,
except 1-D arrays which can be any length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
See Also
--------
stack : Join a sequence of arrays along a new axis.
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third axis).
concatenate : Join a sequence of arrays along an existing axis.
hsplit : Split array along second axis.
block : Assemble arrays from blocks.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.hstack((a,b))
array([1, 2, 3, 2, 3, 4])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.hstack((a,b))
array([[1, 2],
[2, 3],
[3, 4]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(tup, stacklevel=2)
arrs = atleast_1d(*tup)
if not isinstance(arrs, list):
arrs = [arrs]
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs and arrs[0].ndim == 1:
return _nx.concatenate(arrs, 0)
else:
return _nx.concatenate(arrs, 1)
def _stack_dispatcher(arrays, axis=None, out=None):
arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
if out is not None:
# optimize for the typical case where only arrays is provided
arrays = list(arrays)
arrays.append(out)
return arrays
@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None):
"""
Join a sequence of arrays along a new axis.
The ``axis`` parameter specifies the index of the new axis in the
dimensions of the result. For example, if ``axis=0`` it will be the first
dimension and if ``axis=-1`` it will be the last dimension.
.. versionadded:: 1.10.0
Parameters
----------
arrays : sequence of array_like
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what stack would have returned if no
out argument were specified.
Returns
-------
stacked : ndarray
The stacked array has one more dimension than the input arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
split : Split array into a list of multiple sub-arrays of equal size.
block : Assemble arrays from blocks.
Examples
--------
>>> arrays = [np.random.randn(3, 4) for _ in range(10)]
>>> np.stack(arrays, axis=0).shape
(10, 3, 4)
>>> np.stack(arrays, axis=1).shape
(3, 10, 4)
>>> np.stack(arrays, axis=2).shape
(3, 4, 10)
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.stack((a, b))
array([[1, 2, 3],
[2, 3, 4]])
>>> np.stack((a, b), axis=-1)
array([[1, 2],
[2, 3],
[3, 4]])
"""
if not overrides.ARRAY_FUNCTION_ENABLED:
# raise warning if necessary
_arrays_for_stack_dispatcher(arrays, stacklevel=2)
arrays = [asanyarray(arr) for arr in arrays]
if not arrays:
raise ValueError('need at least one array to stack')
shapes = {arr.shape for arr in arrays}
if len(shapes) != 1:
raise ValueError('all input arrays must have the same shape')
result_ndim = arrays[0].ndim + 1
axis = normalize_axis_index(axis, result_ndim)
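    # Build an index that inserts a new axis at position `axis`; e.g. with
    # axis=1, sl == (slice(None), newaxis), so arr[sl] has shape (d0, 1, d1, ...).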
sl = (slice(None),) * axis + (_nx.newaxis,)
expanded_arrays = [arr[sl] for arr in arrays]
return _nx.concatenate(expanded_arrays, axis=axis, out=out)
# Internal functions to eliminate the overhead of repeated dispatch in one of
# the two possible paths inside np.block.
# Use getattr to protect against __array_function__ being disabled.
_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate)
def _block_format_index(index):
"""
Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
"""
idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
return 'arrays' + idx_str
def _block_check_depths_match(arrays, parent_index=[]):
"""
Recursive function checking that the depths of nested lists in `arrays`
all match. Mismatch raises a ValueError as described in the block
docstring below.
The entire index (rather than just the depth) needs to be calculated
for each innermost list, in case an error needs to be raised, so that
the index of the offending list can be printed as part of the error.
Parameters
----------
arrays : nested list of arrays
The arrays to check
parent_index : list of int
The full index of `arrays` within the nested lists passed to
`_block_check_depths_match` at the top of the recursion.
Returns
-------
first_index : list of int
The full index of an element from the bottom of the nesting in
`arrays`. If any element at the bottom is an empty list, this will
refer to it, and the last index along the empty axis will be None.
max_arr_ndim : int
The maximum of the ndims of the arrays nested in `arrays`.
    final_size : int
        The number of elements in the final array. This is used to motivate
        the choice of algorithm, based on benchmarking wisdom.
"""
if type(arrays) is tuple:
# not strictly necessary, but saves us from:
# - more than one way to do things - no point treating tuples like
# lists
# - horribly confusing behaviour that results when tuples are
# treated like ndarray
raise TypeError(
'{} is a tuple. '
'Only lists can be used to arrange blocks, and np.block does '
'not allow implicit conversion from tuple to ndarray.'.format(
_block_format_index(parent_index)
)
)
elif type(arrays) is list and len(arrays) > 0:
idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
for i, arr in enumerate(arrays))
first_index, max_arr_ndim, final_size = next(idxs_ndims)
for index, ndim, size in idxs_ndims:
final_size += size
if ndim > max_arr_ndim:
max_arr_ndim = ndim
if len(index) != len(first_index):
raise ValueError(
"List depths are mismatched. First element was at depth "
"{}, but there is an element at depth {} ({})".format(
len(first_index),
len(index),
_block_format_index(index)
)
)
# propagate our flag that indicates an empty list at the bottom
if index[-1] is None:
first_index = index
return first_index, max_arr_ndim, final_size
elif type(arrays) is list and len(arrays) == 0:
# We've 'bottomed out' on an empty list
return parent_index + [None], 0, 0
else:
# We've 'bottomed out' - arrays is either a scalar or an array
size = _size(arrays)
return parent_index, _ndim(arrays), size
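# Illustrative sketch (not part of the original module): for
# ``arrays = [[a, b], [c, d]]`` with four 2x2 blocks,
# ``_block_check_depths_match(arrays)`` returns ``([0, 0], 2, 16)`` -- the
# index of the first leaf, the maximum ndim, and the total element count.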
def _atleast_nd(a, ndim):
# Ensures `a` has at least `ndim` dimensions by prepending
# ones to `a.shape` as necessary
return array(a, ndmin=ndim, copy=False, subok=True)
def _accumulate(values):
return list(itertools.accumulate(values))
def _concatenate_shapes(shapes, axis):
"""Given array shapes, return the resulting shape and slices prefixes.
These help in nested concatation.
Returns
-------
shape: tuple of int
This tuple satisfies:
```
        shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
shape == concatenate(arrs, axis).shape
```
slice_prefixes: tuple of (slice(start, end), )
For a list of arrays being concatenated, this returns the slice
in the larger array at axis that needs to be sliced into.
For example, the following holds:
```
ret = concatenate([a, b, c], axis)
        _, (sl_a, sl_b, sl_c) = _concatenate_shapes([a.shape, b.shape, c.shape], axis)
ret[(slice(None),) * axis + sl_a] == a
ret[(slice(None),) * axis + sl_b] == b
ret[(slice(None),) * axis + sl_c] == c
```
These are called slice prefixes since they are used in the recursive
blocking algorithm to compute the left-most slices during the
        recursion. Therefore, they must be prepended to the rest of the slice
that was computed deeper in the recursion.
These are returned as tuples to ensure that they can quickly be added
        to an existing slice tuple without creating a new tuple every time.
"""
# Cache a result that will be reused.
shape_at_axis = [shape[axis] for shape in shapes]
# Take a shape, any shape
first_shape = shapes[0]
first_shape_pre = first_shape[:axis]
first_shape_post = first_shape[axis+1:]
if any(shape[:axis] != first_shape_pre or
shape[axis+1:] != first_shape_post for shape in shapes):
raise ValueError(
'Mismatched array shapes in block along axis {}.'.format(axis))
    shape = first_shape_pre + (sum(shape_at_axis),) + first_shape_post
offsets_at_axis = _accumulate(shape_at_axis)
slice_prefixes = [(slice(start, end),)
for start, end in zip([0] + offsets_at_axis,
offsets_at_axis)]
return shape, slice_prefixes
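# Worked example (illustrative): for shapes (2, 3) and (4, 3) concatenated
# along axis 0, _concatenate_shapes returns
#     shape          == (6, 3)
#     slice_prefixes == [(slice(0, 2),), (slice(2, 6),)]
# so each source block is assigned into result[prefix + deeper_slices].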
def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
"""
Returns the shape of the final array, along with a list
of slices and a list of arrays that can be used for assignment inside the
new array
Parameters
----------
arrays : nested list of arrays
The arrays to check
    max_depth : int
        The depth of the nested lists
    result_ndim : int
        The number of dimensions in the final array.
Returns
-------
shape : tuple of int
The shape that the final array will take on.
slices: list of tuple of slices
The slices into the full array required for assignment. These are
        required to be prepended with ``(Ellipsis, )`` to obtain the correct
final index.
arrays: list of ndarray
The data to assign to each slice of the full array
"""
if depth < max_depth:
shapes, slices, arrays = zip(
*[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
for arr in arrays])
axis = result_ndim - max_depth + depth
shape, slice_prefixes = _concatenate_shapes(shapes, axis)
# Prepend the slice prefix and flatten the slices
slices = [slice_prefix + the_slice
for slice_prefix, inner_slices in zip(slice_prefixes, slices)
for the_slice in inner_slices]
# Flatten the array list
arrays = functools.reduce(operator.add, arrays)
return shape, slices, arrays
else:
# We've 'bottomed out' - arrays is either a scalar or an array
# type(arrays) is not list
# Return the slice and the array inside a list to be consistent with
# the recursive case.
arr = _atleast_nd(arrays, result_ndim)
return arr.shape, [()], [arr]
def _block(arrays, max_depth, result_ndim, depth=0):
"""
Internal implementation of block based on repeated concatenation.
`arrays` is the argument passed to
block. `max_depth` is the depth of nested lists within `arrays` and
`result_ndim` is the greatest of the dimensions of the arrays in
`arrays` and the depth of the lists in `arrays` (see block docstring
for details).
"""
if depth < max_depth:
arrs = [_block(arr, max_depth, result_ndim, depth+1)
for arr in arrays]
return _concatenate(arrs, axis=-(max_depth-depth))
else:
# We've 'bottomed out' - arrays is either a scalar or an array
# type(arrays) is not list
return _atleast_nd(arrays, result_ndim)
def _block_dispatcher(arrays):
# Use type(...) is list to match the behavior of np.block(), which special
# cases list specifically rather than allowing for generic iterables or
# tuple. Also, we know that list.__array_function__ will never exist.
if type(arrays) is list:
for subarrays in arrays:
yield from _block_dispatcher(subarrays)
else:
yield arrays
@array_function_dispatch(_block_dispatcher)
def block(arrays):
"""
Assemble an nd-array from nested lists of blocks.
Blocks in the innermost lists are concatenated (see `concatenate`) along
the last dimension (-1), then these are concatenated along the
second-last dimension (-2), and so on until the outermost list is reached.
Blocks can be of any dimension, but will not be broadcasted using the normal
rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
the same for all blocks. This is primarily useful for working with scalars,
and means that code like ``np.block([v, 1])`` is valid, where
``v.ndim == 1``.
When the nested list is two levels deep, this allows block matrices to be
constructed from their components.
.. versionadded:: 1.13.0
Parameters
----------
arrays : nested list of array_like or scalars (but not tuples)
If passed a single ndarray or scalar (a nested list of depth 0), this
is returned unmodified (and not copied).
Elements shapes must match along the appropriate axes (without
broadcasting), but leading 1s will be prepended to the shape as
necessary to make the dimensions match.
Returns
-------
block_array : ndarray
The array assembled from the given blocks.
The dimensionality of the output is equal to the greatest of:
* the dimensionality of all the inputs
* the depth to which the input list is nested
Raises
------
ValueError
* If list depths are mismatched - for instance, ``[[a, b], c]`` is
illegal, and should be spelt ``[[a, b], [c]]``
* If lists are empty - for instance, ``[[a, b], []]``
See Also
--------
concatenate : Join a sequence of arrays together.
stack : Stack arrays in sequence along a new dimension.
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
vsplit : Split array into a list of multiple sub-arrays vertically.
Notes
-----
When called with only scalars, ``np.block`` is equivalent to an ndarray
call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
``np.array([[1, 2], [3, 4]])``.
This function does not enforce that the blocks lie on a fixed grid.
``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::
AAAbb
AAAbb
cccDD
But is also allowed to produce, for some ``a, b, c, d``::
AAAbb
AAAbb
cDDDD
Since concatenation happens along the last axis first, `block` is _not_
capable of producing the following directly::
AAAbb
cccbb
cccDD
Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.
Examples
--------
The most common use of this function is to build a block matrix
>>> A = np.eye(2) * 2
>>> B = np.eye(3) * 3
>>> np.block([
... [A, np.zeros((2, 3))],
... [np.ones((3, 2)), B ]
... ])
array([[2., 0., 0., 0., 0.],
[0., 2., 0., 0., 0.],
[1., 1., 3., 0., 0.],
[1., 1., 0., 3., 0.],
[1., 1., 0., 0., 3.]])
With a list of depth 1, `block` can be used as `hstack`
>>> np.block([1, 2, 3]) # hstack([1, 2, 3])
array([1, 2, 3])
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.block([a, b, 10]) # hstack([a, b, 10])
array([ 1, 2, 3, 2, 3, 4, 10])
>>> A = np.ones((2, 2), int)
>>> B = 2 * A
>>> np.block([A, B]) # hstack([A, B])
array([[1, 1, 2, 2],
[1, 1, 2, 2]])
With a list of depth 2, `block` can be used in place of `vstack`:
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.block([[a], [b]]) # vstack([a, b])
array([[1, 2, 3],
[2, 3, 4]])
>>> A = np.ones((2, 2), int)
>>> B = 2 * A
>>> np.block([[A], [B]]) # vstack([A, B])
array([[1, 1],
[1, 1],
[2, 2],
[2, 2]])
It can also be used in places of `atleast_1d` and `atleast_2d`
>>> a = np.array(0)
>>> b = np.array([1])
>>> np.block([a]) # atleast_1d(a)
array([0])
>>> np.block([b]) # atleast_1d(b)
array([1])
>>> np.block([[a]]) # atleast_2d(a)
array([[0]])
>>> np.block([[b]]) # atleast_2d(b)
array([[1]])
"""
arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)
# It was found through benchmarking that making an array of final size
# around 256x256 was faster by straight concatenation on a
# i7-7700HQ processor and dual channel ram 2400MHz.
# It didn't seem to matter heavily on the dtype used.
#
# A 2D array using repeated concatenation requires 2 copies of the array.
#
# The fastest algorithm will depend on the ratio of CPU power to memory
# speed.
# One can monitor the results of the benchmark
# https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
# to tune this parameter until a C version of the `_block_info_recursion`
# algorithm is implemented which would likely be faster than the python
# version.
if list_ndim * final_size > (2 * 512 * 512):
return _block_slicing(arrays, list_ndim, result_ndim)
else:
return _block_concatenate(arrays, list_ndim, result_ndim)
# These helper functions are mostly used for testing.
# They allow us to write tests that directly call `_block_slicing`
# or `_block_concatenate` without blocking large arrays to force the wisdom
# to trigger the desired path.
def _block_setup(arrays):
"""
Returns
(`arrays`, list_ndim, result_ndim, final_size)
"""
bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
list_ndim = len(bottom_index)
if bottom_index and bottom_index[-1] is None:
raise ValueError(
'List at {} cannot be empty'.format(
_block_format_index(bottom_index)
)
)
result_ndim = max(arr_ndim, list_ndim)
return arrays, list_ndim, result_ndim, final_size
def _block_slicing(arrays, list_ndim, result_ndim):
shape, slices, arrays = _block_info_recursion(
arrays, list_ndim, result_ndim)
dtype = _nx.result_type(*[arr.dtype for arr in arrays])
# Test preferring F only in the case that all input arrays are F
F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
order = 'F' if F_order and not C_order else 'C'
result = _nx.empty(shape=shape, dtype=dtype, order=order)
# Note: In a c implementation, the function
# PyArray_CreateMultiSortedStridePerm could be used for more advanced
# guessing of the desired order.
for the_slice, arr in zip(slices, arrays):
result[(Ellipsis,) + the_slice] = arr
return result
def _block_concatenate(arrays, list_ndim, result_ndim):
result = _block(arrays, list_ndim, result_ndim)
if list_ndim == 0:
# Catch an edge case where _block returns a view because
# `arrays` is a single numpy array and not a list of numpy arrays.
# This might copy scalars or lists twice, but this isn't a likely
# usecase for those interested in performance
result = result.copy()
return result
|
|
import numpy as np
import pytest
from astropy import time, units as u
from astropy.coordinates import CartesianRepresentation
from astropy.tests.helper import assert_quantity_allclose
from hypothesis import given, settings, strategies as st
from numpy.testing import assert_allclose
from pytest import approx
from poliastro.bodies import Earth, Moon, Sun
from poliastro.constants import J2000
from poliastro.core.elements import rv2coe
from poliastro.core.propagation import (
danby_coe,
func_twobody,
gooding_coe,
markley_coe,
mikkola_coe,
pimienta_coe,
)
from poliastro.core.propagation.farnocchia import farnocchia_coe
from poliastro.examples import iss
from poliastro.frames import Planes
from poliastro.twobody import Orbit
from poliastro.twobody.propagation import (
ALL_PROPAGATORS,
ELLIPTIC_PROPAGATORS,
HYPERBOLIC_PROPAGATORS,
PARABOLIC_PROPAGATORS,
cowell,
danby,
farnocchia,
gooding,
markley,
mikkola,
pimienta,
propagate,
vallado,
)
from poliastro.util import norm
@pytest.fixture(scope="module")
def halley():
return Orbit.from_vectors(
Sun,
[-9018878.63569932, -94116054.79839276, 22619058.69943215] * u.km,
[-49.95092305, -12.94843055, -4.29251577] * u.km / u.s,
)
@pytest.mark.parametrize("ecc", [0.9, 0.99, 0.999, 0.9999, 0.99999])
@pytest.mark.parametrize("propagator", ELLIPTIC_PROPAGATORS)
def test_elliptic_near_parabolic(ecc, propagator):
# 'kepler fails if really close to parabolic'. Refer to issue #714.
if propagator in [vallado] and ecc > 0.99:
pytest.xfail()
_a = 0.0 * u.rad
tof = 1.0 * u.min
ss0 = Orbit.from_classical(
attractor=Earth,
a=10000 * u.km,
ecc=ecc * u.one,
inc=_a,
raan=_a,
argp=_a,
nu=1.0 * u.rad,
)
ss_cowell = ss0.propagate(tof, method=cowell)
ss_propagator = ss0.propagate(tof, method=propagator)
assert_quantity_allclose(ss_propagator.r, ss_cowell.r)
assert_quantity_allclose(ss_propagator.v, ss_cowell.v)
@pytest.mark.parametrize("ecc", [1.0001, 1.001, 1.01, 1.1])
@pytest.mark.parametrize("propagator", HYPERBOLIC_PROPAGATORS)
def test_hyperbolic_near_parabolic(ecc, propagator):
# Still not implemented. Refer to issue #714.
if propagator in [pimienta, gooding]:
pytest.skip()
_a = 0.0 * u.rad
tof = 1.0 * u.min
ss0 = Orbit.from_classical(
attractor=Earth,
a=-10000 * u.km,
ecc=ecc * u.one,
inc=_a,
raan=_a,
argp=_a,
nu=1.0 * u.rad,
)
ss_cowell = ss0.propagate(tof, method=cowell)
ss_propagator = ss0.propagate(tof, method=propagator)
assert_quantity_allclose(ss_propagator.r, ss_cowell.r)
assert_quantity_allclose(ss_propagator.v, ss_cowell.v)
@pytest.mark.parametrize("propagator", [markley])
def test_near_equatorial(propagator):
r = [8.0e3, 1.0e3, 0.0] * u.km
v = [-0.5, -0.5, 0.0001] * u.km / u.s
tof = 1.0 * u.h
ss0 = Orbit.from_vectors(Earth, r, v)
ss_cowell = ss0.propagate(tof, method=cowell)
ss_propagator = ss0.propagate(tof, method=propagator)
assert_quantity_allclose(ss_propagator.r, ss_cowell.r, rtol=1e-4)
assert_quantity_allclose(ss_propagator.v, ss_cowell.v, rtol=1e-4)
@pytest.mark.parametrize("propagator", ALL_PROPAGATORS)
def test_propagation(propagator):
# Data from Vallado, example 2.4
r0 = [1131.340, -2282.343, 6672.423] * u.km
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
expected_r = [-4219.7527, 4363.0292, -3958.7666] * u.km
expected_v = [3.689866, -1.916735, -6.112511] * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0)
tof = 40 * u.min
ss1 = ss0.propagate(tof, method=propagator)
r, v = ss1.rv()
assert_quantity_allclose(r, expected_r, rtol=1e-5)
assert_quantity_allclose(v, expected_v, rtol=1e-4)
def test_propagating_to_certain_nu_is_correct():
# Take an elliptic orbit
a = 1.0 * u.AU
ecc = 1.0 / 3.0 * u.one
_a = 0.0 * u.rad
nu = 10 * u.deg
elliptic = Orbit.from_classical(
attractor=Sun, a=a, ecc=ecc, inc=_a, raan=_a, argp=_a, nu=nu
)
elliptic_at_perihelion = elliptic.propagate_to_anomaly(0.0 * u.rad)
r_per, _ = elliptic_at_perihelion.rv()
elliptic_at_aphelion = elliptic.propagate_to_anomaly(np.pi * u.rad)
r_ap, _ = elliptic_at_aphelion.rv()
assert_quantity_allclose(norm(r_per), a * (1.0 - ecc))
assert_quantity_allclose(norm(r_ap), a * (1.0 + ecc))
# TODO: Test specific values
assert elliptic_at_perihelion.epoch > elliptic.epoch
assert elliptic_at_aphelion.epoch > elliptic.epoch
# Test 10 random true anomaly values
# TODO: Rework this test
for nu in np.random.uniform(low=-np.pi, high=np.pi, size=10):
elliptic = elliptic.propagate_to_anomaly(nu * u.rad)
r, _ = elliptic.rv()
assert_quantity_allclose(norm(r), a * (1.0 - ecc**2) / (1 + ecc * np.cos(nu)))
def test_propagate_to_anomaly_in_the_past_fails_for_open_orbits():
r0 = [Earth.R.to(u.km).value + 300, 0, 0] * u.km
v0 = [0, 15, 0] * u.km / u.s
orb = Orbit.from_vectors(Earth, r0, v0)
with pytest.raises(ValueError, match="True anomaly -0.02 rad not reachable"):
orb.propagate_to_anomaly(orb.nu - 1 * u.deg)
def test_propagate_accepts_timedelta():
# Data from Vallado, example 2.4
r0 = [1131.340, -2282.343, 6672.423] * u.km
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
expected_r = [-4219.7527, 4363.0292, -3958.7666] * u.km
expected_v = [3.689866, -1.916735, -6.112511] * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0)
tof = time.TimeDelta(40 * u.min)
ss1 = ss0.propagate(tof)
r, v = ss1.rv()
assert_quantity_allclose(r, expected_r, rtol=1e-5)
assert_quantity_allclose(v, expected_v, rtol=1e-4)
def test_propagation_hyperbolic():
# Data from Curtis, example 3.5
r0 = [Earth.R.to(u.km).value + 300, 0, 0] * u.km
v0 = [0, 15, 0] * u.km / u.s
expected_r_norm = 163180 * u.km
expected_v_norm = 10.51 * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0)
tof = 14941 * u.s
ss1 = ss0.propagate(tof)
r, v = ss1.rv()
assert_quantity_allclose(norm(r), expected_r_norm, rtol=1e-4)
assert_quantity_allclose(norm(v), expected_v_norm, rtol=1e-3)
@pytest.mark.parametrize("propagator", PARABOLIC_PROPAGATORS)
def test_propagation_parabolic(propagator):
# Example from Howard Curtis (3rd edition), section 3.5, problem 3.15
# TODO: add parabolic solver in some parabolic propagators, refer to #417
if propagator in [mikkola, gooding]:
pytest.skip()
p = 2.0 * 6600 * u.km
_a = 0.0 * u.deg
orbit = Orbit.parabolic(Earth, p, _a, _a, _a, _a)
orbit = orbit.propagate(0.8897 / 2.0 * u.h, method=propagator)
_, _, _, _, _, nu0 = rv2coe(
Earth.k.to(u.km**3 / u.s**2).value,
orbit.r.to(u.km).value,
orbit.v.to(u.km / u.s).value,
)
assert_quantity_allclose(nu0, np.deg2rad(90.0), rtol=1e-4)
orbit = Orbit.parabolic(Earth, p, _a, _a, _a, _a)
orbit = orbit.propagate(36.0 * u.h, method=propagator)
assert_quantity_allclose(norm(orbit.r), 304700.0 * u.km, rtol=1e-4)
def test_propagation_zero_time_returns_same_state():
# Bug #50
r0 = [1131.340, -2282.343, 6672.423] * u.km # type: u.Quantity
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0)
tof = 0 * u.s
ss1 = ss0.propagate(tof)
r, v = ss1.rv()
assert_quantity_allclose(r, r0)
assert_quantity_allclose(v, v0)
def test_propagation_hyperbolic_zero_time_returns_same_state():
ss0 = Orbit.from_classical(
attractor=Earth,
a=-27112.5464 * u.km,
ecc=1.25 * u.one,
inc=0 * u.deg,
raan=0 * u.deg,
argp=0 * u.deg,
nu=0 * u.deg,
)
r0, v0 = ss0.rv()
tof = 0 * u.s
ss1 = ss0.propagate(tof)
r, v = ss1.rv()
assert_quantity_allclose(r, r0, atol=1e-24 * u.km)
assert_quantity_allclose(v, v0, atol=1e-27 * u.km / u.s)
def test_apply_zero_maneuver_returns_equal_state():
_d = 1.0 * u.AU # Unused distance
_ = 0.5 * u.one # Unused dimensionless value
_a = 1.0 * u.deg # Unused angle
ss = Orbit.from_classical(
attractor=Sun, a=_d, ecc=_, inc=_a, raan=_a, argp=_a, nu=_a
)
dt = 0 * u.s
dv = [0, 0, 0] * u.km / u.s
orbit_new = ss.apply_maneuver([(dt, dv)])
assert_allclose(orbit_new.r.to(u.km).value, ss.r.to(u.km).value)
assert_allclose(orbit_new.v.to(u.km / u.s).value, ss.v.to(u.km / u.s).value)
def test_cowell_propagation_with_zero_acceleration_equals_kepler():
# Data from Vallado, example 2.4
r0 = np.array([1131.340, -2282.343, 6672.423]) * u.km
v0 = np.array([-5.64305, 4.30333, 2.42879]) * u.km / u.s
tofs = [40 * 60.0] * u.s
orbit = Orbit.from_vectors(Earth, r0, v0)
expected_r = np.array([-4219.7527, 4363.0292, -3958.7666]) * u.km
expected_v = np.array([3.689866, -1.916735, -6.112511]) * u.km / u.s
r, v = cowell(Earth.k, orbit.r, orbit.v, tofs)
assert_quantity_allclose(r[0], expected_r, rtol=1e-5)
assert_quantity_allclose(v[0], expected_v, rtol=1e-4)
def test_cowell_propagation_circle_to_circle():
# From [Edelbaum, 1961]
accel = 1e-7
def constant_accel(t0, u_, k):
v = u_[3:]
norm_v = (v[0] ** 2 + v[1] ** 2 + v[2] ** 2) ** 0.5
return accel * v / norm_v
def f(t0, u_, k):
du_kep = func_twobody(t0, u_, k)
ax, ay, az = constant_accel(t0, u_, k)
du_ad = np.array([0, 0, 0, ax, ay, az])
return du_kep + du_ad
ss = Orbit.circular(Earth, 500 * u.km)
tofs = [20] * ss.period
r, v = cowell(Earth.k, ss.r, ss.v, tofs, f=f)
ss_final = Orbit.from_vectors(Earth, r[0], v[0])
da_a0 = (ss_final.a - ss.a) / ss.a
dv_v0 = abs(norm(ss_final.v) - norm(ss.v)) / norm(ss.v)
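    # Edelbaum's analysis of a continuous low-thrust spiral between
    # near-circular orbits gives da/a ~= 2 * dv/v to first order, which the
    # assertion below checks.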
assert_quantity_allclose(da_a0, 2 * dv_v0, rtol=1e-2)
dv = abs(norm(ss_final.v) - norm(ss.v))
accel_dt = accel * u.km / u.s**2 * tofs[0]
assert_quantity_allclose(dv, accel_dt, rtol=1e-2)
def test_propagate_to_date_has_proper_epoch():
# Data from Vallado, example 2.4
r0 = [1131.340, -2282.343, 6672.423] * u.km
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
init_epoch = J2000
final_epoch = time.Time("2000-01-01 12:40:00", scale="tdb")
expected_r = [-4219.7527, 4363.0292, -3958.7666] * u.km
expected_v = [3.689866, -1.916735, -6.112511] * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0, epoch=init_epoch)
ss1 = ss0.propagate(final_epoch)
r, v = ss1.rv()
assert_quantity_allclose(r, expected_r, rtol=1e-5)
assert_quantity_allclose(v, expected_v, rtol=1e-4)
# Tolerance should be higher, see https://github.com/astropy/astropy/issues/6638
assert (ss1.epoch - final_epoch).sec == approx(0.0, abs=1e-6)
@pytest.mark.filterwarnings("ignore::erfa.core.ErfaWarning")
@pytest.mark.parametrize("propagator", [danby, markley, gooding])
def test_propagate_long_times_keeps_geometry(propagator):
# See https://github.com/poliastro/poliastro/issues/265
time_of_flight = 100 * u.year
res = iss.propagate(time_of_flight, method=propagator)
assert_quantity_allclose(iss.a, res.a)
assert_quantity_allclose(iss.ecc, res.ecc)
assert_quantity_allclose(iss.inc, res.inc)
assert_quantity_allclose(iss.raan, res.raan)
assert_quantity_allclose(iss.argp, res.argp)
assert_quantity_allclose(
(res.epoch - iss.epoch).to(time_of_flight.unit), time_of_flight
)
@pytest.mark.filterwarnings("ignore::erfa.core.ErfaWarning")
def test_long_propagations_vallado_agrees_farnocchia():
tof = 100 * u.year
r_mm, v_mm = iss.propagate(tof, method=farnocchia).rv()
r_k, v_k = iss.propagate(tof, method=vallado).rv()
assert_quantity_allclose(r_mm, r_k)
assert_quantity_allclose(v_mm, v_k)
r_halleys = [-9018878.63569932, -94116054.79839276, 22619058.69943215] # km
v_halleys = [-49.95092305, -12.94843055, -4.29251577] # km/s
halleys = Orbit.from_vectors(Sun, r_halleys * u.km, v_halleys * u.km / u.s)
r_mm, v_mm = halleys.propagate(tof, method=farnocchia).rv()
r_k, v_k = halleys.propagate(tof, method=vallado).rv()
assert_quantity_allclose(r_mm, r_k)
assert_quantity_allclose(v_mm, v_k)
def test_farnocchia_propagation_very_high_ecc_does_not_fail():
# Regression test for #1296.
r = np.array([-500, 1500, 4012.09]) << u.km
v = np.array([5021.38, -2900.7, 1000.354]) << u.km / u.s
orbit = Orbit.from_vectors(Earth, r, v, epoch=time.Time("2020-01-01"))
tofs = [74] << u.s # tof = 74s and above is the critical region.
coords = propagate(orbit, tofs)
assert not np.isnan(coords.get_xyz()).any()
@st.composite
def with_units(draw, elements, unit):
value = draw(elements)
return value * unit
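# Illustrative draw: with_units(st.floats(min_value=80, max_value=120), u.year)
# yields astropy Quantity values such as ``100.0 * u.year``.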
@settings(deadline=None)
@given(
tof=with_units(
elements=st.floats(
min_value=80, max_value=120, allow_nan=False, allow_infinity=False
),
unit=u.year,
)
)
@pytest.mark.parametrize("method", [farnocchia, vallado])
def test_long_propagation_preserves_orbit_elements(tof, method, halley):
expected_slow_classical = halley.classical()[:-1]
slow_classical = halley.propagate(tof, method=method).classical()[:-1]
for element, expected_element in zip(slow_classical, expected_slow_classical):
assert_quantity_allclose(element, expected_element)
def test_propagation_sets_proper_epoch():
expected_epoch = time.Time("2017-09-01 12:05:50", scale="tdb")
r = [-2.76132873e08, -1.71570015e08, -1.09377634e08] * u.km
v = [13.17478674, -9.82584125, -1.48126639] * u.km / u.s
florence = Orbit.from_vectors(Sun, r, v, plane=Planes.EARTH_ECLIPTIC)
propagated = florence.propagate(expected_epoch)
assert propagated.epoch == expected_epoch
def test_sample_custom_body_raises_warning_and_returns_coords():
# See https://github.com/poliastro/poliastro/issues/649
orbit = Orbit.circular(Moon, 100 * u.km)
coords = orbit.sample(10)
assert isinstance(coords, CartesianRepresentation)
assert len(coords) == 10
def test_propagation_custom_body_works():
# See https://github.com/poliastro/poliastro/issues/649
orbit = Orbit.circular(Moon, 100 * u.km)
orbit.propagate(1 * u.h)
@pytest.mark.parametrize(
"propagator_coe",
[danby_coe, markley_coe, pimienta_coe, mikkola_coe, farnocchia_coe, gooding_coe],
)
def test_propagate_with_coe(propagator_coe):
period = iss.period
a, ecc, inc, raan, argp, nu = iss.classical()
p = a * (1 - ecc**2)
# Delete the units
p = p.to_value(u.km)
ecc = ecc.value
period = period.to_value(u.s)
inc = inc.to_value(u.rad)
raan = raan.to_value(u.rad)
argp = argp.to_value(u.rad)
nu = nu.to_value(u.rad)
k = iss.attractor.k.to_value(u.km**3 / u.s**2)
nu_final = propagator_coe(k, p, ecc, inc, raan, argp, nu, period)
assert_quantity_allclose(nu_final, nu)
@pytest.mark.parametrize("propagator", ALL_PROPAGATORS)
def test_propagator_with_zero_eccentricity(propagator):
attractor = Earth
altitude = 300 * u.km
orbit = Orbit.circular(attractor, altitude)
time_of_flight = 50 * u.s
res = orbit.propagate(time_of_flight, method=propagator)
assert_quantity_allclose(orbit.a, res.a)
assert_quantity_allclose(orbit.ecc, res.ecc, atol=1e-15)
assert_quantity_allclose(orbit.inc, res.inc)
assert_quantity_allclose(orbit.raan, res.raan)
assert_quantity_allclose(orbit.argp, res.argp)
@pytest.mark.parametrize("propagator", ALL_PROPAGATORS)
def test_after_propagation_r_and_v_dimensions(propagator):
r0 = [111.340, -228.343, 2413.423] * u.km
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
tof = time.TimeDelta(50 * u.s)
orbit = Orbit.from_vectors(Earth, r0, v0)
rr, vv = propagator(
orbit.attractor.k,
orbit.r,
orbit.v,
tof.reshape(-1).to(u.s),
rtol=1e-10,
)
assert rr.ndim == 2
assert vv.ndim == 2
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
The classes in scene.visuals are visuals that may be added to a scenegraph
using the methods and properties defined in `vispy.scene.Node` such as name,
visible, parent, children, etc...
These classes are automatically generated by mixing `vispy.scene.Node` with
the Visual classes found in `vispy.visuals`.
For developing custom visuals, it is recommended to subclass from
`vispy.visuals.Visual` rather than `vispy.scene.Node`.
"""
import re
import weakref
from .. import visuals
from .node import Node
from ..visuals.filters import ColorFilter, PickingFilter
class VisualNode(Node):
_next_id = 1
_visual_ids = weakref.WeakValueDictionary()
def __init__(self, parent=None, name=None):
Node.__init__(self, parent=parent, name=name,
transforms=self.transforms)
self.interactive = False
self._opacity_filter = ColorFilter()
self.attach(self._opacity_filter)
self._id = VisualNode._next_id
VisualNode._visual_ids[self._id] = self
VisualNode._next_id += 1
self._picking_filter = PickingFilter(id_=self._id)
self.attach(self._picking_filter)
def _update_opacity(self):
self._opacity_filter.color = (1, 1, 1, self._opacity)
def _set_clipper(self, node, clipper):
"""Assign a clipper that is inherited from a parent node.
If *clipper* is None, then remove any clippers for *node*.
"""
if node in self._clippers:
self.detach(self._clippers.pop(node))
if clipper is not None:
self.attach(clipper)
self._clippers[node] = clipper
@property
def picking(self):
"""Boolean that determines whether this node (and its children) are
drawn in picking mode.
"""
return self._picking
@picking.setter
def picking(self, p):
for c in self.children:
c.picking = p
if self._picking == p:
return
self._picking = p
self._picking_filter.enabled = p
self.update_gl_state(blend=not p)
def _update_trsys(self, event):
"""Transform object(s) have changed for this Node; assign these to the
visual's TransformSystem.
"""
doc = self.document_node
scene = self.scene_node
root = self.root_node
self.transforms.visual_transform = self.node_transform(scene)
self.transforms.scene_transform = scene.node_transform(doc)
self.transforms.document_transform = doc.node_transform(root)
Node._update_trsys(self, event)
@property
def interactive(self):
"""Whether this widget should be allowed to accept mouse and touch
events.
"""
return self._interactive
@interactive.setter
def interactive(self, i):
self._interactive = i
def draw(self):
if self.picking and not self.interactive:
return
self._visual_superclass.draw(self)
def create_visual_node(subclass):
# Create a new subclass of Node.
# Decide on new class name
clsname = subclass.__name__
if not (clsname.endswith('Visual') and
issubclass(subclass, visuals.BaseVisual)):
raise RuntimeError('Class "%s" must end with Visual, and must '
'subclass BaseVisual' % clsname)
clsname = clsname[:-6]
# Generate new docstring based on visual docstring
try:
doc = generate_docstring(subclass, clsname)
except Exception:
# If parsing fails, just return the original Visual docstring
doc = subclass.__doc__
# New __init__ method
def __init__(self, *args, **kwargs):
parent = kwargs.pop('parent', None)
name = kwargs.pop('name', None)
self.name = name # to allow __str__ before Node.__init__
self._visual_superclass = subclass
subclass.__init__(self, *args, **kwargs)
self.unfreeze()
VisualNode.__init__(self, parent=parent, name=name)
self.freeze()
# Create new class
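    # VisualNode is listed first so its draw()/picking machinery takes MRO
    # precedence; _visual_superclass (set in __init__ above) lets draw()
    # delegate back to the wrapped Visual class.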
cls = type(clsname, (VisualNode, subclass),
{'__init__': __init__, '__doc__': doc})
return cls
def generate_docstring(subclass, clsname):
# Generate a Visual+Node docstring by modifying the Visual's docstring
# to include information about Node inheritance and extra init args.
sc_doc = subclass.__doc__
if sc_doc is None:
sc_doc = ""
# find locations within docstring to insert new parameters
lines = sc_doc.split("\n")
# discard blank lines at start
while lines and lines[0].strip() == '':
lines.pop(0)
i = 0
params_started = False
param_indent = None
first_blank = None
param_end = None
while i < len(lines):
line = lines[i]
# ignore blank lines and '------' lines
if re.search(r'\w', line):
indent = len(line) - len(line.lstrip())
# If Params section has already started, check for end of params
# (that is where we will insert new params)
if params_started:
if indent < param_indent:
break
elif indent == param_indent:
# might be end of parameters block..
if re.match(r'\s*[a-zA-Z0-9_]+\s*:\s*\S+', line) is None:
break
param_end = i + 1
# Check for beginning of params section
elif re.match(r'\s*Parameters\s*', line):
params_started = True
param_indent = indent
if first_blank is None:
first_blank = i
# Check for first blank line
# (this is where the Node inheritance description will be
# inserted)
elif first_blank is None and line.strip() == '':
first_blank = i
i += 1
if i == len(lines) and param_end is None:
# reached end of docstring; insert here
param_end = i
# If original docstring has no params heading, we need to generate it.
if not params_started:
lines.extend(["", " Parameters", " ----------"])
param_end = len(lines)
if first_blank is None:
first_blank = param_end - 3
params_started = True
# build class and parameter description strings
class_desc = ("\n This class inherits from visuals.%sVisual and "
"scene.Node, allowing the visual to be placed inside a "
"scenegraph.\n" % (clsname))
parm_doc = (" parent : Node\n"
" The parent node to assign to this node (optional).\n"
" name : string\n"
" A name for this node, used primarily for debugging\n"
" (optional).")
# assemble all docstring parts
lines = (lines[:first_blank] +
[class_desc] +
lines[first_blank:param_end] +
[parm_doc] +
lines[param_end:])
doc = '\n'.join(lines)
return doc
# This is _not_ automated, to help with auto-completion in IDEs,
# the python REPL and IPython.
# Explicitly initializing these members allows IDEs to look them up
# and provide auto-completion. One remaining problem is that
# docstrings are _not_ looked up correctly by IDEs, since they
# are attached programmatically in the create_visual_node call.
# However, help(vispy.scene.FooVisual) still works.
Arrow = create_visual_node(visuals.ArrowVisual)
Axis = create_visual_node(visuals.AxisVisual)
Box = create_visual_node(visuals.BoxVisual)
ColorBar = create_visual_node(visuals.ColorBarVisual)
Compound = create_visual_node(visuals.CompoundVisual)
Cube = create_visual_node(visuals.CubeVisual)
Ellipse = create_visual_node(visuals.EllipseVisual)
Graph = create_visual_node(visuals.GraphVisual)
GridLines = create_visual_node(visuals.GridLinesVisual)
GridMesh = create_visual_node(visuals.GridMeshVisual)
Histogram = create_visual_node(visuals.HistogramVisual)
Image = create_visual_node(visuals.ImageVisual)
InfiniteLine = create_visual_node(visuals.InfiniteLineVisual)
Isocurve = create_visual_node(visuals.IsocurveVisual)
Isoline = create_visual_node(visuals.IsolineVisual)
Isosurface = create_visual_node(visuals.IsosurfaceVisual)
Line = create_visual_node(visuals.LineVisual)
LinearRegion = create_visual_node(visuals.LinearRegionVisual)
LinePlot = create_visual_node(visuals.LinePlotVisual)
Markers = create_visual_node(visuals.MarkersVisual)
Mesh = create_visual_node(visuals.MeshVisual)
Plane = create_visual_node(visuals.PlaneVisual)
Polygon = create_visual_node(visuals.PolygonVisual)
Rectangle = create_visual_node(visuals.RectangleVisual)
RegularPolygon = create_visual_node(visuals.RegularPolygonVisual)
ScrollingLines = create_visual_node(visuals.ScrollingLinesVisual)
Spectrogram = create_visual_node(visuals.SpectrogramVisual)
Sphere = create_visual_node(visuals.SphereVisual)
SurfacePlot = create_visual_node(visuals.SurfacePlotVisual)
Text = create_visual_node(visuals.TextVisual)
Tube = create_visual_node(visuals.TubeVisual)
# Visual = create_visual_node(visuals.Visual) # Should not be created
Volume = create_visual_node(visuals.VolumeVisual)
XYZAxis = create_visual_node(visuals.XYZAxisVisual)
__all__ = [name for (name, obj) in globals().items()
if isinstance(obj, type) and issubclass(obj, VisualNode)]
|
|
import asyncio
import datetime
import enum
import json
import math
import time
import warnings
from email.utils import parsedate
from multidict import CIMultiDict, CIMultiDictProxy
from . import hdrs, payload
from .helpers import HeadersMixin, SimpleCookie, sentinel
from .http import RESPONSES, SERVER_SOFTWARE, HttpVersion10, HttpVersion11
__all__ = ('ContentCoding', 'StreamResponse', 'Response', 'json_response')
class ContentCoding(enum.Enum):
# The content codings that we have support for.
#
# Additional registered codings are listed at:
# https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding
deflate = 'deflate'
gzip = 'gzip'
identity = 'identity'
############################################################
# HTTP Response classes
############################################################
class StreamResponse(HeadersMixin):
_length_check = True
def __init__(self, *, status=200, reason=None, headers=None):
self._body = None
self._keep_alive = None
self._chunked = False
self._compression = False
self._compression_force = False
self._cookies = SimpleCookie()
self._req = None
self._payload_writer = None
self._eof_sent = False
self._body_length = 0
if headers is not None:
self._headers = CIMultiDict(headers)
else:
self._headers = CIMultiDict()
self.set_status(status, reason)
@property
def prepared(self):
return self._payload_writer is not None
@property
def task(self):
return getattr(self._req, 'task', None)
@property
def status(self):
return self._status
@property
def chunked(self):
return self._chunked
@property
def compression(self):
return self._compression
@property
def reason(self):
return self._reason
def set_status(self, status, reason=None, _RESPONSES=RESPONSES):
assert not self.prepared, \
'Cannot change the response status code after ' \
'the headers have been sent'
self._status = int(status)
if reason is None:
try:
reason = _RESPONSES[self._status][0]
            except KeyError:
reason = ''
self._reason = reason
@property
def keep_alive(self):
return self._keep_alive
def force_close(self):
self._keep_alive = False
@property
def body_length(self):
return self._body_length
@property
def output_length(self):
warnings.warn('output_length is deprecated', DeprecationWarning)
return self._payload_writer.buffer_size
def enable_chunked_encoding(self, chunk_size=None):
"""Enables automatic chunked transfer encoding."""
self._chunked = True
if chunk_size is not None:
warnings.warn('Chunk size is deprecated #1615', DeprecationWarning)
def enable_compression(self, force=None):
"""Enables response compression encoding."""
# Backwards compatibility for when force was a bool <0.17.
if type(force) == bool:
force = ContentCoding.deflate if force else ContentCoding.identity
elif force is not None:
            assert isinstance(force, ContentCoding), ("force should be one of "
                                                      "None, bool or "
                                                      "ContentCoding")
self._compression = True
self._compression_force = force
@property
def headers(self):
return self._headers
@property
def cookies(self):
return self._cookies
def set_cookie(self, name, value, *, expires=None,
domain=None, max_age=None, path='/',
secure=None, httponly=None, version=None):
"""Set or update response cookie.
Sets new cookie or updates existent with new value.
Also updates only those params which are not None.
"""
old = self._cookies.get(name)
if old is not None and old.coded_value == '':
# deleted cookie
self._cookies.pop(name, None)
self._cookies[name] = value
c = self._cookies[name]
if expires is not None:
c['expires'] = expires
elif c.get('expires') == 'Thu, 01 Jan 1970 00:00:00 GMT':
del c['expires']
if domain is not None:
c['domain'] = domain
if max_age is not None:
c['max-age'] = max_age
elif 'max-age' in c:
del c['max-age']
c['path'] = path
if secure is not None:
c['secure'] = secure
if httponly is not None:
c['httponly'] = httponly
if version is not None:
c['version'] = version
def del_cookie(self, name, *, domain=None, path='/'):
"""Delete cookie.
Creates new empty expired cookie.
"""
# TODO: do we need domain/path here?
self._cookies.pop(name, None)
self.set_cookie(name, '', max_age=0,
expires="Thu, 01 Jan 1970 00:00:00 GMT",
domain=domain, path=path)
@property
def content_length(self):
# Just a placeholder for adding setter
return super().content_length
@content_length.setter
def content_length(self, value):
if value is not None:
value = int(value)
# TODO: raise error if chunked enabled
self._headers[hdrs.CONTENT_LENGTH] = str(value)
else:
self._headers.pop(hdrs.CONTENT_LENGTH, None)
@property
def content_type(self):
# Just a placeholder for adding setter
return super().content_type
@content_type.setter
def content_type(self, value):
self.content_type # read header values if needed
self._content_type = str(value)
self._generate_content_type_header()
@property
def charset(self):
# Just a placeholder for adding setter
return super().charset
@charset.setter
def charset(self, value):
ctype = self.content_type # read header values if needed
if ctype == 'application/octet-stream':
raise RuntimeError("Setting charset for application/octet-stream "
"doesn't make sense, setup content_type first")
if value is None:
self._content_dict.pop('charset', None)
else:
self._content_dict['charset'] = str(value).lower()
self._generate_content_type_header()
@property
def last_modified(self, _LAST_MODIFIED=hdrs.LAST_MODIFIED):
"""The value of Last-Modified HTTP header, or None.
This header is represented as a `datetime` object.
"""
httpdate = self.headers.get(_LAST_MODIFIED)
if httpdate is not None:
timetuple = parsedate(httpdate)
if timetuple is not None:
return datetime.datetime(*timetuple[:6],
tzinfo=datetime.timezone.utc)
return None
@last_modified.setter
def last_modified(self, value):
if value is None:
self.headers.pop(hdrs.LAST_MODIFIED, None)
elif isinstance(value, (int, float)):
self.headers[hdrs.LAST_MODIFIED] = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", time.gmtime(math.ceil(value)))
elif isinstance(value, datetime.datetime):
self.headers[hdrs.LAST_MODIFIED] = time.strftime(
"%a, %d %b %Y %H:%M:%S GMT", value.utctimetuple())
elif isinstance(value, str):
self.headers[hdrs.LAST_MODIFIED] = value
@property
def tcp_nodelay(self):
payload_writer = self._payload_writer
assert payload_writer is not None, \
"Cannot get tcp_nodelay for not prepared response"
return payload_writer.tcp_nodelay
def set_tcp_nodelay(self, value):
payload_writer = self._payload_writer
assert payload_writer is not None, \
"Cannot set tcp_nodelay for not prepared response"
payload_writer.set_tcp_nodelay(value)
@property
def tcp_cork(self):
payload_writer = self._payload_writer
assert payload_writer is not None, \
"Cannot get tcp_cork for not prepared response"
return payload_writer.tcp_cork
def set_tcp_cork(self, value):
payload_writer = self._payload_writer
assert payload_writer is not None, \
"Cannot set tcp_cork for not prepared response"
payload_writer.set_tcp_cork(value)
def _generate_content_type_header(self, CONTENT_TYPE=hdrs.CONTENT_TYPE):
params = '; '.join("%s=%s" % i for i in self._content_dict.items())
if params:
ctype = self._content_type + '; ' + params
else:
ctype = self._content_type
self.headers[CONTENT_TYPE] = ctype
def _do_start_compression(self, coding):
if coding != ContentCoding.identity:
self.headers[hdrs.CONTENT_ENCODING] = coding.value
self._payload_writer.enable_compression(coding.value)
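            # The compressed size is not known up front, so switch the
            # response to chunked transfer encoding.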
self._chunked = True
def _start_compression(self, request):
if self._compression_force:
self._do_start_compression(self._compression_force)
else:
accept_encoding = request.headers.get(
hdrs.ACCEPT_ENCODING, '').lower()
for coding in ContentCoding:
if coding.value in accept_encoding:
self._do_start_compression(coding)
return
@asyncio.coroutine
def prepare(self, request):
if self._eof_sent:
return
if self._payload_writer is not None:
return self._payload_writer
yield from request._prepare_hook(self)
return self._start(request)
def _start(self, request,
HttpVersion10=HttpVersion10,
HttpVersion11=HttpVersion11,
CONNECTION=hdrs.CONNECTION,
DATE=hdrs.DATE,
SERVER=hdrs.SERVER,
CONTENT_TYPE=hdrs.CONTENT_TYPE,
CONTENT_LENGTH=hdrs.CONTENT_LENGTH,
SET_COOKIE=hdrs.SET_COOKIE,
SERVER_SOFTWARE=SERVER_SOFTWARE,
TRANSFER_ENCODING=hdrs.TRANSFER_ENCODING):
self._req = request
keep_alive = self._keep_alive
if keep_alive is None:
keep_alive = request.keep_alive
self._keep_alive = keep_alive
version = request.version
writer = self._payload_writer = request._writer
headers = self._headers
for cookie in self._cookies.values():
value = cookie.output(header='')[1:]
headers.add(SET_COOKIE, value)
if self._compression:
self._start_compression(request)
if self._chunked:
if version != HttpVersion11:
raise RuntimeError(
"Using chunked encoding is forbidden "
"for HTTP/{0.major}.{0.minor}".format(request.version))
writer.enable_chunking()
headers[TRANSFER_ENCODING] = 'chunked'
if CONTENT_LENGTH in headers:
del headers[CONTENT_LENGTH]
elif self._length_check:
writer.length = self.content_length
if writer.length is None and version >= HttpVersion11:
writer.enable_chunking()
headers[TRANSFER_ENCODING] = 'chunked'
if CONTENT_LENGTH in headers:
del headers[CONTENT_LENGTH]
headers.setdefault(CONTENT_TYPE, 'application/octet-stream')
headers.setdefault(DATE, request.time_service.strtime())
headers.setdefault(SERVER, SERVER_SOFTWARE)
# connection header
if CONNECTION not in headers:
if keep_alive:
if version == HttpVersion10:
headers[CONNECTION] = 'keep-alive'
else:
if version == HttpVersion11:
headers[CONNECTION] = 'close'
# status line
status_line = 'HTTP/{}.{} {} {}\r\n'.format(
version[0], version[1], self._status, self._reason)
writer.write_headers(status_line, headers)
return writer
def write(self, data):
assert isinstance(data, (bytes, bytearray, memoryview)), \
"data argument must be byte-ish (%r)" % type(data)
if self._eof_sent:
raise RuntimeError("Cannot call write() after write_eof()")
if self._payload_writer is None:
raise RuntimeError("Cannot call write() before prepare()")
return self._payload_writer.write(data)
@asyncio.coroutine
def drain(self):
assert not self._eof_sent, "EOF has already been sent"
assert self._payload_writer is not None, \
"Response has not been started"
yield from self._payload_writer.drain()
@asyncio.coroutine
def write_eof(self, data=b''):
assert isinstance(data, (bytes, bytearray, memoryview)), \
"data argument must be byte-ish (%r)" % type(data)
if self._eof_sent:
return
assert self._payload_writer is not None, \
"Response has not been started"
yield from self._payload_writer.write_eof(data)
self._eof_sent = True
self._req = None
self._body_length = self._payload_writer.output_size
self._payload_writer = None
def __repr__(self):
if self._eof_sent:
info = "eof"
elif self.prepared:
info = "{} {} ".format(self._req.method, self._req.path)
else:
info = "not prepared"
return "<{} {} {}>".format(self.__class__.__name__,
self.reason, info)
class Response(StreamResponse):
def __init__(self, *, body=None, status=200,
reason=None, text=None, headers=None, content_type=None,
charset=None):
if body is not None and text is not None:
raise ValueError("body and text are not allowed together")
if headers is None:
headers = CIMultiDict()
elif not isinstance(headers, (CIMultiDict, CIMultiDictProxy)):
headers = CIMultiDict(headers)
if content_type is not None and ";" in content_type:
raise ValueError("charset must not be in content_type "
"argument")
if text is not None:
if hdrs.CONTENT_TYPE in headers:
if content_type or charset:
raise ValueError("passing both Content-Type header and "
"content_type or charset params "
"is forbidden")
else:
# fast path for filling headers
if not isinstance(text, str):
raise TypeError("text argument must be str (%r)" %
type(text))
if content_type is None:
content_type = 'text/plain'
if charset is None:
charset = 'utf-8'
headers[hdrs.CONTENT_TYPE] = (
content_type + '; charset=' + charset)
body = text.encode(charset)
text = None
else:
if hdrs.CONTENT_TYPE in headers:
if content_type is not None or charset is not None:
raise ValueError("passing both Content-Type header and "
"content_type or charset params "
"is forbidden")
else:
if content_type is not None:
if charset is not None:
content_type += '; charset=' + charset
headers[hdrs.CONTENT_TYPE] = content_type
super().__init__(status=status, reason=reason, headers=headers)
if text is not None:
self.text = text
else:
self.body = body
@property
def body(self):
return self._body
@body.setter
def body(self, body,
CONTENT_TYPE=hdrs.CONTENT_TYPE,
CONTENT_LENGTH=hdrs.CONTENT_LENGTH):
if body is None:
self._body = None
self._body_payload = False
elif isinstance(body, (bytes, bytearray)):
self._body = body
self._body_payload = False
else:
try:
self._body = body = payload.PAYLOAD_REGISTRY.get(body)
except payload.LookupError:
raise ValueError('Unsupported body type %r' % type(body))
self._body_payload = True
headers = self._headers
# enable chunked encoding if needed
if not self._chunked and CONTENT_LENGTH not in headers:
size = body.size
if size is None:
self._chunked = True
else:
headers[CONTENT_LENGTH] = str(size)
# set content-type
if CONTENT_TYPE not in headers:
headers[CONTENT_TYPE] = body.content_type
# copy payload headers
if body.headers:
for (key, value) in body.headers.items():
if key not in headers:
headers[key] = value
@property
def text(self):
if self._body is None:
return None
return self._body.decode(self.charset or 'utf-8')
@text.setter
def text(self, text):
assert text is None or isinstance(text, str), \
"text argument must be str (%r)" % type(text)
if self.content_type == 'application/octet-stream':
self.content_type = 'text/plain'
if self.charset is None:
self.charset = 'utf-8'
self._body = text.encode(self.charset)
self._body_payload = False
@property
def content_length(self):
if self._chunked:
return None
if hdrs.CONTENT_LENGTH in self.headers:
return super().content_length
if self._body is not None:
return len(self._body)
else:
return 0
@content_length.setter
def content_length(self, value):
raise RuntimeError("Content length is set automatically")
@asyncio.coroutine
def write_eof(self):
body = self._body
if body is not None:
if (self._req._method == hdrs.METH_HEAD or
self._status in [204, 304]):
yield from super().write_eof()
elif self._body_payload:
yield from body.write(self._payload_writer)
yield from super().write_eof()
else:
yield from super().write_eof(body)
else:
yield from super().write_eof()
def _start(self, request):
if not self._chunked and hdrs.CONTENT_LENGTH not in self._headers:
if self._body is not None:
self._headers[hdrs.CONTENT_LENGTH] = str(len(self._body))
else:
self._headers[hdrs.CONTENT_LENGTH] = '0'
return super()._start(request)
def json_response(data=sentinel, *, text=None, body=None, status=200,
reason=None, headers=None, content_type='application/json',
dumps=json.dumps):
if data is not sentinel:
if text or body:
raise ValueError(
"only one of data, text, or body should be specified"
)
else:
text = dumps(data)
return Response(text=text, body=body, status=status, reason=reason,
headers=headers, content_type=content_type)
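# Illustrative usage sketch (not part of the original module): how these
# classes are typically reached from a request handler. The application and
# routing setup around the handlers is assumed and not shown here.
#
# @asyncio.coroutine
# def handler(request):
#     return Response(text='hello')  # text/plain; charset defaults to utf-8
#
# @asyncio.coroutine
# def api_handler(request):
#     return json_response({'status': 'ok'})  # application/json via json.dumps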
|
|
if __name__ == "__main__":
import argparse
import json
from git import Repo
import os
from multiprocessing import Pool
import numpy as np
from ..data.dataset import Dataset, NotAllign
from ..features.helpers import scale_simple, scale_named, scale_named2, scale_named4, scale_named4s
from ..features.extract_events import tv_segment
import glob
import os
import sys
from keras import backend as K
import time
parser = argparse.ArgumentParser()
parser.add_argument('--window-size', dest="window_size",
type=int, choices=[2, 4, 5, 8], default=5)
parser.add_argument('--Nbases', type=int, choices=[4, 5, 8], default=4)
parser.add_argument('--root', type=str, default="./")
parser.add_argument('--test', dest='test', action='store_true')
parser.add_argument('--size', type=int, default=20)
parser.add_argument("--metadata", type=str, default=None)
parser.add_argument("--n-cpu", dest="n_cpu", type=int, default=None)
parser.add_argument("--name", dest="name", type=str, default='dataset.pick')
parser.add_argument("--target", dest="target", type=str, default='T')
parser.add_argument("--test-set", dest="test_set", action="store_true")
parser.add_argument("--range", dest="range", nargs='+', default=[], type=float)
parser.add_argument('--weights', dest='weights', type=str, default=None)
parser.add_argument('--n-input', dest="n_input", type=int, default=1)
parser.add_argument('--n-output', dest="n_output", type=int, default=1)
parser.add_argument('--n-output-network', dest="n_output_network", type=int, default=1)
parser.add_argument('--force-clean', dest="force_clean", action="store_true")
parser.add_argument('--filter', nargs='+', dest="filter", type=str, default=[])
parser.add_argument('--ctc-length', dest="ctc_length", type=int, default=20)
parser.add_argument('--clean', dest="clean", action="store_true")
parser.add_argument('--sclean', dest="sclean", action="store_true")
parser.add_argument('--attention', dest="attention", action="store_true")
parser.add_argument('--residual', dest="res", action="store_true")
parser.add_argument('--all-datasets', nargs='+', dest="all_datasets", default=[], type=str)
parser.add_argument('--simple', dest="simple", action="store_true")
parser.add_argument('--num-threads', dest="num_threads", type=int, default=1)
parser.add_argument('--norm2', dest="norm2", action="store_true")
parser.add_argument('--maxf', dest="maxf", type=int, default=None)
parser.add_argument("--method", dest="method", choices=["FW", "TV", "TV45", "TV25", "TV5"])
parser.add_argument('--allinfos', dest='allinfos', action='store_true')
parser.add_argument('--maxleninf', dest="maxleninf", type=int, default=36)
parser.add_argument('--debug', dest='debug', action='store_true')
parser.add_argument('--maxlen', dest="maxlen", type=int, default=10000)
parser.add_argument('--correct', dest='correct', action='store_true')
parser.add_argument('--gamma', dest="gamma", type=float, default=40)
parser.add_argument('--not-normed', dest="normed", action="store_false")
parser.add_argument('--extra-output', dest='extra_output', type=int, default=0)
parser.add_argument('--info', action="store_true")
parser.add_argument('--flatten', action="store_true")
parser.add_argument('--batchnorm', dest='batchnorm', action="store_true")
parser.add_argument('--dropout', dest='dropout', default=0, type=float)
parser.add_argument("--human", dest="human", default=None, type=int)
parser.add_argument("--one", dest="one", action="store_true")
parser.add_argument('--gammas', nargs='+', dest="rg", type=int, default=[10, 20, 40, 60, 80])
# parser.add_argument("--substitution", dest="substitution", default="T", type=str)
args = parser.parse_args()
argparse_dict = vars(args)
repo = Repo("./")
argparse_dict["commit"] = str(repo.head.commit)
os.makedirs(args.root, exist_ok=True)
rac = os.path.split(args.name)[0]
if rac == '':
rac = "./"
os.makedirs(rac, exist_ok=True)
if not args.debug:
f = open(os.devnull, 'w')
sys.stdout = f
with open(rac + '/params.json', 'w') as fp:
json.dump(argparse_dict, fp, indent=True)
if args.n_cpu is not None:
n_cpu = args.n_cpu
else:
n_cpu = os.cpu_count()
root = "data/raw/20170908-R9.5/"
base_call = True
rf = None
human = False
if args.target == "T":
samf = "BTF_AG_ONT_1_FAH14273_A-select.sam"
rf = "AG-basecalled/"
if args.target == "TR":
samf = ""
rf = "../../../../../../../data/bioinfo@borvo/users/jarbona/deepnano5bases/data/raw/AG_0-10"
base_call = False
if args.target == "B":
samf = "BTF_AH_ONT_1_FAH14319_A-select.sam"
rf = "AH-basecalled/"
if args.target == "D":
samf = ""
rf = "AD-basecalled"
base_call = False
if args.target == "H_B":
samf = ""
rf = "Human-HR2/72"
base_call = False
human = True
if args.target == "H_T":
samf = ""
rf = "Human_HQ/"
base_call = False
if rf is None:
samf = ""
rf = args.target
base_call = False
D = Dataset(samfile=root + samf,
root_files=root + rf)
D.metadata = argparse_dict
D.substitution = args.target
maxlen = args.maxlen
if args.human is not None:
if args.human == 1:
human = True
print("Human")
ran = range(1, 11)
if args.test_set:
ran = range(11, 17)
if args.range != []:
ran = range(1, 17)
D.populate(maxf=args.maxf, filter_not_alligned=True,
filter_ch=ran, basecall=False, minion=False, arange=args.range,
base_call=base_call)
print("Popul", len(D.strands))
D.strands = D.strands[:args.maxf]
print("Popul", len(D.strands))
from ..models.model import build_models
def load_model():
if args.Nbases == 4:
mapping = {"A": 0, "C": 1, "G": 2, "T": 3, "N": 4} # Modif
elif args.Nbases == 5:
mapping = {"A": 0, "C": 1, "G": 2, "T": 3, "B": 4, "N": 5} # Modif
elif args.Nbases == 8:
mapping = {"A": 0, "C": 1, "G": 2, "T": 3, "B": 4,
"L": 5, "E": 6, "I": 7, "N": 8} # Modif
n_output_network = args.n_output_network
subseq_size = args.ctc_length
ctc_length = subseq_size
input_length = ctc_length
if n_output_network == 2:
ctc_length = 2 * subseq_size
n_feat = 4
# if args.clean:
# n_feat = 3
if args.sclean:
n_feat = 1
if args.norm2:
n_feat = 2
if args.allinfos:
n_feat = args.maxleninf
predictor, _ = build_models(args.size, nbase=args.Nbases - 4,
ctc_length=ctc_length,
input_length=None, n_output=n_output_network,
lr=1, res=args.res, attention=args.attention,
n_feat=n_feat, simple=args.simple, extra_output=args.extra_output, batchnorm=args.batchnorm,
recurrent_dropout=args.dropout, one=args.one)
if args.weights is not None:
predictor.load_weights(args.weights)
return predictor
predictor = load_model()
# except:
# load from basecall
def load_from_bc(strand, gamma):
trans = strand.get_seq(f="no_basecall", window_size=args.window_size,
method=args.method, allinfos=args.allinfos,
maxlen=args.maxleninf, minlen=1, gamma=gamma, flatten=args.flatten)
return [trans[:int(4 * maxlen)], None]
if args.allinfos:
if args.normed:
fnorm = lambda x: scale_named4(x, maxleninf=args.maxleninf)
else:
fnorm = lambda x: scale_named4s(x, maxleninf=args.maxleninf)
else:
fnorm = scale_named2
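# compute_attributes: take the sequence called by the network for this
# strand (strand.transfered), fetch the matching reference, align the two,
# and write per-segment reference labels into transfered["seq_ref"];
# returns the updated segments plus alignment statistics.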
def compute_attributes(strand):
# try:
transfered = strand.transfered
# strand.transfered_bc = copy.deepcopy(transfered)
if len("".join(transfered["seq"]).replace("N", "")) > maxlen:
transfered = transfered[:maxlen]
# get the ref from transfered:
from_ntwk = "".join(transfered["seq"]).replace("N", "")
sub = "B"
prop = from_ntwk.count("T") / (from_ntwk.count("T") + from_ntwk.count(sub) + 1e-7)
ref = strand.get_ref(from_ntwk.replace(sub, "T"), correct=args.correct, human=human)
# print(ref)
if ref == "":
print("Not alligned")
return [None, len(from_ntwk) / len(transfered)]
# align the ref on the transfered
bc_strand = from_ntwk.replace(sub, "T")
al = strand.score(bc_strand, ref, all_info=True)
# strand.score_bc_ref = al[2] / len(bc_strand)
mapped_ref, correction = strand.give_map(
"".join(transfered["seq"]).replace(sub, "T"), al[:2])
def order(s1, s2):
if prop < 0.5:
s1 = s1.replace("T", sub)
s2 = s2.replace("T", sub)
else:
s1 = s1.replace(sub, "T")
s2 = s2.replace(sub, "T")
if s1 != "N":
return s1 + s2
return s2 + s1
new_ref = np.array([order(s, s1)
for s, s1 in zip(mapped_ref[::2], mapped_ref[1::2])])
transfered["seq_ref"] = new_ref
transfered["seq_ref_correction"] = np.array([order(s, s1)
for s, s1 in zip(correction[::2], correction[1::2])])
strand.changed = True
score = strand.score("".join(transfered["seq_ref"]).replace(
"N", "").replace(sub, "T"), ref, all_info=False)
print("ALL", al[2] / len(bc_strand))
# print("".join(transfered["seq_ref"]).replace(
# "N", "").replace(sub, "T"))
return transfered, al[2] / len(bc_strand), score, len(ref)
# print(res)
Density_network = {}
B = {}
Nb = {}
All = {}
Length = {}
rg = args.rg
for gamma in rg:
Density_network[gamma] = []
B[gamma] = []
Nb[gamma] = []
All[gamma] = []
Length[gamma] = []
for istrand, s in enumerate(D.strands):
print(istrand)
t = time.time()
v = load_from_bc(s, gamma)
print("TV", time.time() - t, len(v[0]))
t = time.time()
s.transfered = v[0]
outputs = s.analyse_segmentation(predictor, fnorm(
s.transfered), no2=args.n_output_network == 2)
output = outputs[::, 0]
if len(outputs) > 2:
output2 = outputs[::, 1]
s.transfered["seq"] = [s + s2 for s, s2 in zip(output, output2)]
else:
s.transfered["seq"] = [s + "N" for s in output]
print("ADD to segments")
s.segments["seq"] = s.transfered["seq"]
print("predict", time.time() - t)
Density_network[gamma].append(
len("".join(s.transfered["seq"]).replace("N", "")) / len(s.transfered))
seq = "".join(s.transfered["seq"])
lseq = np.array([l for l in seq])
# print(lseq)
p = np.sum(lseq == "B") / (1 + np.sum(lseq == "T") + np.sum(lseq == "B"))
B[gamma].append(p)
Nb[gamma].append(len("".join(s.transfered["seq"]).replace("N", "")))
Length[gamma].append(np.mean(s.transfered["length"] * s.sl))
# Alignment
v = compute_attributes(s)
if v[0] is None:
All[gamma].append(0)
else:
All[gamma].append(v[1])
Density_network[gamma] = np.array(Density_network[gamma])
B[gamma] = np.array(B[gamma])
Nb[gamma] = np.array(Nb[gamma])
All[gamma] = np.array(All[gamma])
Length[gamma] = np.array(Length[gamma])
np.set_printoptions(precision=2, suppress=True)
print("Segment length")
for gamma in rg:
print(gamma, np.array(Length[gamma]))
print("Base Density_network")
for gamma in rg:
print(gamma, Density_network[gamma])
print("Percent B")
for gamma in rg:
print(gamma, B[gamma])
print("Number of bases")
for gamma in rg:
print(gamma, Nb[gamma])
print("Allignement")
for gamma in rg:
print(gamma, All[gamma])
print("Sampling rate", s.sl)
print("Summary")
print("Gamma,Density,Percent,Nb,Score al", "Lengt h")
def mean(v):
return float("%.2f" % np.mean(v))
for gamma in rg:
print(gamma, mean(Density_network[gamma]), mean(
B[gamma]), mean(Nb[gamma]), mean(All[gamma]), mean(Length[gamma]))
import pandas as pd
df = pd.DataFrame({"gamma": rg,
"Density_base_mean": [np.mean(Density_network[gamma]) for gamma in rg],
"Percent_mean": [np.mean(B[gamma]) for gamma in rg],
"Nb_mean": [np.mean(Nb[gamma]) for gamma in rg],
"length_mean": [np.mean(Length[gamma]) for gamma in rg],
"Al_mean": [np.mean(All[gamma]) for gamma in rg],
"Density_base": [Density_network[gamma] for gamma in rg],
"Percent": [B[gamma] for gamma in rg],
"Nb": [Nb[gamma] for gamma in rg],
"Al": [All[gamma] for gamma in rg],
"length": [Length[gamma] for gamma in rg],
"Samplingrate": [s.sl for gamma in rg]})
df.to_csv(args.name)
"""
Density_alligned = []
Length = []
t = time.time()
print("Cattr", time.time() - t)
if v[0] is not None:
s.transfered = v[0]
s.bc_score = v[1]
s.confirm_score = v[2]
s.transfered["all"] = [np.array(a, dtype=np.float16) for a in s.transfered["all"]]
s.transfered["mean"] = np.array(s.transfered["mean"], dtype=np.float16)
s.transfered["stdv"] = np.array(s.transfered["stdv"], dtype=np.float16)
s.segments = None
Density_network.append(
len("".join(s.transfered["seq"]).replace("N", "")) / len(s.transfered))
Density_alligned.append(
len("".join(s.transfered["seq_ref"]).replace("N", "")) / len(s.transfered))
Length.append(np.mean([len(t) for t in s.transfered["all"]]))
else:
s.transfered = None
Density_network.append(v[1])
Length.append(np.mean([len(t) for t in s.segments["all"]]))
"""
# print(output.shape, len(s.transfered))
"""
data_x = []
# except:
# return [None, None]
# strand.transfered_seq = transfered
# except:
# return [None, None]
import _pickle as cPickle
print("Writing on ", args.name)
with open(args.name, "wb") as fich:
cPickle.dump(D, fich)
print("End writing")
if args.info:
print("########################")
print("Infos:")
print("Sampling rate", s.sl)
print("Density of segments (network)", np.mean(Density_network))
print(Density_network)
print("Density of segments (alligned)", np.mean(Density_alligned))
print(Density_alligned)
print("lengths of segment", np.mean(Length))
print(Length)
"""
if K.backend() == 'tensorflow':
K.clear_session()
|
|
import numpy
import openravepy
import logging
from .exceptions import UnsupportedTypeSerializationException
TYPE_KEY = '__type__'
serialization_logger = logging.getLogger('prpy.serialization')
deserialization_logger = logging.getLogger('prpy.deserialization')
# Serialization.
def serialize(obj):
from numpy import ndarray
from openravepy import Environment, KinBody, Robot, Trajectory
from prpy.tsr import TSR, TSRChain
NoneType = type(None)
if isinstance(obj, (int, float, basestring, NoneType)):
return obj
elif isinstance(obj, (list, tuple)):
return [ serialize(x) for x in obj ]
elif isinstance(obj, dict):
obj = { serialize(k): serialize(v) for k, v in obj.iteritems() }
obj[TYPE_KEY] = dict.__name__
return obj
elif isinstance(obj, ndarray):
return {
TYPE_KEY: ndarray.__name__,
'data': serialize(obj.tolist())
}
elif isinstance(obj, Environment):
return {
TYPE_KEY: Environment.__name__,
'data': serialize_environment(obj)
}
elif isinstance(obj, KinBody):
return {
TYPE_KEY: KinBody.__name__,
'name': obj.GetName()
}
elif isinstance(obj, Robot):
return {
TYPE_KEY: Robot.__name__,
'name': obj.GetName()
}
elif isinstance(obj, KinBody.Link):
return {
TYPE_KEY: KinBody.Link.__name__,
'name': obj.GetName(),
'parent_name': obj.GetParent().GetName()
}
elif isinstance(obj, KinBody.Joint):
return {
TYPE_KEY: KinBody.Joint.__name__,
'name': obj.GetName(),
'parent_name': obj.GetParent().GetName()
}
elif isinstance(obj, Robot.Manipulator):
return {
TYPE_KEY: Robot.Manipulator.__name__,
'name': obj.GetName(),
'parent_name': obj.GetParent().GetName()
}
elif isinstance(obj, Trajectory):
return {
TYPE_KEY: Trajectory.__name__,
'data': obj.serialize(0)
}
elif isinstance(obj, TSR):
return {
TYPE_KEY: TSR.__name__,
'data': obj.to_dict()
}
elif isinstance(obj, TSRChain):
return {
TYPE_KEY: TSRChain.__name__,
'data': obj.to_dict()
}
else:
raise UnsupportedTypeSerializationException(obj)
def serialize_environment(env):
return {
'bodies': [ serialize_kinbody(body) for body in env.GetBodies() ],
}
def serialize_environment_file(env, path, writer=None):
if writer is None:
import json
writer = json.dump
data = serialize_environment(env)
if path is not None:
with open(path, 'wb') as output_file:
writer(data, output_file)
serialization_logger.debug('Wrote environment to "%s".', path)
return data
def serialize_kinbody(body):
all_joints = []
all_joints.extend(body.GetJoints())
all_joints.extend(body.GetPassiveJoints())
data = {
'is_robot': body.IsRobot(),
'name': body.GetName(),
'uri': body.GetXMLFilename(),
'links': map(serialize_link, body.GetLinks()),
'joints': map(serialize_joint, all_joints),
}
data['kinbody_state'] = serialize_kinbody_state(body)
if body.IsRobot():
data.update(serialize_robot(body))
return data
def serialize_robot(robot):
return {
'manipulators': map(serialize_manipulator, robot.GetManipulators()),
'robot_state': serialize_robot_state(robot),
}
def serialize_kinbody_state(body):
data = {
name: get_fn(body)
for name, (get_fn, _) in KINBODY_STATE_MAP.iteritems()
}
link_transforms, dof_branches = body.GetLinkTransformations(True)
data.update({
'link_transforms': map(serialize_transform, link_transforms),
'dof_branches': dof_branches.tolist(),
'dof_values': body.GetDOFValues().tolist(),
})
return data
def serialize_robot_state(body):
data = {
name: get_fn(body)
for name, (get_fn, _) in ROBOT_STATE_MAP.iteritems()
}
data['grabbed_bodies'] = map(serialize_grabbed_info, body.GetGrabbedInfo())
return data
def serialize_link(link):
data = { 'info': serialize_link_info(link.GetInfo()) }
# Bodies loaded from ".kinbody.xml" do not have GeometryInfo's listed in
# their LinkInfo class. We manually read them from GetGeometries().
# TODO: This may not correctly preserve non-active geometry groups.
data['info']['_vgeometryinfos'] = [
serialize_geometry_info(geometry.GetInfo()) \
for geometry in link.GetGeometries()
]
return data
def serialize_joint(joint):
return { 'info': serialize_joint_info(joint.GetInfo()) }
def serialize_manipulator(manipulator):
return { 'info': serialize_manipulator_info(manipulator.GetInfo()) }
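# The *_INFO_MAP tables defined at the bottom of this module map attribute
# names to (serialize_fn, deserialize_fn) pairs; serialize_with_map and
# deserialize_with_map apply the matching side of each pair per attribute.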
def serialize_with_map(obj, attribute_map):
return {
key: serialize_fn(getattr(obj, key))
for key, (serialize_fn, _) in attribute_map.iteritems()
}
def serialize_link_info(link_info):
return serialize_with_map(link_info, LINK_INFO_MAP)
def serialize_joint_info(joint_info):
return serialize_with_map(joint_info, JOINT_INFO_MAP)
def serialize_manipulator_info(manip_info):
return serialize_with_map(manip_info, MANIPULATOR_INFO_MAP)
def serialize_geometry_info(geom_info):
return serialize_with_map(geom_info, GEOMETRY_INFO_MAP)
def serialize_grabbed_info(grabbed_info):
return serialize_with_map(grabbed_info, GRABBED_INFO_MAP)
def serialize_transform(t):
from openravepy import quatFromRotationMatrix
return {
'position': list(map(float,t[0:3, 3])),
'orientation': list(map(float,quatFromRotationMatrix(t[0:3, 0:3]))),
}
# Deserialization.
def _deserialize_internal(env, data, data_type):
from numpy import array, ndarray
from openravepy import (Environment, KinBody, Robot, Trajectory,
RaveCreateTrajectory)
from prpy.tsr import TSR, TSRChain
from .exceptions import UnsupportedTypeDeserializationException
if data_type == dict.__name__:
return {
deserialize(env, k): deserialize(env, v)
for k, v in data.iteritems()
if k != TYPE_KEY
}
elif data_type == ndarray.__name__:
return array(data['data'])
elif data_type in [ KinBody.__name__, Robot.__name__ ]:
body = env.GetKinBody(data['name'])
if body is None:
raise ValueError('There is no body with name "{:s}".'.format(
data['name']))
return body
elif data_type == KinBody.Link.__name__:
body = env.GetKinBody(data['parent_name'])
if body is None:
raise ValueError('There is no body with name "{:s}".'.format(
data['parent_name']))
link = body.GetLink(data['name'])
if link is None:
raise ValueError('Body "{:s}" has no link named "{:s}".'.format(
data['parent_name'], data['name']))
return link
elif data_type == KinBody.Joint.__name__:
body = env.GetKinBody(data['parent_name'])
if body is None:
raise ValueError('There is no body with name "{:s}".'.format(
data['parent_name']))
joint = body.GetJoint(data['name'])
if joint is None:
raise ValueError('Body "{:s}" has no joint named "{:s}".'.format(
data['parent_name'], data['name']))
return joint
elif data_type == Robot.Manipulator.__name__:
body = env.GetKinBody(data['parent_name'])
if body is None:
raise ValueError('There is no robot with name "{:s}".'.format(
data['parent_name']))
elif not body.IsRobot():
raise ValueError('Body "{:s}" is not a robot.'.format(
data['parent_name']))
manip = body.GetManipulator(data['name'])
if manip is None:
raise ValueError('Robot "{:s}" has no manipulator named "{:s}".'.format(
data['parent_name'], data['name']))
return manip
elif data_type == Trajectory.__name__:
traj = RaveCreateTrajectory(env, '')
traj.deserialize(data['data'])
return traj
elif data_type == TSR.__name__:
return TSR.from_dict(data['data'])
elif data_type == TSRChain.__name__:
return TSRChain.from_dict(data['data'])
else:
raise UnsupportedTypeDeserializationException(data_type)
def deserialize(env, data):
if isinstance(data, unicode):
return data.encode()
elif isinstance(data, list):
return [ deserialize(env, x) for x in data ]
elif isinstance(data, dict):
return _deserialize_internal(env, data, data.get(TYPE_KEY))
else:
return data
def deserialize_environment(data, env=None, purge=False, reuse_bodies=None):
import openravepy
if env is None:
env = openravepy.Environment()
if reuse_bodies is None:
reuse_bodies_dict = dict()
reuse_bodies_set = set()
else:
reuse_bodies_dict = { body.GetName(): body for body in reuse_bodies }
reuse_bodies_set = set(reuse_bodies)
# Release anything that's grabbed.
for body in reuse_bodies_set:
body.ReleaseAllGrabbed()
# Remove any extra bodies from the environment.
for body in env.GetBodies():
if body not in reuse_bodies_set:
deserialization_logger.debug('Purging body "%s".', body.GetName())
env.Remove(body)
# Deserialize the kinematic structure.
deserialized_bodies = []
for body_data in data['bodies']:
body = reuse_bodies_dict.get(body_data['name'], None)
if body is None:
body = deserialize_kinbody(env, body_data, state=False)
deserialization_logger.debug('Deserialized body "%s".', body.GetName())
deserialized_bodies.append((body, body_data))
# Restore state. We do this in a second pass to ensure that any bodies that
# are grabbed already exist.
for body, body_data in deserialized_bodies:
deserialize_kinbody_state(body, body_data['kinbody_state'])
if body.IsRobot():
deserialize_robot_state(body, body_data['robot_state'])
return env
def deserialize_kinbody(env, data, name=None, anonymous=False, state=True):
from openravepy import RaveCreateKinBody, RaveCreateRobot
deserialization_logger.debug('Deserializing %s "%s".',
'Robot' if data['is_robot'] else 'KinBody',
data['name']
)
link_infos = [
deserialize_link_info(link_data['info']) \
for link_data in data['links']
]
joint_infos = [
deserialize_joint_info(joint_data['info']) \
for joint_data in data['joints']
]
if data['is_robot']:
# TODO: Also load sensors.
manipulator_infos = [
deserialize_manipulator_info(manipulator_data['info']) \
for manipulator_data in data['manipulators']
]
sensor_infos = []
kinbody = RaveCreateRobot(env, '')
kinbody.Init(
link_infos, joint_infos,
manipulator_infos, sensor_infos,
data['uri']
)
else:
kinbody = RaveCreateKinBody(env, '')
kinbody.Init(link_infos, joint_infos, data['uri'])
kinbody.SetName(name or data['name'])
env.Add(kinbody, anonymous)
if state:
deserialize_kinbody_state(kinbody, data['kinbody_state'])
if kinbody.IsRobot():
deserialize_robot_state(kinbody, data['robot_state'])
return kinbody
def deserialize_kinbody_state(body, data):
from openravepy import KinBody
deserialization_logger.debug('Deserializing "%s" KinBody state.',
body.GetName())
for key, (_, set_fn) in KINBODY_STATE_MAP.iteritems():
try:
set_fn(body, data[key])
except Exception as e:
deserialization_logger.error(
'Failed deserializing KinBody "%s" state "%s": %s',
body.GetName(), key, e.message
)
raise
body.SetLinkTransformations(
map(deserialize_transform, data['link_transforms']),
data['dof_branches']
)
def deserialize_robot_state(body, data):
deserialization_logger.debug('Deserializing "%s" Robot state.',
body.GetName())
for key, (_, set_fn) in ROBOT_STATE_MAP.iteritems():
set_fn(body, data[key])
env = body.GetEnv()
for grabbed_info_dict in data['grabbed_bodies']:
grabbed_info = deserialize_grabbed_info(grabbed_info_dict)
robot_link = body.GetLink(grabbed_info._robotlinkname)
robot_links_to_ignore = grabbed_info._setRobotLinksToIgnore
grabbed_body = env.GetKinBody(grabbed_info._grabbedname)
grabbed_pose = numpy.dot(robot_link.GetTransform(),
grabbed_info._trelative)
grabbed_body.SetTransform(grabbed_pose)
body.Grab(grabbed_body, robot_link, robot_links_to_ignore)
def deserialize_with_map(obj, data, attribute_map):
for key, (_, deserialize_fn) in attribute_map.iteritems():
setattr(obj, key, deserialize_fn(data[key]))
return obj
def deserialize_link_info(data):
from openravepy import KinBody
return deserialize_with_map(KinBody.LinkInfo(), data, LINK_INFO_MAP)
def deserialize_joint_info(data):
from openravepy import KinBody
return deserialize_with_map(KinBody.JointInfo(), data, JOINT_INFO_MAP)
def deserialize_manipulator_info(data):
from openravepy import Robot
return deserialize_with_map(Robot.ManipulatorInfo(), data, MANIPULATOR_INFO_MAP)
def deserialize_geometry_info(data):
from openravepy import KinBody
geom_info = deserialize_with_map(
KinBody.GeometryInfo(), data, GEOMETRY_INFO_MAP)
# OpenRAVE only has a ReadTrimeshURI method on Environment. We create a
# static, dummy environment (mesh_environment) just to load meshes.
if geom_info._filenamecollision:
geom_info._meshcollision = mesh_environment.ReadTrimeshURI(
geom_info._filenamecollision)
return geom_info
def deserialize_grabbed_info(data):
from openravepy import Robot
return deserialize_with_map(Robot.GrabbedInfo(), data, GRABBED_INFO_MAP)
def deserialize_transform(data):
from openravepy import matrixFromQuat
t = matrixFromQuat(data['orientation'])
t[0:3, 3] = data['position']
return t
# Schema.
mesh_environment = openravepy.Environment()
identity = lambda x: x
str_identity = (
lambda x: x,
lambda x: x.encode()
)
both_identity = (
lambda x: x,
lambda x: x
)
numpy_identity = (
lambda x: x.tolist(),
lambda x: numpy.array(x)
)
transform_identity = (
serialize_transform,
deserialize_transform
)
KINBODY_STATE_MAP = {
'description': (
lambda x: x.GetDescription(),
lambda x, value: x.SetDescription(value),
),
'link_enable_states': (
lambda x: x.GetLinkEnableStates().tolist(),
lambda x, value: x.SetLinkEnableStates(value)
),
'link_velocities': (
lambda x: x.GetLinkVelocities().tolist(),
lambda x, value: x.SetLinkVelocities(value),
),
'transform': (
lambda x: serialize_transform(x.GetTransform()),
lambda x, value: x.SetTransform(deserialize_transform(value)),
),
'dof_weights': (
lambda x: x.GetDOFWeights().tolist(),
lambda x, value: x.SetDOFWeights(value),
),
'dof_resolutions': (
lambda x: x.GetDOFResolutions().tolist(),
lambda x, value: x.SetDOFResolutions(value),
),
'dof_position_limits': (
lambda x: [ limits.tolist() for limits in x.GetDOFLimits() ],
lambda x, (lower, upper): x.SetDOFLimits(lower, upper),
),
'dof_velocity_limits': (
lambda x: x.GetDOFVelocityLimits().tolist(),
lambda x, value: x.SetDOFVelocityLimits(value),
),
'dof_acceleration_limits': (
lambda x: x.GetDOFAccelerationLimits().tolist(),
lambda x, value: x.SetDOFAccelerationLimits(value),
),
'dof_torque_limits': (
lambda x: x.GetDOFTorqueLimits().tolist(),
lambda x, value: x.SetDOFTorqueLimits(value),
),
# TODO: What about link accelerations and geometry groups?
}
ROBOT_STATE_MAP = {
# TODO: Does this preserve affine DOFs?
'active_dof_indices': (
lambda x: x.GetActiveDOFIndices().tolist(),
lambda x, value: x.SetActiveDOFs(value)
),
'active_manipulator': (
lambda x: x.GetActiveManipulator().GetName(),
lambda x, value: x.SetActiveManipulator(value),
),
}
LINK_INFO_MAP = {
'_bIsEnabled': both_identity,
'_bStatic': both_identity,
'_mapFloatParameters': both_identity,
'_mapIntParameters': both_identity,
'_mapStringParameters': both_identity, # TODO
'_mass': both_identity,
'_name': str_identity,
'_t': transform_identity,
'_tMassFrame': transform_identity,
'_vForcedAdjacentLinks': both_identity,
'_vgeometryinfos': (
lambda x: map(serialize_geometry_info, x),
lambda x: map(deserialize_geometry_info, x),
),
'_vinertiamoments': numpy_identity,
}
JOINT_INFO_MAP = {
'_bIsActive': both_identity,
'_bIsCircular': both_identity,
'_linkname0': str_identity,
'_linkname1': str_identity,
'_mapFloatParameters': both_identity,
'_mapIntParameters': both_identity,
'_mapStringParameters': both_identity, # TODO
'_name': str_identity,
'_type': (
lambda x: x.name,
lambda x: openravepy.KinBody.JointType.names[x].encode()
),
'_vanchor': numpy_identity,
'_vaxes': (
lambda x: [ xi.tolist() for xi in x ],
lambda x: map(numpy.array, x)
),
'_vcurrentvalues': numpy_identity,
'_vhardmaxvel': numpy_identity,
'_vlowerlimit': numpy_identity,
'_vmaxaccel': numpy_identity,
'_vmaxinertia': numpy_identity,
'_vmaxtorque': numpy_identity,
'_vmaxvel': numpy_identity,
'_vmimic': both_identity,
'_voffsets': numpy_identity,
'_vresolution': numpy_identity,
'_vupperlimit': numpy_identity,
'_vweights': numpy_identity,
}
GEOMETRY_INFO_MAP = {
'_bModifiable': both_identity,
'_bVisible': both_identity,
'_fTransparency': both_identity,
'_filenamecollision': str_identity,
'_filenamerender': str_identity,
'_t': transform_identity,
'_type': (
lambda x: x.name,
lambda x: openravepy.GeometryType.names[x]
),
'_vAmbientColor': numpy_identity,
'_vCollisionScale': numpy_identity,
'_vDiffuseColor': numpy_identity,
'_vGeomData': numpy_identity,
'_vRenderScale': numpy_identity,
# TODO: What are these?
#'_mapExtraGeometries': None
#15 is not JSON serializable
#'_trajfollow': None,
}
MANIPULATOR_INFO_MAP = {
'_name': str_identity,
'_sBaseLinkName': str_identity,
'_sEffectorLinkName': str_identity,
'_sIkSolverXMLId': str_identity,
'_tLocalTool': transform_identity,
'_vChuckingDirection': numpy_identity,
'_vClosingDirection': numpy_identity,
'_vGripperJointNames': both_identity, # TODO
'_vdirection': numpy_identity,
}
GRABBED_INFO_MAP = {
'_grabbedname': str_identity,
'_robotlinkname': str_identity,
'_setRobotLinksToIgnore': both_identity, # TODO
'_trelative': transform_identity,
}
|
|
##
# Copyright (c) 2005-2014 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, wsanchez@apple.com
##
import os
from twisted.cred.portal import Portal
from txweb2 import responsecode
from txweb2.auth import basic
from txweb2.stream import MemoryStream
from txweb2.dav.util import davXMLFromStream
from txweb2.dav.auth import TwistedPasswordProperty, IPrincipal, DavRealm, TwistedPropertyChecker, AuthenticationWrapper
from txweb2.dav.fileop import rmdir
from txweb2.test.test_server import SimpleRequest
from txweb2.dav.test.util import Site, serialize
from txweb2.dav.test.test_resource import \
TestDAVPrincipalResource, TestPrincipalsCollection
from txdav.xml import element
import txweb2.dav.test.util
class ACL(txweb2.dav.test.util.TestCase):
"""
RFC 3744 (WebDAV ACL) tests.
"""
def createDocumentRoot(self):
docroot = self.mktemp()
os.mkdir(docroot)
userResource = TestDAVPrincipalResource("/principals/users/user01")
userResource.writeDeadProperty(TwistedPasswordProperty("user01"))
principalCollection = TestPrincipalsCollection(
"/principals/",
children={"users": TestPrincipalsCollection(
"/principals/users/",
children={"user01": userResource})})
rootResource = self.resource_class(
docroot, principalCollections=(principalCollection,))
portal = Portal(DavRealm())
portal.registerChecker(TwistedPropertyChecker())
credentialFactories = (basic.BasicCredentialFactory(""),)
loginInterfaces = (IPrincipal,)
self.site = Site(AuthenticationWrapper(
rootResource,
portal,
credentialFactories,
credentialFactories,
loginInterfaces
))
rootResource.setAccessControlList(self.grant(element.All()))
for name, acl in (
("none" , self.grant()),
("read" , self.grant(element.Read())),
("read-write" , self.grant(element.Read(), element.Write())),
("unlock" , self.grant(element.Unlock())),
("all" , self.grant(element.All())),
):
filename = os.path.join(docroot, name)
if not os.path.isfile(filename):
file(filename, "w").close()
resource = self.resource_class(filename)
resource.setAccessControlList(acl)
for name, acl in (
("nobind" , self.grant()),
("bind" , self.grant(element.Bind())),
("unbind" , self.grant(element.Bind(), element.Unbind())),
):
dirname = os.path.join(docroot, name)
if not os.path.isdir(dirname):
os.mkdir(dirname)
resource = self.resource_class(dirname)
resource.setAccessControlList(acl)
return docroot
def restore(self):
# Get rid of whatever messed up state the test has now so that we'll
# get a fresh docroot. This isn't very cool; tests should be doing
# less so that they don't need a fresh copy of this state.
if hasattr(self, "_docroot"):
rmdir(self._docroot)
del self._docroot
def test_COPY_MOVE_source(self):
"""
Verify source access controls during COPY and MOVE.
"""
def work():
dst_path = os.path.join(self.docroot, "copy_dst")
dst_uri = "/" + os.path.basename(dst_path)
for src, status in (
("nobind", responsecode.FORBIDDEN),
("bind", responsecode.FORBIDDEN),
("unbind", responsecode.CREATED),
):
src_path = os.path.join(self.docroot, "src_" + src)
src_uri = "/" + os.path.basename(src_path)
if not os.path.isdir(src_path):
os.mkdir(src_path)
src_resource = self.resource_class(src_path)
src_resource.setAccessControlList({
"nobind": self.grant(),
"bind" : self.grant(element.Bind()),
"unbind": self.grant(element.Bind(), element.Unbind())
}[src])
for name, acl in (
("none" , self.grant()),
("read" , self.grant(element.Read())),
("read-write" , self.grant(element.Read(), element.Write())),
("unlock" , self.grant(element.Unlock())),
("all" , self.grant(element.All())),
):
filename = os.path.join(src_path, name)
if not os.path.isfile(filename):
file(filename, "w").close()
self.resource_class(filename).setAccessControlList(acl)
for method in ("COPY", "MOVE"):
for name, code in (
("none" , {"COPY": responsecode.FORBIDDEN, "MOVE": status}[method]),
("read" , {"COPY": responsecode.CREATED, "MOVE": status}[method]),
("read-write" , {"COPY": responsecode.CREATED, "MOVE": status}[method]),
("unlock" , {"COPY": responsecode.FORBIDDEN, "MOVE": status}[method]),
("all" , {"COPY": responsecode.CREATED, "MOVE": status}[method]),
):
path = os.path.join(src_path, name)
uri = src_uri + "/" + name
request = SimpleRequest(self.site, method, uri)
request.headers.setHeader("destination", dst_uri)
_add_auth_header(request)
def test(response, code=code, path=path):
if os.path.isfile(dst_path):
os.remove(dst_path)
if response.code != code:
return self.oops(request, response, code, method, name)
yield (request, test)
return serialize(self.send, work())
def test_COPY_MOVE_dest(self):
"""
Verify destination access controls during COPY and MOVE.
"""
def work():
src_path = os.path.join(self.docroot, "read")
uri = "/" + os.path.basename(src_path)
for method in ("COPY", "MOVE"):
for name, code in (
("nobind" , responsecode.FORBIDDEN),
("bind" , responsecode.CREATED),
("unbind" , responsecode.CREATED),
):
dst_parent_path = os.path.join(self.docroot, name)
dst_path = os.path.join(dst_parent_path, "dst")
request = SimpleRequest(self.site, method, uri)
request.headers.setHeader("destination", "/" + name + "/dst")
_add_auth_header(request)
def test(response, code=code, dst_path=dst_path):
if os.path.isfile(dst_path):
os.remove(dst_path)
if response.code != code:
return self.oops(request, response, code, method, name)
yield (request, test)
self.restore()
return serialize(self.send, work())
def test_DELETE(self):
"""
Verify access controls during DELETE.
"""
def work():
for name, code in (
("nobind" , responsecode.FORBIDDEN),
("bind" , responsecode.FORBIDDEN),
("unbind" , responsecode.NO_CONTENT),
):
collection_path = os.path.join(self.docroot, name)
path = os.path.join(collection_path, "dst")
file(path, "w").close()
request = SimpleRequest(self.site, "DELETE", "/" + name + "/dst")
_add_auth_header(request)
def test(response, code=code, path=path):
if response.code != code:
return self.oops(request, response, code, "DELETE", name)
yield (request, test)
return serialize(self.send, work())
def test_UNLOCK(self):
"""
Verify access controls during UNLOCK of unowned lock.
"""
raise NotImplementedError()
test_UNLOCK.todo = "access controls on UNLOCK unimplemented"
def test_MKCOL_PUT(self):
"""
Verify access controls during MKCOL.
"""
for method in ("MKCOL", "PUT"):
def work():
for name, code in (
("nobind" , responsecode.FORBIDDEN),
("bind" , responsecode.CREATED),
("unbind" , responsecode.CREATED),
):
collection_path = os.path.join(self.docroot, name)
path = os.path.join(collection_path, "dst")
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
os.rmdir(path)
request = SimpleRequest(self.site, method, "/" + name + "/dst")
_add_auth_header(request)
def test(response, code=code, path=path):
if response.code != code:
return self.oops(request, response, code, method, name)
yield (request, test)
return serialize(self.send, work())
def test_PUT_exists(self):
"""
Verify access controls during PUT of existing file.
"""
def work():
for name, code in (
("none" , responsecode.FORBIDDEN),
("read" , responsecode.FORBIDDEN),
("read-write" , responsecode.NO_CONTENT),
("unlock" , responsecode.FORBIDDEN),
("all" , responsecode.NO_CONTENT),
):
path = os.path.join(self.docroot, name)
request = SimpleRequest(self.site, "PUT", "/" + name)
_add_auth_header(request)
def test(response, code=code, path=path):
if response.code != code:
return self.oops(request, response, code, "PUT", name)
yield (request, test)
return serialize(self.send, work())
def test_PROPFIND(self):
"""
Verify access controls during PROPFIND.
"""
raise NotImplementedError()
test_PROPFIND.todo = "access controls on PROPFIND unimplemented"
def test_PROPPATCH(self):
"""
Verify access controls during PROPPATCH.
"""
def work():
for name, code in (
("none" , responsecode.FORBIDDEN),
("read" , responsecode.FORBIDDEN),
("read-write" , responsecode.MULTI_STATUS),
("unlock" , responsecode.FORBIDDEN),
("all" , responsecode.MULTI_STATUS),
):
path = os.path.join(self.docroot, name)
request = SimpleRequest(self.site, "PROPPATCH", "/" + name)
request.stream = MemoryStream(
element.WebDAVDocument(element.PropertyUpdate()).toxml()
)
_add_auth_header(request)
def test(response, code=code, path=path):
if response.code != code:
return self.oops(request, response, code, "PROPPATCH", name)
yield (request, test)
return serialize(self.send, work())
def test_GET_REPORT(self):
"""
Verify access controls during GET and REPORT.
"""
def work():
for method in ("GET", "REPORT"):
if method == "GET":
ok = responsecode.OK
elif method == "REPORT":
ok = responsecode.MULTI_STATUS
else:
raise AssertionError("We shouldn't be here. (method = %r)" % (method,))
for name, code in (
("none" , responsecode.FORBIDDEN),
("read" , ok),
("read-write" , ok),
("unlock" , responsecode.FORBIDDEN),
("all" , ok),
):
path = os.path.join(self.docroot, name)
request = SimpleRequest(self.site, method, "/" + name)
if method == "REPORT":
request.stream = MemoryStream(element.PrincipalPropertySearch().toxml())
_add_auth_header(request)
def test(response, code=code, path=path):
if response.code != code:
return self.oops(request, response, code, method, name)
yield (request, test)
return serialize(self.send, work())
def oops(self, request, response, code, method, name):
def gotResponseData(doc):
if doc is None:
doc_xml = None
else:
doc_xml = doc.toxml()
def fail(acl):
self.fail("Incorrect status code %s (!= %s) for %s of resource %s with %s ACL: %s\nACL: %s"
% (response.code, code, method, request.uri, name, doc_xml, acl.toxml()))
def getACL(resource):
return resource.accessControlList(request)
d = request.locateResource(request.uri)
d.addCallback(getACL)
d.addCallback(fail)
return d
d = davXMLFromStream(response.stream)
d.addCallback(gotResponseData)
return d
def _add_auth_header(request):
request.headers.setHeader(
"authorization",
("basic", "user01:user01".encode("base64"))
)
|
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.common import constants as const
from neutron import context as n_context
from neutron.db import api as db_api
from neutron.openstack.common import log as logging
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers.l2pop import config # noqa
from neutron.plugins.ml2.drivers.l2pop import db as l2pop_db
from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
from neutron.openstack.common import jsonutils
LOG = logging.getLogger(__name__)
class L2populationMechanismDriver(api.MechanismDriver,
l2pop_db.L2populationDbMixin):
def __init__(self):
super(L2populationMechanismDriver, self).__init__()
self.L2populationAgentNotify = l2pop_rpc.L2populationAgentNotifyAPI()
def initialize(self):
LOG.debug(_("Experimental L2 population driver"))
self.rpc_ctx = n_context.get_admin_context_without_session()
self.migrated_ports = {}
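# fdb entries are [mac_address, ip_address, device_owner] triples, one per
# fixed IP on the port.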
def _get_port_fdb_entries(self, port):
return [[port['mac_address'],
ip['ip_address'], port['device_owner']] for ip in port['fixed_ips']]
def delete_port_postcommit(self, context):
port = context.current
agent_host = context.host
fdb_entries = self._update_port_down(context, port, agent_host)
self.L2populationAgentNotify.remove_fdb_entries(self.rpc_ctx,
fdb_entries)
def _get_diff_ips(self, orig, port):
orig_ips = set([ip['ip_address'] for ip in orig['fixed_ips']])
port_ips = set([ip['ip_address'] for ip in port['fixed_ips']])
# check if an ip has been added or removed
orig_chg_ips = orig_ips.difference(port_ips)
port_chg_ips = port_ips.difference(orig_ips)
if orig_chg_ips or port_chg_ips:
return orig_chg_ips, port_chg_ips
def _fixed_ips_changed(self, context, orig, port, diff_ips):
orig_ips, port_ips = diff_ips
if (port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
agent_host = context.host
else:
agent_host = context.original_host
port_infos = self._get_port_infos(
context, orig, agent_host)
if not port_infos:
return
agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos
orig_mac_ip = [[port['mac_address'], ip, port['device_owner']]
for ip in orig_ips]
port_mac_ip = [[port['mac_address'], ip, port['device_owner']]
for ip in port_ips]
upd_fdb_entries = {port['network_id']: {agent_ip: {}}}
ports = upd_fdb_entries[port['network_id']][agent_ip]
if orig_mac_ip:
ports['before'] = orig_mac_ip
if port_mac_ip:
ports['after'] = port_mac_ip
self.L2populationAgentNotify.update_fdb_entries(
self.rpc_ctx, {'chg_ip': upd_fdb_entries})
return True
def update_port_postcommit(self, context):
port = context.current
orig = context.original
diff_ips = self._get_diff_ips(orig, port)
if diff_ips:
self._fixed_ips_changed(context, orig, port, diff_ips)
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
if context.status == const.PORT_STATUS_ACTIVE:
self._update_port_up(context)
if context.status == const.PORT_STATUS_DOWN:
agent_host = context.host
fdb_entries = self._update_port_down(
context, port, agent_host)
self.L2populationAgentNotify.remove_fdb_entries(
self.rpc_ctx, fdb_entries)
elif (context.host != context.original_host
and context.status == const.PORT_STATUS_ACTIVE
and not self.migrated_ports.get(orig['id'])):
# The port has been migrated. Store the original binding so the
# appropriate fdb entries can be sent once the port is set up on
# the destination host.
self.migrated_ports[orig['id']] = (
(orig, context.original_host))
elif context.status != context.original_status:
if context.status == const.PORT_STATUS_ACTIVE:
self._update_port_up(context)
elif context.status == const.PORT_STATUS_DOWN:
fdb_entries = self._update_port_down(
context, port, context.host)
self.L2populationAgentNotify.remove_fdb_entries(
self.rpc_ctx, fdb_entries)
elif context.status == const.PORT_STATUS_BUILD:
orig = self.migrated_ports.pop(port['id'], None)
if orig:
original_port = orig[0]
original_host = orig[1]
# this port has been migrated: remove its entries from fdb
fdb_entries = self._update_port_down(
context, original_port, original_host)
self.L2populationAgentNotify.remove_fdb_entries(
self.rpc_ctx, fdb_entries)
def _get_port_infos(self, context, port, agent_host):
if not agent_host:
return
session = db_api.get_session()
agent = self.get_agent_by_host(session, agent_host)
if not agent:
return
if port['binding:profile'].get('host_ip'):
agent_ip = self.get_host_ip_from_binding_profile(port)
else:
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
agent_ip = "127.0.0.1"
else:
agent_ip = self.get_agent_ip(agent)
if not agent_ip:
LOG.warning(_("Unable to retrieve the agent ip, check the agent "
"configuration."))
return
segment = context.bound_segment
if not segment:
LOG.warning(_("Port %(port)s updated by agent %(agent)s "
"isn't bound to any segment"),
{'port': port['id'], 'agent': agent})
return
network_types = self.get_agent_l2pop_network_types(agent)
if network_types is None:
network_types = self.get_agent_tunnel_types(agent)
if segment['network_type'] not in network_types:
return
fdb_entries = self._get_port_fdb_entries(port)
return agent, agent_host, agent_ip, segment, fdb_entries
def _update_port_up(self, context):
port = context.current
agent_host = context.host
port_infos = self._get_port_infos(context, port, agent_host)
if not port_infos:
return
agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos
network_id = port['network_id']
session = db_api.get_session()
agent_active_ports = self.get_agent_network_active_port_count(
session, agent_host, network_id)
other_fdb_entries = {network_id:
{'segment_id': segment['segmentation_id'],
'network_type': segment['network_type'],
'ports': {agent_ip: []}}}
if agent_active_ports == 1 or (
self.get_agent_uptime(agent) < cfg.CONF.l2pop.agent_boot_time):
# First port activated on current agent in this network,
# we have to provide it with the whole list of fdb entries
agent_fdb_entries = {network_id:
{'segment_id': segment['segmentation_id'],
'network_type': segment['network_type'],
'ports': {}}}
ports = agent_fdb_entries[network_id]['ports']
nondvr_network_ports = self.get_nondvr_network_ports(session,
network_id)
for network_port in nondvr_network_ports:
binding, agent = network_port
if agent.host == agent_host:
continue
profile = binding.profile
if profile and jsonutils.loads(profile).get('host_ip', None):
ip = self.get_host_ip_from_binding_profile_str(profile)
else:
ip = self.get_agent_ip(agent)
if not ip:
LOG.debug(_("Unable to retrieve the agent ip, check "
"the agent %(agent_host)s configuration."),
{'agent_host': agent.host})
continue
agent_ports = ports.get(ip, [const.FLOODING_ENTRY])
agent_ports += self._get_port_fdb_entries(binding.port)
ports[ip] = agent_ports
dvr_network_ports = self.get_dvr_network_ports(session, network_id)
for network_port in dvr_network_ports:
binding, agent = network_port
if agent.host == agent_host:
continue
ip = self.get_agent_ip(agent)
if not ip:
LOG.debug("Unable to retrieve the agent ip, check "
"the agent %(agent_host)s configuration.",
{'agent_host': agent.host})
continue
agent_ports = ports.get(ip, [const.FLOODING_ENTRY])
ports[ip] = agent_ports
# And notify other agents to add flooding entry
other_fdb_entries[network_id]['ports'][agent_ip].append(
const.FLOODING_ENTRY)
if ports:
self.L2populationAgentNotify.add_fdb_entries(
self.rpc_ctx, agent_fdb_entries, agent_host)
# Notify other agents to add fdb rule for current port
if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE:
other_fdb_entries[network_id]['ports'][agent_ip] += (
port_fdb_entries)
self.L2populationAgentNotify.add_fdb_entries(self.rpc_ctx,
other_fdb_entries)
def _update_port_down(self, context, port, agent_host):
port_infos = self._get_port_infos(context, port, agent_host)
if not port_infos:
return
agent, agent_host, agent_ip, segment, port_fdb_entries = port_infos
network_id = port['network_id']
session = db_api.get_session()
agent_active_ports = self.get_agent_network_active_port_count(
session, agent_host, network_id)
other_fdb_entries = {network_id:
{'segment_id': segment['segmentation_id'],
'network_type': segment['network_type'],
'ports': {agent_ip: []}}}
if agent_active_ports == 0:
# The agent is removing its last activated port in this network;
# other agents need to be notified to delete their flooding entries.
other_fdb_entries[network_id]['ports'][agent_ip].append(
const.FLOODING_ENTRY)
# Notify other agents to remove fdb rules for current port
if port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE:
fdb_entries = port_fdb_entries
other_fdb_entries[network_id]['ports'][agent_ip] += fdb_entries
return other_fdb_entries
|
|
#!/usr/bin/env python3
#============================================================================
# Copyright (C) Microsoft Corporation, All rights reserved.
#============================================================================
from contextlib import contextmanager
import os
import sys
import imp
import re
import codecs
import shutil
import json
import fileinput
import hashlib
import collections
protocol = imp.load_source('protocol', '../protocol.py')
nxDSCLog = imp.load_source('nxDSCLog', '../nxDSCLog.py')
LG = nxDSCLog.DSCLog
RESOURCE_MODULE_PATH = '/opt/microsoft/omsconfig/modules/nxOMSWLI/DSCResources/MSFT_nxOMSWLIResource/WLI/'
PLUGIN_RPATH = 'plugins/'
CONF_RPATH = 'conf/'
PLUGIN_DEST_PATH = '/opt/microsoft/omsagent/plugin/'
CONF_DEST_PATH_PREFIX = '/etc/opt/microsoft/omsagent/'
CONF_DEST_PATH_SUFFIX = '/conf/omsagent.d'
BLOCK_SIZE = 8192
class IOMSAgent:
def restart_oms_agent(self, workspaceId):
pass
class OMSAgentUtil(IOMSAgent):
def restart_oms_agent(self, workspaceId):
wsId = workspaceId
if wsId is None:
wsId = ''
if os.system('sudo /opt/microsoft/omsagent/bin/service_control restart %s' %(wsId)) == 0:
return True
else:
LG().Log(LogType.Error, 'Error restarting omsagent for workspace ' + wsId)
return False
OMS_ACTION = OMSAgentUtil()
class WLISettings:
workload_name = ""
ensure = ""
plugin_loc = ""
config_cmd = ""
require_sudo = False
def __init__(self, settingsDict):
if 'WorkloadName' in settingsDict:
self.workload_name = settingsDict['WorkloadName'].encode('ascii', 'ignore').decode('utf-8')
if 'Ensure' in settingsDict:
self.ensure = settingsDict['Ensure'].encode('ascii', 'ignore').decode('utf-8')
if 'Plugin' in settingsDict:
self.plugin_loc = settingsDict['Plugin'].encode('ascii', 'ignore').decode('utf-8')
if 'ConfigCommand' in settingsDict:
self.config_cmd = settingsDict['ConfigCommand'].encode('ascii', 'ignore').decode('utf-8')
if 'RequireSudo' in settingsDict:
self.require_sudo = settingsDict['RequireSudo']
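# DSC entry points. Set_Marshall applies the desired configuration,
# Test_Marshall checks whether it is already in place, and Get_Marshall
# reports the current state; Set and Test return [0] on success and [-1]
# on failure.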
def Set_Marshall(WorkspaceId, Configuration):
WorkspaceId = WorkspaceId.encode('ascii', 'ignore').decode('utf-8')
config_ary = json.loads(Configuration)['WLIConfiguration']
wli_settings = Get_WLI_Settings(config_ary)
for ws in wli_settings:
retval = Set(WorkspaceId, ws)
if not retval:
return [-1]
return [0]
def Test_Marshall(WorkspaceId, Configuration):
WorkspaceId = WorkspaceId.encode('ascii', 'ignore').decode('utf-8')
config_ary = json.loads(Configuration)['WLIConfiguration']
wli_settings = Get_WLI_Settings(config_ary)
for ws in wli_settings:
retval = Test(WorkspaceId, ws)
if not retval:
return [-1]
return [0]
def Get_Marshall(WorkspaceId, Configuration):
arg_names = list(locals().keys())
WorkspaceId = WorkspaceId.encode('ascii', 'ignore').decode('utf-8')
retval = 0
local_workloads = Get(WorkspaceId)
Configuration = protocol.MI_String(str(local_workloads))
WorkspaceId = protocol.MI_String(WorkspaceId)
retd = {}
ld = locals()
for k in arg_names:
retd[k] = ld[k]
return retval, retd
def Get(wid):
workloads = []
    resource_conf_path = RESOURCE_MODULE_PATH + CONF_RPATH
sub_dirs = []
dirs = os.listdir(resource_conf_path)
for sub_dir in dirs:
if os.path.isdir(os.path.join(resource_conf_path, sub_dir)):
sub_dirs.append(sub_dir)
for sub_dir in sub_dirs:
retval = Check_All_Files(wid, os.path.join(resource_conf_path, sub_dir), CONF_DEST_PATH_PREFIX + wid + CONF_DEST_PATH_SUFFIX)
if retval:
workloads.append({'WorkloadName': sub_dir, 'Ensure': 'Present'})
else:
workloads.append({'WorkloadName': sub_dir, 'Ensure': 'Absent'})
return workloads
def Get_WLI_Settings(config_array):
wli_config = []
for config in config_array:
wli_setting = WLISettings(config)
wli_config.append(wli_setting)
return wli_config
def Set(wid, wli_setting):
retval = True
if wli_setting.ensure == 'Present':
retval &= Update_Plugin_Files(wid, wli_setting.plugin_loc)
retval &= Update_Conf_Files(wid, wli_setting.workload_name)
else:
        retval &= Delete_All_Files(RESOURCE_MODULE_PATH + CONF_RPATH + wli_setting.workload_name, CONF_DEST_PATH_PREFIX + wid + CONF_DEST_PATH_SUFFIX, True)
retval &= OMS_ACTION.restart_oms_agent(wid)
return retval
def Test(wid, wli_setting):
retval = True
if wli_setting.ensure == 'Present':
        retval &= Check_All_Files(wid, RESOURCE_MODULE_PATH + PLUGIN_RPATH + wli_setting.plugin_loc, PLUGIN_DEST_PATH)
        retval &= Check_All_Files(wid, RESOURCE_MODULE_PATH + CONF_RPATH + wli_setting.workload_name, CONF_DEST_PATH_PREFIX + wid + CONF_DEST_PATH_SUFFIX)
    else:
        retval &= not Check_All_Files(wid, RESOURCE_MODULE_PATH + CONF_RPATH + wli_setting.workload_name, CONF_DEST_PATH_PREFIX + wid + CONF_DEST_PATH_SUFFIX)
return retval
def Update_Plugin_Files(wid, plugin_loc):
retval = True
#replace files
    retval &= Delete_All_Files(RESOURCE_MODULE_PATH + PLUGIN_RPATH + plugin_loc, PLUGIN_DEST_PATH)
    retval &= Copy_All_Files(RESOURCE_MODULE_PATH + PLUGIN_RPATH + plugin_loc, PLUGIN_DEST_PATH)
return retval
def Update_Conf_Files(wid, workload_name):
retval = True
    retval &= Delete_All_Files(RESOURCE_MODULE_PATH + CONF_RPATH + workload_name, CONF_DEST_PATH_PREFIX + wid + CONF_DEST_PATH_SUFFIX)
    retval &= Copy_All_Files(RESOURCE_MODULE_PATH + CONF_RPATH + workload_name, CONF_DEST_PATH_PREFIX + wid + CONF_DEST_PATH_SUFFIX)
return retval
def Copy_All_Files(src, dest):
try:
src_files = os.listdir(src)
for file_name in src_files:
full_file_name = os.path.join(src, file_name)
dest_file_name = os.path.join(dest, file_name)
            if os.path.isfile(full_file_name):
                shutil.copy(full_file_name, dest)
                ext = os.path.splitext(dest_file_name)[1]
                if ext == '.conf':
                    # Expand %CONF_DIR_WS% in place; end='' avoids doubling
                    # the newline each line already carries.
                    for line in fileinput.FileInput(dest_file_name, inplace=True):
                        print(line.replace("%CONF_DIR_WS%", dest), end='')
    except Exception:
        LG().Log('Error', 'copy_all_files failed for src: ' + src + ' dest: ' + dest + ' with exception: ' + str(sys.exc_info()[0]))
return False
return True
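# Sketch of the substitution performed above on copied .conf files; the
# sample line is hypothetical:
#   'buffer_path %CONF_DIR_WS%/state/out.buffer'
# becomes, for dest='/etc/opt/microsoft/omsagent/<wid>/conf/omsagent.d':
#   'buffer_path /etc/opt/microsoft/omsagent/<wid>/conf/omsagent.d/state/out.buffer'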
def Delete_All_Files(src, dest, conf_only=False):
    try:
        src_files = os.listdir(src)
        for file_name in src_files:
            full_file_name = os.path.join(dest, file_name)
            if os.path.isfile(full_file_name) and not conf_only:
                os.remove(full_file_name)
            elif os.path.isfile(full_file_name) and conf_only:
                file_ext = os.path.splitext(full_file_name)[1]
                if file_ext == '.conf':
                    os.remove(full_file_name)
    except Exception:
        LG().Log('Error', 'delete_all_files failed for src: ' + src + ' dest: ' + dest + ' with exception: ' + str(sys.exc_info()[0]))
        return False
    return True
def Check_All_Files(wid, src, dest):
try:
src_files = os.listdir(src)
for file_name in src_files:
src_file_path = os.path.join(src, file_name)
dest_file_path = os.path.join(dest, file_name)
tmp_file_path = '/tmp/wli_' + wid + '/' + file_name
if os.path.isfile(dest_file_path):
if Compare_Files(wid, dest_file_path, src_file_path, 'sha256', tmp_file_path) == -1:
if (os.path.isfile(tmp_file_path)):
os.remove(tmp_file_path)
return False
else:
if (os.path.isfile(tmp_file_path)):
os.remove(tmp_file_path)
else:
return False
return True
    except Exception:
        LG().Log('Error', 'check_all_files failed for src: ' + src + ' dest: ' + dest + ' with error ' + str(sys.exc_info()[0]))
return False
def Create_Tmp_Conf_File(wid, conf_file, dest):
try:
tmp_dir = '/tmp/wli_' + wid
if not os.path.exists(tmp_dir):
os.makedirs(tmp_dir)
file_name = os.path.basename(conf_file)
LG().Log('INFO', "FILE: " + file_name + " Dir: " + str(os.path.join(tmp_dir, file_name)))
if (os.path.isfile(conf_file)):
shutil.copy(conf_file, tmp_dir)
            for line in fileinput.FileInput(os.path.join(tmp_dir, file_name), inplace=True):
                print(line.replace("%CONF_DIR_WS%", os.path.dirname(dest)), end='')
        return True
    except Exception:
        LG().Log('Error', 'create_tmp_conf_file failed for conf: ' + conf_file + ' with exception: ' + str(sys.exc_info()[0]))
return False
def Compare_Files(wid, DestinationPath, SourcePath, Checksum, TmpPath):
"""
If the files differ in size, return -1.
Reading and computing the hash here is done in a block-by-block manner,
in case the file is quite large.
"""
src_file_ext = os.path.splitext(SourcePath)[1]
if(len(src_file_ext) > 0 and src_file_ext == '.conf'):
if Create_Tmp_Conf_File(wid, SourcePath, DestinationPath):
SourcePath = TmpPath
if SourcePath == DestinationPath: # Files are the same!
return 0
stat_dest = StatFile(DestinationPath)
stat_src = StatFile(SourcePath)
if stat_src.st_size != stat_dest.st_size:
        LG().Log('INFO', 'Size mismatch src: ' + str(SourcePath) + ' dest: ' + str(DestinationPath))
return -1
if Checksum == 'sha256':
src_error = None
dest_error = None
with opened_bin_w_error(SourcePath, 'rb') as (src_file, src_error):
            if src_error:
                LG().Log('Error', 'Exception opening source file ' + SourcePath
                         + ' Error : ' + str(src_error))
                return -1
with opened_bin_w_error(DestinationPath, 'rb') as (dest_file,
dest_error):
                if dest_error:
                    LG().Log('Error', 'Exception opening destination file '
                             + DestinationPath + ' Error Code: '
                             + str(dest_error.errno) + ' Error: '
                             + dest_error.strerror)
                    return -1
return are_binary_file_contents_same(src_file, dest_file)
elif Checksum == 'ctime':
if stat_src.st_ctime != stat_dest.st_ctime:
return -1
else:
return 0
elif Checksum == 'mtime':
if stat_src.st_mtime != stat_dest.st_mtime:
return -1
else:
return 0
def are_binary_file_contents_same(f1, f2):
    while True:
        block1 = f1.read(BLOCK_SIZE)
        block2 = f2.read(BLOCK_SIZE)
        # bytewise mismatch is failure
        if block1 != block2:
            return -1
        # both blocks empty means both files reached EOF together: success.
        # (Test emptiness rather than == '', since binary reads return bytes.)
        if not block1:
            return 0
def StatFile(path):
"""
Stat the file, following the symlink.
"""
d = None
error = None
try:
d = os.stat(path)
    except (OSError, IOError) as error:
        LG().Log('Error', 'Exception stating file ' + path + ' Error: '
                 + str(error))
return d
@contextmanager
def opened_bin_w_error(filename, mode='rb'):
"""
This context ensures the file is closed.
"""
try:
f = open(filename, mode)
except IOError as err:
yield None, err
else:
try:
yield f, None
finally:
f.close()
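# Usage sketch for the (file, error) context manager above ('example.bin'
# is a placeholder path):
#   with opened_bin_w_error('example.bin') as (f, err):
#       if err:
#           LG().Log('Error', 'open failed: ' + str(err))
#       else:
#           data = f.read(BLOCK_SIZE)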
|
|
import braintree
from django.conf import settings
from django.test import TestCase
from django.utils.unittest.case import skipIf
from billing import get_gateway, CreditCard
from billing.signals import *
from billing.gateway import CardNotSupported, InvalidData
from billing.utils.credit_card import Visa
@skipIf(not settings.MERCHANT_SETTINGS.get("braintree_payments", None), "gateway not configured")
class BraintreePaymentsGatewayTestCase(TestCase):
def setUp(self):
self.merchant = get_gateway("braintree_payments")
self.merchant.test_mode = True
self.credit_card = CreditCard(first_name="Test", last_name="User",
month=10, year=2020,
number="4111111111111111",
verification_value="100")
def assertBraintreeResponseSuccess(self, resp, msg=None):
if resp['status'] == "FAILURE":
standardMsg = resp['response'].message
self.fail(self._formatMessage(msg, standardMsg))
else:
self.assertEquals(resp['status'], "SUCCESS")
def assertBraintreeResponseFailure(self, resp, msg=None):
self.assertEquals(resp['status'], "FAILURE")
def testCardSupported(self):
self.credit_card.number = "5019222222222222"
self.assertRaises(CardNotSupported,
lambda: self.merchant.purchase(1000, self.credit_card))
def testCardType(self):
self.merchant.validate_card(self.credit_card)
self.assertEquals(self.credit_card.card_type, Visa)
def testPurchase(self):
resp = self.merchant.purchase(5, self.credit_card)
self.assertBraintreeResponseSuccess(resp)
def testFailedPurchase(self):
resp = self.merchant.purchase(2001, self.credit_card)
self.assertBraintreeResponseFailure(resp)
def testDeclinedPurchase(self):
resp = self.merchant.purchase(2900, self.credit_card)
self.assertBraintreeResponseFailure(resp)
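    # In the Braintree sandbox, amounts between 2000.00 and 2999.99 trigger
    # processor declines (the decline code matches the amount), which is what
    # the two failure tests above rely on.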
def testPaymentSuccessfulSignal(self):
received_signals = []
def receive(sender, **kwargs):
received_signals.append(kwargs.get("signal"))
transaction_was_successful.connect(receive)
resp = self.merchant.purchase(1, self.credit_card)
self.assertEquals(received_signals, [transaction_was_successful])
def testPaymentUnSuccessfulSignal(self):
received_signals = []
def receive(sender, **kwargs):
received_signals.append(kwargs.get("signal"))
transaction_was_unsuccessful.connect(receive)
resp = self.merchant.purchase(2000, self.credit_card)
self.assertEquals(received_signals, [transaction_was_unsuccessful])
def testCreditCardExpired(self):
credit_card = CreditCard(first_name="Test", last_name="User",
month=10, year=2011,
number="4000111111111115",
verification_value="100")
resp = self.merchant.purchase(2004, credit_card)
self.assertNotEquals(resp["status"], "SUCCESS")
def testAuthorizeAndCapture(self):
resp = self.merchant.authorize(100, self.credit_card)
self.assertBraintreeResponseSuccess(resp)
resp = self.merchant.capture(50, resp["response"].transaction.id)
self.assertBraintreeResponseSuccess(resp)
# Need a way to test this. Requires delaying the status to either
# "settled" or "settling"
# def testAuthorizeAndRefund(self):
# resp = self.merchant.purchase(100, self.credit_card)
# self.assertEquals(resp["status"], "SUCCESS")
# response = self.merchant.credit(50, resp["response"].transaction.id)
# self.assertEquals(response["status"], "SUCCESS")
def testAuthorizeAndVoid(self):
resp = self.merchant.authorize(105, self.credit_card)
self.assertBraintreeResponseSuccess(resp)
resp = self.merchant.void(resp["response"].transaction.id)
self.assertBraintreeResponseSuccess(resp)
def testStoreMissingCustomer(self):
self.assertRaises(InvalidData,
lambda: self.merchant.store(self.credit_card, {}))
def testStoreWithoutBillingAddress(self):
options = {
"customer": {
"name": "John Doe",
"email": "john.doe@example.com",
},
}
resp = self.merchant.store(self.credit_card, options=options)
self.assertBraintreeResponseSuccess(resp)
self.assertEquals(resp["response"].customer.credit_cards[0].expiration_date,
"%s/%s" % (self.credit_card.month,
self.credit_card.year))
self.assertTrue(getattr(resp["response"].customer.credit_cards[0], "customer_id"))
self.assertTrue(getattr(resp["response"].customer.credit_cards[0], "token"))
def testStoreWithBillingAddress(self):
options = {
"customer": {
"name": "John Doe",
"email": "john.doe@example.com",
},
"billing_address": {
"name": "Johnny Doe",
"company": "",
"email": "johnny.doe@example.com",
"address1": "Street #1",
"address2": "House #2",
"city": "Timbuktu",
"country": "United States of America",
"zip": "110011"
}
}
resp = self.merchant.store(self.credit_card, options=options)
self.assertBraintreeResponseSuccess(resp)
self.assertTrue(getattr(resp["response"].customer.credit_cards[0], "billing_address"))
billing_address = resp["response"].customer.credit_cards[0].billing_address
self.assertEquals(billing_address.country_code_alpha2, "US")
self.assertEquals(billing_address.postal_code, "110011")
self.assertEquals(billing_address.street_address, "Street #1")
self.assertEquals(billing_address.extended_address, "House #2")
self.assertEquals(billing_address.locality, "Timbuktu")
def testUnstore(self):
options = {
"customer": {
"name": "John Doe",
"email": "john.doe@example.com",
},
}
resp = self.merchant.store(self.credit_card, options=options)
self.assertBraintreeResponseSuccess(resp)
resp = self.merchant.unstore(resp["response"].customer.credit_cards[0].token)
self.assertBraintreeResponseSuccess(resp)
    # The tests below require a plan named 'test_plan' to exist in the
    # sandbox control panel; plans cannot currently be created through the API.
def testRecurring1(self):
options = {
"customer": {
"name": "John Doe",
"email": "john.doe@example.com",
},
"recurring": {
"plan_id": "test_plan"
},
}
resp = self.merchant.recurring(10, self.credit_card, options=options)
self.assertBraintreeResponseSuccess(resp)
subscription = resp["response"].subscription
self.assertEquals(subscription.status,
braintree.Subscription.Status.Active)
def testRecurring2(self):
options = {
"customer": {
"name": "John Doe",
"email": "john.doe@example.com",
},
"recurring": {
"plan_id": "test_plan",
"price": 15
},
}
resp = self.merchant.recurring(15, self.credit_card, options=options)
self.assertBraintreeResponseSuccess(resp)
subscription = resp["response"].subscription
self.assertEquals(subscription.price, 15)
def testRecurring3(self):
options = {
"customer": {
"name": "John Doe",
"email": "john.doe@example.com",
},
"recurring": {
"plan_id": "test_plan",
"trial_duration": 2,
"trial_duration_unit": "month",
"number_of_billing_cycles": 12,
},
}
resp = self.merchant.recurring(20, self.credit_card, options=options)
self.assertBraintreeResponseSuccess(resp)
subscription = resp["response"].subscription
self.assertEquals(subscription.number_of_billing_cycles, 12)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualHubRouteTableV2SOperations(object):
"""VirtualHubRouteTableV2SOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
route_table_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualHubRouteTableV2"
"""Retrieves the details of a VirtualHubRouteTableV2.
:param resource_group_name: The resource group name of the VirtualHubRouteTableV2.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param route_table_name: The name of the VirtualHubRouteTableV2.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualHubRouteTableV2, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.VirtualHubRouteTableV2
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHubRouteTableV2"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore
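    # Hypothetical usage through a configured management client (attribute
    # and variable names are placeholders, not defined in this file):
    #   ops = network_client.virtual_hub_route_table_v2_s
    #   route_table = ops.get('my-rg', 'my-hub', 'my-route-table')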
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
route_table_name, # type: str
virtual_hub_route_table_v2_parameters, # type: "_models.VirtualHubRouteTableV2"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualHubRouteTableV2"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHubRouteTableV2"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_hub_route_table_v2_parameters, 'VirtualHubRouteTableV2')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
route_table_name, # type: str
virtual_hub_route_table_v2_parameters, # type: "_models.VirtualHubRouteTableV2"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualHubRouteTableV2"]
"""Creates a VirtualHubRouteTableV2 resource if it doesn't exist else updates the existing
VirtualHubRouteTableV2.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param route_table_name: The name of the VirtualHubRouteTableV2.
:type route_table_name: str
:param virtual_hub_route_table_v2_parameters: Parameters supplied to create or update
VirtualHubRouteTableV2.
:type virtual_hub_route_table_v2_parameters: ~azure.mgmt.network.v2019_09_01.models.VirtualHubRouteTableV2
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualHubRouteTableV2 or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_09_01.models.VirtualHubRouteTableV2]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHubRouteTableV2"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
route_table_name=route_table_name,
virtual_hub_route_table_v2_parameters=virtual_hub_route_table_v2_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualHubRouteTableV2', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore
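    # Sketch of long-running-operation usage (names are placeholders):
    #   poller = ops.begin_create_or_update('my-rg', 'my-hub', 'my-rt', params)
    #   route_table = poller.result()  # blocks until a terminal LRO state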
def _delete_initial(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
route_table_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
route_table_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes a VirtualHubRouteTableV2.
:param resource_group_name: The resource group name of the VirtualHubRouteTableV2.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param route_table_name: The name of the VirtualHubRouteTableV2.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
route_table_name=route_table_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables/{routeTableName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
virtual_hub_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ListVirtualHubRouteTableV2SResult"]
"""Retrieves the details of all VirtualHubRouteTableV2s.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVirtualHubRouteTableV2SResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.ListVirtualHubRouteTableV2SResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualHubRouteTableV2SResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListVirtualHubRouteTableV2SResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routeTables'} # type: ignore
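    # The returned ItemPaged fetches pages lazily; iterating yields individual
    # VirtualHubRouteTableV2 models (names are placeholders):
    #   for route_table in ops.list('my-rg', 'my-hub'):
    #       print(route_table.name)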
|
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common credentials classes and constructors."""
from __future__ import print_function
import datetime
import json
import os
import threading
import httplib2
import oauth2client
import oauth2client.client
import oauth2client.gce
import oauth2client.locked_file
import oauth2client.multistore_file
import oauth2client.service_account
from oauth2client import tools # for gflags declarations
from six.moves import http_client
from six.moves import urllib
from apitools.base.py import exceptions
from apitools.base.py import util
try:
# pylint: disable=wrong-import-order
import gflags
FLAGS = gflags.FLAGS
except ImportError:
FLAGS = None
__all__ = [
'CredentialsFromFile',
'GaeAssertionCredentials',
'GceAssertionCredentials',
'GetCredentials',
'GetUserinfo',
'ServiceAccountCredentials',
'ServiceAccountCredentialsFromFile',
]
# Lock when accessing the cache file to avoid resource contention.
cache_file_lock = threading.Lock()
def SetCredentialsCacheFileLock(lock):
global cache_file_lock # pylint: disable=global-statement
cache_file_lock = lock
# List of additional methods we use when attempting to construct
# credentials. Users can register their own methods here, which we try
# before the defaults.
_CREDENTIALS_METHODS = []
def _RegisterCredentialsMethod(method, position=None):
"""Register a new method for fetching credentials.
This new method should be a function with signature:
client_info, **kwds -> Credentials or None
This method can be used as a decorator, unless position needs to
be supplied.
Note that method must *always* accept arbitrary keyword arguments.
Args:
method: New credential-fetching method.
position: (default: None) Where in the list of methods to
add this; if None, we append. In all but rare cases,
this should be either 0 or None.
Returns:
method, for use as a decorator.
"""
if position is None:
position = len(_CREDENTIALS_METHODS)
else:
position = min(position, len(_CREDENTIALS_METHODS))
_CREDENTIALS_METHODS.insert(position, method)
return method
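# Example registration (illustrative; EnvTokenCredentials is a hypothetical
# stand-in for a real oauth2client Credentials subclass):
#   @_RegisterCredentialsMethod
#   def _FromEnvironment(client_info, **unused_kwds):
#       token = os.environ.get('MY_ACCESS_TOKEN')
#       return EnvTokenCredentials(token) if token else None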
def GetCredentials(package_name, scopes, client_id, client_secret, user_agent,
credentials_filename=None,
api_key=None, # pylint: disable=unused-argument
client=None, # pylint: disable=unused-argument
oauth2client_args=None,
**kwds):
"""Attempt to get credentials, using an oauth dance as the last resort."""
scopes = util.NormalizeScopes(scopes)
client_info = {
'client_id': client_id,
'client_secret': client_secret,
'scope': ' '.join(sorted(scopes)),
'user_agent': user_agent or '%s-generated/0.1' % package_name,
}
for method in _CREDENTIALS_METHODS:
credentials = method(client_info, **kwds)
if credentials is not None:
return credentials
credentials_filename = credentials_filename or os.path.expanduser(
'~/.apitools.token')
credentials = CredentialsFromFile(credentials_filename, client_info,
oauth2client_args=oauth2client_args)
if credentials is not None:
return credentials
raise exceptions.CredentialsError('Could not create valid credentials')
def ServiceAccountCredentialsFromFile(
service_account_name, private_key_filename, scopes,
service_account_kwargs=None):
with open(private_key_filename) as key_file:
return ServiceAccountCredentials(
service_account_name, key_file.read(), scopes,
service_account_kwargs=service_account_kwargs)
def ServiceAccountCredentials(service_account_name, private_key, scopes,
service_account_kwargs=None):
service_account_kwargs = service_account_kwargs or {}
scopes = util.NormalizeScopes(scopes)
return oauth2client.client.SignedJwtAssertionCredentials(
service_account_name, private_key, scopes, **service_account_kwargs)
def _EnsureFileExists(filename):
"""Touches a file; returns False on error, True on success."""
if not os.path.exists(filename):
old_umask = os.umask(0o177)
try:
open(filename, 'a+b').close()
except OSError:
return False
finally:
os.umask(old_umask)
return True
def _GceMetadataRequest(relative_url, use_metadata_ip=False):
"""Request the given url from the GCE metadata service."""
if use_metadata_ip:
base_url = 'http://169.254.169.254/'
else:
base_url = 'http://metadata.google.internal/'
url = base_url + 'computeMetadata/v1/' + relative_url
# Extra header requirement can be found here:
# https://developers.google.com/compute/docs/metadata
headers = {'Metadata-Flavor': 'Google'}
request = urllib.request.Request(url, headers=headers)
opener = urllib.request.build_opener(urllib.request.ProxyHandler({}))
try:
response = opener.open(request)
except urllib.error.URLError as e:
raise exceptions.CommunicationError(
'Could not reach metadata service: %s' % e.reason)
return response
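# For example, listing the instance's service accounts (only works on GCE):
#   _GceMetadataRequest('instance/service-accounts')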
class GceAssertionCredentials(oauth2client.gce.AppAssertionCredentials):
"""Assertion credentials for GCE instances."""
def __init__(self, scopes=None, service_account_name='default', **kwds):
"""Initializes the credentials instance.
Args:
scopes: The scopes to get. If None, whatever scopes that are
available to the instance are used.
service_account_name: The service account to retrieve the scopes
from.
**kwds: Additional keyword args.
"""
# If there is a connectivity issue with the metadata server,
# detection calls may fail even if we've already successfully
# identified these scopes in the same execution. However, the
# available scopes don't change once an instance is created,
# so there is no reason to perform more than one query.
self.__service_account_name = service_account_name
cached_scopes = None
cache_filename = kwds.get('cache_filename')
if cache_filename:
cached_scopes = self._CheckCacheFileForMatch(
cache_filename, scopes)
scopes = cached_scopes or self._ScopesFromMetadataServer(scopes)
if cache_filename and not cached_scopes:
self._WriteCacheFile(cache_filename, scopes)
super(GceAssertionCredentials, self).__init__(scopes, **kwds)
@classmethod
def Get(cls, *args, **kwds):
try:
return cls(*args, **kwds)
except exceptions.Error:
return None
def _CheckCacheFileForMatch(self, cache_filename, scopes):
"""Checks the cache file to see if it matches the given credentials.
Args:
cache_filename: Cache filename to check.
scopes: Scopes for the desired credentials.
Returns:
List of scopes (if cache matches) or None.
"""
creds = { # Credentials metadata dict.
'scopes': sorted(list(scopes)) if scopes else None,
'svc_acct_name': self.__service_account_name,
}
with cache_file_lock:
if _EnsureFileExists(cache_filename):
locked_file = oauth2client.locked_file.LockedFile(
cache_filename, 'r+b', 'rb')
try:
locked_file.open_and_lock()
cached_creds_str = locked_file.file_handle().read()
if cached_creds_str:
# Cached credentials metadata dict.
cached_creds = json.loads(cached_creds_str)
if (creds['svc_acct_name'] ==
cached_creds['svc_acct_name']):
if (creds['scopes'] in
(None, cached_creds['scopes'])):
scopes = cached_creds['scopes']
finally:
locked_file.unlock_and_close()
return scopes
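    # The cache file holds a small JSON document, e.g. (illustrative values):
    #   {"scopes": ["https://www.googleapis.com/auth/cloud-platform"],
    #    "svc_acct_name": "default"}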
def _WriteCacheFile(self, cache_filename, scopes):
"""Writes the credential metadata to the cache file.
This does not save the credentials themselves (CredentialStore class
optionally handles that after this class is initialized).
Args:
cache_filename: Cache filename to check.
scopes: Scopes for the desired credentials.
"""
with cache_file_lock:
if _EnsureFileExists(cache_filename):
locked_file = oauth2client.locked_file.LockedFile(
cache_filename, 'r+b', 'rb')
try:
locked_file.open_and_lock()
if locked_file.is_locked():
creds = { # Credentials metadata dict.
'scopes': sorted(list(scopes)),
'svc_acct_name': self.__service_account_name}
                        locked_file.file_handle().write(
                            json.dumps(creds))
# If it's not locked, the locking process will
# write the same data to the file, so just
# continue.
finally:
locked_file.unlock_and_close()
def _ScopesFromMetadataServer(self, scopes):
if not util.DetectGce():
raise exceptions.ResourceUnavailableError(
'GCE credentials requested outside a GCE instance')
if not self.GetServiceAccount(self.__service_account_name):
raise exceptions.ResourceUnavailableError(
'GCE credentials requested but service account '
'%s does not exist.' % self.__service_account_name)
if scopes:
scope_ls = util.NormalizeScopes(scopes)
instance_scopes = self.GetInstanceScopes()
            if not scope_ls.issubset(instance_scopes):
raise exceptions.CredentialsError(
'Instance did not have access to scopes %s' % (
sorted(list(scope_ls - instance_scopes)),))
else:
scopes = self.GetInstanceScopes()
return scopes
def GetServiceAccount(self, account):
relative_url = 'instance/service-accounts'
response = _GceMetadataRequest(relative_url)
response_lines = [line.rstrip('/\n\r')
for line in response.readlines()]
return account in response_lines
def GetInstanceScopes(self):
relative_url = 'instance/service-accounts/{0}/scopes'.format(
self.__service_account_name)
response = _GceMetadataRequest(relative_url)
return util.NormalizeScopes(scope.strip()
for scope in response.readlines())
def _refresh(self, do_request):
"""Refresh self.access_token.
This function replaces AppAssertionCredentials._refresh, which
does not use the credential store and is therefore poorly
suited for multi-threaded scenarios.
Args:
do_request: A function matching httplib2.Http.request's signature.
"""
# pylint: disable=protected-access
oauth2client.client.OAuth2Credentials._refresh(self, do_request)
# pylint: enable=protected-access
def _do_refresh_request(self, unused_http_request):
"""Refresh self.access_token by querying the metadata server.
If self.store is initialized, store acquired credentials there.
"""
relative_url = 'instance/service-accounts/{0}/token'.format(
self.__service_account_name)
try:
response = _GceMetadataRequest(relative_url)
except exceptions.CommunicationError:
self.invalid = True
if self.store:
self.store.locked_put(self)
raise
content = response.read()
try:
credential_info = json.loads(content)
except ValueError:
raise exceptions.CredentialsError(
'Could not parse response as JSON: %s' % content)
self.access_token = credential_info['access_token']
if 'expires_in' in credential_info:
expires_in = int(credential_info['expires_in'])
self.token_expiry = (
datetime.timedelta(seconds=expires_in) +
datetime.datetime.utcnow())
else:
self.token_expiry = None
self.invalid = False
if self.store:
self.store.locked_put(self)
@classmethod
def from_json(cls, json_data):
data = json.loads(json_data)
kwargs = {}
if 'cache_filename' in data.get('kwargs', []):
kwargs['cache_filename'] = data['kwargs']['cache_filename']
credentials = GceAssertionCredentials(scopes=[data['scope']],
**kwargs)
if 'access_token' in data:
credentials.access_token = data['access_token']
if 'token_expiry' in data:
credentials.token_expiry = datetime.datetime.strptime(
data['token_expiry'], oauth2client.client.EXPIRY_FORMAT)
if 'invalid' in data:
credentials.invalid = data['invalid']
return credentials
@property
def serialization_data(self):
raise NotImplementedError(
'Cannot serialize credentials for GCE service accounts.')
# TODO(craigcitro): Currently, we can't even *load*
# `oauth2client.appengine` without being on appengine, because of how
# it handles imports. Fix that by splitting that module into
# GAE-specific and GAE-independent bits, and guarding imports.
class GaeAssertionCredentials(oauth2client.client.AssertionCredentials):
"""Assertion credentials for Google App Engine apps."""
def __init__(self, scopes, **kwds):
        if not util.DetectGae():
            raise exceptions.ResourceUnavailableError(
                'GAE credentials requested outside a GAE instance')
self._scopes = list(util.NormalizeScopes(scopes))
super(GaeAssertionCredentials, self).__init__(None, **kwds)
@classmethod
def Get(cls, *args, **kwds):
try:
return cls(*args, **kwds)
except exceptions.Error:
return None
@classmethod
def from_json(cls, json_data):
data = json.loads(json_data)
return GaeAssertionCredentials(data['_scopes'])
def _refresh(self, _):
"""Refresh self.access_token.
Args:
_: (ignored) A function matching httplib2.Http.request's signature.
"""
# pylint: disable=import-error
from google.appengine.api import app_identity
try:
token, _ = app_identity.get_access_token(self._scopes)
except app_identity.Error as e:
raise exceptions.CredentialsError(str(e))
self.access_token = token
def _GetRunFlowFlags(args=None):
# There's one rare situation where gsutil will not have argparse
# available, but doesn't need anything depending on argparse anyway,
# since they're bringing their own credentials. So we just allow this
# to fail with an ImportError in those cases.
#
# TODO(craigcitro): Move this import back to the top when we drop
# python 2.6 support (eg when gsutil does).
import argparse
parser = argparse.ArgumentParser(parents=[tools.argparser])
# Get command line argparse flags.
flags, _ = parser.parse_known_args(args=args)
# Allow `gflags` and `argparse` to be used side-by-side.
if hasattr(FLAGS, 'auth_host_name'):
flags.auth_host_name = FLAGS.auth_host_name
if hasattr(FLAGS, 'auth_host_port'):
flags.auth_host_port = FLAGS.auth_host_port
if hasattr(FLAGS, 'auth_local_webserver'):
flags.noauth_local_webserver = (not FLAGS.auth_local_webserver)
return flags
# TODO(craigcitro): Switch this from taking a path to taking a stream.
def CredentialsFromFile(path, client_info, oauth2client_args=None):
"""Read credentials from a file."""
credential_store = oauth2client.multistore_file.get_credential_storage(
path,
client_info['client_id'],
client_info['user_agent'],
client_info['scope'])
if hasattr(FLAGS, 'auth_local_webserver'):
FLAGS.auth_local_webserver = False
credentials = credential_store.get()
if credentials is None or credentials.invalid:
print('Generating new OAuth credentials ...')
for _ in range(20):
# If authorization fails, we want to retry, rather than let this
# cascade up and get caught elsewhere. If users want out of the
# retry loop, they can ^C.
try:
flow = oauth2client.client.OAuth2WebServerFlow(**client_info)
flags = _GetRunFlowFlags(args=oauth2client_args)
credentials = tools.run_flow(flow, credential_store, flags)
break
except (oauth2client.client.FlowExchangeError, SystemExit) as e:
# Here SystemExit is "no credential at all", and the
# FlowExchangeError is "invalid" -- usually because
# you reused a token.
print('Invalid authorization: %s' % (e,))
except httplib2.HttpLib2Error as e:
print('Communication error: %s' % (e,))
raise exceptions.CredentialsError(
'Communication error creating credentials: %s' % e)
return credentials
# TODO(craigcitro): Push this into oauth2client.
def GetUserinfo(credentials, http=None): # pylint: disable=invalid-name
"""Get the userinfo associated with the given credentials.
This is dependent on the token having either the userinfo.email or
userinfo.profile scope for the given token.
Args:
credentials: (oauth2client.client.Credentials) incoming credentials
http: (httplib2.Http, optional) http instance to use
Returns:
The email address for this token, or None if the required scopes
aren't available.
"""
http = http or httplib2.Http()
url_root = 'https://www.googleapis.com/oauth2/v2/tokeninfo'
query_args = {'access_token': credentials.access_token}
url = '?'.join((url_root, urllib.parse.urlencode(query_args)))
# We ignore communication woes here (i.e. SSL errors, socket
# timeout), as handling these should be done in a common location.
response, content = http.request(url)
if response.status == http_client.BAD_REQUEST:
credentials.refresh(http)
response, content = http.request(url)
return json.loads(content or '{}') # Save ourselves from an empty reply.
@_RegisterCredentialsMethod
def _GetServiceAccountCredentials(
client_info, service_account_name=None, service_account_keyfile=None,
service_account_json_keyfile=None, **unused_kwds):
if ((service_account_name and not service_account_keyfile) or
(service_account_keyfile and not service_account_name)):
raise exceptions.CredentialsError(
'Service account name or keyfile provided without the other')
scopes = client_info['scope'].split()
user_agent = client_info['user_agent']
if service_account_json_keyfile:
with open(service_account_json_keyfile) as keyfile:
service_account_info = json.load(keyfile)
account_type = service_account_info.get('type')
if account_type != oauth2client.client.SERVICE_ACCOUNT:
raise exceptions.CredentialsError(
'Invalid service account credentials: %s' % (
service_account_json_keyfile,))
# pylint: disable=protected-access
credentials = oauth2client.service_account._ServiceAccountCredentials(
service_account_id=service_account_info['client_id'],
service_account_email=service_account_info['client_email'],
private_key_id=service_account_info['private_key_id'],
private_key_pkcs8_text=service_account_info['private_key'],
scopes=scopes, user_agent=user_agent)
# pylint: enable=protected-access
return credentials
if service_account_name is not None:
# pylint: disable=redefined-variable-type
credentials = ServiceAccountCredentialsFromFile(
service_account_name, service_account_keyfile, scopes,
service_account_kwargs={'user_agent': user_agent})
if credentials is not None:
return credentials
@_RegisterCredentialsMethod
def _GetGaeServiceAccount(client_info, **unused_kwds):
scopes = client_info['scope'].split(' ')
return GaeAssertionCredentials.Get(scopes=scopes)
@_RegisterCredentialsMethod
def _GetGceServiceAccount(client_info, **unused_kwds):
scopes = client_info['scope'].split(' ')
return GceAssertionCredentials.Get(scopes=scopes)
@_RegisterCredentialsMethod
def _GetApplicationDefaultCredentials(
client_info, skip_application_default_credentials=False,
**unused_kwds):
scopes = client_info['scope'].split()
if skip_application_default_credentials:
return None
gc = oauth2client.client.GoogleCredentials
with cache_file_lock:
try:
# pylint: disable=protected-access
# We've already done our own check for GAE/GCE
# credentials, we don't want to pay for checking again.
credentials = gc._implicit_credentials_from_files()
except oauth2client.client.ApplicationDefaultCredentialsError:
return None
# If we got back a non-service account credential, we need to use
# a heuristic to decide whether or not the application default
# credential will work for us. We assume that if we're requesting
# cloud-platform, our scopes are a subset of cloud scopes, and the
# ADC will work.
cp = 'https://www.googleapis.com/auth/cloud-platform'
if not isinstance(credentials, gc) or cp in scopes:
return credentials
return None
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from urllib import urlencode
class HTTPError(Exception):
    default_status = 500
    def __init__(self, status=None, body=None, exception=None,
                 traceback=None, **more_headers):
        # Keep the status, body, and extra headers so handlers (e.g. the
        # router's 400/405 paths) can inspect them.
        self.status = status or self.default_status
        self.body = body
        self.exception = exception
        self.traceback = traceback
        self.headers = more_headers
###############################################################################
# Routing
###############################################################################
class RouteError(Exception):
""" This is a base class for all routing related exceptions """
class RouteReset(RouteError):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError):
pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))', lambda m: m.group(0) if
len(m.group(1)) % 2 else m.group(1) + '(?:', p)
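# For example:
#   _re_flatten(r'(?P<name>[a-z]+)/(\d+)')  ->  r'(?:[a-z]+)/(?:\d+)'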
class Router(object):
default_pattern = '[^/]+'
default_filter = 're'
# The current CPython regexp implementation does not allow more
# than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
# If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf: (_re_flatten(conf or self.default_pattern), None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)
}
rule_syntax = re.compile('(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>)')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if prefix:
yield prefix, None, None
name, filtr, conf = g
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix + rule[offset:], None, None
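    # For the rule '/user/<id:int>' the generator above yields, in order:
    #   ('/user/', None, None), ('id', 'int', None), ('', None, None)
    # The trailing empty literal token is harmless: add() skips falsy keys.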
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default':
mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter:
filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name:
self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error as e:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, e))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x + maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
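    # Hedged note on _compile: each chunk of up to 99 rules is joined into one
    # alternation such as
    #     (^a/(?:\d+)$)|(^b/.+?$)|(^c$)
    # so a single re.match per chunk suffices, and match.lastindex (1-based)
    # selects the winning (target, getargs) pair from `rules`.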
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder:
raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons):
query['anon%d' % i] = value
url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
return url if not query else url + '?' + urlencode(query)
except KeyError as e:
raise RouteBuildError('Missing URL argument: %r' % e.args[0])
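    # Hedged usage sketch for build(): positional arguments fill anonymous
    # wildcards, keyword arguments fill named ones, and leftovers become the
    # query string.
    #
    #   router.add('/hello/<name>', 'GET', view, name='hello')
    #   router.build('hello', name='world', lang='en')
    #   -> '/hello/world?lang=en'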
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
                allowed.add(method)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
if __name__ == "__main__":
router = Router()
def view(request):
pass
# router.add('/object/detail/', 'GET', view)
# router.add('/object/<action>/<item>', 'GET', view)
router.add('/<its>/<:re:.+>/<test>/<name:re:[a-z]+>/', 'GET', view)
env = {'PATH_INFO': '/object/get/line/abc/', 'REQUEST_METHOD': 'GET'}
target, args = router.match(env)
    print(target, args)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_put_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
encryption_scope_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"encryptionScopeName": _SERIALIZER.url("encryption_scope_name", encryption_scope_name, 'str', max_length=63, min_length=3),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_patch_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
encryption_scope_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"encryptionScopeName": _SERIALIZER.url("encryption_scope_name", encryption_scope_name, 'str', max_length=63, min_length=3),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
encryption_scope_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
"encryptionScopeName": _SERIALIZER.url("encryption_scope_name", encryption_scope_name, 'str', max_length=63, min_length=3),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_request(
resource_group_name: str,
account_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes')
path_format_arguments = {
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
"accountName": _SERIALIZER.url("account_name", account_name, 'str', max_length=24, min_length=3),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', min_length=1),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
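# Note (added): each build_*_request helper above only assembles an
# HttpRequest -- URL template expansion, the api-version query parameter, and
# the Accept/Content-Type headers. Sending the request, error mapping, and
# deserialization happen in the operations class below.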
class EncryptionScopesOperations(object):
"""EncryptionScopesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def put(
self,
resource_group_name: str,
account_name: str,
encryption_scope_name: str,
encryption_scope: "_models.EncryptionScope",
**kwargs: Any
) -> "_models.EncryptionScope":
"""Synchronously creates or updates an encryption scope under the specified storage account. If an
encryption scope is already created and a subsequent request is issued with different
properties, the encryption scope properties will be updated per the specified request.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param encryption_scope_name: The name of the encryption scope within the specified storage
account. Encryption scope names must be between 3 and 63 characters in length and use numbers,
lower-case letters and dash (-) only. Every dash (-) character must be immediately preceded and
followed by a letter or number.
:type encryption_scope_name: str
:param encryption_scope: Encryption scope properties to be used for the create or update.
:type encryption_scope: ~azure.mgmt.storage.v2021_02_01.models.EncryptionScope
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EncryptionScope, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.EncryptionScope
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EncryptionScope"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(encryption_scope, 'EncryptionScope')
request = build_put_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
encryption_scope_name=encryption_scope_name,
content_type=content_type,
json=_json,
template_url=self.put.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('EncryptionScope', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('EncryptionScope', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
put.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}'} # type: ignore
@distributed_trace
def patch(
self,
resource_group_name: str,
account_name: str,
encryption_scope_name: str,
encryption_scope: "_models.EncryptionScope",
**kwargs: Any
) -> "_models.EncryptionScope":
"""Update encryption scope properties as specified in the request body. Update fails if the
specified encryption scope does not already exist.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param encryption_scope_name: The name of the encryption scope within the specified storage
account. Encryption scope names must be between 3 and 63 characters in length and use numbers,
lower-case letters and dash (-) only. Every dash (-) character must be immediately preceded and
followed by a letter or number.
:type encryption_scope_name: str
:param encryption_scope: Encryption scope properties to be used for the update.
:type encryption_scope: ~azure.mgmt.storage.v2021_02_01.models.EncryptionScope
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EncryptionScope, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.EncryptionScope
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EncryptionScope"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(encryption_scope, 'EncryptionScope')
request = build_patch_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
encryption_scope_name=encryption_scope_name,
content_type=content_type,
json=_json,
template_url=self.patch.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('EncryptionScope', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
patch.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}'} # type: ignore
@distributed_trace
def get(
self,
resource_group_name: str,
account_name: str,
encryption_scope_name: str,
**kwargs: Any
) -> "_models.EncryptionScope":
"""Returns the properties for the specified encryption scope.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param encryption_scope_name: The name of the encryption scope within the specified storage
account. Encryption scope names must be between 3 and 63 characters in length and use numbers,
lower-case letters and dash (-) only. Every dash (-) character must be immediately preceded and
followed by a letter or number.
:type encryption_scope_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EncryptionScope, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_02_01.models.EncryptionScope
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EncryptionScope"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
encryption_scope_name=encryption_scope_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('EncryptionScope', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes/{encryptionScopeName}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> Iterable["_models.EncryptionScopeListResult"]:
"""Lists all the encryption scopes available under the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EncryptionScopeListResult or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_02_01.models.EncryptionScopeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EncryptionScopeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("EncryptionScopeListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/encryptionScopes'} # type: ignore
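# Hedged usage sketch (assumes the standard azure-mgmt-storage client and
# azure-identity credentials; the group/account/scope names are placeholders):
#
#   from azure.identity import DefaultAzureCredential
#   from azure.mgmt.storage.v2021_02_01 import StorageManagementClient
#
#   client = StorageManagementClient(DefaultAzureCredential(), "<subscription-id>")
#   scope = client.encryption_scopes.get("mygroup", "mystorageacct", "myscope")
#   for s in client.encryption_scopes.list("mygroup", "mystorageacct"):
#       print(s.name)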
# -*- coding: utf-8 -*-
#$HeadURL$
#$LastChangedDate$
#$LastChangedRevision$
# :Author: a Pygments author/contributor; Felix Wiemann; Guenter Milde
# :Date: $Date$
# :Copyright: This module has been placed in the public domain.
#
# This is a merge of `Using Pygments in ReST documents`_ from the pygments_
# documentation, and a `proof of concept`_ by Felix Wiemann.
#
# ========== ===========================================================
# 2007-06-01 Removed redundancy from class values.
# 2007-06-04 Merge of successive tokens of same type
# (code taken from pygments.formatters.others).
# 2007-06-05 Separate docutils formatter script
# Use pygments' CSS class names (like the html formatter)
# allowing the use of pygments-produced style sheets.
# 2007-06-07 Merge in the formatting of the parsed tokens
# (misnamed as docutils_formatter) as class DocutilsInterface
# 2007-06-08 Failsafe implementation (fallback to a standard literal block
#            if pygments is not found)
# ========== ===========================================================
#
# ::
"""Define and register a code-block directive using pygments"""
# Requirements
# ------------
# ::
import codecs
from docutils import nodes
from docutils.parsers.rst import directives
try:
import pygments
from pygments.lexers import get_lexer_by_name
from pygments.formatters.html import _get_ttype_class
except ImportError:
pass
from log import log
# Customisation
# -------------
#
# Do not insert inline nodes for the following tokens.
# (You could add e.g. Token.Punctuation like ``['', 'p']``.) ::
unstyled_tokens = ['']
# DocutilsInterface
# -----------------
#
# This interface class combines code from
# pygments.formatters.html and pygments.formatters.others.
#
# It does not require anything of docutils and could also become a part of
# pygments::
class DocutilsInterface(object):
"""Parse `code` string and yield "classified" tokens.
Arguments
code -- string of source code to parse
language -- formal language the code is written in.
Merge subsequent tokens of the same token-type.
Yields the tokens as ``(ttype_class, value)`` tuples,
where ttype_class is taken from pygments.token.STANDARD_TYPES and
corresponds to the class argument used in pygments html output.
"""
def __init__(self, code, language, custom_args={}):
self.code = code
self.language = language
self.custom_args = custom_args
def lex(self):
# Get lexer for language (use text as fallback)
try:
            if self.language and unicode(self.language).lower() != 'none':
lexer = get_lexer_by_name(self.language.lower(),
**self.custom_args
)
else:
lexer = get_lexer_by_name('text', **self.custom_args)
        except ValueError:
            log.info("no pygments lexer for %s, using 'text'"
                     % self.language)
            # what happens if pygments isn't present?
            lexer = get_lexer_by_name('text')
return pygments.lex(self.code, lexer)
def join(self, tokens):
"""join subsequent tokens of same token-type
"""
tokens = iter(tokens)
        (lasttype, lastval) = next(tokens)
for ttype, value in tokens:
if ttype is lasttype:
lastval += value
else:
yield(lasttype, lastval)
(lasttype, lastval) = (ttype, value)
yield(lasttype, lastval)
def __iter__(self):
"""parse code string and yield "clasified" tokens
"""
try:
tokens = self.lex()
        except (IOError, NameError):  # NameError: pygments failed to import
log.info("Pygments lexer not found, using fallback")
# TODO: write message to INFO
yield ('', self.code)
return
for ttype, value in self.join(tokens):
yield (_get_ttype_class(ttype), value)
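# Hedged usage sketch of DocutilsInterface (requires pygments):
#
#   for cls, value in DocutilsInterface('print 1\n', 'python'):
#       ...  # yields (css_class, text) pairs, e.g. ('k', 'print') for a
#            # keyword, ready to be wrapped in docutils inline nodes.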
# code_block_directive
# --------------------
# ::
def code_block_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
"""Parse and classify content of a code_block."""
if 'include' in options:
try:
if 'encoding' in options:
encoding = options['encoding']
else:
encoding = 'utf-8'
content = codecs.open(options['include'], 'r', encoding).read().rstrip()
except (IOError, UnicodeError): # no file or problem finding it or reading it
log.error('Error reading file: "%s" L %s' % (options['include'], lineno))
content = u''
line_offset = 0
if content:
# here we define the start-at and end-at options
# so that limit is included in extraction
# this is different than the start-after directive of docutils
# (docutils/parsers/rst/directives/misc.py L73+)
# which excludes the beginning
# the reason is we want to be able to define a start-at like
# def mymethod(self)
# and have such a definition included
after_text = options.get('start-at', None)
if after_text:
# skip content in include_text before *and NOT incl.* a matching text
after_index = content.find(after_text)
if after_index < 0:
raise state_machine.reporter.severe('Problem with "start-at" option of "%s" '
'code-block directive:\nText not found.' % options['start-at'])
                line_offset = len(content[:after_index].splitlines())
                content = content[after_index:]
after_text = options.get('start-after', None)
if after_text:
# skip content in include_text before *and incl.* a matching text
after_index = content.find(after_text)
if after_index < 0:
raise state_machine.reporter.severe('Problem with "start-after" option of "%s" '
'code-block directive:\nText not found.' % options['start-after'])
line_offset = len(content[:after_index + len(after_text)].splitlines())
content = content[after_index + len(after_text):]
# same changes here for the same reason
before_text = options.get('end-at', None)
if before_text:
# skip content in include_text after *and incl.* a matching text
before_index = content.find(before_text)
if before_index < 0:
raise state_machine.reporter.severe('Problem with "end-at" option of "%s" '
'code-block directive:\nText not found.' % options['end-at'])
content = content[:before_index + len(before_text)]
before_text = options.get('end-before', None)
if before_text:
# skip content in include_text after *and NOT incl.* a matching text
before_index = content.find(before_text)
if before_index < 0:
raise state_machine.reporter.severe('Problem with "end-before" option of "%s" '
'code-block directive:\nText not found.' % options['end-before'])
content = content[:before_index]
else:
content = u'\n'.join(content)
if 'tabsize' in options:
tabw = options['tabsize']
else:
tabw = int(options.get('tab-width', 8))
content = content.replace('\t',' '*tabw)
withln = "linenos" in options
if not "linenos_offset" in options:
line_offset = 0
language = arguments[0]
# create a literal block element and set class argument
code_block = nodes.literal_block(classes=["code", language])
if withln:
lineno = 1 + line_offset
total_lines = content.count('\n') + 1 + line_offset
lnwidth = len(str(total_lines))
fstr = "\n%%%dd " % lnwidth
code_block += nodes.inline(fstr[1:] % lineno, fstr[1:] % lineno, classes=['linenumber'])
# parse content with pygments and add to code_block element
for cls, value in DocutilsInterface(content, language, options):
if withln and "\n" in value:
# Split on the "\n"s
values = value.split("\n")
# The first piece, pass as-is
code_block += nodes.Text(values[0], values[0])
# On the second and later pieces, insert \n and linenos
linenos = range(lineno, lineno + len(values))
            for chunk, ln in list(zip(values, linenos))[1:]:
if ln <= total_lines:
code_block += nodes.inline(fstr % ln, fstr % ln, classes=['linenumber'])
code_block += nodes.Text(chunk, chunk)
lineno += len(values) - 1
elif cls in unstyled_tokens:
# insert as Text to decrease the verbosity of the output.
code_block += nodes.Text(value, value)
else:
code_block += nodes.inline(value, value, classes=["pygments-" + cls])
return [code_block]
# Custom argument validators
# --------------------------
# ::
#
# Move to separated module??
def string_list(argument):
"""
    Convert a space- or comma-separated list of values into a Python list
    of strings.
    (Directive option conversion function.)
    Based on positive_int_list of docutils.parsers.rst.directives.
"""
if ',' in argument:
entries = argument.split(',')
else:
entries = argument.split()
return entries
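# Hedged examples: string_list('a b c') -> ['a', 'b', 'c'];
# string_list('a,b') -> ['a', 'b'] (note that comma-separated entries are not
# stripped of surrounding whitespace).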
def string_bool(argument):
"""
    Convert 'True'/'true' or 'False'/'false' into a Python boolean value.
"""
if argument is None:
msg = 'argument required but none supplied; choose from "True" or "False"'
raise ValueError(msg)
elif argument.lower() == 'true':
return True
elif argument.lower() == 'false':
return False
else:
raise ValueError('"%s" unknown; choose from "True" or "False"'
% argument)
def csharp_unicodelevel(argument):
return directives.choice(argument, ('none', 'basic', 'full'))
def lhs_litstyle(argument):
return directives.choice(argument, ('bird', 'latex'))
def raw_compress(argument):
return directives.choice(argument, ('gz', 'bz2'))
# Register Directive
# ------------------
# ::
code_block_directive.arguments = (1, 0, 1)
code_block_directive.content = 1
code_block_directive.options = {'include': directives.unchanged_required,
'start-at': directives.unchanged_required,
'end-at': directives.unchanged_required,
'start-after': directives.unchanged_required,
'end-before': directives.unchanged_required,
'linenos': directives.unchanged,
'linenos_offset': directives.unchanged,
'tab-width': directives.unchanged,
# generic
'stripnl' : string_bool,
'stripall': string_bool,
'ensurenl': string_bool,
'tabsize' : directives.positive_int,
'encoding': directives.encoding,
# Lua
                                'func_name_highlighting': string_bool,
'disabled_modules': string_list,
# Python Console
'python3': string_bool,
# Delphi
'turbopascal':string_bool,
'delphi' :string_bool,
'freepascal': string_bool,
'units': string_list,
# Modula2
'pim' : string_bool,
'iso' : string_bool,
'objm2' : string_bool,
'gm2ext': string_bool,
# CSharp
'unicodelevel' : csharp_unicodelevel,
# Literate haskell
'litstyle' : lhs_litstyle,
# Raw
'compress': raw_compress,
# Rst
'handlecodeblocks': string_bool,
# Php
'startinline': string_bool,
'funcnamehighlighting': string_bool,
'disabledmodules': string_list,
}
# .. _docutils: http://docutils.sf.net/
# .. _pygments: http://pygments.org/
# .. _Using Pygments in ReST documents: http://pygments.org/docs/rstdirective/
# .. _proof of concept:
# http://article.gmane.org/gmane.text.docutils.user/3689
#
# Test output
# -----------
#
# If called from the command line, call the docutils publisher to render the
# input::
if __name__ == '__main__':
from docutils.core import publish_cmdline, default_description
from docutils.parsers.rst import directives
directives.register_directive('code-block', code_block_directive)
description = "code-block directive test output" + default_description
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except Exception:
pass
publish_cmdline(writer_name='html', description=description)
"""Provide access to Python's configuration information. The specific
configuration variables available depend heavily on the platform and
configuration. The values may be retrieved using
get_config_var(name), and the list of variables is available via
get_config_vars().keys(). Additional convenience functions are also
available.
Written by: Fred L. Drake, Jr.
Email: <fdrake@acm.org>
"""
import _imp
import os
import re
import sys
from .errors import DistutilsPlatformError
# These are needed in a couple of spots, so just compute them once.
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
BASE_PREFIX = os.path.normpath(sys.base_prefix)
BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix)
# Path to the base directory of the project. On Windows the binary may
# live in project/PCbuild/win32 or project/PCbuild/amd64.
# set for cross builds
if "_PYTHON_PROJECT_BASE" in os.environ:
project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"])
else:
project_base = os.path.dirname(os.path.abspath(sys.executable))
if (os.name == 'nt' and
project_base.lower().endswith(('\\pcbuild\\win32', '\\pcbuild\\amd64'))):
project_base = os.path.dirname(os.path.dirname(project_base))
# python_build: (Boolean) if true, we're either building Python or
# building an extension with an un-installed Python, so we use
# different (hard-wired) directories.
# Setup.local is available for Makefile builds including VPATH builds,
# Setup.dist is available on Windows
def _is_python_source_dir(d):
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(d, "Modules", fn)):
return True
return False
_sys_home = getattr(sys, '_home', None)
if (_sys_home and os.name == 'nt' and
_sys_home.lower().endswith(('\\pcbuild\\win32', '\\pcbuild\\amd64'))):
_sys_home = os.path.dirname(os.path.dirname(_sys_home))
def _python_build():
if _sys_home:
return _is_python_source_dir(_sys_home)
return _is_python_source_dir(project_base)
python_build = _python_build()
# Calculate the build qualifier flags if they are defined. Adding the flags
# to the include and lib directories only makes sense for an installation, not
# an in-source build.
build_flags = ''
try:
if not python_build:
build_flags = sys.abiflags
except AttributeError:
# It's not a configure-based build, so the sys module doesn't have
# this attribute, which is fine.
pass
def get_python_version():
"""Return a string containing the major and minor Python version,
leaving off the patchlevel. Sample return values could be '1.5'
or '2.2'.
"""
return '%d.%d' % sys.version_info[:2]
def get_python_inc(plat_specific=0, prefix=None):
"""Return the directory containing installed Python header files.
If 'plat_specific' is false (the default), this is the path to the
non-platform-specific header files, i.e. Python.h and so on;
otherwise, this is the path to platform-specific header files
(namely pyconfig.h).
If 'prefix' is supplied, use it instead of sys.base_prefix or
sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
if os.name == "posix":
if python_build:
# Assume the executable is in the build directory. The
# pyconfig.h file should be in the same directory. Since
# the build directory may not be the source directory, we
# must use "srcdir" from the makefile to find the "Include"
# directory.
if plat_specific:
return _sys_home or project_base
else:
incdir = os.path.join(get_config_var('srcdir'), 'Include')
return os.path.normpath(incdir)
python_dir = 'python' + get_python_version() + build_flags
return os.path.join(prefix, "include", python_dir)
elif os.name == "nt":
return os.path.join(prefix, "include")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its C header files "
"on platform '%s'" % os.name)
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
"""Return the directory containing the Python library (standard or
site additions).
If 'plat_specific' is true, return the directory containing
platform-specific modules, i.e. any module from a non-pure-Python
module distribution; otherwise, return the platform-shared library
directory. If 'standard_lib' is true, return the directory
containing standard Python library modules; otherwise, return the
directory for site-specific modules.
If 'prefix' is supplied, use it instead of sys.base_prefix or
sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
if standard_lib:
prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
else:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
libpython = os.path.join(prefix,
"lib", "python" + get_python_version())
if standard_lib:
return libpython
else:
return os.path.join(libpython, "site-packages")
elif os.name == "nt":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
return os.path.join(prefix, "Lib", "site-packages")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its library "
"on platform '%s'" % os.name)
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
Mainly needed on Unix, so we can plug in the information that
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
if sys.platform == "darwin":
# Perform first-time customization of compiler-related
# config vars on OS X now that we know we need a compiler.
# This is primarily to support Pythons from binary
# installers. The kind and paths to build tools on
# the user system may vary significantly from the system
# that Python itself was built on. Also the user OS
# version and build tools may not support the same set
# of CPU architectures for universal builds.
global _config_vars
# Use get_config_var() to ensure _config_vars is initialized.
if not get_config_var('CUSTOMIZED_OSX_COMPILER'):
import _osx_support
_osx_support.customize_compiler(_config_vars)
_config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
(cc, cxx, opt, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
get_config_vars('CC', 'CXX', 'OPT', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')
if 'CC' in os.environ:
newcc = os.environ['CC']
if (sys.platform == 'darwin'
and 'LDSHARED' not in os.environ
and ldshared.startswith(cc)):
# On OS X, if CC is overridden, use that as the default
# command for LDSHARED as well
ldshared = newcc + ldshared[len(cc):]
cc = newcc
if 'CXX' in os.environ:
cxx = os.environ['CXX']
if 'LDSHARED' in os.environ:
ldshared = os.environ['LDSHARED']
if 'CPP' in os.environ:
cpp = os.environ['CPP']
else:
cpp = cc + " -E" # not always
if 'LDFLAGS' in os.environ:
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if 'CFLAGS' in os.environ:
cflags = opt + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if 'CPPFLAGS' in os.environ:
cpp = cpp + ' ' + os.environ['CPPFLAGS']
cflags = cflags + ' ' + os.environ['CPPFLAGS']
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
if 'AR' in os.environ:
ar = os.environ['AR']
if 'ARFLAGS' in os.environ:
archiver = ar + ' ' + os.environ['ARFLAGS']
else:
archiver = ar + ' ' + ar_flags
cc_cmd = cc + ' ' + cflags
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc,
archiver=archiver)
compiler.shared_lib_extension = shlib_suffix
def get_config_h_filename():
"""Return full pathname of installed pyconfig.h file."""
if python_build:
if os.name == "nt":
inc_dir = os.path.join(_sys_home or project_base, "PC")
else:
inc_dir = _sys_home or project_base
else:
inc_dir = get_python_inc(plat_specific=1)
return os.path.join(inc_dir, 'pyconfig.h')
def get_makefile_filename():
"""Return full pathname of installed Makefile from the Python build."""
if python_build:
return os.path.join(_sys_home or project_base, "Makefile")
lib_dir = get_python_lib(plat_specific=0, standard_lib=1)
config_file = 'config-{}{}'.format(get_python_version(), build_flags)
if hasattr(sys.implementation, '_multiarch'):
config_file += '-%s' % sys.implementation._multiarch
return os.path.join(lib_dir, config_file, 'Makefile')
def parse_config_h(fp, g=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if g is None:
g = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
#
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try: v = int(v)
except ValueError: pass
g[n] = v
else:
m = undef_rx.match(line)
if m:
g[m.group(1)] = 0
return g
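# Hedged sketch: a pyconfig.h containing
#     #define HAVE_FOO 1
#     /* #undef HAVE_BAR */
# parses to {'HAVE_FOO': 1, 'HAVE_BAR': 0}.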
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
def parse_makefile(fn, g=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
from distutils.text_file import TextFile
fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape")
if g is None:
g = {}
done = {}
notdone = {}
while True:
line = fp.readline()
if line is None: # eof
break
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
# do variable interpolation here
while notdone:
for name in list(notdone):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if name.startswith('PY_') and name[3:] in renamed_variables:
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
del notdone[name]
if name.startswith('PY_') \
and name[3:] in renamed_variables:
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference; just drop it since we can't deal
del notdone[name]
fp.close()
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
g.update(done)
return g
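# Hedged sketch of parse_makefile() interpolation: a Makefile containing
#     VERSION=3
#     LIBDIR=/usr/lib/python$(VERSION)
# parses to {'VERSION': 3, 'LIBDIR': '/usr/lib/python3'} -- numeric values are
# converted to int, and references are resolved from earlier definitions or,
# as in make, from the environment.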
def expand_makefile_vars(s, vars):
"""Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
'string' according to 'vars' (a dictionary mapping variable names to
values). Variables not present in 'vars' are silently expanded to the
empty string. The variable values in 'vars' should not contain further
variable expansions; if 'vars' is the output of 'parse_makefile()',
you're fine. Returns a variable-expanded version of 's'.
"""
# This algorithm does multiple expansion, so if vars['foo'] contains
# "${bar}", it will expand ${foo} to ${bar}, and then expand
# ${bar}... and so forth. This is fine as long as 'vars' comes from
# 'parse_makefile()', which takes care of such expansions eagerly,
# according to make's variable expansion semantics.
while True:
m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
if m:
(beg, end) = m.span()
            s = s[0:beg] + vars.get(m.group(1), '') + s[end:]
else:
break
return s
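# e.g. (hedged)
#   expand_makefile_vars('$(CC) -c ${SRC}', {'CC': 'gcc', 'SRC': 'a.c'})
#   -> 'gcc -c a.c'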
_config_vars = None
def _init_posix():
"""Initialize the module as appropriate for POSIX systems."""
# _sysconfigdata is generated at build time, see the sysconfig module
name = os.environ.get('_PYTHON_SYSCONFIGDATA_NAME',
os.environ.get('_CONDA_PYTHON_SYSCONFIGDATA_NAME',
'_sysconfigdata_{abi}_{platform}_{multiarch}'.format(
abi=sys.abiflags,
platform=sys.platform,
multiarch=getattr(sys.implementation, '_multiarch', ''))
)
)
_temp = __import__(name, globals(), locals(), ['build_time_vars'], 0)
build_time_vars = _temp.build_time_vars
global _config_vars
_config_vars = {}
_config_vars.update(build_time_vars)
def _init_nt():
"""Initialize the module as appropriate for NT"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
g['EXT_SUFFIX'] = _imp.extension_suffixes()[0]
g['EXE'] = ".exe"
g['VERSION'] = get_python_version().replace(".", "")
g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable))
global _config_vars
_config_vars = g
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform. Generally this includes
everything needed to build extensions and install both pure modules and
extensions. On Unix, this means every variable defined in Python's
installed Makefile; on Windows it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _config_vars
if _config_vars is None:
func = globals().get("_init_" + os.name)
if func:
func()
else:
_config_vars = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# Distutils.
_config_vars['prefix'] = PREFIX
_config_vars['exec_prefix'] = EXEC_PREFIX
# For backward compatibility, see issue19555
SO = _config_vars.get('EXT_SUFFIX')
if SO is not None:
_config_vars['SO'] = SO
# Always convert srcdir to an absolute path
srcdir = _config_vars.get('srcdir', project_base)
if os.name == 'posix':
if python_build:
# If srcdir is a relative path (typically '.' or '..')
# then it should be interpreted relative to the directory
# containing Makefile.
base = os.path.dirname(get_makefile_filename())
srcdir = os.path.join(base, srcdir)
else:
# srcdir is not meaningful since the installation is
# spread about the filesystem. We choose the
# directory containing the Makefile since we know it
# exists.
srcdir = os.path.dirname(get_makefile_filename())
_config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir))
# Convert srcdir into an absolute path if it appears necessary.
# Normally it is relative to the build directory. However, during
# testing, for example, we might be running a non-installed python
# from a different directory.
if python_build and os.name == "posix":
base = project_base
if (not os.path.isabs(_config_vars['srcdir']) and
base != os.getcwd()):
# srcdir is relative and we are not in the same directory
# as the executable. Assume executable is in the build
# directory and make srcdir absolute.
srcdir = os.path.join(base, _config_vars['srcdir'])
_config_vars['srcdir'] = os.path.normpath(srcdir)
# OS X platforms require special customization to handle
# multi-architecture, multi-os-version installers
if sys.platform == 'darwin':
import _osx_support
_osx_support.customize_config_vars(_config_vars)
if args:
vals = []
for name in args:
vals.append(_config_vars.get(name))
return vals
else:
return _config_vars
def get_config_var(name):
"""Return the value of a single variable using the dictionary
returned by 'get_config_vars()'. Equivalent to
get_config_vars().get(name)
"""
if name == 'SO':
import warnings
warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2)
return get_config_vars().get(name)
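# Hedged usage sketch (exact values depend on the interpreter and platform):
#
#   get_config_var('EXT_SUFFIX')              # e.g. '.cpython-38-x86_64-linux-gnu.so'
#   get_config_vars('prefix', 'exec_prefix')  # e.g. ['/usr', '/usr']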
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2014 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Implementation of TiledGrid3DAction actions
'''
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
import random
from cocos.euclid import *
from .basegrid_actions import *
from cocos.director import director
rr = random.randrange
__all__ = [ 'FadeOutTRTiles', # actions that don't modify the z coordinate
'FadeOutBLTiles',
'FadeOutUpTiles',
'FadeOutDownTiles',
'ShuffleTiles',
'TurnOffTiles',
'SplitRows',
'SplitCols',
'ShakyTiles3D', # actions that modify the z coordinate
'ShatteredTiles3D',
'WavesTiles3D',
'JumpTiles3D',
]
# Don't export this class
class Tile(object):
def __init__(self, position=(0,0), start_position=(0,0), delta=(0,0) ):
super(Tile,self).__init__()
self.position = position
self.start_position = start_position
self.delta = delta
def __repr__(self):
return "(start_pos: %s pos: %s delta:%s)" % (self.start_position, self.position, self.delta)
class ShakyTiles3D( TiledGrid3DAction ):
'''Simulates a shaky floor composed of tiles
Example::
scene.do( ShakyTiles3D( randrange=6, grid=(4,4), duration=10) )
'''
def init( self, randrange=6, *args, **kw ):
'''
:Parameters:
`randrange` : int
                Number that will be used in random.randrange( -randrange, randrange+1 ) to do the effect
'''
super(ShakyTiles3D,self).init(*args,**kw)
self.randrange = randrange
def update( self, t ):
for i in range(0, self.grid.x):
for j in range(0, self.grid.y):
coords = self.get_original_tile(i,j)
for k in range(0,len(coords),3):
x = rr(-self.randrange, self.randrange+1)
y = rr(-self.randrange, self.randrange+1)
z = rr(-self.randrange, self.randrange+1)
coords[k] += x
coords[k+1] += y
coords[k+2] += z
self.set_tile(i,j,coords)
class ShatteredTiles3D( TiledGrid3DAction ):
    '''ShatteredTiles3D shatters the tiles according to a random value.
    It is similar to `ShakyTiles3D`, but it shakes the tiles for just one frame
    and then keeps that state for the rest of the duration.
Example::
scene.do( ShatteredTiles3D( randrange=12 ) )
'''
def init( self, randrange=6, *args, **kw ):
'''
:Parameters:
`randrange` : int
                Number that will be used in random.randrange( -randrange, randrange+1 ) to do the effect
'''
super(ShatteredTiles3D,self).init(*args,**kw)
self.randrange = randrange
self._once = False
def update( self, t ):
if not self._once:
for i in range(0, self.grid.x):
for j in range(0, self.grid.y):
coords = self.get_original_tile(i,j)
for k in range(0,len(coords),3):
x = rr(-self.randrange, self.randrange+1)
y = rr(-self.randrange, self.randrange+1)
z = rr(-self.randrange, self.randrange+1)
coords[k] += x
coords[k+1] += y
coords[k+2] += z
self.set_tile(i,j,coords)
self._once = True
class ShuffleTiles( TiledGrid3DAction ):
'''ShuffleTiles moves the tiles randomly across the screen.
    To put them back, use Reverse( ShuffleTiles() ) with the same seed parameter.
Example::
scene.do( ShuffleTiles( grid=(4,4), seed=1, duration=10) )
'''
def init(self, seed=-1, *args, **kw):
'''
:Parameters:
`seed` : float
Seed for the random in the shuffle.
'''
super(ShuffleTiles,self).init(*args, **kw)
self.seed = seed
def start(self):
super(ShuffleTiles,self).start()
self.tiles = {}
self._once = False
if self.seed != -1:
random.seed( self.seed )
# random positions
self.nr_of_tiles = self.grid.x * self.grid.y
self.tiles_order = list(range(self.nr_of_tiles))
random.shuffle( self.tiles_order )
for i in range(self.grid.x):
for j in range(self.grid.y):
self.tiles[(i,j)] = Tile( position = Point2(i,j),
start_position = Point2(i,j),
delta= self._get_delta(i,j) )
def place_tile(self, i, j):
t = self.tiles[(i,j)]
coords = self.get_original_tile(i,j)
for k in range(0,len(coords),3):
coords[k] += int( t.position.x * self.target.grid.x_step )
coords[k+1] += int( t.position.y * self.target.grid.y_step )
self.set_tile(i,j,coords)
def update(self, t ):
for i in range(0, self.grid.x):
for j in range(0, self.grid.y):
self.tiles[(i,j)].position = self.tiles[(i,j)].delta * t
self.place_tile(i,j)
# private method
def _get_delta(self, x, y):
idx = x * self.grid.y + y
i,j = divmod( self.tiles_order[idx], self.grid.y )
return Point2(i,j)-Point2(x,y)
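    # Hedged trace: with a 2x2 grid and tiles_order == [3, 0, 2, 1], the tile
    # at (0, 0) has flat index 0; tiles_order[0] == 3 and divmod(3, 2) == (1, 1),
    # so its delta is the (1, 1) offset towards its shuffled destination.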
class FadeOutTRTiles( TiledGrid3DAction ):
'''Fades out each tile following a diagonal Top-Right path until all the tiles are faded out.
Example::
scene.do( FadeOutTRTiles( grid=(16,12), duration=10) )
'''
def update( self, t ):
# direction right - up
for i in range(self.grid.x):
for j in range(self.grid.y):
distance = self.test_func(i,j,t)
if distance == 0:
self.turn_off_tile(i,j)
elif distance < 1:
self.transform_tile(i,j,distance)
else:
self.turn_on_tile(i,j)
def turn_on_tile(self, x,y):
self.set_tile(x,y, self.get_original_tile(x,y) )
def transform_tile(self, x, y, t ):
coords = self.get_original_tile(x,y)
for c in range( len(coords) ):
# x
if c == 0*3 or c == 3*3:
coords[c] = coords[c] + (self.target.grid.x_step / 2.0) * (1-t)
elif c == 1*3 or c == 2*3:
coords[c] = coords[c] - (self.target.grid.x_step / 2.0) * (1-t)
# y
if c == 0*3+1 or c == 1*3+1:
coords[c] = coords[c] + (self.target.grid.y_step / 2.0) * (1-t)
elif c == 2*3+1 or c == 3*3+1:
coords[c] = coords[c] - (self.target.grid.y_step / 2.0) * (1-t)
self.set_tile(x,y,coords)
def turn_off_tile( self,x,y):
self.set_tile(x,y,[0,0,0,0,0,0,0,0,0,0,0,0] )
def test_func(self, i,j, t ):
x,y = self.grid * t
if x+y==0:
return 1
return pow( (i+j) / (x+y), 6 )
class FadeOutBLTiles( FadeOutTRTiles):
    '''Fades out each tile following a Bottom-Left path until all the tiles are faded out.
Example::
scene.do( FadeOutBLTiles( grid=(16,12), duration=5) )
'''
def test_func(self, i,j,t):
x,y = self.grid * (1-t)
if i+j==0:
return 1
return pow( (x+y) / (i+j), 6)
class FadeOutUpTiles( FadeOutTRTiles):
'''Fades out each tile following an upwards path until all the tiles are faded out.
Example::
scene.do( FadeOutUpTiles( grid=(16,12), duration=5) )
'''
def test_func(self, i,j, t):
x,y = self.grid * t
if y==0:
return 1
return pow( (j) / y, 6 )
def transform_tile(self, x, y, t ):
coords = self.get_original_tile(x,y)
for c in range( len(coords) ):
# y
if c == 0*3+1 or c == 1*3+1:
coords[c] = coords[c] + (self.target.grid.y_step / 2.0) * (1-t)
elif c == 2*3+1 or c == 3*3+1:
coords[c] = coords[c] - (self.target.grid.y_step / 2.0) * (1-t)
self.set_tile(x,y,coords)
class FadeOutDownTiles( FadeOutUpTiles):
    '''Fades out each tile following a downwards path until all the tiles are faded out.
Example::
scene.do( FadeOutDownTiles( grid=(16,12), duration=5) )
'''
def test_func(self, i,j, t):
x,y = self.grid * (1-t)
if j==0:
return 1
return pow( (y) / j, 6 )
class TurnOffTiles( TiledGrid3DAction ):
    '''TurnOffTiles turns off each tile in random order
Example::
scene.do( TurnOffTiles( grid=(16,12), seed=1, duration=10) )
'''
def init(self, seed=-1, *args, **kw):
super(TurnOffTiles,self).init( *args, **kw )
self.seed = seed
def start(self):
super(TurnOffTiles,self).start()
if self.seed != -1:
random.seed( self.seed )
self.nr_of_tiles = self.grid.x * self.grid.y
self.tiles_order = list(range(self.nr_of_tiles))
random.shuffle( self.tiles_order )
def update( self, t ):
l = int( t * self.nr_of_tiles )
for i in range( self.nr_of_tiles):
t = self.tiles_order[i]
if i < l:
self.turn_off_tile(t)
else:
self.turn_on_tile(t)
def get_tile_pos(self, idx):
return divmod(idx, self.grid.y)
def turn_on_tile(self, t):
x,y = self.get_tile_pos(t)
self.set_tile(x,y, self.get_original_tile(x,y) )
def turn_off_tile(self,t):
x,y = self.get_tile_pos(t)
self.set_tile(x,y,[0,0,0,0,0,0,0,0,0,0,0,0] )
class WavesTiles3D( TiledGrid3DAction ):
'''Simulates waves using the math.sin() function in the z-axis of each tile
Example::
scene.do( WavesTiles3D( waves=5, amplitude=120, grid=(16,16), duration=10) )
'''
def init( self, waves=4, amplitude=120, *args, **kw ):
'''
:Parameters:
`waves` : int
Number of waves (2 * pi) that the action will perform. Default is 4
`amplitude` : int
                Wave amplitude (height). Default is 120
'''
super(WavesTiles3D, self).init( *args, **kw )
#: Total number of waves to perform
self.waves=waves
#: amplitude rate. Default: 1.0
#: This value is modified by other actions like `AccelAmplitude`.
self.amplitude_rate = 1.0
self.amplitude=amplitude
def update( self, t ):
for i in range(0, self.grid.x):
for j in range(0, self.grid.y):
coords = self.get_original_tile(i,j)
x = coords[0]
y = coords[1]
z = (math.sin(t*math.pi*self.waves*2 + (y+x) * .01) * self.amplitude * self.amplitude_rate )
for k in range( 0,len(coords),3 ):
coords[k+2] += z
self.set_tile( i,j, coords )
class JumpTiles3D( TiledGrid3DAction ):
    '''Odd tiles will perform a jump in the z-axis using the sine function,
    while the even tiles will perform a jump using the sine+pi function
Example::
scene.do( JumpTiles3D( jumps=5, amplitude=40, grid=(16,16), duration=10) )
'''
def init( self, jumps=4, amplitude=20, *args, **kw ):
'''
:Parameters:
`jumps` : int
                Number of jumps (2 * pi) that the action will perform. Default is 4
`amplitude` : int
Wave amplitude (height). Default is 20
'''
super(JumpTiles3D, self).init( *args, **kw )
#: Total number of jumps to perform
self.jumps=jumps
#: amplitude rate. Default: 1.0
#: This value is modified by other actions like `AccelAmplitude`.
self.amplitude_rate = 1.0
self.amplitude=amplitude
def update( self, t ):
sinz = (math.sin(t*math.pi*self.jumps*2 + (0) * .01) * self.amplitude * self.amplitude_rate )
sinz2= (math.sin(math.pi+t*math.pi*self.jumps*2 + (0) * .01) * self.amplitude * self.amplitude_rate )
for i in range(0, self.grid.x):
for j in range(0, self.grid.y):
coords = self.get_original_tile(i,j)
for k in range( 0,len(coords),3 ):
if (i+j) % 2 == 0:
coords[k+2] += sinz
else:
coords[k+2] += sinz2
self.set_tile( i,j, coords )
class SplitRows( TiledGrid3DAction ):
    '''Splits the screen into a number of rows and moves
    these rows away from the screen.
    The odd rows are moved to the left, while the even rows are moved to
    the right.
Example::
scene.do( SplitRows( rows=3, duration=2) )
'''
def init( self, rows=9, grid=(-1,-1), *args, **kw ):
'''
:Parameters:
`rows` : int
Number of rows that will have the effect. Default: 9
'''
if grid != (-1,-1):
raise Exception("This action doesn't receives the grid argument")
grid = (1,rows)
self.rows = rows
super(SplitRows, self).init( grid, *args, **kw )
def update( self, t ):
x,y = director.get_window_size()
for j in range(0, self.grid.y):
coords = self.get_original_tile(0,j)
for c in range(0, len(coords), 3):
direction = 1
if j % 2 == 0:
direction = -1
coords[c] += direction * x * t
self.set_tile( 0,j, coords )
class SplitCols( TiledGrid3DAction ):
    '''Splits the screen into a number of columns and moves
    these columns away from the screen.
    The odd columns are moved upwards, while the even
    columns are moved downwards.
Example::
scene.do( SplitCols( cols=3, duration=2) )
'''
def init( self, cols=9, grid=(-1,-1), *args, **kw ):
'''
:Parameters:
`cols` : int
Number of columns that will have the effect. Default: 9
'''
if grid != (-1,-1):
raise Exception("This action doesn't receives the grid argument")
grid = (cols,1)
self.cols = cols
super(SplitCols, self).init( grid, *args, **kw )
def update( self, t ):
x,y = director.get_window_size()
for i in range(0, self.grid.x):
coords = self.get_original_tile(i,0)
for c in range(0, len(coords), 3):
direction = 1
if i % 2 == 0:
direction = -1
coords[c+1] += direction * y * t
self.set_tile( i,0, coords )
|
|
import pathlib
import pytest
from sectionproperties.pre.geometry import *
from sectionproperties.pre.library.primitive_sections import *
from sectionproperties.pre.library.steel_sections import *
from sectionproperties.pre.library.nastran_sections import *
from sectionproperties.analysis.section import Section
from sectionproperties.pre.pre import DEFAULT_MATERIAL, Material
from sectionproperties.pre.rhino import load_3dm, load_brep_encoding
from shapely.geometry import (
Polygon,
MultiPolygon,
LineString,
Point,
GeometryCollection,
box,
)
import shapely
from shapely import wkt
import json
big_sq = rectangular_section(d=300, b=250)
small_sq = rectangular_section(d=100, b=75)
small_hole = rectangular_section(d=40, b=30).align_center(small_sq)
i_sec = i_section(d=200, b=100, t_f=20, t_w=10, r=12, n_r=12)
small_sq_w_hole = small_sq - small_hole
composite = (
big_sq
+ small_sq_w_hole.align_to(big_sq, on="top", inner=True).align_to(big_sq, on="top")
+ i_sec.align_to(big_sq, on="bottom", inner=True).align_to(big_sq, on="right")
)
composite.create_mesh([200])
comp_sec = Section(composite)
comp_sec.calculate_geometric_properties()
comp_sec.calculate_plastic_properties()
# Subtractive modelling
nested_geom = (small_sq - small_hole) + small_hole
nested_geom.create_mesh([50])
nested_sec = Section(nested_geom)
# Overlapped modelling
overlay_geom = small_sq + small_hole
overlay_geom.create_mesh([50])
overlay_sec = Section(overlay_geom)
steel = Material("steel", 200e3, 0.3, 7.85e-6, 400, "grey")
def test_material_persistence():
    # Test ensures that the material attribute is carried
    # through all of the Geometry transformation methods, each of which
    # returns a new Geometry object.
    # The material assignment should persist through all of the
    # transformations.
big_sq.material = steel
new_geom = (
big_sq.align_to(small_sq, on="left", inner=False)
.align_center()
.rotate_section(23)
.mirror_section(axis="y")
.offset_perimeter(amount=1)
)
    assert new_geom.material == steel
def test_for_incidental_holes():
    # One hole in the geometry was explicitly created through subtraction.
    # Another hole in the geometry was created accidentally by sticking
    # an I-section up against a rectangle.
    # There should be two holes created after .compile_geometry()
assert len(composite.holes) == 2
assert len(nested_geom.holes) == 0
def test__sub__():
small_hole.material = steel
top_left = (
small_hole.align_to(big_sq, on="left")
.align_to(big_sq, on="top")
.shift_section(20, -20)
)
top_right = top_left.shift_section(x_offset=200)
compound = big_sq - top_left
compound = compound + top_left
compound = compound - top_right
compound = compound + top_right
assert len(compound.control_points) == 3
# Incomplete test to validate that the iterative __sub__ produces
# three distinct regions with proper material assignments
def test_geometry_from_points():
# Geometry.from_points() tests a shape with exactly one exterior
# and an arbitrary number of interiors being built from the legacy
# points, facets, holes, control_points interface of sectionproperties
exterior = [[-6, 10], [6, 10], [6, -10], [-6, -10]]
interior1 = [[-4, 8], [4, 8], [4, 4], [-4, 4]]
interior2 = [[-4, -8], [4, -8], [4, -4], [-4, -4]]
points = exterior + interior1 + interior2
facets = [
[0, 1],
[1, 2],
[2, 3],
[3, 0],
[4, 5],
[5, 6],
[6, 7],
[7, 4],
[8, 9],
[9, 10],
[10, 11],
        [11, 8],
]
control_points = [[0, 0]]
holes = [[0, 6], [0, -6]]
new_geom = Geometry.from_points(
points=points, facets=facets, control_points=control_points, holes=holes
)
wkt_test_geom = shapely.wkt.loads(
"POLYGON ((6 10, 6 -10, -6 -10, -6 10, 6 10), (-4 4, 4 4, 4 8, -4 8, -4 4), (4 -8, 4 -4, -4 -4, -4 -8, 4 -8))"
)
assert (new_geom.geom - wkt_test_geom) == Polygon()
def test_compound_geometry_from_points():
# CompoundGeometry.from_points() tests a shape with an arbitrary
# number of exteriors and an arbitrary number of interiors being
# built from the legacy
# points, facets, holes, control_points interface of sectionproperties
a = 1
b = 2
t = 0.1
# build the lists of points, facets, holes and control points
points = [
[-t / 2, -2 * a],
[t / 2, -2 * a],
[t / 2, -t / 2],
[a, -t / 2],
[a, t / 2],
[-t / 2, t / 2],
[-b / 2, -2 * a],
[b / 2, -2 * a],
[b / 2, -2 * a - t],
[-b / 2, -2 * a - t],
]
facets = [
[0, 1],
[1, 2],
[2, 3],
[3, 4],
[4, 5],
[5, 0],
[6, 7],
[7, 8],
[8, 9],
[9, 6],
]
control_points = [[0, 0], [0, -2 * a - t / 2]]
new_geom = CompoundGeometry.from_points(points, facets, control_points)
wkt_test_geom = shapely.wkt.loads(
"MULTIPOLYGON (((-0.05 -2, 0.05 -2, 0.05 -0.05, 1 -0.05, 1 0.05, -0.05 0.05, -0.05 -2)), ((-1 -2, 1 -2, 1 -2.1, -1 -2.1, -1 -2)))"
)
assert (new_geom.geom - wkt_test_geom) == Polygon()
def test_multinested_compound_geometry_from_points():
"""
Testing a multi-nested section. This section contains three nested materials in concentric
square rings with a hole going through the center of the whole section. This test confirms
that the section can be successfully built using .from_points, that the control_points
and hole nodes persist in the right locations, and that the plastic section calculation
raises a warning because the nested regions overlap.
"""
points = [
[-50.0, 50.0],
[50.0, 50.0],
[50.0, -50.0],
[-50.0, -50.0],
[37.5, -37.5],
[37.5, 37.5],
[-37.5, 37.5],
[-37.5, -37.5],
[25.0, -25.0],
[25.0, 25.0],
[-25.0, 25.0],
[-25.0, -25.0],
[12.5, -12.5],
[12.5, 12.5],
[-12.5, 12.5],
[-12.5, -12.5],
]
facets = [
[0, 1],
[1, 2],
[2, 3],
[3, 0],
[4, 5],
[5, 6],
[6, 7],
[7, 4],
[8, 9],
[9, 10],
[10, 11],
[11, 8],
[12, 13],
[13, 14],
[14, 15],
[15, 12],
]
control_points = [[-43.75, 0.0], [-31.25, 0.0], [-18.75, 0.0]]
holes = [[0, 0]]
nested_compound = CompoundGeometry.from_points(
points=points, facets=facets, control_points=control_points, holes=holes
)
wkt_test_geom = shapely.wkt.loads(
"MULTIPOLYGON (((50 50, 50 -50, -50 -50, -50 50, 50 50), (12.5 12.5, -12.5 12.5, -12.5 -12.5, 12.5 -12.5, 12.5 12.5)), ((-37.5 -37.5, -37.5 37.5, 37.5 37.5, 37.5 -37.5, -37.5 -37.5), (12.5 12.5, -12.5 12.5, -12.5 -12.5, 12.5 -12.5, 12.5 12.5)), ((-25 -25, -25 25, 25 25, 25 -25, -25 -25), (12.5 12.5, -12.5 12.5, -12.5 -12.5, 12.5 -12.5, 12.5 12.5)))"
)
assert (nested_compound.geom - wkt_test_geom) == Polygon()
assert nested_compound.control_points == [
(-43.75, 0.0),
(-31.25, 0.0),
(-18.75, 0.0),
]
assert nested_compound.holes == [(0, 0), (0, 0), (0, 0)]
# Section contains overlapping geometries which will result in potentially incorrect
# plastic properties calculation (depends on user intent and geometry).
# Test to ensure a warning is raised about this to notify the user.
nested_compound.create_mesh([25, 30, 35])
nested_compound_sec = Section(nested_compound)
nested_compound_sec.calculate_geometric_properties()
with pytest.warns(UserWarning):
nested_compound_sec.calculate_plastic_properties()
def test_geometry_from_dxf():
section_holes_dxf = (
pathlib.Path.cwd() / "sectionproperties" / "tests" / "section_holes.dxf"
)
assert (
Geometry.from_dxf(section_holes_dxf).geom.wkt
== "POLYGON ((-0.338658834889 -0.395177702895, -0.338658834889 29.092318216393, 31.962257588776 29.092318216393, 31.962257588776 -0.395177702895, -0.338658834889 -0.395177702895), (16.684315862478 2.382629883704, 29.683030851053 2.382629883704, 29.683030851053 24.355800152063, 16.684315862478 24.355800152063, 16.684315862478 2.382629883704), (1.548825807288 3.344178663681, 14.547540795863 3.344178663681, 14.547540795863 27.382898163101, 1.548825807288 27.382898163101, 1.548825807288 3.344178663681))"
)
def test_plastic_centroid():
## Test created in response to #114
# Since the section being tested is a compound geometry with two different
# materials, this tests that the plastic centroid takes into account the
# correct "center" of the original section which is affected by EA of each
# of the constituent geometries.
steel = Material(
name="Steel",
elastic_modulus=200e3,
poissons_ratio=0.3,
density=7.85e-6,
yield_strength=500,
color="grey",
)
timber = Material(
name="Timber",
elastic_modulus=5e3,
poissons_ratio=0.35,
density=6.5e-7,
yield_strength=20,
color="burlywood",
)
# create 310UB40.4
ub = i_section(d=304, b=165, t_f=10.2, t_w=6.1, r=11.4, n_r=8, material=steel)
# create timber panel on top of the UB
panel = rectangular_section(d=50, b=600, material=timber)
panel = panel.align_center(ub).align_to(ub, on="top")
# merge the two sections into one geometry object
geometry = CompoundGeometry([ub, panel])
# create a mesh - use a mesh size of 5 for the UB, 20 for the panel
geometry.create_mesh(mesh_sizes=[100, 100])
# create a Section object
section = Section(geometry)
# perform a geometric, warping and plastic analysis
section.calculate_geometric_properties()
section.calculate_plastic_properties()
# Checking sections that were defined above
#
nested_sec.calculate_geometric_properties()
nested_sec.calculate_plastic_properties()
overlay_sec.calculate_geometric_properties()
with pytest.warns(UserWarning):
overlay_sec.calculate_plastic_properties()
# section
x_pc, y_pc = section.get_pc()
assert x_pc == pytest.approx(82.5)
assert y_pc == pytest.approx(250.360654576)
# nested_sec
x_pc, y_pc = nested_sec.get_pc()
assert x_pc == pytest.approx(37.5)
assert y_pc == pytest.approx(50)
def test_geometry_from_3dm_file_simple():
section = pathlib.Path.cwd() / "sectionproperties" / "tests" / "3in x 2in.3dm"
exp = Polygon([(0, 0), (0, 3), (2, 3), (2, 0), (0, 0)])
test = Geometry.from_3dm(section)
assert (test.geom - exp).is_empty
def test_geometry_from_3dm_file_complex():
section_3dm = (
pathlib.Path.cwd() / "sectionproperties" / "tests" / "complex_shape.3dm"
)
section_wkt = (
pathlib.Path.cwd() / "sectionproperties" / "tests" / "complex_shape.txt"
)
with open(section_wkt) as file:
wkt_str = file.readlines()
exp = wkt.loads(wkt_str[0])
test = Geometry.from_3dm(section_3dm)
assert (test.geom - exp).is_empty
def test_geometry_from_3dm_file_compound():
section_3dm = (
pathlib.Path.cwd() / "sectionproperties" / "tests" / "compound_shape.3dm"
)
section_wkt = (
pathlib.Path.cwd() / "sectionproperties" / "tests" / "compound_shape.txt"
)
with open(section_wkt) as file:
wkt_str = file.readlines()
exp = [wkt.loads(wkt_str[0]), wkt.loads(wkt_str[1])]
test = CompoundGeometry.from_3dm(section_3dm)
assert (MultiPolygon([ii.geom for ii in test.geoms]) - MultiPolygon(exp)).is_empty
def test_geometry_from_3dm_encode():
section_3dm = pathlib.Path.cwd() / "sectionproperties" / "tests" / "rhino_data.json"
with open(section_3dm) as file:
brep_encoded = json.load(file)
exp = Polygon([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])
test = Geometry.from_rhino_encoding(brep_encoded)
assert (test.geom - exp).is_empty
def test_shift_points():
assymetrical_chan = nastran_chan(75, 200, 8, 16).shift_points(1, dy=-10)
assert (
assymetrical_chan.geom.wkt
== "POLYGON ((0 0, 75 -10, 75 16, 8 16, 8 184, 75 184, 75 200, 0 200, 0 0))"
)
def test_mirror_section():
assymetrical_chan = nastran_chan(75, 200, 8, 16).shift_points(1, dy=-10)
assert (
assymetrical_chan.mirror_section(axis="x").geom.wkt
== "POLYGON ((0 190, 75 200, 75 174, 8 174, 8 6, 75 6, 75 -10, 0 -10, 0 190))"
)
assert (
assymetrical_chan.mirror_section(axis="y").geom.wkt
== "POLYGON ((75 0, 0 -10, 0 16, 67 16, 67 184, 0 184, 0 200, 75 200, 75 0))"
)
assert (
assymetrical_chan.mirror_section(axis="y", mirror_point=[50, 50]).geom.wkt
== "POLYGON ((100 0, 25 -10, 25 16, 92 16, 92 184, 25 184, 25 200, 100 200, 100 0))"
)
assert (
assymetrical_chan.mirror_section(axis="x", mirror_point=[50, 50]).geom.wkt
== "POLYGON ((0 100, 75 110, 75 84, 8 84, 8 -84, 75 -84, 75 -100, 0 -100, 0 100))"
)
def test_filter_non_polygons():
point1 = Point([0, 0])
point2 = Point([1, 1])
point3 = Point([1, 0])
line = LineString([point1, point2])
poly = Polygon([point1, point2, point3])
multi_poly = MultiPolygon([poly, poly])
collection = GeometryCollection([poly, point1, line])
out = filter_non_polygons(collection)
assert filter_non_polygons(poly) == poly
assert filter_non_polygons(multi_poly) == multi_poly
assert filter_non_polygons(point1) == Polygon()
assert filter_non_polygons(line) == Polygon()
assert filter_non_polygons(collection) == poly
def test_round_polygon_vertices():
big_box = box(0, 0, 200, 200)
bottom_box = box(10.00001, 10.000001, 50.100, 50.2)
upper_box = box(120.000011, 120.000032, 169.999987, 170.0001)
test_shape = big_box - bottom_box - upper_box
assert (
test_shape.wkt
!= "POLYGON ((0 200, 200 200, 200 0, 0 0, 0 200), (10 50, 10 10, 50 10, 50 50, 10 50), (170 170, 120 170, 120 120, 170 120, 170 170))"
)
test_shape_rounded = round_polygon_vertices(test_shape, 0)
assert (
test_shape_rounded.wkt
== "POLYGON ((0 200, 200 200, 200 0, 0 0, 0 200), (10 50, 10 10, 50 10, 50 50, 10 50), (170 170, 120 170, 120 120, 170 120, 170 170))"
)
def test_check_geometry_overlaps():
big_sq = rectangular_section(d=300, b=250)
small_sq = rectangular_section(d=100, b=75)
small_hole = rectangular_section(d=40, b=30).align_center(small_sq)
    assert check_geometry_overlaps([small_sq.geom, small_hole.geom])
    assert check_geometry_overlaps([small_sq.geom, small_sq.geom])
    assert not check_geometry_overlaps(
        [big_sq.geom, small_sq.shift_section(x_offset=270).geom]
    )
    assert check_geometry_overlaps(
        [big_sq.geom, small_sq.shift_section(x_offset=200, y_offset=150).geom]
    )
|
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo_policy import policy as oslo_policy
from oslo_utils import timeutils
from six.moves import range
import webob
from nova.api.openstack.compute import simple_tenant_usage as \
simple_tenant_usage_v21
from nova.compute import vm_states
from nova import context
from nova import exception
from nova import objects
from nova import policy
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests import uuidsentinel as uuids
SERVERS = 5
TENANTS = 2
HOURS = 24
ROOT_GB = 10
EPHEMERAL_GB = 20
MEMORY_MB = 1024
VCPUS = 2
NOW = timeutils.utcnow()
START = NOW - datetime.timedelta(hours=HOURS)
STOP = NOW
FAKE_INST_TYPE = {'id': 1,
'vcpus': VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'memory_mb': MEMORY_MB,
'name': 'fakeflavor',
'flavorid': 'foo',
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'swap': 0,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': 0,
'disabled': False,
'is_public': True,
'extra_specs': {'foo': 'bar'}}
def _fake_instance(start, end, instance_id, tenant_id,
vm_state=vm_states.ACTIVE):
flavor = objects.Flavor(**FAKE_INST_TYPE)
return objects.Instance(
deleted=False,
id=instance_id,
uuid=getattr(uuids, 'instance_%d' % instance_id),
image_ref='1',
project_id=tenant_id,
user_id='fakeuser',
display_name='name',
instance_type_id=FAKE_INST_TYPE['id'],
launched_at=start,
terminated_at=end,
vm_state=vm_state,
memory_mb=MEMORY_MB,
vcpus=VCPUS,
root_gb=ROOT_GB,
ephemeral_gb=EPHEMERAL_GB,
flavor=flavor)
@classmethod
def fake_get_active_by_window_joined(cls, context, begin, end=None,
project_id=None, host=None,
expected_attrs=None, use_slave=False):
return objects.InstanceList(objects=[
        _fake_instance(START, STOP, x,
                       project_id or 'faketenant_%s' % (x // SERVERS))
for x in range(TENANTS * SERVERS)])
@mock.patch('nova.objects.InstanceList.get_active_by_window_joined',
fake_get_active_by_window_joined)
class SimpleTenantUsageTestV21(test.TestCase):
policy_rule_prefix = "os_compute_api:os-simple-tenant-usage"
controller = simple_tenant_usage_v21.SimpleTenantUsageController()
def setUp(self):
super(SimpleTenantUsageTestV21, self).setUp()
self.admin_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=True)
self.user_context = context.RequestContext('fakeadmin_0',
'faketenant_0',
is_admin=False)
self.alt_user_context = context.RequestContext('fakeadmin_0',
'faketenant_1',
is_admin=False)
def _test_verify_index(self, start, stop):
req = fakes.HTTPRequest.blank('?start=%s&end=%s' %
(start.isoformat(), stop.isoformat()))
req.environ['nova.context'] = self.admin_context
res_dict = self.controller.index(req)
usages = res_dict['tenant_usages']
for i in range(TENANTS):
self.assertEqual(SERVERS * HOURS, int(usages[i]['total_hours']))
self.assertEqual(SERVERS * (ROOT_GB + EPHEMERAL_GB) * HOURS,
int(usages[i]['total_local_gb_usage']))
self.assertEqual(SERVERS * MEMORY_MB * HOURS,
int(usages[i]['total_memory_mb_usage']))
self.assertEqual(SERVERS * VCPUS * HOURS,
int(usages[i]['total_vcpus_usage']))
self.assertFalse(usages[i].get('server_usages'))
def test_verify_index(self):
self._test_verify_index(START, STOP)
def test_verify_index_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_index(START, future)
def test_verify_show(self):
self._test_verify_show(START, STOP)
def test_verify_show_future_end_time(self):
future = NOW + datetime.timedelta(hours=HOURS)
self._test_verify_show(START, future)
def _get_tenant_usages(self, detailed=''):
req = fakes.HTTPRequest.blank('?detailed=%s&start=%s&end=%s' %
(detailed, START.isoformat(), STOP.isoformat()))
req.environ['nova.context'] = self.admin_context
# Make sure that get_active_by_window_joined is only called with
# expected_attrs=['flavor'].
orig_get_active_by_window_joined = (
objects.InstanceList.get_active_by_window_joined)
def fake_get_active_by_window_joined(context, begin, end=None,
project_id=None, host=None,
expected_attrs=None,
use_slave=False):
self.assertEqual(['flavor'], expected_attrs)
return orig_get_active_by_window_joined(context, begin, end,
project_id, host,
expected_attrs, use_slave)
with mock.patch.object(objects.InstanceList,
'get_active_by_window_joined',
side_effect=fake_get_active_by_window_joined):
res_dict = self.controller.index(req)
return res_dict['tenant_usages']
def test_verify_detailed_index(self):
usages = self._get_tenant_usages('1')
for i in range(TENANTS):
servers = usages[i]['server_usages']
for j in range(SERVERS):
self.assertEqual(HOURS, int(servers[j]['hours']))
def test_verify_simple_index(self):
usages = self._get_tenant_usages(detailed='0')
for i in range(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def test_verify_simple_index_empty_param(self):
# NOTE(lzyeval): 'detailed=&start=..&end=..'
usages = self._get_tenant_usages()
for i in range(TENANTS):
self.assertIsNone(usages[i].get('server_usages'))
def _test_verify_show(self, start, stop):
tenant_id = 1
req = fakes.HTTPRequest.blank('?start=%s&end=%s' %
(start.isoformat(), stop.isoformat()))
req.environ['nova.context'] = self.user_context
res_dict = self.controller.show(req, tenant_id)
usage = res_dict['tenant_usage']
servers = usage['server_usages']
self.assertEqual(TENANTS * SERVERS, len(usage['server_usages']))
server_uuids = [getattr(uuids, 'instance_%d' % x)
for x in range(SERVERS)]
for j in range(SERVERS):
delta = STOP - START
# NOTE(javeme): cast seconds from float to int for clarity
uptime = int(delta.total_seconds())
self.assertEqual(uptime, int(servers[j]['uptime']))
self.assertEqual(HOURS, int(servers[j]['hours']))
self.assertIn(servers[j]['instance_id'], server_uuids)
def test_verify_show_cannot_view_other_tenant(self):
req = fakes.HTTPRequest.blank('?start=%s&end=%s' %
(START.isoformat(), STOP.isoformat()))
req.environ['nova.context'] = self.alt_user_context
rules = {
self.policy_rule_prefix + ":show": [
["role:admin"], ["project_id:%(project_id)s"]]
}
policy.set_rules(oslo_policy.Rules.from_dict(rules))
try:
self.assertRaises(exception.PolicyNotAuthorized,
self.controller.show, req, 'faketenant_0')
finally:
policy.reset()
def test_get_tenants_usage_with_bad_start_date(self):
future = NOW + datetime.timedelta(hours=HOURS)
req = fakes.HTTPRequest.blank('?start=%s&end=%s' %
(future.isoformat(), NOW.isoformat()))
req.environ['nova.context'] = self.user_context
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.show, req, 'faketenant_0')
def test_get_tenants_usage_with_invalid_start_date(self):
req = fakes.HTTPRequest.blank('?start=%s&end=%s' %
("xxxx", NOW.isoformat()))
req.environ['nova.context'] = self.user_context
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.show, req, 'faketenant_0')
def _test_get_tenants_usage_with_one_date(self, date_url_param):
req = fakes.HTTPRequest.blank('?%s' % date_url_param)
req.environ['nova.context'] = self.user_context
res = self.controller.show(req, 'faketenant_0')
self.assertIn('tenant_usage', res)
def test_get_tenants_usage_with_no_start_date(self):
self._test_get_tenants_usage_with_one_date(
'end=%s' % (NOW + datetime.timedelta(5)).isoformat())
def test_get_tenants_usage_with_no_end_date(self):
self._test_get_tenants_usage_with_one_date(
'start=%s' % (NOW - datetime.timedelta(5)).isoformat())
class SimpleTenantUsageControllerTestV21(test.TestCase):
controller = simple_tenant_usage_v21.SimpleTenantUsageController()
def setUp(self):
super(SimpleTenantUsageControllerTestV21, self).setUp()
self.context = context.RequestContext('fakeuser', 'fake-project')
self.inst_obj = _fake_instance(START, STOP, instance_id=1,
tenant_id=self.context.project_id,
vm_state=vm_states.DELETED)
@mock.patch('nova.objects.Instance.get_flavor',
side_effect=exception.NotFound())
def test_get_flavor_from_non_deleted_with_id_fails(self, fake_get_flavor):
# If an instance is not deleted and missing type information from
# instance.flavor, then that's a bug
self.assertRaises(exception.NotFound,
self.controller._get_flavor, self.context,
self.inst_obj, {})
@mock.patch('nova.objects.Instance.get_flavor',
side_effect=exception.NotFound())
def test_get_flavor_from_deleted_with_notfound(self, fake_get_flavor):
# If the flavor is not found from the instance and the instance is
# deleted, attempt to look it up from the DB and if found we're OK.
self.inst_obj.deleted = 1
flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
self.assertEqual(objects.Flavor, type(flavor))
self.assertEqual(FAKE_INST_TYPE['id'], flavor.id)
@mock.patch('nova.objects.Instance.get_flavor',
side_effect=exception.NotFound())
def test_get_flavor_from_deleted_with_id_of_deleted(self, fake_get_flavor):
# Verify the legacy behavior of instance_type_id pointing to a
# missing type being non-fatal
self.inst_obj.deleted = 1
self.inst_obj.instance_type_id = 99
flavor = self.controller._get_flavor(self.context, self.inst_obj, {})
self.assertIsNone(flavor)
class SimpleTenantUsageUtilsV21(test.NoDBTestCase):
simple_tenant_usage = simple_tenant_usage_v21
def test_valid_string(self):
dt = self.simple_tenant_usage.parse_strtime(
"2014-02-21T13:47:20.824060", "%Y-%m-%dT%H:%M:%S.%f")
self.assertEqual(datetime.datetime(
microsecond=824060, second=20, minute=47, hour=13,
day=21, month=2, year=2014), dt)
def test_invalid_string(self):
self.assertRaises(exception.InvalidStrTime,
self.simple_tenant_usage.parse_strtime,
"2014-02-21 13:47:20.824060",
"%Y-%m-%dT%H:%M:%S.%f")
|
|
#!/usr/local/bin/python
from sprint import sprint as print
import datetime
import functools
import getopt
import glob
import http.client
from io import open
import os
import pprint
import pymysql
import random
import sys
import time
import traceback
import config
import crawls
import logger
import pifile
import useful
config.GURU_ID = ''.join(random.choice('0123456789ABCDEFGHJKLMNPRSTUVWXYZ') for i in range(10))
tb_fmt = """headline = '''{}'''
guru_id = '{}'
uri = '''{}'''
tb = '''
{}
'''
env = {}
"""
# --- Web Pages ---------------------------------------------------------
def get_page_info(page_id, form_key='', defval='', args='', dbedit=None):
return pifile.PageInfoFile(page_id, form_key, defval, args=args, dbedit=dbedit)
def write_traceback_file(pif, e):
str_tb = traceback.format_exc()
if pif and pif.unittest:
return str_tb # unit testing should not leave tb files sitting around.
tb_file_name = os.path.join(config.LOG_ROOT, datetime.datetime.now().strftime('%Y%m%d.%H%M%S.') + config.ENV + '.')
if pif:
tb_file_name += pif.page_id
else:
tb_file_name += 'unknown'
erf = open(tb_file_name, 'w')
erf.write(tb_fmt.format(' '.join([x.strip() for x in traceback.format_exception_only(type(e), e)]),
config.GURU_ID, os.environ.get('REQUEST_URI', ''), str_tb,
pprint.pformat(os.environ, indent=2, width=132)))
if pif:
erf.write(pif.error_report())
erf.close()
return str_tb
def simple_html(status=404):
if not useful.is_header_done():
        # the Status header must precede the blank line that terminates the
        # CGI headers, so emit it before Content-Type's trailing newlines
        print('Status:', status, http.client.responses.get(status, ''))
        print('Content-Type: text/html\n\n')
# print('<!--\n' + str(os.environ) + '-->')
useful.header_done()
useful.write_comment()
def handle_exception(pif, e, header_done=False, write_traceback=True, status_code='unset'):
log = pif.log if pif and pif.log else logger.Logger()
log.exc.error('{} {}'.format(
os.environ.get('REMOTE_ADDR', '127.0.0.1'),
os.environ.get('REQUEST_URI', 'unknown')))
str_tb = write_traceback_file(pif, e) if write_traceback else ''
log_page_call(pif, status_code=status_code)
if not pif or not pif.render or not pif.dbh:
if not header_done:
simple_html()
if str_tb:
print('<!--\n' + str_tb + '-->')
final_exit()
pif.dbh.set_health(pif.page_id)
if not useful.is_header_done() and not header_done:
simple_html()
useful.header_done()
useful.write_comment()
while pif.render.table_count > 0:
print(pif.render.format_table_end())
if not pif.is_allowed('a'):
print('<!--\n' + str_tb + '-->')
final_exit()
def final_exit():
print("<p><h3>An error has occurred that prevents this page from displaying. Our apologies.<br>")
print("An alert has been sent and the problem will be fixed as soon as possible.</h3>")
# print("<p>We're doing some upgrades, and right now, not everything is playing nicely together.<br>")
# print("We'll get things going as soon as possible.")
print('<p><p>Guru Meditation ID: {}'.format(config.GURU_ID))
sys.exit()
def log_page_call(pif, status_code='unset'):
if pif and (pif.argv or pif.is_allowed('m')):
return # it's me! it's ME!
status_code = pif.render.status_printed if pif and pif.render else status_code
log = pif.log if pif and pif.log else logger.Logger()
if os.getenv('HTTP_USER_AGENT', '') in crawls.crawlers:
log.bot.info('{} {} {}'.format(os.environ.get('REMOTE_ADDR', '127.0.0.1'),
os.environ.get('REQUEST_URI', 'unknown'), status_code))
return
if pif:
pif.dbh.increment_counter(pif.page_id)
log.count.info(pif.page_id)
if pif.is_external_referrer():
log.refer.info(os.environ['HTTP_REFERER'])
log.url.info('{} {} {}'.format(os.environ.get('REMOTE_ADDR', '127.0.0.1'),
os.environ.get('REQUEST_URI', 'unknown'), status_code))
if os.getenv('HTTP_USER_AGENT'):
log.debug.info(os.getenv('HTTP_USER_AGENT'))
# --- Command Lines -----------------------------------------------------
'''
get_command_line front-ends getopt, to make it do stuff I want it to do.
Uses unix-style options, not GNU-style.
switches - binary switches, like -v for verbose
options - switches that need an argument, like -f <file>
switches or options can be prefixed with '+' to make them required
long_options - dictionary of long options
keys are long option name (followed by '=' if it needs an argument)
values are the short option/switch to map to (if any)
keys can be prefixed with '+' to make them required
version - application version string, for display purposes
short_help - one-line help
long_help - multiline help
envar - an environment variable (if any) that can specify arguments
noerror - don't fail on getopt errors, just ignore them
defaults - dictionary of default values
for switches use False/True, for options use the argument (NOT in a list)
doglob - run glob.glob over 'files'
Please use named arguments for everything after the first three.
All arguments are optional.
-DD, developed over several years
'''
def get_req(sw, reqs=None):
    # A mutable default argument ([]) would accumulate required options
    # across calls, so the list is created fresh on each invocation.
    if reqs is None:
        reqs = []
if isinstance(sw, dict):
# osw = []
for opt in sw:
if opt[0] == '+':
if sw[opt]:
reqs.append(sw[opt])
else:
if opt[-1] == '=':
reqs.append(opt[1:-1])
else:
reqs.append(opt[1:])
sw[opt[1:]] = sw[opt]
else:
while '+' in sw:
reqs.append(sw[sw.find('+') + 1])
sw = sw.replace('+', '', 1)
return sw, reqs
def get_command_line(switches="", options="", long_options={}, version="", short_help="",
long_help="", envar=None, noerror=False, defaults={}, doglob=False):
switches, reqs = get_req(switches)
options, reqs = get_req(options, reqs)
loptions, reqs = get_req(long_options, reqs)
switch = dict()
opts = list()
files = list()
coptions = switches
if options:
coptions += ':'.join(list(options)) + ':'
if 'h' not in coptions:
coptions += 'h'
if envar and envar in os.environ:
try: # get command line
opts, files = getopt.getopt(os.environ[envar].split(), coptions, loptions)
except getopt.GetoptError:
if not noerror:
print("*** Environment error")
print(sys.argv[0], short_help, file=sys.stderr)
sys.exit(1)
try: # get command line
opts2, files2 = getopt.getopt(sys.argv[1:], coptions, loptions)
except getopt.GetoptError:
if not noerror:
print("*** Options error")
print(sys.argv[0], short_help, file=sys.stderr)
sys.exit(2)
opts = opts + opts2
files = files + files2
for opt in switches:
switch[opt] = None
for opt in options:
switch[opt] = list()
for opt in long_options:
if not long_options[opt]:
if opt[-1] == '=':
switch[opt[:-1]] = list()
else:
switch[opt] = None
for opt in opts:
if opt[0] == "-h" and 'h' not in switches + options:
print(version, long_help, file=sys.stderr)
sys.exit(3)
elif opt[0][0:2] == '--':
if opt[0][2:] in long_options:
if long_options[opt[0][2:]]:
switch[long_options[opt[0][2:]]] = not switch.get(long_options[opt[0][2:]], False)
else:
switch[opt[0][2:]] = not switch.get(opt[0][2:], False)
elif opt[0][2:] + '=' in long_options:
if long_options[opt[0][2:] + '=']:
sw = switch.get(long_options[opt[0][2:] + '='], list())
switch[long_options[opt[0][2:] + '=']] = sw + [opt[1]]
else:
sw = switch.get(opt[0][2:], list())
switch[opt[0][2:]] = sw + [opt[1]]
elif opt[0][1] in options:
sw = switch.get(opt[0][1], list())
switch[opt[0][1]] = sw + [opt[1]]
else:
switch[opt[0][1]] = not switch.get(opt[0][1], False)
for req in reqs:
if not switch[req]:
print("*** Missing command line argument")
print(sys.argv[0], short_help, file=sys.stderr)
sys.exit(4)
for key in switch:
if switch[key] is None:
switch[key] = defaults.get(key, False)
elif switch[key] == [] and key in defaults:
switch[key] = [defaults[key]]
if doglob:
files = functools.reduce(lambda x, y: x + y, [glob.glob(x) for x in files], [])
return (switch, files)
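# A minimal usage sketch of get_command_line (the option letters and help
# text here are hypothetical, chosen only to illustrate the conventions
# described in the docstring above):
#
#   switch, files = get_command_line(
#       switches='v',                 # -v: boolean switch
#       options='+f',                 # -f <file>: required option
#       long_options={'verbose': 'v', 'file=': 'f'},
#       short_help='usage: prog [-v] -f <file> [files...]',
#       doglob=True)
#   if switch['v']:
#       print('file args:', switch['f'], 'extra files:', files)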
# --- -------------------------------------------------------------------
# Decorator that wraps web page mains.
def web_page(main_fn):
@functools.wraps(main_fn)
def call_main(page_id, form_key='', defval='', args='', dbedit=None):
useful.write_comment('PID', os.getpid(), 'GURU', config.GURU_ID)
status_code = 'unset'
pif = None
try:
pif = (page_id if isinstance(page_id, pifile.PageInfoFile) else
get_page_info(page_id, form_key, defval, args, dbedit))
except SystemExit:
pass
except pymysql.OperationalError as e:
status_code = 'db'
simple_html()
print('The database is currently down, and thus, this page is unable to be shown.<p>')
write_traceback_file(pif, e)
handle_exception(pif, e, True, status_code=status_code)
return
except Exception as e:
status_code = 'exc'
simple_html()
handle_exception(pif, e, status_code=status_code)
return
pif.start()
try:
if ('/etc/passwd' in os.environ.get('QUERY_STRING', '') or
'%2fetc%2fpasswd' in os.environ.get('QUERY_STRING', '').lower()):
raise useful.Redirect('https://www.nsa.gov/')
ret = main_fn(pif)
if not useful.is_header_done():
pif.render.print_html()
if pif.render.is_html:
useful.write_comment("Page:", pif.page_id, 'Time:', time.time() - pif.start_seconds)
if ret and not pif.unittest:
print(ret)
except SystemExit:
            # the happiest exception on earth
            status_code = 'exit'
except useful.SimpleError as e:
if not useful.is_header_done():
status_code = e.status
pif.render.print_html(status=e.status)
print(pif.render.format_template('error.html', error=[e.value]))
except useful.Redirect as e:
if not useful.is_header_done():
status_code = 302
pif.render.print_html(status=302)
print(pif.render.format_template('forward.html', url=e.value, delay=e.delay))
except pymysql.OperationalError as e:
if not useful.is_header_done():
status_code = 500
pif.render.print_html(status=500)
print('The database is currently down, and thus, this page is unable to be shown.<p>')
write_traceback_file(pif, e)
except Exception as e:
status_code = 'exc'
handle_exception(pif, e, status_code=status_code)
raise
useful.header_done(True)
useful.write_comment()
log_page_call(pif, status_code=status_code)
return call_main
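# A minimal usage sketch (the page id and template name are hypothetical;
# a real page main receives a PageInfoFile and returns rendered output):
#
#   @web_page
#   def main(pif):
#       return pif.render.format_template('hello.html')
#
#   main('hello.cgi')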
# --- -------------------------------------------------------------------
# Decorator that wraps command line mains.
def command_line(main_fn):
@functools.wraps(main_fn)
def call_main(page_id='cli', form_key='', defval='', args='', dbedit=None, switches='', options=''):
useful.header_done(False)
pif = None
try:
switch, filelist = get_command_line(switches, options)
for f in filelist:
if f.startswith('page_id='):
page_id = f[8:]
if isinstance(page_id, pifile.PageInfoFile):
pif = page_id
else:
pif = get_page_info(page_id, form_key, defval, args, dbedit)
pif.switch, pif.filelist = switch, filelist
ret = main_fn(pif)
useful.write_comment()
if ret:
print(ret)
except SystemExit:
pass
except useful.SimpleError as e:
print('***', e.value)
return call_main
# --- -------------------------------------------------------------------
# useful.py:def cmd_proc(pif, script, cmds):
# Main that processes command lists.
def process_command_list(page_id='cli', form_key='', defval='', args='', dbedit=None, cmds=[], switches='', options=''):
pif = None
try:
switch, filelist = get_command_line(switches, options)
pif = get_page_info(page_id, form_key, defval, args, dbedit)
pif.switch, pif.filelist = switch, filelist
useful.cmd_proc(pif, './' + os.path.split(sys.argv[0])[1], cmds)
except SystemExit:
pass
except useful.SimpleError as e:
print('***', e.value)
# --- -------------------------------------------------------------------
# Decorator for standalone (PIFless) command line mains.
def standalone(main_fn):
@functools.wraps(main_fn)
def call_main(switches="", options="", long_options={}, version="", short_help="", long_help="",
envar=None, noerror=False, defaults={}, doglob=False):
try:
switch, filelist = get_command_line(switches=switches, options=options, long_options=long_options,
version=version, short_help=short_help, long_help=long_help,
envar=envar, noerror=noerror,
defaults=defaults, doglob=doglob)
ret = main_fn(switch, filelist)
useful.write_comment()
if ret:
print(ret)
except SystemExit:
pass
return call_main
# --- -------------------------------------------------------------------
def goaway():
print('Content-Type: text/html')
print()
    print('<html><body bgcolor="#FFFFFF"><img src="../pic/gfx/tested.gif"></body></html>')
if __name__ == '__main__': # pragma: no cover
goaway()
|
|
# -*- coding: utf-8 -*-
import neo
import numpy as np
import quantities as pq
import unittest
import elephant.change_point_detection as mft
from numpy.testing import assert_array_almost_equal, assert_allclose
# np.random.seed(13)
class FilterTestCase(unittest.TestCase):
def setUp(self):
self.test_array = [0.4, 0.5, 0.65, 0.7, 0.9, 1.15, 1.2, 1.9]
'''
spks_ri = [0.9, 1.15, 1.2]
spk_le = [0.4, 0.5, 0.65, 0.7]
'''
mu_ri = (0.25 + 0.05) / 2
mu_le = (0.1 + 0.15 + 0.05) / 3
sigma_ri = ((0.25 - 0.15) ** 2 + (0.05 - 0.15) ** 2) / 2
sigma_le = ((0.1 - 0.1) ** 2 + (0.15 - 0.1) ** 2 + (
0.05 - 0.1) ** 2) / 3
self.targ_t08_h025 = 0
self.targ_t08_h05 = (3 - 4) / np.sqrt(
(sigma_ri / mu_ri ** (3)) * 0.5 + (sigma_le / mu_le ** (3)) * 0.5)
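        # With window width h = 0.5 s centered at t = 0.8 s there are 3 spikes
        # in the right half-window and 4 in the left; the filter statistic is
        # their difference normalized by the estimated ISI variability.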
# Window Large #
def test_filter_with_spiketrain_h05(self):
st = neo.SpikeTrain(self.test_array, units='s', t_stop=2.0)
target = self.targ_t08_h05
res = mft._filter(0.8 * pq.s, 0.5 * pq.s, st)
assert_array_almost_equal(res, target, decimal=9)
self.assertRaises(ValueError, mft._filter, 0.8, 0.5 * pq.s, st)
self.assertRaises(ValueError, mft._filter, 0.8 * pq.s, 0.5, st)
self.assertRaises(ValueError, mft._filter, 0.8 * pq.s, 0.5 * pq.s,
self.test_array)
# Window Small #
def test_filter_with_spiketrain_h025(self):
st = neo.SpikeTrain(self.test_array, units='s', t_stop=2.0)
target = self.targ_t08_h025
res = mft._filter(0.8 * pq.s, 0.25 * pq.s, st)
assert_array_almost_equal(res, target, decimal=9)
def test_filter_with_quantities_h025(self):
st = pq.Quantity(self.test_array, units='s')
target = self.targ_t08_h025
res = mft._filter(0.8 * pq.s, 0.25 * pq.s, st)
assert_array_almost_equal(res, target, decimal=9)
def test_filter_with_plain_array_h025(self):
st = self.test_array
target = self.targ_t08_h025
res = mft._filter(0.8 * pq.s, 0.25 * pq.s, st * pq.s)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_quantities_h05(self):
st = pq.Quantity(self.test_array, units='s')
target = self.targ_t08_h05
res = mft._filter(0.8 * pq.s, 0.5 * pq.s, st)
assert_array_almost_equal(res, target, decimal=9)
def test_isi_with_plain_array_h05(self):
st = self.test_array
target = self.targ_t08_h05
res = mft._filter(0.8 * pq.s, 0.5 * pq.s, st * pq.s)
assert_array_almost_equal(res, target, decimal=9)
class FilterProcessTestCase(unittest.TestCase):
def setUp(self):
self.test_array = [1.1, 1.2, 1.4, 1.6, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95]
x = (7 - 3) / np.sqrt(
(0.0025 / 0.15 ** 3) * 0.5 + (0.0003472 / 0.05833 ** 3) * 0.5)
self.targ_h05 = [[0.5, 1, 1.5],
[(0 - 1.7) / np.sqrt(0.4), (0 - 1.7) / np.sqrt(0.4),
(x - 1.7) / np.sqrt(0.4)]]
def test_filter_process_with_spiketrain_h05(self):
st = neo.SpikeTrain(self.test_array, units='s', t_stop=2.1)
target = self.targ_h05
res = mft._filter_process(0.5 * pq.s, 0.5 * pq.s, st, 2.01 * pq.s,
np.array([[0.5], [1.7], [0.4]]))
assert_array_almost_equal(res[1], target[1], decimal=3)
self.assertRaises(ValueError, mft._filter_process, 0.5, 0.5 * pq.s,
st, 2.01 * pq.s, np.array([[0.5], [1.7], [0.4]]))
self.assertRaises(ValueError, mft._filter_process, 0.5 * pq.s, 0.5,
st, 2.01 * pq.s, np.array([[0.5], [1.7], [0.4]]))
self.assertRaises(ValueError, mft._filter_process, 0.5 * pq.s,
0.5 * pq.s, self.test_array, 2.01 * pq.s,
np.array([[0.5], [1.7], [0.4]]))
    def test_filter_process_with_quantities_h05(self):
st = pq.Quantity(self.test_array, units='s')
target = self.targ_h05
res = mft._filter_process(0.5 * pq.s, 0.5 * pq.s, st, 2.01 * pq.s,
np.array([[0.5], [1.7], [0.4]]))
assert_array_almost_equal(res[0], target[0], decimal=3)
    def test_filter_process_with_plain_array_h05(self):
st = self.test_array
target = self.targ_h05
res = mft._filter_process(0.5 * pq.s, 0.5 * pq.s, st * pq.s,
2.01 * pq.s, np.array([[0.5], [1.7], [0.4]]))
self.assertNotIsInstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=3)
class MultipleFilterAlgorithmTestCase(unittest.TestCase):
def setUp(self):
self.test_array = [1.1, 1.2, 1.4, 1.6, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95]
self.targ_h05_dt05 = [1.5 * pq.s]
        # To speed up the test, the following `test_param` and `test_quantile`
        # parameters have been calculated offline using the function:
        # empirical_parameters([10, 25, 50, 75, 100, 125, 150]*pq.s, 700*pq.s,
        #                      5, 10000)
        # The user should do the same if the method has to be applied to
        # several spike trains of the same length `T` and with the same set
        # of windows.
self.test_param = np.array([[10.,
25.,
50.,
75.,
100.,
125.,
150.],
[3.167,
2.955,
2.721,
2.548,
2.412,
2.293,
2.180],
[0.150,
0.185,
0.224,
0.249,
0.269,
0.288,
0.301]])
self.test_quantile = 2.75
def test_MultipleFilterAlgorithm_with_spiketrain_h05(self):
st = neo.SpikeTrain(self.test_array, units='s', t_stop=2.1)
target = [self.targ_h05_dt05]
res = mft.multiple_filter_test([0.5] * pq.s, st, 2.1 * pq.s, 5, 100,
time_step=0.1 * pq.s)
assert_array_almost_equal(res, target, decimal=9)
def test_MultipleFilterAlgorithm_with_quantities_h05(self):
st = pq.Quantity(self.test_array, units='s')
target = [self.targ_h05_dt05]
res = mft.multiple_filter_test([0.5] * pq.s, st, 2.1 * pq.s, 5, 100,
time_step=0.5 * pq.s)
assert_array_almost_equal(res, target, decimal=9)
def test_MultipleFilterAlgorithm_with_plain_array_h05(self):
st = self.test_array
target = [self.targ_h05_dt05]
res = mft.multiple_filter_test([0.5] * pq.s, st * pq.s, 2.1 * pq.s, 5,
100, time_step=0.5 * pq.s)
self.assertNotIsInstance(res, pq.Quantity)
assert_array_almost_equal(res, target, decimal=9)
def test_MultipleFilterAlgorithm_with_longdata(self):
def gamma_train(k, teta, tmax):
x = np.random.gamma(k, teta, int(tmax * (k * teta) ** (-1) * 3))
s = np.cumsum(x)
idx = np.where(s < tmax)
s = s[idx] # gamma process
return s
def alternative_hypothesis(k1, teta1, c1, k2, teta2, c2, k3, teta3, c3,
k4, teta4, T):
s1 = gamma_train(k1, teta1, c1)
s2 = gamma_train(k2, teta2, c2) + c1
s3 = gamma_train(k3, teta3, c3) + c1 + c2
s4 = gamma_train(k4, teta4, T) + c1 + c2 + c3
return np.concatenate((s1, s2, s3, s4)), [s1[-1], s2[-1], s3[-1]]
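        # The spike train concatenates four gamma renewal processes, so the
        # true rate change points lie at t = c1, c1 + c2 and c1 + c2 + c3
        # (150 s, 180 s and 500 s for the parameters used below).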
st = self.h1 = alternative_hypothesis(1, 1 / 4., 150, 2, 1 / 26., 30,
1, 1 / 36., 320,
2, 1 / 33., 200)[0]
window_size = [10, 25, 50, 75, 100, 125, 150] * pq.s
self.target_points = [150, 180, 500]
target = self.target_points
result = mft.multiple_filter_test(
window_size,
st * pq.s,
700 * pq.s,
5,
10000,
test_quantile=self.test_quantile,
test_param=self.test_param,
time_step=1 * pq.s)
self.assertNotIsInstance(result, pq.Quantity)
result_concatenated = []
for i in result:
result_concatenated = np.hstack([result_concatenated, i])
result_concatenated = np.sort(result_concatenated)
assert_allclose(result_concatenated[:3], target[:3], rtol=0,
atol=5)
print('detected {0} cps: {1}'.format(len(result_concatenated),
result_concatenated))
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
"""A module for writing, reading, and searching files for persistance of brags.
Implements the three public methods necessary for the bragly.persist API:
write : saves a message.
read : reads all messages in the relevant timeframe.
search: searches all messages according to a number of criteria
Attributes:
MESSAGE_STR_TEMPLATE (str): A template string for the 'log' format.
"""
from __future__ import absolute_import, print_function
import os
import json
import re
from collections import namedtuple
import arrow
MESSAGE_STR_TEMPLATE = "[{timestamp}][{tags}] {message}\n"
def write(message, file_path=None, file_dir=None, form='json'):
"""Writes a message to a file in the given format.
Args:
        message (dict): A message, its timestamp, and any associated tags.
form (str): A message format, one of log, json, json-pretty
file_path (str): The file path for the source file. If None, the
file_path will be generated from the file_dir and the form
file_dir (str): The file directory for the source file. If None,
the file_path will be used
Returns:
str: On success returns the word "success"
"""
timestamp = message['timestamp'].isoformat()
tags = message['tags']
message = message['message']
if form == 'json':
message_str = "{}\n".format(json.dumps(
dict(
message=message,
tags=tags,
timestamp=timestamp
),
))
elif form == 'json-pretty':
message_str = "{}\n".format(json.dumps(
dict(
message=message,
tags=tags,
timestamp=timestamp
),
indent=2
))
elif form == 'log':
tags = '|'.join(tags)
message_str = MESSAGE_STR_TEMPLATE.format(
timestamp=timestamp,
tags=tags,
message=message,
)
else:
raise RuntimeError('Form {form} not supported or missing.'.format(form=form))
file_path = _get_file_path(form, file_path, file_dir)
with open(file_path, 'a') as f:
f.write(message_str)
return "success"
def _get_file_path(form, file_path=None, file_dir=None):
"""Given a format, and either a filepath or file directory, returns the
proper filepath.
The format of the file, if dynamically generated from the file_dir
will be: /path/to/file/dir/brag-{format}.dat
Args:
form (str): A message format, one of log, json, json-pretty
file_path (str): The file path for the source file. If None, the
file_path will be generated from the file_dir and the form
file_dir (str): The file directory for the source file. If None, the
file_path will be used
Returns:
str: A file path for the source file
"""
if file_path is None and file_dir is None:
raise RuntimeError('No file_path or file_dir indicated.')
if file_path is None:
file_path = os.path.join(file_dir, 'brag-{form}.dat'.format(form=form))
return file_path
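# For example (the directory shown is hypothetical):
#   _get_file_path('json', file_dir='/data/brags') -> '/data/brags/brag-json.dat'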
def read(start, end, out_form, form, file_dir=None, file_path=None):
"""Reads out entries within the given time frame.
Args:
start (arrow.Arrow): The start date time to look for messages
end (arrow.Arrow): The end date time to look for messages
out_form (str): The output format. One of log, json, json-pretty
form (str): The format that the source file is in
file_dir (str): The file directory for the source file. If None, the
file_path will be used.
file_path (str): The file path for the source file. If None, the
file_path will be generated from the file_dir and the form.
Yields:
str: A message line, in the requested format
"""
file_path = _get_file_path(form, file_path, file_dir)
if form == 'json-pretty':
raise NotImplementedError('json-pretty format is not yet supported')
with open(file_path, 'rb') as f:
for line in f:
line = line.decode('utf-8').strip()
parsed_line = _parse_line(line, form=form)
if ((start is None and parsed_line.timestamp <= end) or
(start is not None and
start <= parsed_line.timestamp <= end
)):
if out_form != form:
yield _coerce_line(parsed_line, out_form)
else:
yield line
ParsedLine = namedtuple('ParsedLine', ['timestamp', 'tags', 'message'])
def _parse_line(line, form):
"""Parse a message line according to the form that is passed in, returning
a ParsedLine object.
Args:
line (str): A line from the data file
form (str): Indicates the form that the line is in, one of log or json
(json-pretty not supported)
Returns:
ParsedLine: The message in a convenient namedtuple
"""
if form == 'log':
line_regex = re.compile(r'\[(.*)\]\[(.*)] (.*)')
timestamp, tags, message = line_regex.findall(line)[0]
timestamp = arrow.get(timestamp)
if not tags:
tags = []
else:
tags = tags.split('|')
message = message.strip()
return ParsedLine(timestamp, tags, message)
elif form == 'json':
message_json = json.loads(line)
return ParsedLine(
arrow.get(message_json['timestamp']),
message_json['tags'],
message_json['message']
)
elif form == 'json-pretty':
        raise NotImplementedError('json-pretty format is not yet supported')
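# Illustrative sketch of _parse_line on a hypothetical 'log'-format line:
#   _parse_line("[2017-01-01T00:00:00+00:00][a|b] did a thing", form='log')
#   -> ParsedLine(timestamp=<Arrow 2017-01-01T00:00:00+00:00>,
#                 tags=['a', 'b'], message='did a thing')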
def _coerce_line(parsed_line, out_form):
""" Coerce's a parsed line (named tuple) into the requested output format.
Args:
parsed_line (ParsedLine): A named tuple of the message.
out_form (str): The output format, one of log, json, json-pretty
Returns:
str OR ParsedLine: The line in the format requested
"""
if out_form == 'parsed_line':
return parsed_line
timestamp = parsed_line.timestamp.isoformat()
if out_form == 'log':
tags = '|'.join(parsed_line.tags)
return MESSAGE_STR_TEMPLATE.format(
timestamp=timestamp,
tags=tags,
message=parsed_line.message
).strip()
elif out_form == 'json':
# Translate from a named tuple to a dict, and then dump as a json string
return json.dumps({
'timestamp': timestamp,
'tags': parsed_line.tags,
'message': parsed_line.message
}).strip()
elif out_form == 'json-pretty':
# Translate from a named tuple to a dict, and then dump as a json string
return json.dumps({
'timestamp': timestamp,
'tags': parsed_line.tags,
'message': parsed_line.message
}, indent=2).strip()
else:
raise TypeError('form must be one of parsed_line, log, json, or json-pretty. '
'Instead got {form}'.format(form=out_form))
def search(start, end, out_form, tags, text,
all_args, form, file_dir=None, file_path=None):
"""Given a file path or a file directory and form, searches the files
according to the search parameters.
Args:
start (arrow.Arrow): The start date time to look for messages
end (arrow.Arrow): The end date time to look for messages
out_form (str): The output format. One of log, json, json-pretty
tags (list): A list of tags to look for
text (list): A list of text tokens (words) to look for
        all_args (bool): Indicates whether all tags and text must match for
            the message to be surfaced
form (str): The format that the source file is in
file_dir (str): The file directory for the source file. If None, the
file_path will be used.
file_path (str): The file path for the source file. If None, the
file_path will be generated from the file_dir and the form.
Yields:
str: A line of text representing one result of the search
"""
base_results = read(start, end, 'parsed_line', form, file_dir, file_path)
for result in base_results:
# Handle any of the filters matching
if not all_args:
if tags and set(tags).intersection(set(result.tags)):
yield _coerce_line(result, out_form)
elif text and set(text).intersection(
set(result.message.split(' '))):
yield _coerce_line(result, out_form)
elif not text and not tags:
yield _coerce_line(result, out_form)
# Handle all filters matching
else:
tags_in_tags = False
text_in_message = False
if tags and set(tags).issubset(set(result.tags)):
tags_in_tags = True
elif not tags:
tags_in_tags = True
if text and set(text).issubset(set(result.message.split(' '))):
text_in_message = True
elif not text:
text_in_message = True
if tags_in_tags and text_in_message:
yield _coerce_line(result, out_form)
|
|
"""
Utilities to download NeuroImaging datasets
Author: Gael Varoquaux
"""
import fnmatch
import glob
import json
import os
import re
import warnings
import nibabel as nib
import numpy as np
import pandas as pd
from nilearn.datasets.utils import (_fetch_file,
_fetch_files,
_get_dataset_dir,
_uncompress_file,
)
from scipy.io import loadmat
from scipy.io.matlab.miobase import MatReadError
from sklearn.datasets.base import Bunch
from nistats.utils import _check_events_file_uses_tab_separators
SPM_AUDITORY_DATA_FILES = ["fM00223/fM00223_%03i.img" % index
for index in range(4, 100)]
SPM_AUDITORY_DATA_FILES.append("sM00223/sM00223_002.img")
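# 96 functional volumes (fM00223_004 through fM00223_099) plus one
# structural scan.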
def _check_import_boto3(module_name):
"""Helper function which checks boto3 is installed or not
If not installed raises an ImportError with user friendly
information.
"""
try:
module = __import__(module_name)
except ImportError:
info = "Please install boto3 to download openneuro datasets."
raise ImportError("Module {0} cannot be found. {1} "
.format(module_name, info))
return module
def fetch_bids_langloc_dataset(data_dir=None, verbose=1):
"""Download language localizer example bids dataset.
Parameters
----------
    data_dir: string, optional
        Path to store the downloaded dataset. If None, employ the nilearn
        datasets default download directory.
verbose: int, optional
verbosity level (0 means no message).
Returns
-------
data_dir: string
Path to downloaded dataset
downloaded_files: list of string
Absolute paths of downloaded files on disk
"""
url = 'https://files.osf.io/v1/resources/9q7dv/providers/osfstorage/5888d9a76c613b01fc6acc4e'
dataset_name = 'bids_langloc_example'
main_folder = 'bids_langloc_dataset'
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
# The files_spec needed for _fetch_files
files_spec = [(main_folder + '.zip', url, {'move': main_folder + '.zip'})]
if not os.path.exists(os.path.join(data_dir, main_folder)):
downloaded_files = _fetch_files(data_dir, files_spec, resume=True,
verbose=verbose)
_uncompress_file(downloaded_files[0])
main_path = os.path.join(data_dir, main_folder)
file_list = [os.path.join(path, f) for
path, dirs, files in os.walk(main_path) for f in files]
return os.path.join(data_dir, main_folder), sorted(file_list)
def fetch_openneuro_dataset_index(
data_dir=None, dataset_version='ds000030_R1.0.4', verbose=1):
"""Download openneuro bids dataset index
Downloading the index allows to explore the dataset directories
to select specific files to download. The index is a sorted list of urls.
Note: This function requires boto3 to be installed.
Parameters
----------
data_dir: string, optional
Path to store the downloaded dataset. If None, use the default
nilearn download directory.
dataset_version: string, optional
dataset version name. Assumes it is of the form [name]_[version].
verbose: int, optional
verbosity level (0 means no message).
Returns
-------
urls_path: string
Path to downloaded dataset index
urls: list of string
Sorted list of dataset directories
"""
from botocore.handlers import disable_signing
boto3 = _check_import_boto3("boto3")
data_prefix = '{}/{}/uncompressed'.format(
dataset_version.split('_')[0], dataset_version)
data_dir = _get_dataset_dir(data_prefix, data_dir=data_dir,
verbose=verbose)
# First we download the url list from the uncompressed dataset version
urls_path = os.path.join(data_dir, 'urls.json')
urls = []
if not os.path.exists(urls_path):
def get_url(endpoint_url, bucket_name, file_key):
return '{}/{}/{}'.format(endpoint_url, bucket_name, file_key)
resource = boto3.resource('s3')
resource.meta.client.meta.events.register('choose-signer.s3.*',
disable_signing)
bucket = resource.Bucket('openneuro')
for obj in bucket.objects.filter(Prefix=data_prefix):
# get url of files (keys of directories end with '/')
if obj.key[-1] != '/':
urls.append(
get_url(bucket.meta.client.meta.endpoint_url,
bucket.name, obj.key))
urls = sorted(urls)
with open(urls_path, 'w') as json_file:
json.dump(urls, json_file)
else:
with open(urls_path, 'r') as json_file:
urls = json.load(json_file)
return urls_path, urls
def select_from_index(urls, inclusion_filters=[], exclusion_filters=[],
n_subjects=None):
"""Select subset of urls with given filters.
Parameters
----------
urls: list of str
List of dataset urls obtained from index download
inclusion_filters: list of str, optional
List of unix shell-style wildcard strings
that will be used to filter the url list.
If a filter matches the url it is retained for download.
Multiple filters are combined like a logical "and",
creating a more restrictive query.
Inclusion and exclusion filters apply together.
For example the filter '*task-rest*' would keep only urls
that contain the 'task-rest' string.
exclusion_filters: list of str, optional
List of unix shell-style wildcard strings
that will be used to filter the url list.
If a filter matches the url it is discarded from the download.
Multiple filters are combined like a logical "and",
creating a more restrictive query.
Inclusion and exclusion filters apply together.
For example the filter '*task-rest*' would discard all urls
that contain the 'task-rest' string.
n_subjects: int, optional
number of subjects to download from the dataset. All by default.
Returns
-------
urls: list of string
Sorted list of filtered dataset directories
"""
# We apply filters to the urls
for exclusion in exclusion_filters:
urls = [url for url in urls if not fnmatch.fnmatch(url, exclusion)]
for inclusion in inclusion_filters:
urls = [url for url in urls if fnmatch.fnmatch(url, inclusion)]
# subject selection filter
# from the url list we infer all available subjects like 'sub-xxx/'
subject_regex = 'sub-[a-zA-Z0-9]*[_./]'
def infer_subjects(urls):
subjects = set()
for url in urls:
if 'sub-' in url:
subjects.add(re.search(subject_regex, url).group(0)[:-1])
return sorted(subjects)
# We get a list of subjects (for the moment the first n subjects)
selected_subjects = set(infer_subjects(urls)[:n_subjects])
# We exclude urls of subjects not selected
urls = [url for url in urls if 'sub-' not in url or
re.search(subject_regex, url).group(0)[:-1] in selected_subjects]
return urls
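# Illustrative sketch (hypothetical urls; not part of the original module):
# inclusion and exclusion filters are fnmatch patterns combined with "and"
# logic, followed by the subject cap.
def _example_select_from_index():
    urls = [
        's3/ds000030/sub-01/func/sub-01_task-rest_bold.nii.gz',
        's3/ds000030/sub-01/anat/sub-01_T1w.nii.gz',
        's3/ds000030/sub-02/func/sub-02_task-rest_bold.nii.gz',
    ]
    # keep resting-state files, drop subject 2, cap at one subject
    return select_from_index(urls,
                             inclusion_filters=['*task-rest*'],
                             exclusion_filters=['*sub-02*'],
                             n_subjects=1)  # -> the sub-01 task-rest url only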
def fetch_openneuro_dataset(
urls=None, data_dir=None, dataset_version='ds000030_R1.0.4',
verbose=1):
"""Download openneuro bids dataset.
Note: This function requires boto3 to be installed.
Parameters
----------
urls: list of string, optional
Openneuro url list of dataset files to download. If not specified,
all files of the specified dataset will be downloaded.
data_dir: string, optional
Path to store the downloaded dataset. If None, use the default
nilearn download directory.
dataset_version: string, optional
dataset version name. Assumes it is of the form [name]_[version].
verbose: int, optional
verbosity level (0 means no message).
Returns
-------
data_dir: string
Path to downloaded dataset
downloaded_files: list of string
Absolute paths of downloaded files on disk
"""
boto3 = _check_import_boto3("boto3")
data_prefix = '{}/{}/uncompressed'.format(
dataset_version.split('_')[0], dataset_version)
data_dir = _get_dataset_dir(data_prefix, data_dir=data_dir,
verbose=verbose)
# if urls are not specified we download the complete dataset index
if urls is None:
_, urls = fetch_openneuro_dataset_index(
data_dir=data_dir, dataset_version=dataset_version, verbose=verbose)
# The files_spec needed for _fetch_files
files_spec = []
files_dir = []
for url in urls:
url_path = url.split(data_prefix + '/')[1]
file_dir = os.path.join(data_dir, url_path)
files_spec.append((os.path.basename(file_dir), url, {}))
files_dir.append(os.path.dirname(file_dir))
# download the files
downloaded = []
for file_spec, file_dir in zip(files_spec, files_dir):
# Timeout errors are common with the S3 connection, so retry a few
# times to avoid failing the whole dataset download on a transient
# instability
success = False
download_attempts = 4
while download_attempts > 0 and not success:
try:
downloaded_files = _fetch_files(
file_dir, [file_spec], resume=True, verbose=verbose)
downloaded += downloaded_files
success = True
except Exception:
download_attempts -= 1
if not success:
raise Exception('multiple failures downloading %s' % file_spec[1])
return data_dir, sorted(downloaded)
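# Illustrative end-to-end sketch (requires boto3 and network access; not part
# of the original module): fetch only the resting-state files of two subjects.
def _example_fetch_openneuro_subset():
    _, urls = fetch_openneuro_dataset_index()
    urls = select_from_index(urls,
                             inclusion_filters=['*task-rest*'],
                             n_subjects=2)
    return fetch_openneuro_dataset(urls=urls)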
def _make_events_file_localizer_first_level(events_file):
""" Makes the first-level localizer fMRI dataset events file
BIDS compliant. Overwrites the original file.
Adds headers in first row.
Removes first column (spurious data).
Uses Tab character as value separator.
Parameters
----------
events_file: string
path to the localizer_first_level dataset's events file.
Returns
-------
None
"""
events = pd.read_csv(events_file, sep=' ', header=None, index_col=None,
names=['session', 'trial_type', 'onset'],
)
events.drop(labels='session', axis=1, inplace=True)
# duration is required in BIDS specification
events['duration'] = np.ones_like(events.onset)
# if events_file is an open file handle, reset the cursor to the beginning.
if hasattr(events_file, 'read') or hasattr(events_file, 'write'):
events_file.seek(0)
events.to_csv(events_file, sep='\t', index=False)
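# Illustrative sketch (hypothetical trial types and onsets; not part of the
# original module): the rewritten file is a BIDS-style tab-separated table
# equivalent to a frame like this one.
def _example_bids_events_frame():
    return pd.DataFrame({'trial_type': ['visual', 'audio'],
                         'onset': [0.0, 2.4],
                         'duration': [1.0, 1.0]})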
def fetch_localizer_first_level(data_dir=None, verbose=1):
""" Download a first-level localizer fMRI dataset
Parameters
----------
data_dir: string
directory where data should be downloaded and unpacked.
Returns
-------
data: sklearn.datasets.base.Bunch
dictionary-like object, with the keys:
epi_img: the input 4D image
events: a csv file describing the paradigm
"""
url = 'ftp://ftp.cea.fr/pub/dsv/madic/download/nipy'
dataset_name = "localizer_first_level"
files = dict(epi_img="s12069_swaloc1_corr.nii.gz",
events="localizer_paradigm.csv")
# The options needed for _fetch_files
options = [(filename, os.path.join(url, filename), {})
for _, filename in sorted(files.items())]
data_dir = _get_dataset_dir(dataset_name, data_dir=data_dir,
verbose=verbose)
sub_files = _fetch_files(data_dir, options, resume=True,
verbose=verbose)
params = dict(zip(sorted(files.keys()), sub_files))
try:
_check_events_file_uses_tab_separators(params['events'])
except ValueError:
_make_events_file_localizer_first_level(events_file=params['events'])
return Bunch(**params)
def _download_spm_auditory_data(data_dir, subject_dir, subject_id):
print("Data absent, downloading...")
url = ("http://www.fil.ion.ucl.ac.uk/spm/download/data/MoAEpilot/"
"MoAEpilot.zip")
archive_path = os.path.join(subject_dir, os.path.basename(url))
_fetch_file(url, subject_dir)
try:
_uncompress_file(archive_path)
except Exception:
print("Archive corrupted, trying to download it again.")
return fetch_spm_auditory(data_dir=data_dir, data_name="",
subject_id=subject_id)
def _prepare_downloaded_spm_auditory_data(subject_dir):
""" Uncompresses downloaded spm_auditory dataset and organizes
the data into apprpriate directories.
Parameters
----------
subject_dir: string
Path to subject's data directory.
Returns
-------
_subject_data: sklearn.datasets.base.Bunch
Bunch object containing the data of a single subject
from the SPM Auditory dataset.
"""
subject_data = {}
for file_name in SPM_AUDITORY_DATA_FILES:
file_path = os.path.join(subject_dir, file_name)
if os.path.exists(file_path):
subject_data[file_name] = file_path
else:
print("%s missing from filelist!" % file_name)
return None
_subject_data = {}
_subject_data["func"] = sorted(
[subject_data[x] for x in subject_data.keys()
if re.match("^fM00223_0\d\d\.img$", os.path.basename(x))])
# volumes in this dataset have shape (64, 64, 64, 1); squeeze them to 3D
for x in _subject_data["func"]:
vol = nib.load(x)
if len(vol.shape) == 4:
vol = nib.Nifti1Image(vol.get_data()[:, :, :, 0],
vol.affine)
nib.save(vol, x)
_subject_data["anat"] = [subject_data[x] for x in subject_data.keys()
if re.match("^sM00223_002\.img$",
os.path.basename(x))][0]
# ... same thing for anat
vol = nib.load(_subject_data["anat"])
if len(vol.shape) == 4:
vol = nib.Nifti1Image(vol.get_data()[:, :, :, 0],
vol.affine)
nib.save(vol, _subject_data["anat"])
return Bunch(**_subject_data)
def _make_path_events_file_spm_auditory_data(spm_auditory_data):
"""
Accepts data for spm_auditory dataset as Bunch
and constructs the filepath for its events descriptor file.
Parameters
----------
spm_auditory_data: Bunch
Returns
-------
events_filepath: string
Full path to the events.tsv file for spm_auditory dataset.
"""
events_file_location = os.path.dirname(spm_auditory_data['func'][0])
events_filename = os.path.basename(events_file_location) + '_events.tsv'
events_filepath = os.path.join(events_file_location, events_filename)
return events_filepath
def _make_events_file_spm_auditory_data(events_filepath):
"""
Accepts destination filepath including filename and
creates the events.tsv file for the spm_auditory dataset.
Parameters
----------
events_filepath: string
The path where the events file will be created;
Returns
-------
None
"""
tr = 7.
epoch_duration = 6 * tr # duration in seconds
conditions = ['rest', 'active'] * 8
n_blocks = len(conditions)
duration = epoch_duration * np.ones(n_blocks)
onset = np.linspace(0, (n_blocks - 1) * epoch_duration, n_blocks)
events = pd.DataFrame(
{'onset': onset, 'duration': duration, 'trial_type': conditions})
events.to_csv(events_filepath, sep='\t', index=False,
columns=['onset', 'duration', 'trial_type'])
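# Worked example of the block design above (not part of the original module):
# with tr = 7 s each epoch lasts 6 * 7 = 42 s, so the 16 alternating
# rest/active blocks start at 0, 42, 84, ..., 630 seconds.
def _example_spm_auditory_onsets():
    onsets = np.linspace(0, 15 * 42.0, 16)
    assert onsets[1] == 42.0 and onsets[-1] == 630.0
    return onsets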
def fetch_spm_auditory(data_dir=None, data_name='spm_auditory',
subject_id="sub001", verbose=1):
"""Function to fetch SPM auditory single-subject data.
Parameters
----------
data_dir: string
Path of the data directory. Used to force data storage in a specified
location. If the data is already present there, it is simply globbed.
Returns
-------
data: sklearn.datasets.base.Bunch
Dictionary-like object, the interest attributes are:
- 'func': string list. Paths to functional images
- 'anat': string list. Path to anat image
References
----------
:download:
http://www.fil.ion.ucl.ac.uk/spm/data/auditory/
"""
data_dir = _get_dataset_dir(data_name, data_dir=data_dir,
verbose=verbose)
subject_dir = os.path.join(data_dir, subject_id)
if not os.path.exists(subject_dir):
_download_spm_auditory_data(data_dir, subject_dir, subject_id)
spm_auditory_data = _prepare_downloaded_spm_auditory_data(subject_dir)
try:
spm_auditory_data['events']
except KeyError:
events_filepath = _make_path_events_file_spm_auditory_data(
spm_auditory_data)
if not os.path.isfile(events_filepath):
_make_events_file_spm_auditory_data(events_filepath)
spm_auditory_data['events'] = events_filepath
return spm_auditory_data
def _get_func_data_spm_multimodal(subject_dir, session, _subject_data):
session_func = sorted(glob.glob(
os.path.join(
subject_dir,
("fMRI/Session%i/fMETHODS-000%i-*-01.img" % (
session, session + 4)
)
)
))
if len(session_func) < 390:
print("Missing %i functional scans for session %i." % (
390 - len(session_func), session))
return None
_subject_data['func%i' % (session)] = session_func
return _subject_data
def _get_session_trials_spm_multimodal(subject_dir, session, _subject_data):
sess_trials = os.path.join(
subject_dir,
"fMRI/trials_ses%i.mat" % (session))
if not os.path.isfile(sess_trials):
print("Missing session file: %s" % sess_trials)
return None
_subject_data['trials_ses%i' % (session)] = sess_trials
return _subject_data
def _get_anatomical_data_spm_multimodal(subject_dir, _subject_data):
anat = os.path.join(subject_dir, "sMRI/smri.img")
if not os.path.isfile(anat):
print("Missing structural image.")
return None
_subject_data["anat"] = anat
return _subject_data
def _glob_spm_multimodal_fmri_data(subject_dir):
"""glob data from subject_dir."""
_subject_data = {'slice_order': 'descending'}
for session in range(1, 3):
# glob func data for session
_subject_data = _get_func_data_spm_multimodal(subject_dir, session, _subject_data)
if not _subject_data:
return None
# glob trials .mat file
_subject_data = _get_session_trials_spm_multimodal(subject_dir, session, _subject_data)
if not _subject_data:
return None
try:
events = _make_events_file_spm_multimodal_fmri(_subject_data, session)
except MatReadError as mat_err:
warnings.warn('{}. An events.tsv file cannot be generated'.format(str(mat_err)))
else:
events_filepath = _make_events_filepath_spm_multimodal_fmri(_subject_data, session)
events.to_csv(events_filepath, sep='\t', index=False)
_subject_data['events{}'.format(session)] = events_filepath
# glob for anat data
_subject_data = _get_anatomical_data_spm_multimodal(subject_dir, _subject_data)
if not _subject_data:
return None
return Bunch(**_subject_data)
def _download_data_spm_multimodal(data_dir, subject_dir, subject_id):
print("Data absent, downloading...")
urls = [
# fmri
("http://www.fil.ion.ucl.ac.uk/spm/download/data/mmfaces/"
"multimodal_fmri.zip"),
# structural
("http://www.fil.ion.ucl.ac.uk/spm/download/data/mmfaces/"
"multimodal_smri.zip")
]
for url in urls:
archive_path = os.path.join(subject_dir, os.path.basename(url))
_fetch_file(url, subject_dir)
try:
_uncompress_file(archive_path)
except Exception:
print("Archive corrupted, trying to download it again.")
return fetch_spm_multimodal_fmri(data_dir=data_dir,
data_name="",
subject_id=subject_id)
return _glob_spm_multimodal_fmri_data(subject_dir)
def _make_events_filepath_spm_multimodal_fmri(_subject_data, session):
key = 'trials_ses{}'.format(session)
events_file_location = os.path.dirname(_subject_data[key])
events_filename = 'session{}_events.tsv'.format(session)
events_filepath = os.path.join(events_file_location, events_filename)
return events_filepath
def _make_events_file_spm_multimodal_fmri(_subject_data, session):
tr = 2.
timing = loadmat(_subject_data["trials_ses%i" % (session)],
squeeze_me=True, struct_as_record=False)
faces_onsets = timing['onsets'][0].ravel()
scrambled_onsets = timing['onsets'][1].ravel()
onsets = np.hstack((faces_onsets, scrambled_onsets))
onsets *= tr  # because onsets were reported in 'scans' units
conditions = (['faces'] * len(faces_onsets) +
['scrambled'] * len(scrambled_onsets))
duration = np.ones_like(onsets)
events = pd.DataFrame({'trial_type': conditions, 'onset': onsets,
'duration': duration})
return events
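# Worked example of the scans-to-seconds conversion above (hypothetical
# values; not part of the original module): with tr = 2 s, an onset reported
# at scan 7.5 occurs 15 seconds into the session.
def _example_scan_units_to_seconds():
    onsets_in_scans = np.array([0.0, 7.5, 20.0])
    return onsets_in_scans * 2.0  # -> array([ 0., 15., 40.])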
def fetch_spm_multimodal_fmri(data_dir=None, data_name="spm_multimodal_fmri",
subject_id="sub001", verbose=1):
"""Fetcher for Multi-modal Face Dataset.
Parameters
----------
data_dir: string
path of the data directory. Used to force data storage in a specified
location. If the data is already present there, it is simply globbed.
Returns
-------
data: sklearn.datasets.base.Bunch
Dictionary-like object, the interest attributes are:
- 'func1': string list. Paths to functional images for session 1
- 'func2': string list. Paths to functional images for session 2
- 'trials_ses1': string list. Path to onsets file for session 1
- 'trials_ses2': string list. Path to onsets file for session 2
- 'anat': string. Path to anat file
References
----------
:download:
http://www.fil.ion.ucl.ac.uk/spm/data/mmfaces/
"""
data_dir = _get_dataset_dir(data_name, data_dir=data_dir, verbose=verbose)
subject_dir = os.path.join(data_dir, subject_id)
# maybe data_dir already contains the data?
data = _glob_spm_multimodal_fmri_data(subject_dir)
if data is not None:
return data
# No. Download the data
return _download_data_spm_multimodal(data_dir, subject_dir, subject_id)
def fetch_fiac_first_level(data_dir=None, verbose=1):
""" Download a first-level fiac fMRI dataset (2 sessions)
Parameters
----------
data_dir: string
directory where data should be downloaded and unpacked.
"""
data_dir = _get_dataset_dir('fiac_nistats', data_dir=data_dir,
verbose=verbose)
def _glob_fiac_data():
"""glob data from subject_dir."""
_subject_data = {}
subject_dir = os.path.join(data_dir, 'nipy-data-0.2/data/fiac/fiac0')
for session in [1, 2]:
# glob func data for session
session_func = os.path.join(subject_dir, 'run%i.nii.gz' % session)
if not os.path.isfile(session_func):
print('Missing functional scan for session %i.' % session)
return None
_subject_data['func%i' % session] = session_func
# glob design matrix .npz file
sess_dmtx = os.path.join(subject_dir, 'run%i_design.npz' % session)
if not os.path.isfile(sess_dmtx):
print('Missing session file: %s' % sess_dmtx)
return None
_subject_data['design_matrix%i' % session] = sess_dmtx
# glob for mask data
mask = os.path.join(subject_dir, 'mask.nii.gz')
if not os.path.isfile(mask):
print('Missing mask image.')
return None
_subject_data['mask'] = mask
return Bunch(**_subject_data)
# maybe data_dir already contains the data?
data = _glob_fiac_data()
if data is not None:
return data
# No. Download the data
print('Data absent, downloading...')
url = 'http://nipy.sourceforge.net/data-packages/nipy-data-0.2.tar.gz'
archive_path = os.path.join(data_dir, os.path.basename(url))
_fetch_file(url, data_dir)
try:
_uncompress_file(archive_path)
except Exception:
print('Archive corrupted, trying to download it again.')
return fetch_fiac_first_level(data_dir=data_dir)
return _glob_fiac_data()
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import datetime
import json
import operator
import time
import flask
from stackalytics.dashboard import decorators
from stackalytics.dashboard import helpers
from stackalytics.dashboard import parameters
from stackalytics.dashboard import vault
from stackalytics.processor import utils
DEFAULT_DAYS_COUNT = 7
FIRST_MEMBER_DATE = "2012-Jul-18"
blueprint = flask.Blueprint('reports', __name__, url_prefix='/report')
@blueprint.route('/blueprint/<module>/<blueprint_name>')
@decorators.templated()
@decorators.exception_handler()
def blueprint_summary(module, blueprint_name):
blueprint_id = utils.get_blueprint_id(module, blueprint_name)
bpd = vault.get_memory_storage().get_record_by_primary_key(
'bpd:' + blueprint_id)
if not bpd:
flask.abort(404)
return
bpd = helpers.extend_record(bpd)
record_ids = vault.get_memory_storage().get_record_ids_by_blueprint_ids(
[blueprint_id])
activity = [helpers.extend_record(record) for record in
vault.get_memory_storage().get_records(record_ids)]
activity.sort(key=lambda x: x['date'], reverse=True)
return {'blueprint': bpd, 'activity': activity}
def _get_day(timestamp, time_now):
return int((time_now - timestamp) / 60 / 60 / 24)
def _process_stat(data, key, time_now):
if not data:
return None
data = sorted(data, key=operator.itemgetter(key))
days = _get_day(data[0][key], time_now)
chart_data = [0] * (days + 1)
sum_ages = 0
for review in data:
age = time_now - review[key]
sum_ages += age
review[key + '_age'] = utils.make_age_string(age)
chart_data[_get_day(review[key], time_now)] += 1
return {
'reviews': data,
'average': utils.make_age_string(sum_ages / len(data)),
'max': data[0][key + '_age'],
'chart_data': json.dumps(chart_data),
}
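# Illustrative sketch (hypothetical timestamps; not part of the original
# module): _get_day buckets each review into an age-in-days slot, so
# chart_data[k] counts the reviews that are k days old.
def _example_age_buckets():
    time_now = 1000000
    day = 24 * 60 * 60
    timestamps = [time_now - 1 * day, time_now - 1 * day, time_now - 3 * day]
    chart_data = [0] * 4
    for ts in timestamps:
        chart_data[_get_day(ts, time_now)] += 1
    return chart_data  # -> [0, 2, 0, 1]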
@blueprint.route('/reviews/<module>/open')
@decorators.templated()
@decorators.exception_handler()
def open_reviews(module):
memory_storage_inst = vault.get_memory_storage()
time_now = int(time.time())
module_id_index = vault.get_vault()['module_id_index']
module = module.lower()
if module not in module_id_index:
flask.abort(404)
modules = module_id_index[module]['modules']
review_ids = (memory_storage_inst.get_record_ids_by_modules(modules) &
memory_storage_inst.get_record_ids_by_types(['review']))
waiting_on_reviewer = []
waiting_on_submitter = []
total_open = 0
for review in memory_storage_inst.get_records(review_ids):
if review.status == 'NEW':
total_open += 1
# review.value is the minimum of the votes made for the latest patch
if review.value in [1, 2]:
# CI or an engineer liked this change request; waiting for someone
# to merge it or to vote it down
waiting_on_reviewer.append(helpers.extend_record(review))
elif review.value in [-1, -2]:
# CI or a reviewer does not like this; waiting for the submitter to fix it
waiting_on_submitter.append(helpers.extend_record(review))
else:
# new requests without votes, waiting for CI
pass
return {
'module': module,
'total_open': total_open,
'waiting_on_reviewer': len(waiting_on_reviewer),
'waiting_on_submitter': len(waiting_on_submitter),
'waiting_on_ci': (total_open - len(waiting_on_reviewer) -
len(waiting_on_submitter)),
'reviewer_latest_revision': _process_stat(
waiting_on_reviewer, 'updated_on', time_now),
'reviewer_first_revision': _process_stat(
waiting_on_reviewer, 'date', time_now),
'submitter_latest_revision': _process_stat(
waiting_on_submitter, 'updated_on', time_now),
'submitter_first_revision': _process_stat(
waiting_on_submitter, 'date', time_now),
}
@blueprint.route('/contribution/<module>/<days>')
@decorators.templated()
@decorators.exception_handler()
def contribution(module, days):
return {
'module': module,
'days': days,
'start_date': int(time.time()) - int(days) * 24 * 60 * 60
}
@blueprint.route('/ci/<module>/<days>')
@decorators.templated()
@decorators.exception_handler()
def external_ci(module, days):
if int(days) > 100:
days = 100
return {
'module': module,
'days': days,
'start_date': int(time.time()) - int(days) * 24 * 60 * 60
}
@blueprint.route('/members')
@decorators.exception_handler()
@decorators.templated()
def members():
days = int(flask.request.args.get('days') or DEFAULT_DAYS_COUNT)
all_days = int(time.time() - utils.date_to_timestamp_ext(
FIRST_MEMBER_DATE)) // (24 * 60 * 60) + 1
return {
'days': days,
'all_days': all_days
}
@blueprint.route('/affiliation_changes')
@decorators.exception_handler()
@decorators.templated()
def affiliation_changes():
start_days = str(flask.request.args.get('start_days') or
utils.timestamp_to_date(int(time.time()) -
365 * 24 * 60 * 60))
end_days = str(flask.request.args.get('end_days') or
utils.timestamp_to_date(int(time.time())))
return {
'start_days': start_days,
'end_days': end_days,
}
@blueprint.route('/cores')
@decorators.exception_handler()
@decorators.templated()
def cores():
project_type = parameters.get_single_parameter({}, 'project_type')
return {
'project_type': project_type,
}
def _get_punch_card_data(records):
punch_card_raw = [] # matrix days x hours
for wday in range(7):
punch_card_raw.append([0] * 24)
for record in records:
tt = datetime.datetime.fromtimestamp(record.date).timetuple()
punch_card_raw[tt.tm_wday][tt.tm_hour] += 1
punch_card_data = [] # format for jqplot bubble renderer
for wday in range(7):
for hour in range(24):
v = punch_card_raw[wday][hour]
if v:
punch_card_data.append([hour, 6 - wday, v, v]) # upside down
# add corner points, otherwise the chart doesn't know its bounds
if punch_card_raw[0][0] == 0:
punch_card_data.append([0, 0, 0, 0])
if punch_card_raw[6][23] == 0:
punch_card_data.append([23, 6, 0, 0])
return json.dumps(punch_card_data)
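# Illustrative sketch (hypothetical values; not part of the original module):
# each bubble is [hour, 6 - weekday, count, count], so Monday (tm_wday == 0)
# is drawn on the top row of the chart and Sunday on the bottom one.
def _example_punch_card_point():
    hour, wday, count = 14, 0, 5           # 5 events on Monday at 14:00
    return [hour, 6 - wday, count, count]  # -> [14, 6, 5, 5]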
def _get_activity_summary(record_ids):
memory_storage_inst = vault.get_memory_storage()
record_ids_by_type = memory_storage_inst.get_record_ids_by_types(
['mark', 'patch', 'email', 'bpd', 'bpc', 'ci'])
record_ids &= record_ids_by_type
punch_card_data = _get_punch_card_data(
memory_storage_inst.get_records(record_ids))
return {
'punch_card_data': punch_card_data,
}
@blueprint.route('/users/<user_id>')
@decorators.templated()
@decorators.exception_handler()
def user_activity(user_id):
user = vault.get_user_from_runtime_storage(user_id)
if not user:
flask.abort(404)
user = helpers.extend_user(user)
memory_storage_inst = vault.get_memory_storage()
result = _get_activity_summary(
memory_storage_inst.get_record_ids_by_user_ids([user_id]))
result['user'] = user
return result
@blueprint.route('/companies/<company>')
@decorators.templated()
@decorators.exception_handler()
def company_activity(company):
memory_storage_inst = vault.get_memory_storage()
original_name = memory_storage_inst.get_original_company_name(company)
result = _get_activity_summary(
memory_storage_inst.get_record_ids_by_companies([original_name]))
result['company_name'] = original_name
return result
@blueprint.route('/record/<path:record_id>')
@decorators.templated()
@decorators.exception_handler()
def record(record_id):
memory_storage_inst = vault.get_memory_storage()
record_obj = memory_storage_inst.get_record_by_primary_key(record_id)
if not record_obj:
flask.abort(404)
result = dict(record=helpers.get_activity([record_obj], 0, 1)[0])
return result
@blueprint.route('/activity')
@decorators.templated()
@decorators.exception_handler()
def activity():
pass
@blueprint.route('/large_commits')
@decorators.response()
@decorators.jsonify('commits')
@decorators.exception_handler()
@decorators.record_filter()
def get_commit_report(records, **kwargs):
loc_threshold = int(flask.request.args.get('loc_threshold') or 1000)
response = []
for record in records:
if record.record_type == 'commit' and record.loc > loc_threshold:
ext_record = vault.extend_record(record)
nr = dict([(k, ext_record[k])
for k in ['loc', 'subject', 'module', 'primary_key',
'change_id']
if k in ext_record])
response.append(nr)
return response
@blueprint.route('/single_plus_two_reviews')
@decorators.response()
@decorators.jsonify()
@decorators.exception_handler()
@decorators.record_filter(ignore='metric')
def get_single_plus_two_reviews_report(records, **kwargs):
memory_storage_inst = vault.get_memory_storage()
plus_twos = collections.defaultdict(list)
for record in records:
if record['record_type'] != 'mark':
continue
if (record['branch'] == 'master' and
record['type'] == 'Code-Review' and record['value'] == +2):
review_id = record['review_id']
review = memory_storage_inst.get_record_by_primary_key(review_id)
if review and review['status'] == 'MERGED':
plus_twos[review_id].append(record)
response = []
for review_id in plus_twos.keys():
if len(plus_twos[review_id]) < 2:
mark = plus_twos[review_id][0]
review = memory_storage_inst.get_record_by_primary_key(
mark['review_id'])
response.append({'review_by': review['user_id'],
'mark_by': mark['user_id'],
'subject': review['subject'],
'url': review['url'],
'project': review['project']})
return response
@blueprint.route('/driverlog')
@decorators.templated()
@decorators.exception_handler()
def driverlog():
pass
|