#!/usr/bin/env python2
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import itertools
from optparse import OptionParser
import os
import random
import re
import sys
import subprocess
from collections import namedtuple
from sparktestsupport import SPARK_HOME, USER_HOME, ERROR_CODES
from sparktestsupport.shellutils import exit_from_command_with_retcode, run_cmd, rm_r, which
from sparktestsupport.toposort import toposort_flatten, toposort
import sparktestsupport.modules as modules
# -------------------------------------------------------------------------------------------------
# Functions for traversing module dependency graph
# -------------------------------------------------------------------------------------------------
def determine_modules_for_files(filenames):
"""
Given a list of filenames, return the set of modules that contain those files.
If a file is not associated with a more specific submodule, then this method will consider that
file to belong to the 'root' module.
>>> sorted(x.name for x in determine_modules_for_files(["python/pyspark/a.py", "sql/core/foo"]))
['pyspark-core', 'sql']
>>> [x.name for x in determine_modules_for_files(["file_not_matched_by_any_subproject"])]
['root']
"""
changed_modules = set()
for filename in filenames:
matched_at_least_one_module = False
for module in modules.all_modules:
if module.contains_file(filename):
changed_modules.add(module)
matched_at_least_one_module = True
if not matched_at_least_one_module:
changed_modules.add(modules.root)
return changed_modules
def identify_changed_files_from_git_commits(patch_sha, target_branch=None, target_ref=None):
"""
Given a git commit and a target branch or ref, return the list of files changed in the
diff; this list can then be used to determine which modules' tests should be run.
>>> [x.name for x in determine_modules_for_files( \
identify_changed_files_from_git_commits("fc0a1475ef", target_ref="5da21f07"))]
['graphx']
>>> 'root' in [x.name for x in determine_modules_for_files( \
identify_changed_files_from_git_commits("50a0496a43", target_ref="6765ef9"))]
True
"""
if target_branch is None and target_ref is None:
raise AttributeError("must specify either target_branch or target_ref")
elif target_branch is not None and target_ref is not None:
raise AttributeError("must specify either target_branch or target_ref, not both")
if target_branch is not None:
diff_target = target_branch
run_cmd(['git', 'fetch', 'origin', str(target_branch+':'+target_branch)])
else:
diff_target = target_ref
raw_output = subprocess.check_output(['git', 'diff', '--name-only', patch_sha, diff_target],
universal_newlines=True)
# Remove any empty strings
return [f for f in raw_output.split('\n') if f]
def setup_test_environ(environ):
print("[info] Setup the following environment variables for tests: ")
for (k, v) in environ.items():
print("%s=%s" % (k, v))
os.environ[k] = v
def determine_modules_to_test(changed_modules):
"""
Given a set of modules that have changed, compute the transitive closure of those modules'
dependent modules in order to determine the set of modules that should be tested.
Returns a topologically-sorted list of modules (ties are broken by sorting on module names).
>>> [x.name for x in determine_modules_to_test([modules.root])]
['root']
>>> [x.name for x in determine_modules_to_test([modules.build])]
['root']
>>> [x.name for x in determine_modules_to_test([modules.graphx])]
['graphx', 'examples']
>>> x = [x.name for x in determine_modules_to_test([modules.sql])]
>>> x # doctest: +NORMALIZE_WHITESPACE
['sql', 'hive', 'mllib', 'sql-kafka-0-10', 'examples', 'hive-thriftserver',
'pyspark-sql', 'repl', 'sparkr', 'pyspark-mllib', 'pyspark-ml']
"""
modules_to_test = set()
for module in changed_modules:
modules_to_test = modules_to_test.union(determine_modules_to_test(module.dependent_modules))
modules_to_test = modules_to_test.union(set(changed_modules))
# If we need to run all of the tests, then we should short-circuit and return 'root'
if modules.root in modules_to_test:
return [modules.root]
return toposort_flatten(
{m: set(m.dependencies).intersection(modules_to_test) for m in modules_to_test}, sort=True)
def determine_tags_to_exclude(changed_modules):
tags = []
for m in modules.all_modules:
if m not in changed_modules:
tags += m.test_tags
return tags
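# Illustrative note (not part of the original script): the excluded tags are those of
# modules that did *not* change. For example, if only 'graphx' changed, a Hive-specific
# tag such as "org.apache.spark.tags.ExtendedHiveTest" would land in the exclusion list,
# while the tags of the changed modules stay enabled.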
# -------------------------------------------------------------------------------------------------
# Functions for working with subprocesses and shell tools
# -------------------------------------------------------------------------------------------------
def determine_java_executable():
"""Will return the path of the java executable that will be used by Spark's
tests or `None`"""
# Any changes in the way that Spark's build detects java must be reflected
# here. Currently the build looks for $JAVA_HOME/bin/java then falls back to
# the `java` executable on the path
java_home = os.environ.get("JAVA_HOME")
# check if there is an executable at $JAVA_HOME/bin/java
java_exe = which(os.path.join(java_home, "bin", "java")) if java_home else None
# if the java_exe wasn't set, check for a `java` version on the $PATH
return java_exe if java_exe else which("java")
JavaVersion = namedtuple('JavaVersion', ['major', 'minor', 'patch'])
def determine_java_version(java_exe):
"""Given a valid java executable will return its version in named tuple format
with accessors '.major', '.minor', '.patch', '.update'"""
raw_output = subprocess.check_output([java_exe, "-version"],
stderr=subprocess.STDOUT,
universal_newlines=True)
raw_output_lines = raw_output.split('\n')
# find raw version string, e.g. 'java version "1.8.0_25"'
raw_version_str = next(x for x in raw_output_lines if " version " in x)
match = re.search(r'(\d+)\.(\d+)\.(\d+)', raw_version_str)
major = int(match.group(1))
minor = int(match.group(2))
patch = int(match.group(3))
return JavaVersion(major, minor, patch)
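# Illustrative sketch (not part of the original script): on a JDK 8 install,
# `java -version` typically reports a line like 'java version "1.8.0_25"', which the
# regex above reduces to its first three numeric groups:
#
#     determine_java_version("/usr/bin/java")  # -> JavaVersion(major=1, minor=8, patch=0)
#
# The path shown is only an example; any executable found by determine_java_executable()
# is parsed the same way.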
# -------------------------------------------------------------------------------------------------
# Functions for running the other build and test scripts
# -------------------------------------------------------------------------------------------------
def set_title_and_block(title, err_block):
os.environ["CURRENT_BLOCK"] = str(ERROR_CODES[err_block])
line_str = '=' * 72
print('')
print(line_str)
print(title)
print(line_str)
def run_apache_rat_checks():
set_title_and_block("Running Apache RAT checks", "BLOCK_RAT")
run_cmd([os.path.join(SPARK_HOME, "dev", "check-license")])
def run_scala_style_checks():
set_title_and_block("Running Scala style checks", "BLOCK_SCALA_STYLE")
run_cmd([os.path.join(SPARK_HOME, "dev", "lint-scala")])
def run_java_style_checks():
set_title_and_block("Running Java style checks", "BLOCK_JAVA_STYLE")
run_cmd([os.path.join(SPARK_HOME, "dev", "lint-java")])
def run_python_style_checks():
set_title_and_block("Running Python style checks", "BLOCK_PYTHON_STYLE")
run_cmd([os.path.join(SPARK_HOME, "dev", "lint-python")])
def run_sparkr_style_checks():
set_title_and_block("Running R style checks", "BLOCK_R_STYLE")
if which("R"):
# The R style check should be executed after `install-dev.sh`, since
# warnings about `no visible global function definition` appear without
# the installation. SEE ALSO: SPARK-9121.
run_cmd([os.path.join(SPARK_HOME, "dev", "lint-r")])
else:
print("Ignoring SparkR style check as R was not found in PATH")
def build_spark_documentation():
set_title_and_block("Building Spark Documentation", "BLOCK_DOCUMENTATION")
os.environ["PRODUCTION"] = "1 jekyll build"
os.chdir(os.path.join(SPARK_HOME, "docs"))
jekyll_bin = which("jekyll")
if not jekyll_bin:
print("[error] Cannot find a version of `jekyll` on the system; please",
" install one and retry to build documentation.")
sys.exit(int(os.environ.get("CURRENT_BLOCK", 255)))
else:
run_cmd([jekyll_bin, "build"])
os.chdir(SPARK_HOME)
def get_zinc_port():
"""
Get a randomized port on which to start Zinc
"""
return random.randrange(3030, 4030)
def kill_zinc_on_port(zinc_port):
"""
Kill the Zinc process running on the given port, if one exists.
"""
cmd = ("/usr/sbin/lsof -P |grep %s | grep LISTEN "
"| awk '{ print $2; }' | xargs kill") % zinc_port
subprocess.check_call(cmd, shell=True)
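# For illustration only (assuming zinc_port == 3030), the shell pipeline assembled
# above expands to:
#
#     /usr/sbin/lsof -P |grep 3030 | grep LISTEN | awk '{ print $2; }' | xargs kill
#
# i.e. list open files with numeric ports, keep LISTEN entries mentioning the port,
# extract the PID column, and kill those processes.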
def exec_maven(mvn_args=()):
"""Will call Maven in the current directory with the list of mvn_args passed
in and returns the subprocess for any further processing"""
zinc_port = get_zinc_port()
os.environ["ZINC_PORT"] = "%s" % zinc_port
zinc_flag = "-DzincPort=%s" % zinc_port
flags = [os.path.join(SPARK_HOME, "build", "mvn"), "--force", zinc_flag]
run_cmd(flags + mvn_args)
kill_zinc_on_port(zinc_port)
def exec_sbt(sbt_args=()):
"""Will call SBT in the current directory with the list of mvn_args passed
in and returns the subprocess for any further processing"""
sbt_cmd = [os.path.join(SPARK_HOME, "build", "sbt")] + sbt_args
sbt_output_filter = re.compile(b"^.*[info].*Resolving" + b"|" +
b"^.*[warn].*Merging" + b"|" +
b"^.*[info].*Including")
# NOTE: echo "q" is needed because sbt on encountering a build file
# with failure (either resolution or compilation) prompts the user for
# input either q, r, etc to quit or retry. This echo is there to make it
# not block.
echo_proc = subprocess.Popen(["echo", "\"q\n\""], stdout=subprocess.PIPE)
sbt_proc = subprocess.Popen(sbt_cmd,
stdin=echo_proc.stdout,
stdout=subprocess.PIPE)
echo_proc.wait()
for line in iter(sbt_proc.stdout.readline, b''):
if not sbt_output_filter.match(line):
print(line, end='')
retcode = sbt_proc.wait()
if retcode != 0:
exit_from_command_with_retcode(sbt_cmd, retcode)
def get_hadoop_profiles(hadoop_version):
"""
For the given Hadoop version tag, return a list of Maven/SBT profile flags for
building and testing against that Hadoop version.
"""
sbt_maven_hadoop_profiles = {
"hadoop2.6": ["-Phadoop-2.6"],
"hadoop2.7": ["-Phadoop-2.7"],
}
if hadoop_version in sbt_maven_hadoop_profiles:
return sbt_maven_hadoop_profiles[hadoop_version]
else:
print("[error] Could not find", hadoop_version, "in the list. Valid options",
" are", sbt_maven_hadoop_profiles.keys())
sys.exit(int(os.environ.get("CURRENT_BLOCK", 255)))
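# Illustrative usage (not part of the original script):
#
#     get_hadoop_profiles("hadoop2.7")  # -> ["-Phadoop-2.7"]
#
# Any other tag prints the list of valid options and exits with the current
# error-block code.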
def build_spark_maven(hadoop_version):
# Enable all of the profiles for the build:
build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
mvn_goals = ["clean", "package", "-DskipTests"]
profiles_and_goals = build_profiles + mvn_goals
print("[info] Building Spark (w/Hive 1.2.1) using Maven with these arguments: ",
" ".join(profiles_and_goals))
exec_maven(profiles_and_goals)
def build_spark_sbt(hadoop_version):
# Enable all of the profiles for the build:
build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
sbt_goals = ["test:package", # Build test jars as some tests depend on them
"streaming-kafka-0-8-assembly/assembly",
"streaming-flume-assembly/assembly",
"streaming-kinesis-asl-assembly/assembly"]
profiles_and_goals = build_profiles + sbt_goals
print("[info] Building Spark (w/Hive 1.2.1) using SBT with these arguments: ",
" ".join(profiles_and_goals))
exec_sbt(profiles_and_goals)
def build_spark_unidoc_sbt(hadoop_version):
set_title_and_block("Building Unidoc API Documentation", "BLOCK_DOCUMENTATION")
# Enable all of the profiles for the build:
build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
sbt_goals = ["unidoc"]
profiles_and_goals = build_profiles + sbt_goals
print("[info] Building Spark unidoc (w/Hive 1.2.1) using SBT with these arguments: ",
" ".join(profiles_and_goals))
exec_sbt(profiles_and_goals)
def build_spark_assembly_sbt(hadoop_version):
# Enable all of the profiles for the build:
build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
sbt_goals = ["assembly/package"]
profiles_and_goals = build_profiles + sbt_goals
print("[info] Building Spark assembly (w/Hive 1.2.1) using SBT with these arguments: ",
" ".join(profiles_and_goals))
exec_sbt(profiles_and_goals)
# Note that we skip the Unidoc build only if Hadoop 2.6 is explicitly set in this SBT build.
# For an unknown reason, SBT and Unidoc resolve dependencies differently, and the
# documentation build fails on a specific machine & environment in Jenkins that we were
# unable to reproduce. Please see SPARK-20343. This is a band-aid fix that should be
# removed in the future.
is_hadoop_version_2_6 = os.environ.get("AMPLAB_JENKINS_BUILD_PROFILE") == "hadoop2.6"
if not is_hadoop_version_2_6:
# Make sure that Java and Scala API documentation can be generated
build_spark_unidoc_sbt(hadoop_version)
def build_apache_spark(build_tool, hadoop_version):
"""Will build Spark against Hive v1.2.1 given the passed in build tool (either `sbt` or
`maven`). Defaults to using `sbt`."""
set_title_and_block("Building Spark", "BLOCK_BUILD")
rm_r("lib_managed")
if build_tool == "maven":
build_spark_maven(hadoop_version)
else:
build_spark_sbt(hadoop_version)
def detect_binary_inop_with_mima(hadoop_version):
build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
set_title_and_block("Detecting binary incompatibilities with MiMa", "BLOCK_MIMA")
run_cmd([os.path.join(SPARK_HOME, "dev", "mima")] + build_profiles)
def run_scala_tests_maven(test_profiles):
mvn_test_goals = ["test", "--fail-at-end"]
profiles_and_goals = test_profiles + mvn_test_goals
print("[info] Running Spark tests using Maven with these arguments: ",
" ".join(profiles_and_goals))
exec_maven(profiles_and_goals)
def run_scala_tests_sbt(test_modules, test_profiles):
sbt_test_goals = list(itertools.chain.from_iterable(m.sbt_test_goals for m in test_modules))
if not sbt_test_goals:
return
profiles_and_goals = test_profiles + sbt_test_goals
print("[info] Running Spark tests using SBT with these arguments: ",
" ".join(profiles_and_goals))
exec_sbt(profiles_and_goals)
def run_scala_tests(build_tool, hadoop_version, test_modules, excluded_tags):
"""Function to properly execute all tests passed in as a set from the
`determine_test_suites` function"""
set_title_and_block("Running Spark unit tests", "BLOCK_SPARK_UNIT_TESTS")
test_modules = set(test_modules)
test_profiles = get_hadoop_profiles(hadoop_version) + \
list(set(itertools.chain.from_iterable(m.build_profile_flags for m in test_modules)))
if excluded_tags:
test_profiles += ['-Dtest.exclude.tags=' + ",".join(excluded_tags)]
if build_tool == "maven":
run_scala_tests_maven(test_profiles)
else:
run_scala_tests_sbt(test_modules, test_profiles)
def run_python_tests(test_modules, parallelism):
set_title_and_block("Running PySpark tests", "BLOCK_PYSPARK_UNIT_TESTS")
command = [os.path.join(SPARK_HOME, "python", "run-tests")]
if test_modules != [modules.root]:
command.append("--modules=%s" % ','.join(m.name for m in test_modules))
command.append("--parallelism=%i" % parallelism)
run_cmd(command)
def run_python_packaging_tests():
set_title_and_block("Running PySpark packaging tests", "BLOCK_PYSPARK_PIP_TESTS")
command = [os.path.join(SPARK_HOME, "dev", "run-pip-tests")]
run_cmd(command)
def run_build_tests():
set_title_and_block("Running build tests", "BLOCK_BUILD_TESTS")
run_cmd([os.path.join(SPARK_HOME, "dev", "test-dependencies.sh")])
pass
def run_sparkr_tests():
set_title_and_block("Running SparkR tests", "BLOCK_SPARKR_UNIT_TESTS")
if which("R"):
run_cmd([os.path.join(SPARK_HOME, "R", "run-tests.sh")])
else:
print("Ignoring SparkR tests as R was not found in PATH")
def parse_opts():
parser = OptionParser(
prog="run-tests"
)
parser.add_option(
"-p", "--parallelism", type="int", default=4,
help="The number of suites to test in parallel (default %default)"
)
(opts, args) = parser.parse_args()
if args:
parser.error("Unsupported arguments: %s" % ' '.join(args))
if opts.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return opts
def main():
opts = parse_opts()
# Ensure the user home directory (HOME) is valid and is an absolute directory
if not USER_HOME or not os.path.isabs(USER_HOME):
print("[error] Cannot determine your home directory as an absolute path;",
" ensure the $HOME environment variable is set properly.")
sys.exit(1)
os.chdir(SPARK_HOME)
rm_r(os.path.join(SPARK_HOME, "work"))
rm_r(os.path.join(USER_HOME, ".ivy2", "local", "org.apache.spark"))
rm_r(os.path.join(USER_HOME, ".ivy2", "cache", "org.apache.spark"))
os.environ["CURRENT_BLOCK"] = str(ERROR_CODES["BLOCK_GENERAL"])
java_exe = determine_java_executable()
if not java_exe:
print("[error] Cannot find a version of `java` on the system; please",
" install one and retry.")
sys.exit(2)
java_version = determine_java_version(java_exe)
# install SparkR
if which("R"):
run_cmd([os.path.join(SPARK_HOME, "R", "install-dev.sh")])
else:
print("Cannot install SparkR as R was not found in PATH")
if os.environ.get("AMPLAB_JENKINS"):
# if we're on the Amplab Jenkins build servers, set up variables
# to reflect the environment settings
build_tool = os.environ.get("AMPLAB_JENKINS_BUILD_TOOL", "sbt")
hadoop_version = os.environ.get("AMPLAB_JENKINS_BUILD_PROFILE", "hadoop2.6")
test_env = "amplab_jenkins"
# add path for Python3 in Jenkins if we're calling from a Jenkins machine
os.environ["PATH"] = "/home/anaconda/envs/py3k/bin:" + os.environ.get("PATH")
else:
# else we're running locally and can use local settings
build_tool = "sbt"
hadoop_version = os.environ.get("HADOOP_PROFILE", "hadoop2.6")
test_env = "local"
print("[info] Using build tool", build_tool, "with Hadoop profile", hadoop_version,
"under environment", test_env)
changed_modules = None
changed_files = None
if test_env == "amplab_jenkins" and os.environ.get("AMP_JENKINS_PRB"):
target_branch = os.environ["ghprbTargetBranch"]
changed_files = identify_changed_files_from_git_commits("HEAD", target_branch=target_branch)
changed_modules = determine_modules_for_files(changed_files)
excluded_tags = determine_tags_to_exclude(changed_modules)
if not changed_modules:
changed_modules = [modules.root]
excluded_tags = []
print("[info] Found the following changed modules:",
", ".join(x.name for x in changed_modules))
# setup environment variables
# note - the 'root' module does not collect environment variables from all modules,
# because a module's environment variables should only be set when that module has
# actually changed, even when the 'root' module is being run. So here we should use
# changed_modules rather than test_modules.
test_environ = {}
for m in changed_modules:
test_environ.update(m.environ)
setup_test_environ(test_environ)
test_modules = determine_modules_to_test(changed_modules)
# license checks
run_apache_rat_checks()
# style checks
if not changed_files or any(f.endswith(".scala")
or f.endswith("scalastyle-config.xml")
for f in changed_files):
run_scala_style_checks()
if not changed_files or any(f.endswith(".java")
or f.endswith("checkstyle.xml")
or f.endswith("checkstyle-suppressions.xml")
for f in changed_files):
# run_java_style_checks()
pass
if not changed_files or any(f.endswith(".py") for f in changed_files):
run_python_style_checks()
if not changed_files or any(f.endswith(".R") for f in changed_files):
run_sparkr_style_checks()
# determine if docs were changed and if we're inside the amplab environment
# note - the below commented out until *all* Jenkins workers can get `jekyll` installed
# if "DOCS" in changed_modules and test_env == "amplab_jenkins":
# build_spark_documentation()
if any(m.should_run_build_tests for m in test_modules):
run_build_tests()
# spark build
build_apache_spark(build_tool, hadoop_version)
# backwards compatibility checks
if build_tool == "sbt":
# Note: compatibility tests only supported in sbt for now
detect_binary_inop_with_mima(hadoop_version)
# Since we did not build assembly/package before running dev/mima, we need to
# do it here because the tests still rely on it; see SPARK-13294 for details.
build_spark_assembly_sbt(hadoop_version)
# run the test suites
run_scala_tests(build_tool, hadoop_version, test_modules, excluded_tags)
modules_with_python_tests = [m for m in test_modules if m.python_test_goals]
if modules_with_python_tests:
run_python_tests(modules_with_python_tests, opts.parallelism)
run_python_packaging_tests()
if any(m.should_run_r_tests for m in test_modules):
run_sparkr_tests()
def _test():
import doctest
failure_count = doctest.testmod()[0]
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
main()
from collections import defaultdict
from hashlib import sha1
from itertools import chain
from operator import itemgetter
import os
from os import listdir
from funcy import merge, imap, autocurry
from dxr.filters import LINE
from dxr.indexers import (FileToIndex as FileToIndexBase,
TreeToIndex as TreeToIndexBase,
QUALIFIED_LINE_NEEDLE, unsparsify, FuncSig)
from dxr.plugins.clang.condense import condense_file, condense_global
from dxr.plugins.clang.menus import (FunctionRef, VariableRef, TypeRef,
NamespaceRef, NamespaceAliasRef, MacroRef, IncludeRef, TypedefRef)
from dxr.plugins.clang.needles import all_needles
mappings = {
LINE: {
'properties': {
'c_function': QUALIFIED_LINE_NEEDLE,
'c_function_ref': QUALIFIED_LINE_NEEDLE,
'c_function_decl': QUALIFIED_LINE_NEEDLE,
'c_type_ref': QUALIFIED_LINE_NEEDLE,
'c_type_decl': QUALIFIED_LINE_NEEDLE,
'c_type': QUALIFIED_LINE_NEEDLE,
'c_var': QUALIFIED_LINE_NEEDLE,
'c_var_ref': QUALIFIED_LINE_NEEDLE,
'c_var_decl': QUALIFIED_LINE_NEEDLE,
'c_macro': QUALIFIED_LINE_NEEDLE,
'c_macro_ref': QUALIFIED_LINE_NEEDLE,
'c_namespace': QUALIFIED_LINE_NEEDLE,
'c_namespace_ref': QUALIFIED_LINE_NEEDLE,
'c_namespace_alias': QUALIFIED_LINE_NEEDLE,
'c_namespace_alias_ref': QUALIFIED_LINE_NEEDLE,
'c_warning': QUALIFIED_LINE_NEEDLE,
'c_warning_opt': QUALIFIED_LINE_NEEDLE,
'c_call': QUALIFIED_LINE_NEEDLE,
'c_bases': QUALIFIED_LINE_NEEDLE,
'c_derived': QUALIFIED_LINE_NEEDLE,
'c_member': QUALIFIED_LINE_NEEDLE,
'c_overrides': QUALIFIED_LINE_NEEDLE,
# At a base method's site, record all the methods that override
# it. Then we can search for any of those methods and turn up the
# base one:
'c_overridden': QUALIFIED_LINE_NEEDLE
}
}
}
class FileToIndex(FileToIndexBase):
"""C and C++ indexer using clang compiler plugin"""
def __init__(self, path, contents, plugin_name, tree, overrides, overriddens, parents, children, csv_names, temp_folder):
super(FileToIndex, self).__init__(path, contents, plugin_name, tree)
self.overrides = overrides
self.overriddens = overriddens
self.parents = parents
self.children = children
self.condensed = condense_file(temp_folder, path,
overrides, overriddens,
parents, children,
csv_names)
def needles_by_line(self):
return all_needles(
self.condensed,
self.overrides,
self.overriddens,
self.parents,
self.children)
def refs(self):
def getter_or_empty(y):
return lambda x: x.get(y, [])
# Ref subclasses and the thing-getters that provide input to their
# from_condensed() methods:
classes_and_getters = [
(FunctionRef, [getter_or_empty('function'),
kind_getter('decldef', 'function'),
# Refs are not structured much like functions, but
# they have a qualname key, which is all FunctionRef
# requires, so we can just chain kind_getters
# together with other getters.
kind_getter('ref', 'function')]),
(VariableRef, [getter_or_empty('variable'),
kind_getter('ref', 'variable')]),
(TypeRef, [getter_or_empty('type'),
kind_getter('ref', 'type'),
not_kind_getter('decldef', 'function')]),
(TypedefRef, [getter_or_empty('typedef'),
kind_getter('ref', 'typedef')]),
(NamespaceRef, [getter_or_empty('namespace'),
kind_getter('ref', 'namespace')]),
(NamespaceAliasRef, [getter_or_empty('namespace_alias'),
kind_getter('ref', 'namespace_alias')]),
(MacroRef, [getter_or_empty('macro'),
kind_getter('ref', 'macro')]),
(IncludeRef, [getter_or_empty('include')])]
for ref_class, getters in classes_and_getters:
for prop in chain.from_iterable(g(self.condensed) for g in getters):
if 'span' in prop:
start, end = prop['span']
yield (self.char_offset(start.row, start.col),
self.char_offset(end.row, end.col),
ref_class.from_condensed(self.tree, prop))
@unsparsify
def annotations_by_line(self):
icon = "background-image: url('{0}/static/icons/warning.png');".format(
self.tree.config.www_root) # TODO: DRY
getter = itemgetter('msg', 'opt', 'span')
for msg, opt, span in imap(getter, self.condensed.get('warnings', [])):
if opt:
msg = "{0}[{1}]".format(msg, opt)
annotation = {
'title': msg,
'class': "note note-warning",
'style': icon
}
yield annotation, span
def links(self):
"""Yield a section for each class, type, enum, etc., as well as one
for macro definitions.
"""
def get_scopes_to_members():
"""Return a hash of qualified-scope-of-type -> set-of-members."""
ret = defaultdict(list)
for member in chain(self.condensed['function'],
self.condensed['variable']):
try:
scope, _ = member['qualname'].rsplit('::', 1)
except ValueError:
# There was no ::, so this wasn't a member of anything.
pass
else:
ret[scope].append(member)
return ret
scopes_to_members = get_scopes_to_members()
# Spin around the types (enums, classes, unions, etc.):
for type in self.condensed['type']:
if type['name']:
# First, link to the type definition itself:
links = [(type['kind'],
type['name'],
'#%s' % type['span'].start.row)]
# Look up the stuff with that scope in the hash, and spit out
# names and line numbers, sorting by line number.
members = list(scopes_to_members[type['qualname']])
members.sort(key=lambda m: m['span'].start.row)
links.extend(('method' if isinstance(m['type'], FuncSig)
else 'field', # icon
m['name'],
'#%s' % m['span'].start.row)
for m in members if m['name'])
yield 30, type['name'], links
# Add all macros to the macro section:
links = [('macro', t['name'], '#%s' % t['span'].start.row)
for t in self.condensed['macro']]
if links:
yield 100, 'Macros', links
@autocurry
def kind_getter(field, kind, condensed):
"""Reach into a field and filter based on the kind."""
return (ref for ref in condensed.get(field, []) if ref.get('kind') == kind)
@autocurry
def not_kind_getter(field, kind, condensed):
"""Reach into a field and filter out those with given kind."""
return (ref for ref in condensed.get(field, []) if ref.get('kind') != kind)
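# Illustrative note (relies on funcy's autocurry semantics): calling kind_getter or
# not_kind_getter with only (field, kind) returns a partially applied getter that
# still expects the condensed dict, which is how refs() chains them above:
#
#     get_fn_refs = kind_getter('ref', 'function')
#     list(get_fn_refs({'ref': [{'kind': 'function', 'qualname': 'foo()'}]}))
#     # -> [{'kind': 'function', 'qualname': 'foo()'}]
#
# The dict shown is a made-up, minimal stand-in for real condensed output.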
class TreeToIndex(TreeToIndexBase):
def pre_build(self):
self._temp_folder = os.path.join(self.tree.temp_folder,
'plugins',
self.plugin_name)
def environment(self, vars_):
"""Set up environment variables to trigger analysis dumps from clang.
We'll store all the havested metadata in the plugins temporary folder.
"""
tree = self.tree
plugin_folder = os.path.dirname(__file__)
flags = [
'-load', os.path.join(plugin_folder, 'libclang-index-plugin.so'),
'-add-plugin', 'dxr-index',
'-plugin-arg-dxr-index', tree.source_folder
]
flags_str = " ".join(imap('-Xclang {}'.format, flags))
env = {
'CC': "clang %s" % flags_str,
'CXX': "clang++ %s" % flags_str,
'DXR_CLANG_FLAGS': flags_str,
'DXR_CXX_CLANG_OBJECT_FOLDER': tree.object_folder,
'DXR_CXX_CLANG_TEMP_FOLDER': self._temp_folder,
}
env['DXR_CC'] = env['CC']
env['DXR_CXX'] = env['CXX']
return merge(vars_, env)
def post_build(self):
def csv_map():
"""Map input files to the output CSVs corresponding to them.
Return {path sha1: [file names (minus '.csv' extension)]}.
This saves a lot of globbing later, which can add up to hours over
the course of tens of thousands of files, depending on IO speed. An
alternative approach might be a radix tree of folders: less RAM,
more IO. Try that and bench it sometime.
"""
ret = defaultdict(list)
for csv_name in listdir(self._temp_folder):
if csv_name.endswith('.csv'):
path_hash, content_hash, ext = csv_name.split('.')
# Removing ".csv" saves at least 2MB per worker on 700K files:
ret[path_hash].append(csv_name[:-4])
return ret
self._csv_map = csv_map()
self._overrides, self._overriddens, self._parents, self._children = condense_global(self._temp_folder,
chain.from_iterable(self._csv_map.itervalues()))
def file_to_index(self, path, contents):
return FileToIndex(path,
contents,
self.plugin_name,
self.tree,
self._overrides,
self._overriddens,
self._parents,
self._children,
self._csv_map[sha1(path.encode('utf-8')).hexdigest()],
self._temp_folder)
import os
import json
import copy
import logging
import pprint
import stat
import tempfile
import glob
import urlparse
from collections import Iterable
import errno
import shutil
import uuid
import abc
import schema_salad.validate as validate
import schema_salad.schema
from schema_salad.ref_resolver import Loader
import avro.schema
from typing import (Any, AnyStr, Callable, cast, Dict, List, Generator, IO, Set,
Text, Tuple, Union)
from rdflib import URIRef
from rdflib.namespace import RDFS, OWL
from rdflib import Graph
from pkg_resources import resource_stream
from .utils import aslist, get_feature
from .stdfsaccess import StdFsAccess
from .builder import Builder, adjustFileObjs, adjustDirObjs
from .errors import WorkflowException, UnsupportedRequirement
from .pathmapper import PathMapper, abspath, normalizeFilesDirs
_logger = logging.getLogger("cwltool")
supportedProcessRequirements = ["DockerRequirement",
"SchemaDefRequirement",
"EnvVarRequirement",
"ScatterFeatureRequirement",
"SubworkflowFeatureRequirement",
"MultipleInputFeatureRequirement",
"InlineJavascriptRequirement",
"ShellCommandRequirement",
"StepInputExpressionRequirement",
"ResourceRequirement",
"InitialWorkDirRequirement"]
cwl_files = (
"Workflow.yml",
"CommandLineTool.yml",
"CommonWorkflowLanguage.yml",
"Process.yml",
"concepts.md",
"contrib.md",
"intro.md",
"invocation.md")
salad_files = ('metaschema.yml',
'metaschema_base.yml',
'salad.md',
'field_name.yml',
'import_include.md',
'link_res.yml',
'ident_res.yml',
'vocab_res.yml',
'field_name_schema.yml',
'field_name_src.yml',
'field_name_proc.yml',
'ident_res_schema.yml',
'ident_res_src.yml',
'ident_res_proc.yml',
'link_res_schema.yml',
'link_res_src.yml',
'link_res_proc.yml',
'vocab_res_schema.yml',
'vocab_res_src.yml',
'vocab_res_proc.yml')
SCHEMA_CACHE = {} # type: Dict[Text, Tuple[Loader, Union[avro.schema.Names, avro.schema.SchemaParseException], Dict[Text, Any], Loader]]
SCHEMA_FILE = None # type: Dict[Text, Any]
SCHEMA_DIR = None # type: Dict[Text, Any]
SCHEMA_ANY = None # type: Dict[Text, Any]
def get_schema(version):
# type: (Text) -> Tuple[Loader, Union[avro.schema.Names, avro.schema.SchemaParseException], Dict[Text,Any], Loader]
if version in SCHEMA_CACHE:
return SCHEMA_CACHE[version]
cache = {}
version = version.split("#")[-1]
if '.dev' in version:
version = ".".join(version.split(".")[:-1])
for f in cwl_files:
try:
res = resource_stream(__name__, 'schemas/%s/%s' % (version, f))
cache["https://w3id.org/cwl/" + f] = res.read()
res.close()
except IOError:
pass
for f in salad_files:
try:
res = resource_stream(
__name__, 'schemas/%s/salad/schema_salad/metaschema/%s'
% (version, f))
cache["https://w3id.org/cwl/salad/schema_salad/metaschema/"
+ f] = res.read()
res.close()
except IOError:
pass
SCHEMA_CACHE[version] = schema_salad.schema.load_schema(
"https://w3id.org/cwl/CommonWorkflowLanguage.yml", cache=cache)
return SCHEMA_CACHE[version]
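# Illustrative usage (variable names are descriptive, not from the original module;
# the tuple shape follows the type comment above):
#
#     document_loader, avsc_names, schema_metadata, metaschema_loader = get_schema("v1.0")
#
# Results are memoized in SCHEMA_CACHE, so repeated calls for the same version are cheap.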
def shortname(inputid):
# type: (Text) -> Text
d = urlparse.urlparse(inputid)
if d.fragment:
return d.fragment.split(u"/")[-1]
else:
return d.path.split(u"/")[-1]
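# Illustrative examples (hypothetical URIs, not part of the original module): the last
# fragment/path segment is what callers treat as a parameter's short name:
#
#     shortname(u"file:///tools/echo.cwl#main/input_file")  # -> u"input_file"
#     shortname(u"http://example.com/workflows/wc")         # -> u"wc"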
def checkRequirements(rec, supportedProcessRequirements):
# type: (Any, Iterable[Any]) -> None
if isinstance(rec, dict):
if "requirements" in rec:
for r in rec["requirements"]:
if r["class"] not in supportedProcessRequirements:
raise UnsupportedRequirement(u"Unsupported requirement %s" % r["class"])
for d in rec:
checkRequirements(rec[d], supportedProcessRequirements)
if isinstance(rec, list):
for d in rec:
checkRequirements(d, supportedProcessRequirements)
def adjustFilesWithSecondary(rec, op, primary=None):
"""Apply a mapping function to each File path in the object `rec`, propagating
the primary file associated with a group of secondary files.
"""
if isinstance(rec, dict):
if rec.get("class") == "File":
rec["path"] = op(rec["path"], primary=primary)
adjustFilesWithSecondary(rec.get("secondaryFiles", []), op,
primary if primary else rec["path"])
else:
for d in rec:
adjustFilesWithSecondary(rec[d], op)
if isinstance(rec, list):
for d in rec:
adjustFilesWithSecondary(d, op, primary)
def getListing(fs_access, rec):
# type: (StdFsAccess, Dict[Text, Any]) -> None
if "listing" not in rec:
listing = []
loc = rec["location"]
for ld in fs_access.listdir(loc):
if fs_access.isdir(ld):
ent = {u"class": u"Directory",
u"location": ld}
getListing(fs_access, ent)
listing.append(ent)
else:
listing.append({"class": "File", "location": ld})
rec["listing"] = listing
def stageFiles(pm, stageFunc, ignoreWritable=False):
# type: (PathMapper, Callable[..., Any], bool) -> None
for f, p in pm.items():
if not os.path.exists(os.path.dirname(p.target)):
os.makedirs(os.path.dirname(p.target), 0755)
if p.type == "File":
stageFunc(p.resolved, p.target)
elif p.type == "WritableFile" and not ignoreWritable:
shutil.copy(p.resolved, p.target)
elif p.type == "CreateFile" and not ignoreWritable:
with open(p.target, "w") as n:
n.write(p.resolved.encode("utf-8"))
def collectFilesAndDirs(obj, out):
# type: (Union[Dict[Text, Any], List[Dict[Text, Any]]], List[Dict[Text, Any]]) -> None
if isinstance(obj, dict):
if obj.get("class") in ("File", "Directory"):
out.append(obj)
else:
for v in obj.values():
collectFilesAndDirs(v, out)
if isinstance(obj, list):
for l in obj:
collectFilesAndDirs(l, out)
def relocateOutputs(outputObj, outdir, output_dirs, action):
# type: (Union[Dict[Text, Any], List[Dict[Text, Any]]], Text, Set[Text], Text) -> Union[Dict[Text, Any], List[Dict[Text, Any]]]
if action not in ("move", "copy"):
return outputObj
def moveIt(src, dst):
for a in output_dirs:
if src.startswith(a):
if action == "move":
_logger.debug("Moving %s to %s", src, dst)
shutil.move(src, dst)
elif action == "copy":
_logger.debug("Copying %s to %s", src, dst)
shutil.copy(src, dst)
outfiles = [] # type: List[Dict[Text, Any]]
collectFilesAndDirs(outputObj, outfiles)
pm = PathMapper(outfiles, "", outdir, separateDirs=False)
stageFiles(pm, moveIt)
def _check_adjust(f):
f["location"] = "file://" + pm.mapper(f["location"])[1]
return f
adjustFileObjs(outputObj, _check_adjust)
adjustDirObjs(outputObj, _check_adjust)
return outputObj
def cleanIntermediate(output_dirs): # type: (Set[Text]) -> None
for a in output_dirs:
if os.path.exists(a) and empty_subtree(a):
_logger.debug(u"Removing intermediate output directory %s", a)
shutil.rmtree(a, True)
def formatSubclassOf(fmt, cls, ontology, visited):
# type: (Text, Text, Graph, Set[Text]) -> bool
"""Determine if `fmt` is a subclass of `cls`."""
if URIRef(fmt) == URIRef(cls):
return True
if ontology is None:
return False
if fmt in visited:
return False
visited.add(fmt)
uriRefFmt = URIRef(fmt)
for s, p, o in ontology.triples((uriRefFmt, RDFS.subClassOf, None)):
# Find parent classes of `fmt` and search upward
if formatSubclassOf(o, cls, ontology, visited):
return True
for s, p, o in ontology.triples((uriRefFmt, OWL.equivalentClass, None)):
# Find equivalent classes of `fmt` and search horizontally
if formatSubclassOf(o, cls, ontology, visited):
return True
for s, p, o in ontology.triples((None, OWL.equivalentClass, uriRefFmt)):
# Find equivalent classes of `fmt` and search horizontally
if formatSubclassOf(s, cls, ontology, visited):
return True
return False
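# Minimal sketch of the lookup (hypothetical URIs, standard rdflib API):
#
#     from rdflib import Graph, URIRef
#     from rdflib.namespace import RDFS
#     g = Graph()
#     g.add((URIRef("http://example.org/fmt/bam"), RDFS.subClassOf,
#            URIRef("http://example.org/fmt/binary")))
#     formatSubclassOf("http://example.org/fmt/bam",
#                      "http://example.org/fmt/binary", g, set())  # -> True
#
# Equality is checked first, then subClassOf edges upward and equivalentClass edges
# in both directions, with `visited` guarding against cycles.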
def checkFormat(actualFile, inputFormats, ontology):
# type: (Union[Dict[Text, Any], List, Text], Union[List[Text], Text], Graph) -> None
for af in aslist(actualFile):
if "format" not in af:
raise validate.ValidationException(u"Missing required 'format' for File %s" % af)
for inpf in aslist(inputFormats):
if af["format"] == inpf or formatSubclassOf(af["format"], inpf, ontology, set()):
return
raise validate.ValidationException(u"Incompatible file format %s required format(s) %s" % (af["format"], inputFormats))
def fillInDefaults(inputs, job):
# type: (List[Dict[Text, Text]], Dict[Text, Union[Dict[Text, Any], List, Text]]) -> None
for inp in inputs:
if shortname(inp[u"id"]) in job:
pass
elif shortname(inp[u"id"]) not in job and u"default" in inp:
job[shortname(inp[u"id"])] = copy.copy(inp[u"default"])
elif shortname(inp[u"id"]) not in job and inp[u"type"][0] == u"null":
pass
else:
raise validate.ValidationException("Missing input parameter `%s`" % shortname(inp["id"]))
def avroize_type(field_type, name_prefix=""):
# type: (Union[List[Dict[Text, Any]], Dict[Text, Any]], Text) -> Any
"""
Add missing information to a type so that CWL types are valid in schema_salad.
"""
if isinstance(field_type, list):
for f in field_type:
avroize_type(f, name_prefix)
elif isinstance(field_type, dict):
if field_type["type"] in ("enum", "record"):
if "name" not in field_type:
field_type["name"] = name_prefix+Text(uuid.uuid4())
if field_type["type"] == "record":
avroize_type(field_type["fields"], name_prefix)
if field_type["type"] == "array":
avroize_type(field_type["items"], name_prefix)
return field_type
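# Illustrative sketch (hypothetical field; the generated suffix is a random uuid4):
# an anonymous enum gains a name so schema_salad/avro can register it, e.g.
#
#     avroize_type({"type": "enum", "symbols": ["a", "b"]}, name_prefix="colors_")
#     # -> {"type": "enum", "symbols": ["a", "b"], "name": "colors_<uuid4>"}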
class Process(object):
__metaclass__ = abc.ABCMeta
def __init__(self, toolpath_object, **kwargs):
# type: (Dict[Text, Any], **Any) -> None
"""
kwargs:
metadata: tool document metadata
requirements: inherited requirements
hints: inherited hints
loader: schema_salad.ref_resolver.Loader used to load tool document
avsc_names: CWL Avro schema object used to validate document
strict: flag to determine strict validation (fail on unrecognized fields)
"""
self.metadata = kwargs.get("metadata", {}) # type: Dict[Text,Any]
self.names = None # type: avro.schema.Names
global SCHEMA_FILE, SCHEMA_DIR, SCHEMA_ANY # pylint: disable=global-statement
if SCHEMA_FILE is None:
get_schema("v1.0")
SCHEMA_ANY = cast(Dict[Text, Any],
SCHEMA_CACHE["v1.0"][3].idx["https://w3id.org/cwl/salad#Any"])
SCHEMA_FILE = cast(Dict[Text, Any],
SCHEMA_CACHE["v1.0"][3].idx["https://w3id.org/cwl/cwl#File"])
SCHEMA_DIR = cast(Dict[Text, Any],
SCHEMA_CACHE["v1.0"][3].idx["https://w3id.org/cwl/cwl#Directory"])
names = schema_salad.schema.make_avro_schema([SCHEMA_FILE, SCHEMA_DIR, SCHEMA_ANY],
schema_salad.ref_resolver.Loader({}))[0]
if isinstance(names, avro.schema.SchemaParseException):
raise names
else:
self.names = names
self.tool = toolpath_object
self.requirements = kwargs.get("requirements", []) + self.tool.get("requirements", [])
self.hints = kwargs.get("hints", []) + self.tool.get("hints", [])
self.formatgraph = None # type: Graph
if "loader" in kwargs:
self.formatgraph = kwargs["loader"].graph
self.doc_loader = kwargs["loader"]
self.doc_schema = kwargs["avsc_names"]
checkRequirements(self.tool, supportedProcessRequirements)
self.validate_hints(kwargs["avsc_names"], self.tool.get("hints", []),
strict=kwargs.get("strict"))
self.schemaDefs = {} # type: Dict[Text,Dict[Text, Any]]
sd, _ = self.get_requirement("SchemaDefRequirement")
if sd:
sdtypes = sd["types"]
av = schema_salad.schema.make_valid_avro(sdtypes, {t["name"]: t for t in avroize_type(sdtypes)}, set())
for i in av:
self.schemaDefs[i["name"]] = i
avro.schema.make_avsc_object(av, self.names)
# Build record schema from inputs
self.inputs_record_schema = {
"name": "input_record_schema", "type": "record",
"fields": []} # type: Dict[Text, Any]
self.outputs_record_schema = {
"name": "outputs_record_schema", "type": "record",
"fields": []} # type: Dict[Text, Any]
for key in ("inputs", "outputs"):
for i in self.tool[key]:
c = copy.copy(i)
c["name"] = shortname(c["id"])
del c["id"]
if "type" not in c:
raise validate.ValidationException(u"Missing `type` in parameter `%s`" % c["name"])
if "default" in c and "null" not in aslist(c["type"]):
c["type"] = ["null"] + aslist(c["type"])
else:
c["type"] = c["type"]
c["type"] = avroize_type(c["type"], c["name"])
if key == "inputs":
self.inputs_record_schema["fields"].append(c)
elif key == "outputs":
self.outputs_record_schema["fields"].append(c)
try:
self.inputs_record_schema = schema_salad.schema.make_valid_avro(self.inputs_record_schema, {}, set())
avro.schema.make_avsc_object(self.inputs_record_schema, self.names)
except avro.schema.SchemaParseException as e:
raise validate.ValidationException(u"Got error `%s` while processing inputs of %s:\n%s" % (Text(e), self.tool["id"], json.dumps(self.inputs_record_schema, indent=4)))
try:
self.outputs_record_schema = schema_salad.schema.make_valid_avro(self.outputs_record_schema, {}, set())
avro.schema.make_avsc_object(self.outputs_record_schema, self.names)
except avro.schema.SchemaParseException as e:
raise validate.ValidationException(u"Got error `%s` while processing outputs of %s:\n%s" % (Text(e), self.tool["id"], json.dumps(self.outputs_record_schema, indent=4)))
def _init_job(self, joborder, **kwargs):
# type: (Dict[Text, Text], **Any) -> Builder
"""
kwargs:
eval_timeout: javascript evaluation timeout
use_container: do/don't use Docker when DockerRequirement hint provided
make_fs_access: make an FsAccess() object with given basedir
basedir: basedir for FsAccess
docker_outdir: output directory inside docker for this job
docker_tmpdir: tmpdir inside docker for this job
docker_stagedir: stagedir inside docker for this job
outdir: outdir on host for this job
tmpdir: tmpdir on host for this job
stagedir: stagedir on host for this job
select_resources: callback to select compute resources
"""
builder = Builder()
builder.job = cast(Dict[Text, Union[Dict[Text, Any], List,
Text]], copy.deepcopy(joborder))
fillInDefaults(self.tool[u"inputs"], builder.job)
normalizeFilesDirs(builder.job)
# Validate job order
try:
validate.validate_ex(self.names.get_name("input_record_schema", ""), builder.job)
except validate.ValidationException as e:
raise WorkflowException("Error validating input record, " + Text(e))
builder.files = []
builder.bindings = []
builder.schemaDefs = self.schemaDefs
builder.names = self.names
builder.requirements = self.requirements
builder.hints = self.hints
builder.resources = {}
builder.timeout = kwargs.get("eval_timeout")
dockerReq, is_req = self.get_requirement("DockerRequirement")
if dockerReq and is_req and not kwargs.get("use_container"):
raise WorkflowException("Document has DockerRequirement under 'requirements' but use_container is false. DockerRequirement must be under 'hints' or use_container must be true.")
builder.make_fs_access = kwargs.get("make_fs_access") or StdFsAccess
builder.fs_access = builder.make_fs_access(kwargs["basedir"])
if dockerReq and kwargs.get("use_container"):
builder.outdir = builder.fs_access.realpath(kwargs.get("docker_outdir") or "/var/spool/cwl")
builder.tmpdir = builder.fs_access.realpath(kwargs.get("docker_tmpdir") or "/tmp")
builder.stagedir = builder.fs_access.realpath(kwargs.get("docker_stagedir") or "/var/lib/cwl")
else:
builder.outdir = builder.fs_access.realpath(kwargs.get("outdir") or tempfile.mkdtemp())
builder.tmpdir = builder.fs_access.realpath(kwargs.get("tmpdir") or tempfile.mkdtemp())
builder.stagedir = builder.fs_access.realpath(kwargs.get("stagedir") or tempfile.mkdtemp())
if self.formatgraph:
for i in self.tool["inputs"]:
d = shortname(i["id"])
if d in builder.job and i.get("format"):
checkFormat(builder.job[d], builder.do_eval(i["format"]), self.formatgraph)
builder.bindings.extend(builder.bind_input(self.inputs_record_schema, builder.job))
if self.tool.get("baseCommand"):
for n, b in enumerate(aslist(self.tool["baseCommand"])):
builder.bindings.append({
"position": [-1000000, n],
"datum": b
})
if self.tool.get("arguments"):
for i, a in enumerate(self.tool["arguments"]):
if isinstance(a, dict):
a = copy.copy(a)
if a.get("position"):
a["position"] = [a["position"], i]
else:
a["position"] = [0, i]
builder.bindings.append(a)
elif ("$(" in a) or ("${" in a):
builder.bindings.append({
"position": [0, i],
"valueFrom": a
})
else:
builder.bindings.append({
"position": [0, i],
"datum": a
})
builder.bindings.sort(key=lambda a: a["position"])
builder.resources = self.evalResources(builder, kwargs)
return builder
def evalResources(self, builder, kwargs):
# type: (Builder, Dict[AnyStr, Any]) -> Dict[Text, Union[int, Text]]
resourceReq, _ = self.get_requirement("ResourceRequirement")
if resourceReq is None:
resourceReq = {}
request = {
"coresMin": 1,
"coresMax": 1,
"ramMin": 1024,
"ramMax": 1024,
"tmpdirMin": 1024,
"tmpdirMax": 1024,
"outdirMin": 1024,
"outdirMax": 1024
}
for a in ("cores", "ram", "tmpdir", "outdir"):
mn = None
mx = None
if resourceReq.get(a+"Min"):
mn = builder.do_eval(resourceReq[a+"Min"])
if resourceReq.get(a+"Max"):
mx = builder.do_eval(resourceReq[a+"Max"])
if mn is None:
mn = mx
elif mx is None:
mx = mn
if mn:
request[a+"Min"] = mn
request[a+"Max"] = mx
if kwargs.get("select_resources"):
return kwargs["select_resources"](request)
else:
return {
"cores": request["coresMin"],
"ram": request["ramMin"],
"tmpdirSize": request["tmpdirMin"],
"outdirSize": request["outdirMin"],
}
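# For illustration: with no ResourceRequirement and no select_resources callback,
# the defaults above collapse to
#
#     {"cores": 1, "ram": 1024, "tmpdirSize": 1024, "outdirSize": 1024}
#
# i.e. one core and 1024 for each of the RAM, tmpdir and outdir minimums.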
def validate_hints(self, avsc_names, hints, strict):
# type: (Any, List[Dict[Text, Any]], bool) -> None
for r in hints:
try:
if avsc_names.get_name(r["class"], "") is not None:
validate.validate_ex(avsc_names.get_name(r["class"], ""),
r, strict=strict)
else:
_logger.info(Text(validate.ValidationException(
u"Unknown hint %s" % (r["class"]))))
except validate.ValidationException as v:
raise validate.ValidationException(u"Validating hint `%s`: %s" % (r["class"], Text(v)))
def get_requirement(self, feature): # type: (Any) -> Tuple[Any, bool]
return get_feature(self, feature)
def visit(self, op): # type: (Callable[[Dict[Text, Any]], None]) -> None
op(self.tool)
@abc.abstractmethod
def job(self, job_order, output_callbacks, **kwargs):
# type: (Dict[Text, Text], Callable[[Any, Any], Any], **Any) -> Generator[Any, None, None]
return None
def empty_subtree(dirpath): # type: (Text) -> bool
# Test if a directory tree contains any files (does not count empty
# subdirectories)
for d in os.listdir(dirpath):
d = os.path.join(dirpath, d)
try:
if stat.S_ISDIR(os.stat(d).st_mode):
if empty_subtree(d) is False:
return False
else:
return False
except OSError as e:
if e.errno == errno.ENOENT:
pass
else:
raise
return True
_names = set() # type: Set[Text]
def uniquename(stem): # type: (Text) -> Text
c = 1
u = stem
while u in _names:
c += 1
u = u"%s_%s" % (stem, c)
_names.add(u)
return u
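# Illustrative usage (the module-level _names set is shared across calls):
#
#     uniquename(u"step")  # -> u"step"
#     uniquename(u"step")  # -> u"step_2"
#     uniquename(u"step")  # -> u"step_3"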
def nestdir(base, deps):
# type: (Text, Dict[Text, Any]) -> Dict[Text, Any]
dirname = os.path.dirname(base) + "/"
subid = deps["location"]
if subid.startswith(dirname):
s2 = subid[len(dirname):]
sp = s2.split('/')
sp.pop()
while sp:
nx = sp.pop()
deps = {
"class": "Directory",
"basename": nx,
"listing": [deps]
}
return deps
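# Illustrative sketch (hypothetical locations): a dependency two directories below
# `base` gets wrapped in one Directory entry per intermediate path segment:
#
#     nestdir("file:///wf/job.cwl",
#             {"class": "File", "location": "file:///wf/data/inputs/a.txt"})
#     # -> {"class": "Directory", "basename": "data",
#     #     "listing": [{"class": "Directory", "basename": "inputs",
#     #                  "listing": [{"class": "File",
#     #                               "location": "file:///wf/data/inputs/a.txt"}]}]}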
def mergedirs(listing):
# type: (List[Dict[Text, Any]]) -> List[Dict[Text, Any]]
r = [] # type: List[Dict[Text, Any]]
ents = {} # type: Dict[Text, Any]
for e in listing:
if e["basename"] not in ents:
ents[e["basename"]] = e
elif e["class"] == "Directory":
ents[e["basename"]]["listing"].extend(e["listing"])
for e in ents.itervalues():
if e["class"] == "Directory" and "listing" in e:
e["listing"] = mergedirs(e["listing"])
r.extend(ents.itervalues())
return r
def scandeps(base, doc, reffields, urlfields, loadref):
# type: (Text, Any, Set[Text], Set[Text], Callable[[Text, Text], Any]) -> List[Dict[Text, Text]]
r = [] # type: List[Dict[Text, Text]]
deps = None # type: Dict[Text, Any]
if isinstance(doc, dict):
if "id" in doc:
if doc["id"].startswith("file://"):
df, _ = urlparse.urldefrag(doc["id"])
if base != df:
r.append({
"class": "File",
"location": df
})
base = df
if doc.get("class") in ("File", "Directory") and "location" in urlfields:
u = doc.get("location", doc.get("path"))
if u and not u.startswith("_:"):
deps = {
"class": doc["class"],
"location": urlparse.urljoin(base, u)
}
if doc["class"] == "Directory" and "listing" in doc:
deps["listing"] = doc["listing"]
if doc["class"] == "File" and "secondaryFiles" in doc:
deps["secondaryFiles"] = doc["secondaryFiles"]
deps = nestdir(base, deps)
r.append(deps)
else:
if doc["class"] == "Directory" and "listing" in doc:
r.extend(scandeps(base, doc["listing"], reffields, urlfields, loadref))
elif doc["class"] == "File" and "secondaryFiles" in doc:
r.extend(scandeps(base, doc["secondaryFiles"], reffields, urlfields, loadref))
for k, v in doc.iteritems():
if k in reffields:
for u in aslist(v):
if isinstance(u, dict):
r.extend(scandeps(base, u, reffields, urlfields, loadref))
else:
sub = loadref(base, u)
subid = urlparse.urljoin(base, u)
deps = {
"class": "File",
"location": subid
}
sf = scandeps(subid, sub, reffields, urlfields, loadref)
if sf:
deps["secondaryFiles"] = sf
deps = nestdir(base, deps)
r.append(deps)
elif k in urlfields and k != "location":
for u in aslist(v):
deps = {
"class": "File",
"location": urlparse.urljoin(base, u)
}
deps = nestdir(base, deps)
r.append(deps)
elif k not in ("listing", "secondaryFiles"):
r.extend(scandeps(base, v, reffields, urlfields, loadref))
elif isinstance(doc, list):
for d in doc:
r.extend(scandeps(base, d, reffields, urlfields, loadref))
if r:
normalizeFilesDirs(r)
r = mergedirs(r)
return r
import os
from unittest import mock
from tornado.testing import AsyncTestCase, ExpectLog, gen_test
from remoteappmanager.docker.container import Container
from remoteappmanager.docker.docker_labels import SIMPHONY_NS_RUNINFO
from remoteappmanager.docker.container_manager import ContainerManager, \
OperationInProgress
from remoteappmanager.docker.image import Image
from remoteappmanager.tests import utils
from remoteappmanager.tests.mocking.dummy import create_container_manager
from remoteappmanager.tests.mocking.virtual.docker_client import (
VirtualDockerClient)
class TestContainerManager(AsyncTestCase):
def setUp(self):
super().setUp()
self.manager = create_container_manager()
self.mock_docker_client = self.manager._docker_client._sync_client
def test_instantiation(self):
self.assertIsNotNone(self.manager._docker_client)
@gen_test
def test_start_stop(self):
mock_client = self.mock_docker_client
with mock.patch.object(mock_client, "start",
wraps=mock_client.start), \
mock.patch.object(mock_client, "stop",
wraps=mock_client.stop), \
mock.patch.object(mock_client, "create_container",
wraps=mock_client.create_container), \
mock.patch.object(mock_client, "remove_container",
wraps=mock_client.remove_container):
result = yield self.manager.start_container(
"johndoe",
'simphonyproject/simphony-mayavi:0.6.0',
"63dce9335bca49798bbb93146ad07c66",
"/user/johndoe/containers/cbeb652678244ed1aa5f68735abb4868",
None,
None)
self.assertTrue(mock_client.start.called)
self.assertTrue(mock_client.create_container.called)
runinfo_labels = mock_client.create_container.call_args[1][
"labels"]
self.assertEqual(runinfo_labels[SIMPHONY_NS_RUNINFO.user],
"johndoe")
self.assertEqual(runinfo_labels[SIMPHONY_NS_RUNINFO.realm],
"myrealm")
self.assertIn(SIMPHONY_NS_RUNINFO.url_id, runinfo_labels)
self.assertEqual(runinfo_labels[SIMPHONY_NS_RUNINFO.mapping_id],
"63dce9335bca49798bbb93146ad07c66")
self.assertIsInstance(result, Container)
self.assertFalse(mock_client.stop.called)
self.assertFalse(mock_client.remove_container.called)
yield self.manager.stop_and_remove_container(result.docker_id)
self.assertTrue(mock_client.stop.called)
self.assertTrue(mock_client.remove_container.called)
@gen_test
def test_find_from_mapping_id(self):
""" Test containers_for_mapping_id returns a list of Container """
result = yield self.manager.find_containers(
user_name="johndoe",
mapping_id="5b34ce60d95742fa828cdced12b4c342")
expected = Container(docker_id='d2b56bffb5655cb7668b685b80116041a20ee8662ebfa5b5cb68cfc423d9dc30', # noqa
mapping_id="5b34ce60d95742fa828cdced12b4c342",
name="/myrealm-johndoe-5b34ce60d95742fa828cdced12b4c342-ascvbefsda", # noqa
image_name='simphonyproject/simphony-mayavi:0.6.0', # noqa
user="johndoe",
image_id="sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", # noqa
ip='127.0.0.1',
port=666,
url_id='20dcb84cdbea4b1899447246789093d0',
realm="myrealm",
urlpath="/user/johndoe/containers/20dcb84cdbea4b1899447246789093d0" # noqa
)
self.assertEqual(len(result), 1)
utils.assert_containers_equal(self, result[0], expected)
@gen_test
def test_find_from_url_id(self):
""" Test containers_for_mapping_id returns a list of Container """
result = yield self.manager.find_container(
url_id="20dcb84cdbea4b1899447246789093d0")
expected = Container(docker_id='d2b56bffb5655cb7668b685b80116041a20ee8662ebfa5b5cb68cfc423d9dc30', # noqa
mapping_id="5b34ce60d95742fa828cdced12b4c342",
name="/myrealm-johndoe-5b34ce60d95742fa828cdced12b4c342-ascvbefsda", # noqa
image_name='simphonyproject/simphony-mayavi:0.6.0', # noqa
user="johndoe",
image_id="sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", # noqa
ip='127.0.0.1',
port=666,
url_id='20dcb84cdbea4b1899447246789093d0',
realm="myrealm",
urlpath="/user/johndoe/containers/20dcb84cdbea4b1899447246789093d0" # noqa
)
utils.assert_containers_equal(self, result, expected)
@gen_test
def test_find_from_url_id_exceptions(self):
""" Test containers_for_mapping_id returns a list of Container """
docker_client = self.mock_docker_client
docker_client.port = mock.Mock(side_effect=Exception("Boom!"))
result = yield self.manager.find_container(url_id="url_id")
self.assertEqual(result, None)
# Making it so that no valid dictionary is returned.
docker_client.port = mock.Mock(return_value=1234)
self.mock_docker_client = docker_client
self.manager._docker_client._sync_client = docker_client
result = yield self.manager.find_container(url_id="url_id")
self.assertEqual(result, None)
@gen_test
def test_find_containers(self):
result = yield self.manager.find_containers()
self.assertEqual(len(result), 1)
@gen_test
def test_race_condition_spawning(self):
# Start the operations and retrieve the futures.
# They will stop at the first yield and not go further until
# we yield them.
with mock.patch.object(self.mock_docker_client, "start",
wraps=self.mock_docker_client.start):
f1 = self.manager.start_container("johndoe",
"simphonyproject/simphony-mayavi:0.6.0", # noqa
"76cd29a4d61f4ddc95fa633347934807", # noqa
"/user/johndoe/containers/4b9a34d803c74013939d8df05eef9262", # noqa
None,
)
f2 = self.manager.start_container("johndoe",
"simphonyproject/simphony-mayavi:0.6.0", # noqa
"76cd29a4d61f4ddc95fa633347934807", # noqa
"/user/johndoe/containers/a2612a5a12074ce99ece1c2888fe34c0", # noqa
None,
)
# If these yields raise a KeyError, it is because the second
# operation tries to remove the same key from the list, but it has
# already been removed by the first one. Race condition.
yield f1
with self.assertRaises(OperationInProgress):
yield f2
self.assertEqual(self.mock_docker_client.start.call_count, 1)
@gen_test
def test_race_condition_stopping(self):
docker_client = self.mock_docker_client
with mock.patch.object(docker_client, "stop",
wraps=docker_client.stop):
f1 = self.manager.stop_and_remove_container("d2b56bffb5655cb7668b685b80116041a20ee8662ebfa5b5cb68cfc423d9dc30") # noqa
f2 = self.manager.stop_and_remove_container("d2b56bffb5655cb7668b685b80116041a20ee8662ebfa5b5cb68cfc423d9dc30") # noqa
yield f1
with self.assertRaises(OperationInProgress):
yield f2
self.assertEqual(self.mock_docker_client.stop.call_count, 1)
@gen_test
def test_start_already_present_container(self):
mock_client = self.mock_docker_client
with mock.patch.object(mock_client, "start",
wraps=mock_client.start), \
mock.patch.object(mock_client, "stop",
wraps=mock_client.stop), \
mock.patch.object(mock_client, "remove_container",
wraps=mock_client.remove_container):
result = yield self.manager.start_container(
"johndoe",
"simphonyproject/simphony-mayavi:0.6.0",
"5b34ce60d95742fa828cdced12b4c342",
"/users/johndoe/containers/random_value",
None,
None)
self.assertTrue(mock_client.start.called)
self.assertIsInstance(result, Container)
# Stop should have been called and the container removed
self.assertTrue(mock_client.stop.called)
self.assertTrue(mock_client.remove_container.called)
@gen_test
def test_image(self):
image = yield self.manager.image('simphonyproject/simphony-mayavi:0.6.0') # noqa
self.assertIsInstance(image, Image)
self.assertEqual(image.description,
'Ubuntu machine with mayavi preinstalled')
self.assertEqual(image.icon_128, "")
self.assertEqual(image.ui_name, "Mayavi 4.4.4")
self.assertEqual(image.docker_id, "sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824") # noqa
image = yield self.manager.image("whatev")
self.assertIsNone(image)
@gen_test
def test_start_container_with_nonexisting_volume_source(self):
# These volume sources are invalid
docker_client = self.manager._docker_client._sync_client
with mock.patch.object(docker_client, "create_container",
wraps=docker_client.create_container):
volumes = {'~no_way_this_be_valid': {'bind': '/target_vol1',
'mode': 'ro'},
'/no_way_this_be_valid': {'bind': '/target_vol2',
'mode': 'ro'}}
# This volume source is valid
good_path = os.path.abspath('.')
volumes[good_path] = {'bind': '/target_vol3',
'mode': 'ro'}
with ExpectLog('tornado.application', ''):
yield self.manager.start_container(
"johndoe",
"simphonyproject/simphony-mayavi:0.6.0",
"5b34ce60d95742fa828cdced12b4c342",
"/foo/bar",
volumes,
)
# Call args and keyword args that create_container receives
args = docker_client.create_container.call_args
actual_volume_targets = args[1]['volumes']
# Invalid volume paths should have been filtered away
self.assertNotIn('/target_vol1', actual_volume_targets)
self.assertNotIn('/target_vol2', actual_volume_targets)
# The current directory is valid, should stay
self.assertIn('/target_vol3', actual_volume_targets)
@gen_test
def test_start_container_exception_cleanup(self):
docker_client = self.mock_docker_client
def raiser(*args, **kwargs):
raise Exception("Boom!")
with mock.patch.object(docker_client, "stop",
wraps=docker_client.stop), \
mock.patch.object(docker_client, "remove_container",
wraps=docker_client.remove_container), \
mock.patch.object(docker_client, "start",
side_effect=raiser):
self.assertFalse(self.mock_docker_client.stop.called)
self.assertFalse(self.mock_docker_client.remove_container.called)
with self.assertRaisesRegex(Exception, 'Boom!'):
with ExpectLog('tornado.application', ''):
yield self.manager.start_container(
"johndoe",
"simphonyproject/simphony-mayavi:0.6.0",
"5b34ce60d95742fa828cdced12b4c342",
"/users/johndoe/containers/4273750ddce3454283a5b1817526260b/", # noqa
None,
None
)
self.assertTrue(docker_client.stop.called)
self.assertTrue(docker_client.remove_container.called)
@gen_test
def test_start_container_exception_cleanup_2(self):
# Same test as above, but checks after the start (at ip and port)
docker_client = self.mock_docker_client
def raiser(*args, **kwargs):
raise Exception("Boom!")
with mock.patch.object(docker_client, "stop",
wraps=docker_client.stop), \
mock.patch.object(docker_client, "remove_container",
wraps=docker_client.remove_container), \
mock.patch.object(docker_client, "port",
side_effect=raiser):
self.manager._docker_client.port = mock.Mock(side_effect=raiser)
with self.assertRaisesRegex(Exception, 'Boom!'):
with ExpectLog('tornado.application', ''):
yield self.manager.start_container(
"johndoe",
"simphonyproject/simphony-mayavi:0.6.0",
"4273750ddce3454283a5b1817526260b",
"/users/johndoe/containers/3b83f81f2e4544e6aa1493b50202f8eb", # noqa
None,
None
)
self.assertTrue(self.mock_docker_client.stop.called)
self.assertTrue(self.mock_docker_client.remove_container.called)
@gen_test
def test_start_container_with_environment(self):
mock_client = self.mock_docker_client
with mock.patch.object(mock_client, "create_container",
wraps=mock_client.create_container):
environment = {
"FOO": "bar"
}
yield self.manager.start_container(
"johndoe",
"simphonyproject/simphony-mayavi:0.6.0",
"4273750ddce3454283a5b1817526260b",
"/users/johndoe/containers/3b83f81f2e4544e6aa1493b50202f8eb",
None,
environment)
self.assertEqual(
mock_client.create_container.call_args[1][
"environment"]["FOO"],
"bar")
@gen_test
def test_different_realm(self):
manager = ContainerManager(docker_config={},
realm="anotherrealm")
manager._docker_client._sync_client = \
VirtualDockerClient.with_containers()
result = yield manager.find_container(
user_name="johndoe", mapping_id="5b34ce60d95742fa828cdced12b4c342")
self.assertIsNone(result)
result = yield manager.find_containers()
self.assertEqual(result, [])
@gen_test
def test_not_stopping_if_different_realm(self):
self.mock_docker_client.stop = mock.Mock()
self.mock_docker_client.remove_container = mock.Mock()
manager = ContainerManager(docker_config={},
realm="anotherrealm")
with ExpectLog('tornado.application', ''):
yield manager.stop_and_remove_container(
"d2b56bffb5655cb7668b685b80116041a20ee8662ebfa5b5cb68cfc423d9dc30") # noqa
self.assertFalse(self.mock_docker_client.stop.called)
self.assertFalse(self.mock_docker_client.remove_container.called)
|
|
from toontown.toonbase.ToontownGlobals import *
from toontown.toonbase.ToonBaseGlobal import *
from panda3d.core import *
from panda3d.direct import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
import HouseGlobals
from toontown.catalog import CatalogItemList
from toontown.catalog import CatalogItem
from toontown.catalog import CatalogSurfaceItem
from toontown.catalog import CatalogWallpaperItem
from toontown.catalog import CatalogFlooringItem
from toontown.catalog import CatalogMouldingItem
from toontown.catalog import CatalogWainscotingItem
from toontown.dna.DNAParser import *
WindowPlugNames = ('**/windowcut_a*', '**/windowcut_b*', '**/windowcut_c*', '**/windowcut_d*', '**/windowcut_e*', '**/windowcut_f*')
RoomNames = ('**/group2', '**/group1')
WallNames = ('ceiling*', 'wall_side_middle*', 'wall_front_middle*', 'windowcut_*')
MouldingNames = ('wall_side_top*', 'wall_front_top*')
FloorNames = ('floor*',)
WainscotingNames = ('wall_side_bottom*', 'wall_front_bottom*')
BorderNames = ('wall_side_middle*_border', 'wall_front_middle*_border', 'windowcut_*_border')
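# Node-name groups per wallpaper surface type; __colorWalls() below applies the
# colour and texture of each CatalogSurfaceItem to the matching group of nodes.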
WallpaperPieceNames = (WallNames,
MouldingNames,
FloorNames,
WainscotingNames,
BorderNames)
class DistributedHouseInterior(DistributedObject.DistributedObject):
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.houseId = 0
self.houseIndex = 0
self.interior = None
self.exteriorWindowsHidden = 0
return
def generate(self):
DistributedObject.DistributedObject.generate(self)
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
self.setup()
def disable(self):
self.interior.removeNode()
del self.interior
DistributedObject.DistributedObject.disable(self)
def delete(self):
self.ignore(self.uniqueName('enterclosetSphere'))
DistributedObject.DistributedObject.delete(self)
def setup(self):
dnaStore = base.cr.playGame.dnaStore
self.interior = loader.loadModel('phase_5.5/models/estate/tt_m_ara_int_estateHouseA')
self.interior.reparentTo(render)
doorModelName = 'door_double_round_ur'
door = dnaStore.findNode(doorModelName)
door_origin = self.interior.find('**/door_origin')
door_origin.setHpr(180, 0, 0)
door_origin.setScale(0.8, 0.8, 0.8)
door_origin.setPos(door_origin, 0, -0.025, 0)
doorNP = door.copyTo(door_origin)
houseColor = HouseGlobals.atticWood
color = Vec4(houseColor[0], houseColor[1], houseColor[2], 1)
setupDoor(doorNP, door_origin, door_origin, dnaStore, str(self.houseId), color)
doorFrame = doorNP.find('door_*_flat')
doorFrame.setColor(color)
self.interior.flattenMedium()
self.windowSlots = []
for name in WindowPlugNames:
plugNodes = self.interior.findAllMatches(name)
if plugNodes.isEmpty():
self.windowSlots.append((None, None))
else:
viewBase = plugNodes[0].getParent().attachNewNode('view')
viewBase.setTransform(plugNodes[0].getTransform())
plug = plugNodes[0].getParent().attachNewNode('plug')
plugNodes.reparentTo(plug)
plug.flattenLight()
self.windowSlots.append((plug, viewBase))
self.windowSlots[2][1].setPosHpr(16.0, -12.0, 5.51, -90, 0, 0)
self.windowSlots[4][1].setPosHpr(-12.0, 26.0, 5.51, 0, 0, 0)
self.__colorWalls()
self.__setupWindows()
messenger.send('houseInteriorLoaded-%d' % self.zoneId)
return None
def __colorWalls(self):
if not self.wallpaper:
self.notify.info('No wallpaper in interior; clearing.')
            for name in WallNames + WainscotingNames:
                nodes = self.interior.findAllMatches('**/%s' % name)
for node in nodes:
node.setTextureOff(1)
return
numSurfaceTypes = CatalogSurfaceItem.NUM_ST_TYPES
numRooms = min(len(self.wallpaper) / numSurfaceTypes, len(RoomNames))
for room in range(numRooms):
roomName = RoomNames[room]
roomNode = self.interior.find(roomName)
if not roomNode.isEmpty():
for surface in range(numSurfaceTypes):
slot = room * numSurfaceTypes + surface
wallpaper = self.wallpaper[slot]
color = wallpaper.getColor()
texture = wallpaper.loadTexture()
                    for name in WallpaperPieceNames[surface]:
                        nodes = roomNode.findAllMatches('**/%s' % name)
for node in nodes:
                            if name == 'ceiling*':
r, g, b, a = color
scale = 0.66
r *= scale
g *= scale
b *= scale
node.setColorScale(r, g, b, a)
else:
node.setColorScale(*color)
node.setTexture(texture, 1)
if wallpaper.getSurfaceType() == CatalogSurfaceItem.STWallpaper:
color2 = wallpaper.getBorderColor()
texture2 = wallpaper.loadBorderTexture()
                            nodes = roomNode.findAllMatches('**/%s_border' % name)
for node in nodes:
node.setColorScale(*color2)
node.setTexture(texture2, 1)
nodes = self.interior.findAllMatches('**/arch*')
for node in nodes:
node.setColorScale(*(HouseGlobals.archWood + (1,)))
def __setupWindows(self):
for plug, viewBase in self.windowSlots:
if plug:
plug.show()
if viewBase:
viewBase.getChildren().detach()
if not self.windows:
self.notify.info('No windows in interior; returning.')
return
for item in self.windows:
plug, viewBase = self.windowSlots[item.placement]
if plug:
plug.hide()
if viewBase:
model = item.loadModel()
model.reparentTo(viewBase)
if self.exteriorWindowsHidden:
model.findAllMatches('**/outside').stash()
def hideExteriorWindows(self):
self.exteriorWindowsHidden = 1
for item in self.windows:
plug, viewBase = self.windowSlots[item.placement]
if viewBase:
viewBase.findAllMatches('**/outside').stash()
def showExteriorWindows(self):
self.exteriorWindowsHidden = 0
for item in self.windows:
plug, viewBase = self.windowSlots[item.placement]
if viewBase:
viewBase.findAllMatches('**/outside;+s').unstash()
def setHouseId(self, index):
self.houseId = index
def setHouseIndex(self, index):
self.houseIndex = index
def setWallpaper(self, items):
self.wallpaper = CatalogItemList.CatalogItemList(items, store=CatalogItem.Customization)
if self.interior:
self.__colorWalls()
def setWindows(self, items):
self.windows = CatalogItemList.CatalogItemList(items, store=CatalogItem.Customization | CatalogItem.WindowPlacement)
if self.interior:
self.__setupWindows()
def testWallpaperCombo(self, wallpaperType, wallpaperColorIndex, borderIndex, borderColorIndex, mouldingType, mouldingColorIndex, flooringType, flooringColorIndex, wainscotingType, wainscotingColorIndex):
wallpaperItem = CatalogWallpaperItem.CatalogWallpaperItem(wallpaperType, wallpaperColorIndex, borderIndex, borderColorIndex)
mouldingItem = CatalogMouldingItem.CatalogMouldingItem(mouldingType, mouldingColorIndex)
flooringItem = CatalogFlooringItem.CatalogFlooringItem(flooringType, flooringColorIndex)
wainscotingItem = CatalogWainscotingItem.CatalogWainscotingItem(wainscotingType, wainscotingColorIndex)
self.wallpaper = CatalogItemList.CatalogItemList([wallpaperItem,
mouldingItem,
flooringItem,
wainscotingItem,
wallpaperItem,
mouldingItem,
flooringItem,
wainscotingItem], store=CatalogItem.Customization)
if self.interior:
self.__colorWalls()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Binary for training translation models and decoding from them.
Running this program without --decode will download the WMT corpus into
the directory specified as --data_dir and tokenize it in a very basic way,
and then start training a model saving checkpoints to --train_dir.
Running with --decode starts an interactive loop so you can see how
the current checkpoint translates English sentences into French.
See the following papers for more information on neural translation models.
* http://arxiv.org/abs/1409.3215
* http://arxiv.org/abs/1409.0473
* http://arxiv.org/abs/1412.2007
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import logging
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import data_utils
import seq2seq_model
tf.app.flags.DEFINE_float("learning_rate", 0.7, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99,
"Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0,
"Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 64,
"Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 1024, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 3, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("from_vocab_size", 40000, "English vocabulary size.")
tf.app.flags.DEFINE_integer("to_vocab_size", 40000, "French vocabulary size.")
tf.app.flags.DEFINE_string("data_dir", "data/", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "checkpoint/", "Training directory.")
tf.app.flags.DEFINE_string("from_train_data", "data/giga-fren.release2.fixed.ids40000.en", "Training data.")
tf.app.flags.DEFINE_string("to_train_data", "data/giga-fren.release2.fixed.ids40000.en", "Training data.")
tf.app.flags.DEFINE_string("from_dev_data", "data/newstest2013.en", "Training data.")
tf.app.flags.DEFINE_string("to_dev_data", "data/newst", "Training data.")
tf.app.flags.DEFINE_integer("max_train_data_size", 7000000,
"Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 100,
"How many training steps to do per checkpoint.")
tf.app.flags.DEFINE_boolean("decode", False,
"Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("self_test", False,
"Run a self-test if this is set to True.")
tf.app.flags.DEFINE_boolean("use_fp16", False,
"Train using fp16 instead of fp32.")
FLAGS = tf.app.flags.FLAGS
# We use a number of buckets and pad to the closest one for efficiency.
# See seq2seq_model.Seq2SeqModel for details of how they work.
_buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
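# For example, with the buckets above a pair with 8 source token ids and 12
# target token ids (including the appended EOS symbol) falls into the (10, 15)
# bucket, since 8 < 10 and 12 < 15; shorter sequences are later padded up to
# the bucket size when batches are built.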
def read_data(source_path, target_path, max_size=None):
"""Read data from source and target files and put into buckets.
Args:
source_path: path to the files with token-ids for the source language.
target_path: path to the file with token-ids for the target language;
it must be aligned with the source file: n-th line contains the desired
output for n-th line from the source_path.
max_size: maximum number of lines to read, all other will be ignored;
if 0 or None, data files will be read completely (no limit).
Returns:
data_set: a list of length len(_buckets); data_set[n] contains a list of
(source, target) pairs read from the provided data files that fit
into the n-th bucket, i.e., such that len(source) < _buckets[n][0] and
len(target) < _buckets[n][1]; source and target are lists of token-ids.
"""
data_set = [[] for _ in _buckets]
with tf.gfile.GFile(source_path, mode="r") as source_file:
with tf.gfile.GFile(target_path, mode="r") as target_file:
source, target = source_file.readline(), target_file.readline()
counter = 0
while source and target and (not max_size or counter < max_size):
counter += 1
if counter % 100000 == 0:
print(" reading data line %d" % counter)
sys.stdout.flush()
source_ids = [int(x) for x in source.split()]
target_ids = [int(x) for x in target.split()]
target_ids.append(data_utils.EOS_ID)
for bucket_id, (source_size, target_size) in enumerate(_buckets):
if len(source_ids) < source_size and len(target_ids) < target_size:
data_set[bucket_id].append([source_ids, target_ids])
break
source, target = source_file.readline(), target_file.readline()
return data_set
def create_model(session, forward_only):
"""Create translation model and initialize or load parameters in session."""
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
model = seq2seq_model.Seq2SeqModel(
FLAGS.from_vocab_size,
FLAGS.to_vocab_size,
_buckets,
FLAGS.size,
FLAGS.num_layers,
FLAGS.max_gradient_norm,
FLAGS.batch_size,
FLAGS.learning_rate,
FLAGS.learning_rate_decay_factor,
forward_only=forward_only,
dtype=dtype)
ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
model.saver.restore(session, ckpt.model_checkpoint_path)
else:
print("Created model with fresh parameters.")
session.run(tf.global_variables_initializer())
return model
def checkFlags():
return (FLAGS.from_train_data and \
FLAGS.to_train_data and \
FLAGS.from_dev_data and \
FLAGS.to_dev_data)
def checkFlagPathsExist():
return (os.path.exists(FLAGS.from_train_data) and \
os.path.exists(FLAGS.to_train_data) and \
os.path.exists(FLAGS.from_dev_data) and \
os.path.exists(FLAGS.to_dev_data))
def train():
"""Train a en->fr translation model using WMT data."""
from_train = None
to_train = None
from_dev = None
to_dev = None
  if checkFlags() and checkFlagPathsExist():
from_train = FLAGS.from_train_data
to_train = FLAGS.to_train_data
from_dev = FLAGS.from_dev_data
to_dev = FLAGS.to_dev_data
else:
# Prepare WMT data.
print("Preparing WMT data in %s" % FLAGS.data_dir)
from_train, to_train, from_dev, to_dev, _, _ = data_utils.prepare_wmt_data(
FLAGS.data_dir, FLAGS.from_vocab_size, FLAGS.to_vocab_size)
with tf.Session() as sess:
# Create model.
print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
model = create_model(sess, False)
# Read data into buckets and compute their sizes.
print ("Reading development and training data (limit: %d)."
% FLAGS.max_train_data_size)
dev_set = read_data(from_dev, to_dev, FLAGS.max_train_data_size)
train_set = read_data(from_train, to_train, FLAGS.max_train_data_size)
train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
train_total_size = float(sum(train_bucket_sizes))
# A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
# to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
    # the size of the i-th training bucket, as used later.
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in xrange(len(train_bucket_sizes))]
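    # Worked example (illustrative): bucket sizes [100, 300, 600] give a total of
    # 1000 and a scale of [0.1, 0.4, 1.0], so a uniform random draw below selects
    # bucket i with probability proportional to its size.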
# This is the training loop.
step_time, loss = 0.0, 0.0
current_step = 0
previous_losses = []
while True:
# Choose a bucket according to data distribution. We pick a random number
# in [0, 1] and use the corresponding interval in train_buckets_scale.
random_number_01 = np.random.random_sample()
bucket_id = min([i for i in xrange(len(train_buckets_scale))
if train_buckets_scale[i] > random_number_01])
# Get a batch and make a step.
start_time = time.time()
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
train_set, bucket_id)
_, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, False)
step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
loss += step_loss / FLAGS.steps_per_checkpoint
current_step += 1
# Once in a while, we save checkpoint, print statistics, and run evals.
if current_step % FLAGS.steps_per_checkpoint == 0:
# Print statistics for the previous epoch.
perplexity = math.exp(float(loss)) if loss < 300 else float("inf")
print ("global step %d learning rate %.4f step-time %.2f perplexity "
"%.2f" % (model.global_step.eval(), model.learning_rate.eval(),
step_time, perplexity))
# Decrease learning rate if no improvement was seen over last 3 times.
if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
sess.run(model.learning_rate_decay_op)
previous_losses.append(loss)
# Save checkpoint and zero timer and loss.
checkpoint_path = os.path.join(FLAGS.train_dir, "translate.ckpt")
model.saver.save(sess, checkpoint_path, global_step=model.global_step)
step_time, loss = 0.0, 0.0
# Run evals on development set and print their perplexity.
for bucket_id in xrange(len(_buckets)):
if len(dev_set[bucket_id]) == 0:
print(" eval: empty bucket %d" % (bucket_id))
continue
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
dev_set, bucket_id)
_, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
eval_ppx = math.exp(float(eval_loss)) if eval_loss < 300 else float(
"inf")
print(" eval: bucket %d perplexity %.2f" % (bucket_id, eval_ppx))
sys.stdout.flush()
def decode():
with tf.Session() as sess:
# Create model and load parameters.
model = create_model(sess, True)
model.batch_size = 1 # We decode one sentence at a time.
# Load vocabularies.
en_vocab_path = os.path.join(FLAGS.data_dir,
"vocab%d.en" % FLAGS.from_vocab_size)
fr_vocab_path = os.path.join(FLAGS.data_dir,
"vocab%d.fr" % FLAGS.to_vocab_size)
en_vocab, _ = data_utils.initialize_vocabulary(en_vocab_path)
_, rev_fr_vocab = data_utils.initialize_vocabulary(fr_vocab_path)
# Decode from standard input.
sys.stdout.write("> ")
sys.stdout.flush()
sentence = sys.stdin.readline()
while sentence:
# Get token-ids for the input sentence.
token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), en_vocab)
# Which bucket does it belong to?
bucket_id = len(_buckets) - 1
for i, bucket in enumerate(_buckets):
if bucket[0] >= len(token_ids):
bucket_id = i
break
else:
logging.warning("Sentence truncated: %s", sentence)
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
{bucket_id: [(token_ids, [])]}, bucket_id)
# Get output logits for the sentence.
_, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
target_weights, bucket_id, True)
# This is a greedy decoder - outputs are just argmaxes of output_logits.
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
# If there is an EOS symbol in outputs, cut them at that point.
if data_utils.EOS_ID in outputs:
outputs = outputs[:outputs.index(data_utils.EOS_ID)]
# Print out French sentence corresponding to outputs.
print(" ".join([tf.compat.as_str(rev_fr_vocab[output]) for output in outputs]))
print("> ", end="")
sys.stdout.flush()
sentence = sys.stdin.readline()
def self_test():
"""Test the translation model."""
with tf.Session() as sess:
print("Self-test for neural translation model.")
# Create model with vocabularies of 10, 2 small buckets, 2 layers of 32.
model = seq2seq_model.Seq2SeqModel(10, 10, [(3, 3), (6, 6)], 32, 2,
5.0, 32, 0.3, 0.99, num_samples=8)
sess.run(tf.global_variables_initializer())
# Fake data set for both the (3, 3) and (6, 6) bucket.
data_set = ([([1, 1], [2, 2]), ([3, 3], [4]), ([5], [6])],
[([1, 1, 1, 1, 1], [2, 2, 2, 2, 2]), ([3, 3, 3], [5, 6])])
for _ in xrange(5): # Train the fake model for 5 steps.
bucket_id = random.choice([0, 1])
encoder_inputs, decoder_inputs, target_weights = model.get_batch(
data_set, bucket_id)
model.step(sess, encoder_inputs, decoder_inputs, target_weights,
bucket_id, False)
def main(_):
if FLAGS.self_test:
self_test()
elif FLAGS.decode:
decode()
else:
train()
if __name__ == "__main__":
tf.app.run()
|
|
# encoding: utf-8
from testcases import (
TestServerTestCase,
get_client
)
from django.core.management import call_command
from queryset_client.client import (
FieldTypeError,
ObjectDoesNotExist
)
from .utils import id_generator
class ModelTestCase(TestServerTestCase):
def setUp(self):
self.start_test_server()
self.client = get_client()
def tearDown(self):
self.stop_test_server()
def test_type1(self):
value = 1
message = self.client.message()
message.id = value
self.assertTrue(message.id == value)
self.assertTrue(message._fields["id"] == value)
def test_type2(self):
value = 1
message = self.client.message()
try:
message.subject = value
except FieldTypeError:
self.assertTrue(True)
else:
self.assertTrue(False)
def test_type3(self):
value = 1
message = get_client(strict_field=False).message()
try:
message.subject = value
except FieldTypeError:
self.assertTrue(False)
else:
self.assertTrue(True)
def test_call1(self):
subject = "subject call 1"
body = "body call 1"
message_obj = self.client.message(subject=subject, body=body)
self.assertTrue(message_obj.subject == subject)
self.assertTrue(message_obj.body == body)
def test_call2(self):
try:
self.client.message(errorfield="oha yo! oha yo!")
except FieldTypeError:
self.assertTrue(True)
else:
self.assertTrue(False)
def test_save1(self):
""" (new) """
subject = "subject save 1"
body = "body save 1"
message = self.client.message(subject=subject, body=body)
message.save()
message_ = self.client.message.objects.get(id=message.id, subject=subject, body=body)
self.assertTrue(message_.id == message.id)
self.assertTrue(message_.subject == message.subject)
self.assertTrue(message_.body == message.body)
def test_save2(self):
""" (update) """
subject1 = "subject save 2"
body1 = "body save 2"
message = self.client.message(subject=subject1, body=body1)
message.save()
subject2 = "subject save 2 update"
body2 = "body save 2 update"
message.subject = subject2
message.body = body2
message.save()
try:
self.client.message.objects.get(id=message.id, subject=subject1, body=body1)
except ObjectDoesNotExist:
self.assertTrue(True)
else:
self.assertTrue(False)
try:
message_ = self.client.message.objects.get(id=message.id, subject=subject2, body=body2)
except ObjectDoesNotExist:
self.assertTrue(False)
else:
self.assertTrue(True)
self.assertTrue(message_.id == message.id)
self.assertTrue(message_.subject == message.subject)
self.assertTrue(message_.body == message.body)
def test_save3(self):
""" (update) for query_set """
subject = id_generator()
body = id_generator()
fil = self.client.message.objects.filter(id__in=range(20, 33))
for message in fil:
message.subject = subject
message.body = body
message.save()
for message in self.client.message.objects.filter(id__in=range(20, 33)):
self.assertTrue(message.subject == subject)
self.assertTrue(message.body == body)
def test_object_save1(self):
""" object save
"""
for i in range(0, 12):
message = self.client.message()
message.subject = id_generator()
message.body = id_generator()
message.save()
inbox = self.client.inbox()
inbox.did = id_generator()
inbox.save()
inbox_message = self.client.inbox_message()
inbox_message.message = message
inbox_message.inbox = inbox
inbox_message.save() # TODO: save success
# def test_save_rel1(self):
# """ relation """
# subject = ""
# body = ""
# message = self.client.inbox_message(subject=subject, body=body)
# message.save()
def test_save_many1(self):
""" post """
call_command('loaddata', 'base_data.json')
for inbox_message in self.client.inbox_message.objects.all():
inbox_message_many = self.client.inbox_message_many()
inbox_message_many.inbox_message = inbox_message
inbox_message_many.save()
def test_save_many2(self):
""" put """
call_command('loaddata', 'base_data.json')
for inbox_message_many in self.client.inbox_message_many.objects.all():
inbox_message_many.save()
def test_save_many3(self):
""" add, remove, clear
* Check resource_uri (response, field)
"""
call_command('loaddata', 'base_data.json')
inbox_messages = []
inbox_message_many = self.client.inbox_message_many.objects.get(id=2)
count_orig = inbox_message_many.inbox_message.count()
for i in range(0, 5):
message = self.client.message()
message.subject = id_generator()
message.body = id_generator()
message.save()
inbox = self.client.inbox()
inbox.did = id_generator()
inbox.save()
inbox_message = self.client.inbox_message()
inbox_message.message = message
inbox_message.inbox = inbox
inbox_message.save()
inbox_messages.append(inbox_message)
inbox_message_many.inbox_message.add(inbox_message)
inbox_message_many.save()
self.assertTrue(
inbox_message_many._response["inbox_message"] == inbox_message_many.model.inbox_message)
inbox_all = inbox_message_many.inbox_message.all()
for obj, obj_ in zip(inbox_all, inbox_message_many.inbox_message.filter()):
self.assertTrue(obj.id == obj_.id)
inbox_all_ = self.client.inbox_message.objects.filter(id__in=inbox_all._query["id__in"])
self.assertTrue(len(inbox_all) == len(inbox_all_))
for obj, obj_ in zip(inbox_all, inbox_all_):
self.assertTrue(obj.id == obj_.id)
ids = inbox_all._query["id__in"]
inbox_message_many = self.client.inbox_message_many.objects.get(id=2)
num = inbox_message_many.inbox_message.filter(id=ids[0]).count()
self.assertTrue(num == 1)
# remove
inbox_message_many.inbox_message.remove(*inbox_messages)
self.assertTrue(count_orig == inbox_message_many.inbox_message.count())
inbox_message_many.save()
self.assertTrue(count_orig == inbox_message_many.inbox_message.count())
# clear TODO: Issue #??
# inbox_message_many.inbox_message.clear()
# self.assertTrue(0 != inbox_message_many.inbox_message.count())
# inbox_message_many.save()
# self.assertTrue(0 == inbox_message_many.inbox_message.count())
inbox_message_many.save()
def test_delete1(self):
subject = "subject delete 1"
body = "body delete 1"
message = self.client.message(subject=subject, body=body)
message.save()
message_ = self.client.message.objects.get(id=message.id, subject=subject, body=body)
self.assertTrue(message_.id == message.id)
self.assertTrue(message_.subject == message.subject)
self.assertTrue(message_.body == message.body)
message.delete()
try:
message.id
except AttributeError:
self.assertTrue(True)
else:
self.assertTrue(False)
try:
message_.delete()
except Exception:
self.assertTrue(True)
else:
self.assertTrue(False)
try:
message.id
except AttributeError:
self.assertTrue(True)
else:
self.assertTrue(False)
|
|
from connection import Connection
from vec3 import Vec3
from event import BlockEvent
from block import Block
import math
from util import flatten
""" Minecraft PI low level api v0.1_1
Note: many methods have the parameter *args. This solution makes it
simple to allow different types, and a variable number of arguments.
The actual magic is a mix of flatten_parameters() and __iter__. Example:
A Cube class could implement __iter__ to work in Minecraft.setBlocks(c, id).
(Because of this, it's possible to "erase" arguments. CmdPlayer removes
entityId, by injecting [] that flattens to nothing)
@author: Aron Nieminen, Mojang AB
Updated to include additional functionality provided by RaspberryJuice:
- getBlocks() : implemented
- .create() : can now accept "name" (player name) for use in multiplayer
- CmdPositioner.getDirection
- CmdPositioner.getPitch
- CmdPositioner.getRotation
- getPlayerEntityId
"""
def intFloor(*args):
return [int(math.floor(x)) for x in flatten(args)]
class CmdPositioner:
"""Methods for setting and getting positions"""
def __init__(self, connection, packagePrefix):
self.conn = connection
self.pkg = packagePrefix
def getPos(self, id):
"""Get entity position (entityId:int) => Vec3"""
s = self.conn.sendReceive(self.pkg + ".getPos", id)
return Vec3(*map(float, s.split(",")))
def setPos(self, id, *args):
"""Set entity position (entityId:int, x,y,z)"""
self.conn.send(self.pkg + ".setPos", id, args)
def getTilePos(self, id):
"""Get entity tile position (entityId:int) => Vec3"""
s = self.conn.sendReceive(self.pkg + ".getTile", id)
return Vec3(*map(int, s.split(",")))
def setTilePos(self, id, *args):
"""Set entity tile position (entityId:int) => Vec3"""
self.conn.send(self.pkg + ".setTile", id, intFloor(*args))
def getDirection(self, id):
"""Get entity direction (entityId:int) => Vec3"""
s = self.conn.sendReceive(self.pkg + ".getDirection", id)
return Vec3(*map(float, s.split(",")))
def getRotation(self, id):
"""get entity rotation (entityId:int) => float"""
return float(self.conn.sendReceive(self.pkg + ".getRotation", id))
def getPitch(self, id):
"""get entity pitch (entityId:int) => float"""
return float(self.conn.sendReceive(self.pkg + ".getPitch", id))
def setting(self, setting, status):
"""Set a player setting (setting, status). keys: autojump"""
self.conn.send(self.pkg + ".setting", setting, 1 if bool(status) else 0)
class CmdEntity(CmdPositioner):
"""Methods for entities"""
def __init__(self, connection):
CmdPositioner.__init__(self, connection, "entity")
class CmdPlayer(CmdPositioner):
"""Methods for the host (Raspberry Pi) player"""
def __init__(self, connection, name=None):
CmdPositioner.__init__(self, connection, "player")
self.conn = connection
self.name = name
def getPos(self):
return CmdPositioner.getPos(self, self.name)
def setPos(self, *args):
return CmdPositioner.setPos(self, self.name, args)
def getTilePos(self):
return CmdPositioner.getTilePos(self, self.name)
def setTilePos(self, *args):
return CmdPositioner.setTilePos(self, self.name, args)
def getDirection(self):
return CmdPositioner.getDirection(self, self.name)
def getRotation(self):
return CmdPositioner.getRotation(self, self.name)
def getPitch(self):
return CmdPositioner.getPitch(self, self.name)
class CmdCamera:
def __init__(self, connection):
self.conn = connection
def setNormal(self, *args):
"""Set camera mode to normal Minecraft view ([entityId])"""
self.conn.send("camera.mode.setNormal", args)
def setFixed(self):
"""Set camera mode to fixed view"""
self.conn.send("camera.mode.setFixed")
def setFollow(self, *args):
"""Set camera mode to follow an entity ([entityId])"""
self.conn.send("camera.mode.setFollow", args)
def setPos(self, *args):
"""Set camera entity position (x,y,z)"""
self.conn.send("camera.setPos", args)
class CmdEvents:
"""Events"""
def __init__(self, connection):
self.conn = connection
def clearAll(self):
"""Clear all old events"""
self.conn.send("events.clear")
def pollBlockHits(self):
"""Only triggered by sword => [BlockEvent]"""
s = self.conn.sendReceive("events.block.hits")
events = [e for e in s.split("|") if e]
return [BlockEvent.Hit(*map(int, e.split(","))) for e in events]
class Minecraft:
"""The main class to interact with a running instance of Minecraft Pi."""
def __init__(self, connection, name=None):
self.conn = connection
self.camera = CmdCamera(connection)
self.entity = CmdEntity(connection)
self.player = CmdPlayer(connection, name)
self.events = CmdEvents(connection)
def getBlock(self, *args):
"""Get block (x,y,z) => id:int"""
return int(self.conn.sendReceive("world.getBlock", intFloor(args)))
def getBlockWithData(self, *args):
"""Get block with data (x,y,z) => Block"""
ans = self.conn.sendReceive("world.getBlockWithData", intFloor(args))
return Block(*map(int, ans.split(",")))
"""
@TODO
"""
def getBlocks(self, *args):
"""Get a cuboid of blocks (x0,y0,z0,x1,y1,z1) => [id:int]"""
s = self.conn.sendReceive("world.getBlocks", intFloor(args))
return map(int, s.split(","))
def setBlock(self, *args):
"""Set block (x,y,z,id,[data])"""
self.conn.send("world.setBlock", intFloor(args))
def setBlocks(self, *args):
"""Set a cuboid of blocks (x0,y0,z0,x1,y1,z1,id,[data])"""
self.conn.send("world.setBlocks", intFloor(args))
def getHeight(self, *args):
"""Get the height of the world (x,z) => int"""
return int(self.conn.sendReceive("world.getHeight", intFloor(args)))
def getPlayerEntityIds(self):
"""Get the entity ids of the connected players => [id:int]"""
ids = self.conn.sendReceive("world.getPlayerIds")
return map(int, ids.split("|"))
def getPlayerEntityId(self, name):
"""Get the entity id of the named player => [id:int]"""
return int(self.conn.sendReceive("world.getPlayerId", name))
def saveCheckpoint(self):
"""Save a checkpoint that can be used for restoring the world"""
self.conn.send("world.checkpoint.save")
def restoreCheckpoint(self):
"""Restore the world state to the checkpoint"""
self.conn.send("world.checkpoint.restore")
def postToChat(self, msg):
"""Post a message to the game chat"""
self.conn.send("chat.post", msg)
def setting(self, setting, status):
"""Set a world setting (setting, status). keys: world_immutable, nametags_visible"""
self.conn.send("world.setting", setting, 1 if bool(status) else 0)
@staticmethod
def create(address = "localhost", port = 4711, name = None):
return Minecraft(Connection(address, port), name)
if __name__ == "__main__":
mc = Minecraft.create()
mc.postToChat("Hello, Minecraft!")
|
|
import pytest
from illuminate import *
MOCK_SERVER: str = 'mock.com'
MOCK_USER: str = 'mock'
MOCK_PASS: str = 'mock'
MOCK_INDICATOR: str = 'mock-indicator'
BASE_MOCK_JSON: dict = {
'type': 'domain',
'value': {
'name': f'{MOCK_INDICATOR}',
'classification': 'U'
},
'description': None,
'activityDates': [
{
'date': '2020-01-20',
'classification': 'U'
}
],
'reportedDates': [
{
'date': '2020-01-31',
'classification': 'U'
}
],
'targets': [
{
'name': 'Mock Target',
'id': 1,
'classification': 'U'
}
],
'attackPatterns': [
{
'name': 'Mock Attack Pattern',
'id': 1,
'classification': 'U'
}
],
'actors': [
{
'name': 'Mock Actor',
'id': 1,
'classification': 'U'
}
],
'malwares': [],
'status': 'aw',
'hashes': None,
'fileNames': None,
'fileSize': None,
'path': None,
'ports': [],
'ipRegistration': None,
'domainRegistration': None,
'ipResolution': None,
'originatingIps': None,
'subjects': None,
'requestMethods': None,
'tlp': 'mocktlp',
'tlpJustification': None,
'tlpCaveats': None,
'tlpResolution': 'resolved',
'tlpHighestAssociated': 'mocktlp',
'tlpLowestAssociated': 'mocktlp',
'active': True,
'benign': {
'value': False,
'classification': 'U'
},
'confidenceLevel': None,
'exploitStage': None,
'lastHit': None,
'firstHit': None,
'hitCount': None,
'reportCount': 1,
'verified': False,
'tasked': False,
'links': [
{
'rel': 'self',
            'href': f'https://{MOCK_SERVER}/api/1_0/indicator/1',
'hreflang': None,
'media': None,
'title': None,
'type': None,
'deprecation': None
},
{
'rel': 'evidence',
            'href': f'https://{MOCK_SERVER}/api/1_0/indicator/1/evidence',
'hreflang': None,
'media': None,
'title': None,
'type': None,
'deprecation': None
},
{
'rel': 'stix',
            'href': f'https://{MOCK_SERVER}/api/1_0/indicator/1/stix',
'hreflang': None,
'media': None,
'title': None,
'type': None,
'deprecation': None
}
],
'id': 1
}
MOCK_CLIENT_PARAMS = {
'server': MOCK_SERVER,
'proxy': 'false',
'insecure': 'true',
'credentials': {
'identifier': MOCK_USER,
'password': MOCK_PASS
}
}
@pytest.fixture
def mock_client():
return build_client(MOCK_CLIENT_PARAMS)
def mock_indicator_search(indicator_type: str, requests_mock):
requests_mock.get(
f'https://{MOCK_SERVER}/api/1_0/indicator/match?type={indicator_type}&value={MOCK_INDICATOR}',
json=BASE_MOCK_JSON
)
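# Each test below registers the mock match endpoint for one indicator type and
# asserts that the command's context data carries the id from BASE_MOCK_JSON.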
def test_domain_command(requests_mock, mock_client):
mock_indicator_search('domain', requests_mock)
args: dict = {'domain': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = domain_command(mock_client, args)[0]
assert enrichment_output.illuminate_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_email_command(requests_mock, mock_client):
mock_indicator_search('email', requests_mock)
args: dict = {'email': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = email_command(mock_client, args)[0]
assert enrichment_output.illuminate_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_ip_command(requests_mock, mock_client):
mock_indicator_search('ip', requests_mock)
args: dict = {'ip': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = ip_command(mock_client, args)[0]
assert enrichment_output.illuminate_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_file_command(requests_mock, mock_client):
mock_indicator_search('file', requests_mock)
args: dict = {'file': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = file_command(mock_client, args)[0]
assert enrichment_output.illuminate_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_url_command(requests_mock, mock_client):
mock_indicator_search('url', requests_mock)
args: dict = {'url': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = url_command(mock_client, args)[0]
assert enrichment_output.illuminate_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_illuminate_enrich_string_command(requests_mock, mock_client):
mock_indicator_search('string', requests_mock)
args: dict = {'string': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = illuminate_enrich_string_command(mock_client, args)[0]
assert enrichment_output.illuminate_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_illuminate_enrich_ipv6_command(requests_mock, mock_client):
mock_indicator_search('ipv6', requests_mock)
args: dict = {'ip': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = illuminate_enrich_ipv6_command(mock_client, args)[0]
assert enrichment_output.illuminate_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_illuminate_enrich_mutex_command(requests_mock, mock_client):
mock_indicator_search('mutex', requests_mock)
args: dict = {'mutex': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = illuminate_enrich_mutex_command(mock_client, args)[0]
assert enrichment_output.illuminate_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_illuminate_enrich_http_request_command(requests_mock, mock_client):
mock_indicator_search('httpRequest', requests_mock)
args: dict = {'http-request': f'{MOCK_INDICATOR}'}
enrichment_output: EnrichmentOutput = illuminate_enrich_http_request_command(mock_client, args)[0]
assert enrichment_output.illuminate_context_data.get('ID') == BASE_MOCK_JSON.get('id')
def test_malicious_indicator_check_empty(mock_client):
data = {}
assert mock_client.is_indicator_malicious(data) is False
def test_malicious_indicator_check_benign_false(mock_client):
data = {
"benign": {
"value": False
}
}
assert mock_client.is_indicator_malicious(data) is True
def test_malicious_indicator_check_benign_true(mock_client):
data = {
"benign": {
"value": True
}
}
assert mock_client.is_indicator_malicious(data) is False
|
|
#!/usr/bin/env python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
'''
Azure External Inventory Script
===============================
Generates dynamic inventory by making API requests to the Azure Resource
Manager using the Azure Python SDK. For instructions on installing the
Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/
Authentication
--------------
The order of precedence is command line arguments, environment variables,
and finally the [default] profile found in ~/.azure/credentials.
If using a credentials file, it should be an ini formatted file with one or
more sections, which we refer to as profiles. The script looks for a
[default] section, if a profile is not specified either on the command line
or with an environment variable. The keys in a profile will match the
list of command line arguments below.
For command line arguments and environment variables specify a profile found
in your ~/.azure/credentials file, or a service principal or Active Directory
user.
Command line arguments:
- profile
- client_id
- secret
- subscription_id
- tenant
- ad_user
- password
Environment variables:
- AZURE_PROFILE
- AZURE_CLIENT_ID
- AZURE_SECRET
- AZURE_SUBSCRIPTION_ID
- AZURE_TENANT
- AZURE_AD_USER
- AZURE_PASSWORD
Run for Specific Host
-----------------------
When run for a specific host using the --host option, a resource group is
required. For a specific host, this script returns the following variables:
{
"ansible_host": "XXX.XXX.XXX.XXX",
"computer_name": "computer_name2",
"fqdn": null,
"id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name",
"image": {
"offer": "CentOS",
"publisher": "OpenLogic",
"sku": "7.1",
"version": "latest"
},
"location": "westus",
"mac_address": "00-00-5E-00-53-FE",
"name": "object-name",
"network_interface": "interface-name",
"network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1",
"network_security_group": null,
"network_security_group_id": null,
"os_disk": {
"name": "object-name",
"operating_system_type": "Linux"
},
"plan": null,
"powerstate": "running",
"private_ip": "172.26.3.6",
"private_ip_alloc_method": "Static",
"provisioning_state": "Succeeded",
"public_ip": "XXX.XXX.XXX.XXX",
"public_ip_alloc_method": "Static",
"public_ip_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name",
"public_ip_name": "object-name",
"resource_group": "galaxy-production",
"security_group": "object-name",
"security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name",
"tags": {
"db": "database"
},
"type": "Microsoft.Compute/virtualMachines",
"virtual_machine_size": "Standard_DS4"
}
Groups
------
When run in --list mode, instances are grouped by the following categories:
- azure
- location
- resource_group
- security_group
- tag key
- tag key_value
Control groups using azure_rm.ini or set environment variables:
AZURE_GROUP_BY_RESOURCE_GROUP=yes
AZURE_GROUP_BY_LOCATION=yes
AZURE_GROUP_BY_SECURITY_GROUP=yes
AZURE_GROUP_BY_TAG=yes
Select hosts within specific resource groups by assigning a comma separated list to:
AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b
Select hosts for specific tag key by assigning a comma separated list of tag keys to:
AZURE_TAGS=key1,key2,key3
Select hosts for specific locations:
AZURE_LOCATIONS=eastus,westus,eastus2
Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to:
AZURE_TAGS=key1:value1,key2:value2
If you don't need the powerstate, you can improve performance by turning off powerstate fetching:
AZURE_INCLUDE_POWERSTATE=no
azure_rm.ini
------------
As mentioned above, you can control execution using environment variables or a .ini file. A sample
azure_rm.ini is included. The name of the .ini file is the basename of the inventory script (in this case
'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify
a different path for the .ini file, define the AZURE_INI_PATH environment variable:
export AZURE_INI_PATH=/path/to/custom.ini
Powerstate:
-----------
The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is
up. If the value is anything other than 'running', the machine is down, and will be unreachable.
Examples:
---------
Execute /bin/uname on all instances in the galaxy-qa resource group
$ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a"
Use the inventory script to print instance specific information
$ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty
Use with a playbook
$ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa
Insecure Platform Warning
-------------------------
If you receive InsecurePlatformWarning from urllib3, install the
requests security packages:
pip install requests[security]
author:
- Chris Houseknecht (@chouseknecht)
- Matt Davis (@nitzmahone)
Company: Ansible by Red Hat
Version: 1.0.0
'''
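# Example ~/.azure/credentials file (illustrative values only); the section name
# is the profile and the keys mirror the command line arguments listed above:
#
#   [default]
#   subscription_id=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#   client_id=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#   secret=your-service-principal-secret
#   tenant=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx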
import argparse
import ConfigParser
import json
import os
import re
import sys
from distutils.version import LooseVersion
from os.path import expanduser
HAS_AZURE = True
HAS_AZURE_EXC = None
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.compute import __version__ as azure_compute_version
from azure.common import AzureMissingResourceHttpError, AzureHttpError
from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
from azure.mgmt.network.network_management_client import NetworkManagementClient
from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient
from azure.mgmt.compute.compute_management_client import ComputeManagementClient
except ImportError as exc:
HAS_AZURE_EXC = exc
HAS_AZURE = False
AZURE_CREDENTIAL_ENV_MAPPING = dict(
profile='AZURE_PROFILE',
subscription_id='AZURE_SUBSCRIPTION_ID',
client_id='AZURE_CLIENT_ID',
secret='AZURE_SECRET',
tenant='AZURE_TENANT',
ad_user='AZURE_AD_USER',
password='AZURE_PASSWORD'
)
AZURE_CONFIG_SETTINGS = dict(
resource_groups='AZURE_RESOURCE_GROUPS',
tags='AZURE_TAGS',
locations='AZURE_LOCATIONS',
include_powerstate='AZURE_INCLUDE_POWERSTATE',
group_by_resource_group='AZURE_GROUP_BY_RESOURCE_GROUP',
group_by_location='AZURE_GROUP_BY_LOCATION',
group_by_security_group='AZURE_GROUP_BY_SECURITY_GROUP',
group_by_tag='AZURE_GROUP_BY_TAG'
)
AZURE_MIN_VERSION = "0.30.0rc5"
def azure_id_to_dict(id):
pieces = re.sub(r'^\/', '', id).split('/')
result = {}
index = 0
while index < len(pieces) - 1:
result[pieces[index]] = pieces[index + 1]
index += 1
return result
class AzureRM(object):
def __init__(self, args):
self._args = args
self._compute_client = None
self._resource_client = None
self._network_client = None
self.debug = False
if args.debug:
self.debug = True
self.credentials = self._get_credentials(args)
if not self.credentials:
self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
"or define a profile in ~/.azure/credentials.")
if self.credentials.get('subscription_id', None) is None:
self.fail("Credentials did not include a subscription_id value.")
self.log("setting subscription_id")
self.subscription_id = self.credentials['subscription_id']
if self.credentials.get('client_id') is not None and \
self.credentials.get('secret') is not None and \
self.credentials.get('tenant') is not None:
self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
secret=self.credentials['secret'],
tenant=self.credentials['tenant'])
elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], self.credentials['password'])
else:
self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
"Credentials must include client_id, secret and tenant or ad_user and password.")
def log(self, msg):
if self.debug:
print (msg + u'\n')
def fail(self, msg):
raise Exception(msg)
def _get_profile(self, profile="default"):
path = expanduser("~")
path += "/.azure/credentials"
try:
config = ConfigParser.ConfigParser()
config.read(path)
except Exception as exc:
self.fail("Failed to access {0}. Check that the file exists and you have read "
"access. {1}".format(path, str(exc)))
credentials = dict()
for key in AZURE_CREDENTIAL_ENV_MAPPING:
try:
credentials[key] = config.get(profile, key, raw=True)
            except Exception:
pass
if credentials.get('client_id') is not None or credentials.get('ad_user') is not None:
return credentials
return None
def _get_env_credentials(self):
env_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
env_credentials[attribute] = os.environ.get(env_variable, None)
if env_credentials['profile'] is not None:
credentials = self._get_profile(env_credentials['profile'])
return credentials
if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None:
return env_credentials
return None
def _get_credentials(self, params):
# Get authentication credentials.
# Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials.
self.log('Getting credentials')
arg_credentials = dict()
for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
arg_credentials[attribute] = getattr(params, attribute)
# try module params
if arg_credentials['profile'] is not None:
self.log('Retrieving credentials with profile parameter.')
credentials = self._get_profile(arg_credentials['profile'])
return credentials
if arg_credentials['client_id'] is not None:
self.log('Received credentials from parameters.')
return arg_credentials
# try environment
env_credentials = self._get_env_credentials()
if env_credentials:
self.log('Received credentials from env.')
return env_credentials
# try default profile from ~./azure/credentials
default_credentials = self._get_profile()
if default_credentials:
self.log('Retrieved default profile credentials from ~/.azure/credentials.')
return default_credentials
return None
def _register(self, key):
try:
# We have to perform the one-time registration here. Otherwise, we receive an error the first
# time we attempt to use the requested client.
resource_client = self.rm_client
resource_client.providers.register(key)
except Exception as exc:
self.fail("One-time registration of {0} failed - {1}".format(key, str(exc)))
@property
def network_client(self):
self.log('Getting network client')
if not self._network_client:
self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id)
self._register('Microsoft.Network')
return self._network_client
@property
def rm_client(self):
self.log('Getting resource manager client')
if not self._resource_client:
self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id)
return self._resource_client
@property
def compute_client(self):
self.log('Getting compute client')
if not self._compute_client:
self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id)
self._register('Microsoft.Compute')
return self._compute_client
class AzureInventory(object):
def __init__(self):
self._args = self._parse_cli_args()
try:
rm = AzureRM(self._args)
except Exception as e:
sys.exit("{0}".format(str(e)))
self._compute_client = rm.compute_client
self._network_client = rm.network_client
self._resource_client = rm.rm_client
self._security_groups = None
self.resource_groups = []
self.tags = None
self.locations = None
self.replace_dash_in_groups = False
self.group_by_resource_group = True
self.group_by_location = True
self.group_by_security_group = True
self.group_by_tag = True
self.include_powerstate = True
self._inventory = dict(
_meta=dict(
hostvars=dict()
),
azure=[]
)
self._get_settings()
if self._args.resource_groups:
self.resource_groups = self._args.resource_groups.split(',')
if self._args.tags:
self.tags = self._args.tags.split(',')
if self._args.locations:
self.locations = self._args.locations.split(',')
if self._args.no_powerstate:
self.include_powerstate = False
self.get_inventory()
print (self._json_format_dict(pretty=self._args.pretty))
sys.exit(0)
def _parse_cli_args(self):
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file for an Azure subscription')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--debug', action='store_true', default=False,
help='Send debug messages to STDOUT')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty print JSON output(default: False)')
parser.add_argument('--profile', action='store',
help='Azure profile contained in ~/.azure/credentials')
parser.add_argument('--subscription_id', action='store',
help='Azure Subscription Id')
parser.add_argument('--client_id', action='store',
help='Azure Client Id ')
parser.add_argument('--secret', action='store',
help='Azure Client Secret')
parser.add_argument('--tenant', action='store',
help='Azure Tenant Id')
parser.add_argument('--ad-user', action='store',
help='Active Directory User')
parser.add_argument('--password', action='store',
help='password')
parser.add_argument('--resource-groups', action='store',
help='Return inventory for comma separated list of resource group names')
parser.add_argument('--tags', action='store',
help='Return inventory for comma separated list of tag key:value pairs')
parser.add_argument('--locations', action='store',
help='Return inventory for comma separated list of locations')
parser.add_argument('--no-powerstate', action='store_true', default=False,
help='Do not include the power state of each virtual host')
return parser.parse_args()
def get_inventory(self):
if len(self.resource_groups) > 0:
# get VMs for requested resource groups
for resource_group in self.resource_groups:
try:
virtual_machines = self._compute_client.virtual_machines.list(resource_group)
except Exception as exc:
sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group,
str(exc)))
if self._args.host or self.tags:
selected_machines = self._selected_machines(virtual_machines)
self._load_machines(selected_machines)
else:
self._load_machines(virtual_machines)
else:
# get all VMs within the subscription
try:
virtual_machines = self._compute_client.virtual_machines.list_all()
except Exception as exc:
sys.exit("Error: fetching virtual machines - {0}".format(str(exc)))
if self._args.host or self.tags or self.locations:
selected_machines = self._selected_machines(virtual_machines)
self._load_machines(selected_machines)
else:
self._load_machines(virtual_machines)
def _load_machines(self, machines):
for machine in machines:
id_dict = azure_id_to_dict(machine.id)
#TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
# fixed, we should remove the .lower(). Opened Issue
# #574: https://github.com/Azure/azure-sdk-for-python/issues/574
resource_group = id_dict['resourceGroups'].lower()
if self.group_by_security_group:
self._get_security_groups(resource_group)
host_vars = dict(
ansible_host=None,
private_ip=None,
private_ip_alloc_method=None,
public_ip=None,
public_ip_name=None,
public_ip_id=None,
public_ip_alloc_method=None,
fqdn=None,
location=machine.location,
name=machine.name,
type=machine.type,
id=machine.id,
tags=machine.tags,
network_interface_id=None,
network_interface=None,
resource_group=resource_group,
mac_address=None,
plan=(machine.plan.name if machine.plan else None),
virtual_machine_size=machine.hardware_profile.vm_size,
computer_name=machine.os_profile.computer_name,
provisioning_state=machine.provisioning_state,
)
host_vars['os_disk'] = dict(
name=machine.storage_profile.os_disk.name,
operating_system_type=machine.storage_profile.os_disk.os_type.value
)
if self.include_powerstate:
host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name)
if machine.storage_profile.image_reference:
host_vars['image'] = dict(
offer=machine.storage_profile.image_reference.offer,
publisher=machine.storage_profile.image_reference.publisher,
sku=machine.storage_profile.image_reference.sku,
version=machine.storage_profile.image_reference.version
)
# Add windows details
if machine.os_profile.windows_configuration is not None:
host_vars['windows_auto_updates_enabled'] = \
machine.os_profile.windows_configuration.enable_automatic_updates
host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone
host_vars['windows_rm'] = None
if machine.os_profile.windows_configuration.win_rm is not None:
host_vars['windows_rm'] = dict(listeners=None)
if machine.os_profile.windows_configuration.win_rm.listeners is not None:
host_vars['windows_rm']['listeners'] = []
for listener in machine.os_profile.windows_configuration.win_rm.listeners:
host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol,
certificate_url=listener.certificate_url))
for interface in machine.network_profile.network_interfaces:
interface_reference = self._parse_ref_id(interface.id)
network_interface = self._network_client.network_interfaces.get(
interface_reference['resourceGroups'],
interface_reference['networkInterfaces'])
if network_interface.primary:
if self.group_by_security_group and \
self._security_groups[resource_group].get(network_interface.id, None):
host_vars['security_group'] = \
self._security_groups[resource_group][network_interface.id]['name']
host_vars['security_group_id'] = \
self._security_groups[resource_group][network_interface.id]['id']
host_vars['network_interface'] = network_interface.name
host_vars['network_interface_id'] = network_interface.id
host_vars['mac_address'] = network_interface.mac_address
for ip_config in network_interface.ip_configurations:
host_vars['private_ip'] = ip_config.private_ip_address
host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method
if ip_config.public_ip_address:
public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id)
public_ip_address = self._network_client.public_ip_addresses.get(
public_ip_reference['resourceGroups'],
public_ip_reference['publicIPAddresses'])
host_vars['ansible_host'] = public_ip_address.ip_address
host_vars['public_ip'] = public_ip_address.ip_address
host_vars['public_ip_name'] = public_ip_address.name
host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method
host_vars['public_ip_id'] = public_ip_address.id
if public_ip_address.dns_settings:
host_vars['fqdn'] = public_ip_address.dns_settings.fqdn
self._add_host(host_vars)
def _selected_machines(self, virtual_machines):
selected_machines = []
for machine in virtual_machines:
if self._args.host and self._args.host == machine.name:
selected_machines.append(machine)
if self.tags and self._tags_match(machine.tags, self.tags):
selected_machines.append(machine)
if self.locations and machine.location in self.locations:
selected_machines.append(machine)
return selected_machines
def _get_security_groups(self, resource_group):
''' For a given resource_group build a mapping of network_interface.id to security_group name '''
if not self._security_groups:
self._security_groups = dict()
if not self._security_groups.get(resource_group):
self._security_groups[resource_group] = dict()
for group in self._network_client.network_security_groups.list(resource_group):
if group.network_interfaces:
for interface in group.network_interfaces:
self._security_groups[resource_group][interface.id] = dict(
name=group.name,
id=group.id
)
def _get_powerstate(self, resource_group, name):
try:
vm = self._compute_client.virtual_machines.get(resource_group,
name,
expand='instanceview')
except Exception as exc:
sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc)))
return next((s.code.replace('PowerState/', '')
for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)
def _add_host(self, vars):
host_name = self._to_safe(vars['name'])
resource_group = self._to_safe(vars['resource_group'])
security_group = None
if vars.get('security_group'):
security_group = self._to_safe(vars['security_group'])
if self.group_by_resource_group:
if not self._inventory.get(resource_group):
self._inventory[resource_group] = []
self._inventory[resource_group].append(host_name)
if self.group_by_location:
if not self._inventory.get(vars['location']):
self._inventory[vars['location']] = []
self._inventory[vars['location']].append(host_name)
if self.group_by_security_group and security_group:
if not self._inventory.get(security_group):
self._inventory[security_group] = []
self._inventory[security_group].append(host_name)
self._inventory['_meta']['hostvars'][host_name] = vars
self._inventory['azure'].append(host_name)
if self.group_by_tag and vars.get('tags'):
for key, value in vars['tags'].items():
safe_key = self._to_safe(key)
safe_value = safe_key + '_' + self._to_safe(value)
if not self._inventory.get(safe_key):
self._inventory[safe_key] = []
if not self._inventory.get(safe_value):
self._inventory[safe_value] = []
self._inventory[safe_key].append(host_name)
self._inventory[safe_value].append(host_name)
def _json_format_dict(self, pretty=False):
# convert inventory to json
if pretty:
return json.dumps(self._inventory, sort_keys=True, indent=2)
else:
return json.dumps(self._inventory)
def _get_settings(self):
# Load settings from the .ini, if it exists. Otherwise,
# look for environment values.
file_settings = self._load_settings()
if file_settings:
for key in AZURE_CONFIG_SETTINGS:
if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key):
values = file_settings.get(key).split(',')
if len(values) > 0:
setattr(self, key, values)
elif file_settings.get(key):
val = self._to_boolean(file_settings[key])
setattr(self, key, val)
else:
env_settings = self._get_env_settings()
for key in AZURE_CONFIG_SETTINGS:
                if key in ('resource_groups', 'tags', 'locations') and env_settings.get(key):
values = env_settings.get(key).split(',')
if len(values) > 0:
setattr(self, key, values)
elif env_settings.get(key, None) is not None:
val = self._to_boolean(env_settings[key])
setattr(self, key, val)
def _parse_ref_id(self, reference):
response = {}
keys = reference.strip('/').split('/')
for index in range(len(keys)):
if index < len(keys) - 1 and index % 2 == 0:
response[keys[index]] = keys[index + 1]
return response
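    # Illustrative example of _parse_ref_id (the id below is a made-up placeholder):
    #   /subscriptions/xxxx/resourceGroups/myRG/providers/Microsoft.Network/networkInterfaces/nic0
    # is split into alternating key/value segments, producing
    #   {'subscriptions': 'xxxx', 'resourceGroups': 'myRG',
    #    'providers': 'Microsoft.Network', 'networkInterfaces': 'nic0'},
    # so callers can read e.g. reference['resourceGroups'].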
def _to_boolean(self, value):
if value in ['Yes', 'yes', 1, 'True', 'true', True]:
result = True
elif value in ['No', 'no', 0, 'False', 'false', False]:
result = False
else:
result = True
return result
def _get_env_settings(self):
env_settings = dict()
for attribute, env_variable in AZURE_CONFIG_SETTINGS.items():
env_settings[attribute] = os.environ.get(env_variable, None)
return env_settings
def _load_settings(self):
basename = os.path.splitext(os.path.basename(__file__))[0]
default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini'))
path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path)))
config = None
settings = None
try:
config = ConfigParser.ConfigParser()
config.read(path)
except:
pass
if config is not None:
settings = dict()
for key in AZURE_CONFIG_SETTINGS:
try:
settings[key] = config.get('azure', key, raw=True)
except:
pass
return settings
def _tags_match(self, tag_obj, tag_args):
'''
Return True if the tags object from a VM contains the requested tag values.
:param tag_obj: Dictionary of string:string pairs
:param tag_args: List of strings in the form key=value
:return: boolean
'''
if not tag_obj:
return False
matches = 0
for arg in tag_args:
arg_key = arg
arg_value = None
if re.search(r':', arg):
arg_key, arg_value = arg.split(':')
if arg_value and tag_obj.get(arg_key, None) == arg_value:
matches += 1
elif not arg_value and tag_obj.get(arg_key, None) is not None:
matches += 1
if matches == len(tag_args):
return True
return False
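    # Illustrative behaviour of _tags_match (tag values are made up):
    #   _tags_match({'env': 'prod', 'role': 'web'}, ['env:prod'])   -> True
    #   _tags_match({'env': 'prod'}, ['env'])                       -> True  (key-only match)
    #   _tags_match({'env': 'prod'}, ['env:dev', 'role:web'])       -> False (every arg must match)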
def _to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += "\-"
return re.sub(regex + "]", "_", word)
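    # Illustrative behaviour of _to_safe (hypothetical host name): with the default
    # replace_dash_in_groups=False, 'web-01.example' becomes 'web-01_example';
    # with replace_dash_in_groups=True the dash is also replaced, giving
    # 'web_01_example'.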
def main():
if not HAS_AZURE:
sys.exit("The Azure python sdk is not installed (try 'pip install azure==2.0.0rc5') - {0}".format(HAS_AZURE_EXC))
if LooseVersion(azure_compute_version) != LooseVersion(AZURE_MIN_VERSION):
sys.exit("Expecting azure.mgmt.compute.__version__ to be {0}. Found version {1} "
"Do you have Azure == 2.0.0rc5 installed?".format(AZURE_MIN_VERSION, azure_compute_version))
AzureInventory()
if __name__ == '__main__':
main()
|
|
# Copyright (C) 2016 iNuron NV
#
# This file is part of Open vStorage Open Source Edition (OSE),
# as available from
#
# http://www.openvstorage.org and
# http://www.openvstorage.com.
#
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License v3 (GNU AGPLv3)
# as published by the Free Software Foundation, in version 3 as it comes
# in the LICENSE.txt file of the Open vStorage OSE distribution.
#
# Open vStorage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY of any kind.
"""
Basic functionality unit tests for the OVS Cinder Plugin
- create volume
- delete volume
- create snapshot
- delete snapshot
- clone from existing snapshot
- clone from volume (via new snapshot)
* validate on OVS model
* validate on FS (volumedriver filesystem)
"""
from ovs_common import OVSPluginTestCase
class OVSPluginBasicTestCase(OVSPluginTestCase):
"""
Basic tests - the real thing, takes some time
"""
# TESTS
def test_create_volume(self):
"""
Create a volume using the cinder client
COMMAND:
cinder create --volume-type ovs --display-name VOLUME_NAME VOLUME_SIZE
ASSERTS:
file exists on mountpoint
        vdisk modeled in OVS
CLEANUP:
delete volume
"""
volume, volume_name, file_name = self._new_volume()
self.assertTrue(self._file_exists_on_mountpoint(file_name), 'File %s not created on mountpoint %s ' % (file_name, OVSPluginTestCase.VPOOL_MOUNTPOINT))
self.assertTrue(self._ovs_devicename_in_vdisklist(file_name), 'Device not modeled in OVS')
def test_create_delete_volume(self):
"""
Create a volume using the cinder client, delete it
COMMAND:
cinder create --volume-type ovs --display-name VOLUME_NAME VOLUME_SIZE
cinder delete <VOLID>
ASSERTS:
file exists on mountpoint
vdisk modeled in OVS
file removed from mountpoint
vdisk no longer modeled in OVS
CLEANUP:
-none
"""
volume, volume_name, file_name = self._new_volume()
self.assertTrue(self._file_exists_on_mountpoint(file_name), 'File %s not created on mountpoint %s ' % (file_name, OVSPluginTestCase.VPOOL_MOUNTPOINT))
self.assertTrue(self._ovs_devicename_in_vdisklist(file_name), 'Device not modeled in OVS')
self._remove_volume(volume, volume_name)
self.assertFalse(self._file_exists_on_mountpoint(file_name), 'File %s not deleted from mountpoint %s ' % (file_name, OVSPluginTestCase.VPOOL_MOUNTPOINT))
self.assertTrue(self._ovs_devicename_in_vdisklist(file_name, exists=False), 'Device still modeled in OVS')
def test_create_delete_volume_snapshots(self):
"""
Create a volume using the cinder client
Create a snapshot using the cinder client
List the snapshot using the cinder client
Delete the snapshot using the cinder client
Delete the volume using the cinder client
COMMAND:
cinder create --volume-type ovs --display-name VOLUME_NAME VOLUME_SIZE
cinder snapshot-create --display-name SNAP_NAME <VOLID>
cinder snapshot-list | grep SNAP_NAME
cinder snapshot-delete <SNAPID>
cinder snapshot-list | grep SNAP_NAME
cinder delete <VOLID>
ASSERTS:
file exists on mountpoint
vdisk modeled in OVS
snapshot exists in cinder DB
snapshot modeled in OVS
snapshot deleted from cinder DB
snapshot removed from OVS
file removed from mountpoint
vdisk no longer modeled in OVS
CLEANUP:
-none
"""
self._debug('started test')
volume, volume_name, file_name = self._new_volume()
self.assertTrue(self._file_exists_on_mountpoint(file_name), 'File %s not created on mountpoint %s ' % (file_name, OVSPluginTestCase.VPOOL_MOUNTPOINT))
self.assertTrue(self._ovs_devicename_in_vdisklist(file_name), 'Device not modeled in OVS')
snapshot, snap_name = self._new_snapshot(volume)
cinder_snapshots = self._cinder_list_snapshots()
self.assertTrue(snapshot.id in cinder_snapshots.keys(), 'Snapshot not modeled in Cinder')
snapshot_name = cinder_snapshots[snapshot.id]
self.assertTrue(snapshot_name == snap_name, 'Wrong name for snapshot %s' % snapshot_name)
self.assertTrue(self._ovs_snapshot_id_in_vdisklist_snapshots(snapshot.id), 'Snapshot not modeled in OVS')
        self._remove_snapshot(snap_name, snapshot, force=True)
cinder_snapshots = self._cinder_list_snapshots()
self.assertFalse(snapshot.id in cinder_snapshots.keys(), 'Snapshot still modeled in Cinder')
self._remove_volume(volume, volume_name)
self.assertFalse(self._file_exists_on_mountpoint(file_name), 'File %s not deleted from mountpoint %s ' % (file_name, OVSPluginTestCase.VPOOL_MOUNTPOINT))
self.assertTrue(self._ovs_devicename_in_vdisklist(file_name, exists=False), 'Device still modeled in OVS')
self._debug('ended test')
def test_create_delete_volume_clone_delete_from_snapshot(self):
"""
Create a volume using the cinder client
Create a snapshot using the cinder client
List the snapshot using the cinder client
Create a volume from that snapshot using the cinder client
Delete the cloned volume using the cinder client
Delete the snapshot using the cinder client
Delete the volume using the cinder client
COMMAND:
cinder create --volume-type ovs --display-name VOLUME_NAME VOLUME_SIZE
cinder snapshot-create --display-name SNAP_NAME <VOLID>
cinder snapshot-list | grep SNAP_NAME
cinder create --snapshot-id <SNAP_ID> --display-name CLONE_NAME VOLUME_SIZE
cinder delete <CLONEID>
cinder snapshot-delete <SNAPID>
cinder snapshot-list | grep SNAP_NAME
cinder delete <VOLID>
ASSERTS:
file exists on mountpoint
vdisk modeled in OVS
snapshot exists in cinder DB
snapshot modeled in OVS
clone vdisk modeled in OVS
clone vdisk deleted from OVS
snapshot deleted from cinder DB
snapshot removed from OVS
file removed from mountpoint
vdisk no longer modeled in OVS
CLEANUP:
-none
"""
self._debug('started test')
volume, volume_name, file_name = self._new_volume()
self.assertTrue(self._file_exists_on_mountpoint(file_name), 'File %s not created on mountpoint %s ' % (file_name, OVSPluginTestCase.VPOOL_MOUNTPOINT))
self.assertTrue(self._ovs_devicename_in_vdisklist(file_name), 'Device not modeled in OVS')
snapshot, snap_name = self._new_snapshot(volume)
cinder_snapshots = self._cinder_list_snapshots()
self.assertTrue(snapshot.id in cinder_snapshots.keys(), 'Snapshot not modeled in Cinder')
snapshot_name = cinder_snapshots[snapshot.id]
self.assertTrue(snapshot_name == snap_name, 'Wrong name for snapshot %s' % snapshot_name)
self.assertTrue(self._ovs_snapshot_id_in_vdisklist_snapshots(snapshot.id), 'Snapshot not modeled in OVS')
clone, clone_name, clone_file_name = self._new_volume_from_snapshot(snapshot)
self.assertTrue(self._file_exists_on_mountpoint(clone_file_name), 'File %s not created on mountpoint %s ' % (clone_file_name, OVSPluginTestCase.VPOOL_MOUNTPOINT))
self.assertTrue(self._ovs_devicename_in_vdisklist(clone_file_name), 'Device not modeled in OVS')
self._remove_volume(clone, clone_name)
self.assertFalse(self._file_exists_on_mountpoint(clone_file_name), 'File %s not deleted from mountpoint %s ' % (clone_file_name, OVSPluginTestCase.VPOOL_MOUNTPOINT))
self.assertTrue(self._ovs_devicename_in_vdisklist(clone_file_name, exists=False), 'Device still modeled in OVS')
        self._remove_snapshot(snap_name, snapshot, force=True)
cinder_snapshots = self._cinder_list_snapshots()
self.assertFalse(snapshot.id in cinder_snapshots.keys(), 'Snapshot still modeled in Cinder')
self._remove_volume(volume, volume_name)
self.assertFalse(self._file_exists_on_mountpoint(file_name), 'File %s not deleted from mountpoint %s ' % (file_name, OVSPluginTestCase.VPOOL_MOUNTPOINT))
self.assertTrue(self._ovs_devicename_in_vdisklist(file_name, exists=False), 'Device still modeled in OVS')
self._debug('ended test')
def test_create_delete_volume_clone_delete_from_volume(self):
"""
        Create a volume using the cinder client
        Create a volume from that volume using the cinder client (THIN clone via a new snapshot)
Delete the cloned volume using the cinder client
Delete the volume using the cinder client
COMMAND:
cinder create --volume-type ovs --display-name VOLUME_NAME VOLUME_SIZE
cinder create --source-volid <VOLUME_ID> --display-name CLONE_NAME VOLUME_SIZE
cinder delete <CLONEID>
cinder delete <VOLID>
ASSERTS:
file exists on mountpoint
vdisk modeled in OVS
clone vdisk modeled in OVS
OVS snapshot created (since it's a new disk it has no default snapshot) - no cinder snapshot for this
clone vdisk deleted from OVS
original vdisk deleted from OVS
file removed from mountpoint
vdisk no longer modeled in OVS
CLEANUP:
-none
"""
self._debug('started test')
volume, volume_name, file_name = self._new_volume()
self.assertTrue(self._file_exists_on_mountpoint(file_name), 'File %s not created on mountpoint %s ' % (file_name, OVSPluginTestCase.VPOOL_MOUNTPOINT))
self.assertTrue(self._ovs_devicename_in_vdisklist(file_name), 'Device not modeled in OVS')
clone, clone_name, clone_file_name = self._new_volume_from_volume(volume)
self.assertTrue(self._file_exists_on_mountpoint(clone_file_name), 'File %s not created on mountpoint %s ' % (clone_file_name, OVSPluginTestCase.VPOOL_MOUNTPOINT))
self.assertTrue(self._ovs_devicename_in_vdisklist(clone_file_name), 'Device not modeled in OVS')
# assert snapshot created for volume
vdisk = self._get_ovs_vdisk_by_devicename(file_name)
self.assertTrue(len(vdisk.snapshots) > 0, 'No snapshots created for source disk, expected at least 1')
self._remove_volume(clone, clone_name)
self.assertFalse(self._file_exists_on_mountpoint(clone_file_name), 'File %s not deleted from mountpoint %s ' % (clone_file_name, OVSPluginTestCase.VPOOL_MOUNTPOINT))
self.assertTrue(self._ovs_devicename_in_vdisklist(clone_file_name, exists=False), 'Device still modeled in OVS')
self._remove_volume(volume, volume_name)
self.assertFalse(self._file_exists_on_mountpoint(file_name), 'File %s not deleted from mountpoint %s ' % (file_name, OVSPluginTestCase.VPOOL_MOUNTPOINT))
self.assertTrue(self._ovs_devicename_in_vdisklist(file_name, exists=False), 'Device still modeled in OVS')
self._debug('ended test')
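# Illustrative note (not from the original file): these cases are plain unittest
# tests, so a single one can be run with something like
#   python -m unittest <module_name>.OVSPluginBasicTestCase.test_create_volume
# where <module_name> is a placeholder for whatever this file is saved as.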
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Save and restore variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import re
import time
from google.protobuf import text_format
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
def _GetCheckpointFilename(save_dir, latest_filename):
"""Returns a filename for storing the CheckpointState.
Args:
save_dir: The directory for saving and restoring checkpoints.
latest_filename: Name of the file in 'save_dir' that is used
to store the CheckpointState.
Returns:
The path of the file that contains the CheckpointState proto.
"""
if latest_filename is None:
latest_filename = "checkpoint"
return os.path.join(save_dir, latest_filename)
@tf_export(v1=["train.generate_checkpoint_state_proto"])
def generate_checkpoint_state_proto(save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=None,
all_model_checkpoint_timestamps=None,
last_preserved_timestamp=None):
"""Generates a checkpoint state proto.
Args:
save_dir: Directory where the model was saved.
model_checkpoint_path: The checkpoint file.
all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted
checkpoints, sorted from oldest to newest. If this is a non-empty list,
the last element must be equal to model_checkpoint_path. These paths
are also saved in the CheckpointState proto.
all_model_checkpoint_timestamps: A list of floats, indicating the number of
seconds since the Epoch when each checkpoint was generated.
last_preserved_timestamp: A float, indicating the number of seconds since
the Epoch when the last preserved checkpoint was written, e.g. due to a
`keep_checkpoint_every_n_hours` parameter (see
`tf.train.CheckpointManager` for an implementation).
Returns:
CheckpointState proto with model_checkpoint_path and
all_model_checkpoint_paths updated to either absolute paths or
relative paths to the current save_dir.
Raises:
ValueError: If `all_model_checkpoint_timestamps` was provided but its length
does not match `all_model_checkpoint_paths`.
"""
if all_model_checkpoint_paths is None:
all_model_checkpoint_paths = []
if (not all_model_checkpoint_paths or
all_model_checkpoint_paths[-1] != model_checkpoint_path):
logging.info("%s is not in all_model_checkpoint_paths. Manually adding it.",
model_checkpoint_path)
all_model_checkpoint_paths.append(model_checkpoint_path)
if (all_model_checkpoint_timestamps
and (len(all_model_checkpoint_timestamps)
!= len(all_model_checkpoint_paths))):
raise ValueError(
("Checkpoint timestamps, if provided, must match checkpoint paths (got "
"paths %s and timestamps %s)")
% (all_model_checkpoint_paths, all_model_checkpoint_timestamps))
# Relative paths need to be rewritten to be relative to the "save_dir"
# if model_checkpoint_path already contains "save_dir".
if not os.path.isabs(save_dir):
if not os.path.isabs(model_checkpoint_path):
model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)
for i, p in enumerate(all_model_checkpoint_paths):
if not os.path.isabs(p):
all_model_checkpoint_paths[i] = os.path.relpath(p, save_dir)
coord_checkpoint_proto = CheckpointState(
model_checkpoint_path=model_checkpoint_path,
all_model_checkpoint_paths=all_model_checkpoint_paths,
all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
last_preserved_timestamp=last_preserved_timestamp)
return coord_checkpoint_proto
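# Illustrative example (paths are made up): with a relative save_dir, paths that
# already contain it are rewritten relative to it, e.g.
#   generate_checkpoint_state_proto(
#       "train", "train/model.ckpt-100",
#       all_model_checkpoint_paths=["train/model.ckpt-50", "train/model.ckpt-100"])
# produces model_checkpoint_path == "model.ckpt-100" and
# all_model_checkpoint_paths == ["model.ckpt-50", "model.ckpt-100"].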
@deprecation.deprecated(
date=None,
instructions=("Use `tf.train.CheckpointManager` to manage checkpoints "
"rather than manually editing the Checkpoint proto."))
@tf_export(v1=["train.update_checkpoint_state"])
def update_checkpoint_state(save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=None,
latest_filename=None,
all_model_checkpoint_timestamps=None,
last_preserved_timestamp=None):
"""Updates the content of the 'checkpoint' file.
This updates the checkpoint file containing a CheckpointState
proto.
Args:
save_dir: Directory where the model was saved.
model_checkpoint_path: The checkpoint file.
all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted
checkpoints, sorted from oldest to newest. If this is a non-empty list,
the last element must be equal to model_checkpoint_path. These paths
are also saved in the CheckpointState proto.
    latest_filename: Optional name of the checkpoint file. Defaults to
'checkpoint'.
all_model_checkpoint_timestamps: Optional list of timestamps (floats,
seconds since the Epoch) indicating when the checkpoints in
`all_model_checkpoint_paths` were created.
last_preserved_timestamp: A float, indicating the number of seconds since
the Epoch when the last preserved checkpoint was written, e.g. due to a
`keep_checkpoint_every_n_hours` parameter (see
`tf.train.CheckpointManager` for an implementation).
Raises:
RuntimeError: If any of the model checkpoint paths conflict with the file
      containing CheckpointState.
"""
update_checkpoint_state_internal(
save_dir=save_dir,
model_checkpoint_path=model_checkpoint_path,
all_model_checkpoint_paths=all_model_checkpoint_paths,
latest_filename=latest_filename,
save_relative_paths=False,
all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
last_preserved_timestamp=last_preserved_timestamp)
def update_checkpoint_state_internal(save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=None,
latest_filename=None,
save_relative_paths=False,
all_model_checkpoint_timestamps=None,
last_preserved_timestamp=None):
"""Updates the content of the 'checkpoint' file.
This updates the checkpoint file containing a CheckpointState
proto.
Args:
save_dir: Directory where the model was saved.
model_checkpoint_path: The checkpoint file.
all_model_checkpoint_paths: List of strings. Paths to all not-yet-deleted
checkpoints, sorted from oldest to newest. If this is a non-empty list,
the last element must be equal to model_checkpoint_path. These paths
are also saved in the CheckpointState proto.
    latest_filename: Optional name of the checkpoint file. Defaults to
'checkpoint'.
save_relative_paths: If `True`, will write relative paths to the checkpoint
state file.
all_model_checkpoint_timestamps: Optional list of timestamps (floats,
seconds since the Epoch) indicating when the checkpoints in
`all_model_checkpoint_paths` were created.
last_preserved_timestamp: A float, indicating the number of seconds since
the Epoch when the last preserved checkpoint was written, e.g. due to a
`keep_checkpoint_every_n_hours` parameter (see
`tf.train.CheckpointManager` for an implementation).
Raises:
RuntimeError: If any of the model checkpoint paths conflict with the file
      containing CheckpointState.
"""
# Writes the "checkpoint" file for the coordinator for later restoration.
coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename)
if save_relative_paths:
if os.path.isabs(model_checkpoint_path):
rel_model_checkpoint_path = os.path.relpath(
model_checkpoint_path, save_dir)
else:
rel_model_checkpoint_path = model_checkpoint_path
rel_all_model_checkpoint_paths = []
for p in all_model_checkpoint_paths:
if os.path.isabs(p):
rel_all_model_checkpoint_paths.append(os.path.relpath(p, save_dir))
else:
rel_all_model_checkpoint_paths.append(p)
ckpt = generate_checkpoint_state_proto(
save_dir,
rel_model_checkpoint_path,
all_model_checkpoint_paths=rel_all_model_checkpoint_paths,
all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
last_preserved_timestamp=last_preserved_timestamp)
else:
ckpt = generate_checkpoint_state_proto(
save_dir,
model_checkpoint_path,
all_model_checkpoint_paths=all_model_checkpoint_paths,
all_model_checkpoint_timestamps=all_model_checkpoint_timestamps,
last_preserved_timestamp=last_preserved_timestamp)
if coord_checkpoint_filename == ckpt.model_checkpoint_path:
raise RuntimeError("Save path '%s' conflicts with path used for "
"checkpoint state. Please use a different save path." %
model_checkpoint_path)
# Preventing potential read/write race condition by *atomically* writing to a
# file.
file_io.atomic_write_string_to_file(coord_checkpoint_filename,
text_format.MessageToString(ckpt))
@tf_export("train.get_checkpoint_state")
def get_checkpoint_state(checkpoint_dir, latest_filename=None):
"""Returns CheckpointState proto from the "checkpoint" file.
If the "checkpoint" file contains a valid CheckpointState
proto, returns it.
Args:
checkpoint_dir: The directory of checkpoints.
    latest_filename: Optional name of the checkpoint file. Defaults to
'checkpoint'.
Returns:
A CheckpointState if the state was available, None
otherwise.
Raises:
ValueError: if the checkpoint read doesn't have model_checkpoint_path set.
"""
ckpt = None
coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir,
latest_filename)
f = None
try:
# Check that the file exists before opening it to avoid
# many lines of errors from colossus in the logs.
if file_io.file_exists(coord_checkpoint_filename):
file_content = file_io.read_file_to_string(
coord_checkpoint_filename)
ckpt = CheckpointState()
text_format.Merge(file_content, ckpt)
if not ckpt.model_checkpoint_path:
raise ValueError("Invalid checkpoint state loaded from "
+ checkpoint_dir)
# For relative model_checkpoint_path and all_model_checkpoint_paths,
# prepend checkpoint_dir.
if not os.path.isabs(ckpt.model_checkpoint_path):
ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,
ckpt.model_checkpoint_path)
for i, p in enumerate(ckpt.all_model_checkpoint_paths):
if not os.path.isabs(p):
ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)
except errors.OpError as e:
# It's ok if the file cannot be read
logging.warning("%s: %s", type(e).__name__, e)
logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
return None
except text_format.ParseError as e:
logging.warning("%s: %s", type(e).__name__, e)
logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
return None
finally:
if f:
f.close()
return ckpt
def _prefix_to_checkpoint_path(prefix, format_version):
"""Returns the pathname of a checkpoint file, given the checkpoint prefix.
For V1 checkpoint, simply returns the prefix itself (the data file). For V2,
returns the pathname to the index file.
Args:
prefix: a string, the prefix of a checkpoint.
format_version: the checkpoint format version that corresponds to the
prefix.
Returns:
The pathname of a checkpoint file, taking into account the checkpoint
format version.
"""
if format_version == saver_pb2.SaverDef.V2:
return prefix + ".index" # The index file identifies a checkpoint.
return prefix # Just the data file.
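# For example (illustrative paths), a V2 prefix "/tmp/model.ckpt-1" maps to
# "/tmp/model.ckpt-1.index", while a V1 prefix is returned unchanged.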
@tf_export("train.latest_checkpoint")
def latest_checkpoint(checkpoint_dir, latest_filename=None):
"""Finds the filename of latest saved checkpoint file.
Gets the checkpoint state given the provided checkpoint_dir and looks for a
corresponding TensorFlow 2 (preferred) or TensorFlow 1.x checkpoint path.
  The latest_filename argument is only applicable if you are saving checkpoints
  using `v1.Saver.save`.
  See the [Training Checkpoints
  Guide](https://www.tensorflow.org/guide/checkpoint) for more details and
  examples.
Args:
checkpoint_dir: Directory where the variables were saved.
latest_filename: Optional name for the protocol buffer file that
contains the list of most recent checkpoint filenames.
See the corresponding argument to `v1.Saver.save`.
Returns:
The full path to the latest checkpoint or `None` if no checkpoint was found.
"""
# Pick the latest checkpoint based on checkpoint state.
ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)
if ckpt and ckpt.model_checkpoint_path:
# Look for either a V2 path or a V1 path, with priority for V2.
v2_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,
saver_pb2.SaverDef.V2)
v1_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,
saver_pb2.SaverDef.V1)
if file_io.get_matching_files(v2_path) or file_io.get_matching_files(
v1_path):
return ckpt.model_checkpoint_path
else:
logging.error("Couldn't match files for checkpoint %s",
ckpt.model_checkpoint_path)
return None
def checkpoint_exists_internal(checkpoint_prefix):
"""Checks whether a V1 or V2 checkpoint exists with the specified prefix.
This is an internal function to check if a checkpoint exists,
since it takes into account the naming difference between V1 and V2 formats.
Args:
checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking
priority. Typically the result of `Saver.save()` or that of
`tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or
V1/V2.
Returns:
A bool, true if a checkpoint referred to by `checkpoint_prefix` exists.
"""
pathname = _prefix_to_checkpoint_path(checkpoint_prefix,
saver_pb2.SaverDef.V2)
if file_io.get_matching_files(pathname):
return True
elif file_io.get_matching_files(checkpoint_prefix):
return True
else:
return False
@deprecation.deprecated(
date=None,
instructions="Use standard file APIs to check for files with this prefix.")
@tf_export(v1=["train.checkpoint_exists"])
def checkpoint_exists(checkpoint_prefix):
"""Checks whether a V1 or V2 checkpoint exists with the specified prefix.
This is the recommended way to check if a checkpoint exists, since it takes
into account the naming difference between V1 and V2 formats.
Args:
checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking
priority. Typically the result of `Saver.save()` or that of
`tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or
V1/V2.
Returns:
A bool, true if a checkpoint referred to by `checkpoint_prefix` exists.
"""
return checkpoint_exists_internal(checkpoint_prefix)
@deprecation.deprecated(
date=None,
instructions="Use standard file utilities to get mtimes.")
@tf_export(v1=["train.get_checkpoint_mtimes"])
def get_checkpoint_mtimes(checkpoint_prefixes):
"""Returns the mtimes (modification timestamps) of the checkpoints.
Globs for the checkpoints pointed to by `checkpoint_prefixes`. If the files
exist, collect their mtime. Both V2 and V1 checkpoints are considered, in
that priority.
This is the recommended way to get the mtimes, since it takes into account
the naming difference between V1 and V2 formats.
Note: If not all checkpoints exist, the length of the returned mtimes list
will be smaller than the length of `checkpoint_prefixes` list, so mapping
checkpoints to corresponding mtimes will not be possible.
Args:
checkpoint_prefixes: a list of checkpoint paths, typically the results of
`Saver.save()` or those of `tf.train.latest_checkpoint()`, regardless of
sharded/non-sharded or V1/V2.
Returns:
    A list of mtimes (in seconds since the Epoch) of the found checkpoints.
"""
mtimes = []
def match_maybe_append(pathname):
fnames = file_io.get_matching_files(pathname)
if fnames:
mtimes.append(file_io.stat(fnames[0]).mtime_nsec / 1e9)
return True
return False
for checkpoint_prefix in checkpoint_prefixes:
# Tries V2's metadata file first.
pathname = _prefix_to_checkpoint_path(checkpoint_prefix,
saver_pb2.SaverDef.V2)
if match_maybe_append(pathname):
continue
# Otherwise, tries V1, where the prefix is the complete pathname.
match_maybe_append(checkpoint_prefix)
return mtimes
@deprecation.deprecated(
date=None,
instructions="Use standard file APIs to delete files with this prefix.")
@tf_export(v1=["train.remove_checkpoint"])
def remove_checkpoint(checkpoint_prefix,
checkpoint_format_version=saver_pb2.SaverDef.V2,
meta_graph_suffix="meta"):
"""Removes a checkpoint given by `checkpoint_prefix`.
Args:
checkpoint_prefix: The prefix of a V1 or V2 checkpoint. Typically the result
of `Saver.save()` or that of `tf.train.latest_checkpoint()`, regardless of
sharded/non-sharded or V1/V2.
checkpoint_format_version: `SaverDef.CheckpointFormatVersion`, defaults to
`SaverDef.V2`.
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
"""
_delete_file_if_exists(
meta_graph_filename(checkpoint_prefix, meta_graph_suffix))
if checkpoint_format_version == saver_pb2.SaverDef.V2:
# V2 has a metadata file and some data files.
_delete_file_if_exists(checkpoint_prefix + ".index")
_delete_file_if_exists(checkpoint_prefix + ".data-?????-of-?????")
else:
# V1, Legacy. Exact match on the data file.
_delete_file_if_exists(checkpoint_prefix)
def _delete_file_if_exists(filespec):
"""Deletes files matching `filespec`."""
for pathname in file_io.get_matching_files(filespec):
file_io.delete_file(pathname)
def meta_graph_filename(checkpoint_filename, meta_graph_suffix="meta"):
"""Returns the meta graph filename.
Args:
checkpoint_filename: Name of the checkpoint file.
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
Returns:
MetaGraph file name.
"""
# If the checkpoint_filename is sharded, the checkpoint_filename could
# be of format model.ckpt-step#-?????-of-shard#. For example,
# model.ckpt-123456-?????-of-00005, or model.ckpt-123456-00001-of-00002.
basename = re.sub(r"-[\d\?]+-of-\d+$", "", checkpoint_filename)
suffixed_filename = ".".join([basename, meta_graph_suffix])
return suffixed_filename
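# Illustrative example: meta_graph_filename("model.ckpt-123456-?????-of-00005")
# strips the shard suffix and returns "model.ckpt-123456.meta".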
# TODO(allenl): Allow tf.keras.Model instances in the constructor directly?
@tf_export("train.CheckpointManager")
class CheckpointManager(object):
"""Deletes old checkpoints.
Example usage:
```python
import tensorflow as tf
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
manager = tf.train.CheckpointManager(
checkpoint, directory="/tmp/model", max_to_keep=5)
status = checkpoint.restore(manager.latest_checkpoint)
while True:
# train
manager.save()
```
`CheckpointManager` preserves its own state across instantiations (see the
`__init__` documentation for details). Only one should be active in a
particular directory at a time.
"""
def __init__(self,
checkpoint,
directory,
max_to_keep,
keep_checkpoint_every_n_hours=None,
checkpoint_name="ckpt"):
"""Configure a `CheckpointManager` for use in `directory`.
If a `CheckpointManager` was previously used in `directory`, its
state will be restored. This includes the list of managed checkpoints and
the timestamp bookkeeping necessary to support
`keep_checkpoint_every_n_hours`. The behavior of the new `CheckpointManager`
will be the same as the previous `CheckpointManager`, including cleaning up
existing checkpoints if appropriate.
Checkpoints are only considered for deletion just after a new checkpoint has
been added. At that point, `max_to_keep` checkpoints will remain in an
"active set". Once a checkpoint is preserved by
`keep_checkpoint_every_n_hours` it will not be deleted by this
`CheckpointManager` or any future `CheckpointManager` instantiated in
`directory` (regardless of the new setting of
`keep_checkpoint_every_n_hours`). The `max_to_keep` checkpoints in the
active set may be deleted by this `CheckpointManager` or a future
`CheckpointManager` instantiated in `directory` (subject to its
`max_to_keep` and `keep_checkpoint_every_n_hours` settings).
Args:
checkpoint: The `tf.train.Checkpoint` instance to save and manage
checkpoints for.
directory: The path to a directory in which to write checkpoints. A
special file named "checkpoint" is also written to this directory (in a
human-readable text format) which contains the state of the
`CheckpointManager`.
max_to_keep: An integer, the number of checkpoints to keep. Unless
preserved by `keep_checkpoint_every_n_hours`, checkpoints will be
deleted from the active set, oldest first, until only `max_to_keep`
checkpoints remain. If `None`, no checkpoints are deleted and everything
stays in the active set. Note that `max_to_keep=None` will keep all
checkpoint paths in memory and in the checkpoint state protocol buffer
on disk.
keep_checkpoint_every_n_hours: Upon removal from the active set, a
checkpoint will be preserved if it has been at least
`keep_checkpoint_every_n_hours` since the last preserved checkpoint. The
default setting of `None` does not preserve any checkpoints in this way.
checkpoint_name: Custom name for the checkpoint file.
Raises:
ValueError: If `max_to_keep` is not a positive integer.
"""
self._checkpoint = checkpoint
self._save_counter_assign = None
if max_to_keep is not None and max_to_keep <= 0:
raise ValueError(
("Expected a positive integer or `None` for `max_to_keep`, "
"got %d.")
% (max_to_keep,))
self._max_to_keep = max_to_keep
self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
self._directory = directory
self._checkpoint_prefix = os.path.join(directory, checkpoint_name)
recovered_state = get_checkpoint_state(directory)
current_clock = time.time()
self._maybe_delete = collections.OrderedDict()
if recovered_state is None:
self._latest_checkpoint = None
      # Set the clock back slightly to avoid race conditions when quickly
# re-creating a CheckpointManager.
self._last_preserved_timestamp = current_clock - 1.
else:
self._latest_checkpoint = recovered_state.model_checkpoint_path
self._last_preserved_timestamp = recovered_state.last_preserved_timestamp
if current_clock < self._last_preserved_timestamp:
# Time seems to have reversed itself. In addition to this warning, we'll
# min() saved checkpoint timestamps with the current time to ensure that
# old checkpoints don't get deleted accidentally.
logging.warning(
("time.time() returned a value %f seconds behind the last "
"preserved checkpoint timestamp.")
% (self._last_preserved_timestamp - current_clock,))
self._last_preserved_timestamp = current_clock
all_timestamps = recovered_state.all_model_checkpoint_timestamps
all_paths = recovered_state.all_model_checkpoint_paths
del recovered_state # Uses modified values from now on
if not all_timestamps:
all_timestamps = [self._last_preserved_timestamp] * len(all_paths)
for filename, timestamp in zip(all_paths, all_timestamps):
timestamp = min(timestamp, current_clock)
if timestamp > self._last_preserved_timestamp:
self._maybe_delete[filename] = timestamp
@property
def directory(self):
return self._directory
@property
def latest_checkpoint(self):
"""The prefix of the most recent checkpoint in `directory`.
Equivalent to `tf.train.latest_checkpoint(directory)` where `directory` is
the constructor argument to `CheckpointManager`.
Suitable for passing to `tf.train.Checkpoint.restore` to resume training.
Returns:
The checkpoint prefix. If there are no checkpoints, returns `None`.
"""
return self._latest_checkpoint
@property
def checkpoints(self):
"""A list of managed checkpoints.
Note that checkpoints saved due to `keep_checkpoint_every_n_hours` will not
show up in this list (to avoid ever-growing filename lists).
Returns:
A list of filenames, sorted from oldest to newest.
"""
return list(self._maybe_delete.keys())
def _sweep(self):
"""Deletes or preserves managed checkpoints."""
if not self._max_to_keep:
# Does not update self._last_preserved_timestamp, since everything is kept
# in the active set.
return
while len(self._maybe_delete) > self._max_to_keep:
filename, timestamp = self._maybe_delete.popitem(last=False)
# Even if we're keeping this checkpoint due to
# keep_checkpoint_every_n_hours, we won't reference it to avoid
# infinitely-growing CheckpointState protos.
if (self._keep_checkpoint_every_n_hours
and (timestamp - self._keep_checkpoint_every_n_hours * 3600.
>= self._last_preserved_timestamp)):
self._last_preserved_timestamp = timestamp
continue
_delete_file_if_exists(filename + ".index")
_delete_file_if_exists(filename + ".data-?????-of-?????")
def _record_state(self):
"""Saves the `CheckpointManager`'s state in `directory`."""
filenames, timestamps = zip(*self._maybe_delete.items())
update_checkpoint_state_internal(
self._directory,
model_checkpoint_path=self.latest_checkpoint,
all_model_checkpoint_paths=filenames,
all_model_checkpoint_timestamps=timestamps,
last_preserved_timestamp=self._last_preserved_timestamp,
save_relative_paths=True)
@property
def _prefix(self):
"""A common prefix for all checkpoints saved with this manager.
For example, if `directory` (a constructor argument) were `"/tmp/tf-model"`,
`prefix` would be `"/tmp/tf-model/ckpt"` and checkpoints would generally be
numbered `"/tmp/tf-model/ckpt-1"`, `"/tmp/tf-model/ckpt-2"`, and so on. Each
checkpoint has several associated files
(e.g. `"/tmp/tf-model/ckpt-2.index"`).
Returns:
A string prefix.
"""
return self._checkpoint_prefix
def save(self, checkpoint_number=None):
"""Creates a new checkpoint and manages it.
Args:
checkpoint_number: An optional integer, or an integer-dtype `Variable` or
`Tensor`, used to number the checkpoint. If `None` (default),
checkpoints are numbered using `checkpoint.save_counter`. Even if
`checkpoint_number` is provided, `save_counter` is still incremented. A
user-provided `checkpoint_number` is not incremented even if it is a
`Variable`.
Returns:
The path to the new checkpoint. It is also recorded in the `checkpoints`
and `latest_checkpoint` properties.
"""
# Save counter logic duplicated from tf.train.Checkpoint, soon to diverge
# slightly with a custom numbering option.
if context.executing_eagerly():
save_counter = self._checkpoint.save_counter
save_counter.assign_add(1)
session = None
else:
session = ops.get_default_session()
def _initializing_creator(next_creator, **kwargs):
"""Initialize the save counter if it has been newly created."""
v = next_creator(**kwargs)
session.run(v.initializer)
return v
with variable_scope.variable_creator_scope(_initializing_creator):
save_counter = self._checkpoint.save_counter
if self._save_counter_assign is None:
self._save_counter_assign = save_counter.assign_add(1, read_value=False)
session.run(self._save_counter_assign)
if checkpoint_number is None:
checkpoint_number = save_counter
if not isinstance(checkpoint_number, compat.integral_types):
checkpoint_number = training_util.global_step(
sess=session, global_step_tensor=checkpoint_number)
prefix = "%s-%d" % (self._prefix, checkpoint_number)
save_path = self._checkpoint.write(prefix)
timestamp = time.time()
# If this is an overwritten checkpoint we were previously tracking, delete
# and reinsert it to make sure it goes to the end of the queue.
if save_path in self._maybe_delete:
del self._maybe_delete[save_path]
self._maybe_delete[save_path] = timestamp
self._latest_checkpoint = save_path
# Before deleting anything we update the Checkpoint proto with the new
# checkpoint. We'll go back and correct it after cleaning up old files, but
# a preemption while deleting will be more likely to see the new checkpoint
# this way.
self._record_state()
self._sweep()
# Write out the Checkpoint proto a second time, now without the deleted
# checkpoints.
self._record_state()
return save_path
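# Illustrative sketch (not part of the original file): saving with an explicit
# step number, assuming `model`, `optimizer`, `dataset` and `train_step` exist.
#   checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
#   manager = tf.train.CheckpointManager(checkpoint, "/tmp/model", max_to_keep=3)
#   for step, batch in enumerate(dataset):
#       train_step(batch)
#       if step % 1000 == 0:
#           manager.save(checkpoint_number=step)  # written as ckpt-0, ckpt-1000, ...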
|
|
"""Implementation of python-orcid library."""
import simplejson as json
import requests
import sys
if sys.version_info[0] == 2:
from urllib import urlencode
string_types = basestring,
else:
from urllib.parse import urlencode
string_types = str,
SEARCH_VERSION = "/v1.2"
VERSION = "/v2.0_rc1"
__version__ = "0.5.1"
class PublicAPI(object):
"""Public API."""
def __init__(self, sandbox=False):
"""Initialize public API.
Parameters
----------
:param sandbox: boolean
Should the sandbox be used. False (default) indicates production
mode.
"""
if sandbox:
self._endpoint_public = "https://pub.sandbox.orcid.org"
else:
self._endpoint_public = "https://pub.orcid.org"
def read_record_public(self, orcid_id, request_type, put_code=None):
"""Get the public info about the researcher.
Parameters
----------
:param orcid_id: string
Id of the queried author.
:param request_type: string
One of 'activities', 'education', 'employment', 'funding',
'peer-review', 'work'.
:param put_code: string
The id of the queried work. Must be given if 'request_type' is not
'activities'.
Returns
-------
:returns: dict
Records.
"""
return self._get_info(orcid_id, self._get_public_info, request_type,
put_code)
def search_public(self, query, method="lucene", start=None, rows=None,
search_field="orcid-bio"):
"""Search the ORCID database.
Parameters
----------
:param query: string
Query in line with the chosen method.
:param method: string
One of 'lucene', 'edismax', 'dismax'
:param start: string
Index of the first record requested. Use for pagination.
:param rows: string
Number of records requested. Use for pagination.
:param search_field: string
            Scope used for searching. The default one allows searching
            everywhere.
Returns
-------
:returns: dict
Search result with error description available. The results can
be obtained by accessing keys 'orcid-search-results' and
then 'orcid-search-result'. To get the number of all results,
access the key 'orcid-search-results' and then 'num-found'.
"""
headers = {'Accept': 'application/orcid+json'}
return self._search(query, method, start, rows, search_field,
headers, self._endpoint_public)
def search_public_generator(self, query, method="lucene",
search_field="orcid-bio", pagination=10):
"""Search the ORCID database with a generator.
The generator will yield every result.
Parameters
----------
:param query: string
Query in line with the chosen method.
:param method: string
One of 'lucene', 'edismax', 'dismax'
:param search_field: string
            Scope used for searching. The default one allows searching
            everywhere.
:param pagination: integer
            How many results should be fetched with one request.
Yields
-------
:yields: dict
Single profile from the search results.
"""
headers = {'Accept': 'application/orcid+json'}
index = 0
while True:
paginated_result = self._search(query, method, index, pagination,
search_field, headers,
self._endpoint_public)
if not paginated_result['orcid-search-results'][
'orcid-search-result']:
return
for result in paginated_result['orcid-search-results'][
'orcid-search-result']:
yield result
index += pagination
def _get_info(self, orcid_id, function, request_type, put_code=None):
if request_type != "activities" and not put_code:
raise ValueError("""In order to fetch specific record,
please specify the 'put_code' argument.""")
elif request_type == "activities" and put_code:
raise ValueError("""In order to fetch activities summary, the 'id'
argument is redundant.""")
response = function(orcid_id, request_type, put_code)
response.raise_for_status()
return response.json()
def _get_public_info(self, orcid_id, request_type, put_code):
request_url = '%s/%s/%s' % (self._endpoint_public + VERSION,
orcid_id, request_type)
if put_code:
request_url += '/%s' % put_code
headers = {'Accept': 'application/orcid+json'}
return requests.get(request_url, headers=headers)
def _search(self, query, method, start, rows, search_field, headers,
endpoint):
url = endpoint + SEARCH_VERSION + "/search/" + \
search_field + "/?defType=" + method + "&q=" + query
if start:
url += "&start=%s" % start
if rows:
url += "&rows=%s" % rows
response = requests.get(url, headers=headers)
response.raise_for_status()
return response.json()
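# Illustrative usage of PublicAPI (the ORCID iD and query below are made up):
#   api = PublicAPI(sandbox=True)
#   activities = api.read_record_public('0000-0002-1825-0097', 'activities')
#   results = api.search_public('family-name:Doe', rows=5)
#   total = results['orcid-search-results']['num-found']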
class MemberAPI(PublicAPI):
"""Member API."""
def __init__(self, institution_key, institution_secret, sandbox=False):
"""Initialize member API.
        Parameters
        ----------
        :param institution_key: string
            The client id (key) issued to the member institution by ORCID.
        :param institution_secret: string
            The client secret issued to the member institution by ORCID.
        :param sandbox: boolean
            Should the sandbox be used. False (default) indicates production
            mode.
        """
self._key = institution_key
self._secret = institution_secret
if sandbox:
self._endpoint_member = "https://api.sandbox.orcid.org"
self._auth_url = 'https://sandbox.orcid.org/signin/auth.json'
self._authorize_url = \
'https://sandbox.orcid.org/oauth/custom/authorize.json'
self._login_or_register_endpoint = \
"https://sandbox.orcid.org/oauth/authorize"
self._token_url = "https://api.sandbox.orcid.org/oauth/token"
else:
self._endpoint_member = "https://api.orcid.org"
self._auth_url = 'https://orcid.org/signin/auth.json'
self._authorize_url = \
'https://orcid.org/oauth/custom/authorize.json'
self._login_or_register_endpoint = \
"https://orcid.org/oauth/authorize"
self._token_url = "https://api.orcid.org/oauth/token"
PublicAPI.__init__(self, sandbox)
def add_record(self, orcid_id, token, request_type, data):
"""Add a record to a profile.
Parameters
----------
:param orcid_id: string
Id of the author.
:param token: string
Token received from OAuth 2 3-legged authorization.
:param request_type: string
One of 'activities', 'education', 'employment', 'funding',
'peer-review', 'work'.
:param data: dict
The record in Python-friendly format. Required if xml is not
provided.
Returns
-------
:returns: string
Put-code of the new work.
"""
return self._update_activities(orcid_id, token, requests.post,
request_type, data)
def get_user_orcid(self, user_id, password, redirect_uri):
"""Get the user orcid from authentication process.
Parameters
----------
:param user_id: string
The id of the user used for authentication.
:param password: string
The user password.
:param redirect_uri: string
The redirect uri of the institution.
Returns
-------
:returns: string
The orcid.
"""
session = requests.session()
response = self._authenticate(user_id, password, redirect_uri, session,
'/authenticate')
return response['orcid']
def get_token(self, user_id, password, redirect_uri):
"""Get the token for updating the records.
Parameters
----------
:param user_id: string
The id of the user used for authentication.
:param password: string
The user password.
:param redirect_uri: string
The redirect uri of the institution.
Returns
-------
:returns: string
The token.
"""
session = requests.session()
response = self._authenticate(user_id, password, redirect_uri, session,
'/activities/update')
return response['access_token']
def get_token_from_authorization_code(self, authorization_code,
redirect_uri):
"""Like `get_token`, but using an OAuth 2 authorization code.
Use this method if you run a webserver that serves as an endpoint for
the redirect URI. The webserver can retrieve the authorization code
from the URL that is requested by ORCID.
Parameters
----------
:param redirect_uri: string
The redirect uri of the institution.
:param authorization_code: string
The authorization code.
Returns
-------
:returns: dict
All data of the access token. The access token itself is in the
``"access_token"`` key.
"""
session = requests.session()
token_dict = {
"client_id": self._key,
"client_secret": self._secret,
"grant_type": "authorization_code",
"code": authorization_code,
"redirect_uri": redirect_uri,
}
response = session.post(self._token_url, data=token_dict,
headers={'Accept': 'application/json'})
response.raise_for_status()
return json.loads(response.text)
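    # Illustrative 3-legged flow (credentials, code and URLs are made up):
    #   api = MemberAPI('APP-XXXXXXXX', 'client-secret', sandbox=True)
    #   url = api.get_login_url('/authenticate', 'https://example.org/callback')
    #   # the user signs in at `url`; ORCID redirects back with ?code=...
    #   token = api.get_token_from_authorization_code(code, 'https://example.org/callback')
    #   access_token = token['access_token']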
def read_record_member(self, orcid_id, request_type, put_code=None):
"""Get the member info about the researcher.
Parameters
----------
:param orcid_id: string
Id of the queried author.
:param request_type: string
One of 'activities', 'education', 'employment', 'funding',
'peer-review', 'work'.
:param put_code: string
The id of the queried work. Must be given if 'request_type' is not
'activities'.
Returns
-------
:returns: dictionary
Records.
"""
return self._get_info(orcid_id, self._get_member_info, request_type,
put_code)
def remove_record(self, orcid_id, token, request_type, put_code):
"""Add a record to a profile.
Parameters
----------
:param orcid_id: string
Id of the author.
:param token: string
Token received from OAuth 2 3-legged authorization.
:param request_type: string
One of 'activities', 'education', 'employment', 'funding',
'peer-review', 'work'.
:param put_code: string
The id of the record. Can be retrieved using read_record_* method.
In the result of it, it will be called 'put-code'.
"""
self._update_activities(orcid_id, token, requests.delete, request_type,
put_code=put_code)
def search_member(self, query, method="lucene", start=None, rows=None,
search_field="orcid-bio"):
"""Search the ORCID database.
Parameters
----------
:param query: string
Query in line with the chosen method.
:param method: string
One of 'lucene', 'edismax', 'dismax'
:param start: string
Index of the first record requested. Use for pagination.
:param rows: string
Number of records requested. Use for pagination.
        :param search_field: string
            Scope used for searching. The default value allows searching
            everywhere.
Returns
-------
        :returns: dict
            Search results, including an error description when one is
            available. The matching records can be obtained by accessing the
            keys 'orcid-search-results' and then 'orcid-search-result'. The
            total number of results is available under 'orcid-search-results'
            and then 'num-found'.
"""
access_token = self. \
_get_access_token_from_orcid('/read-public')
headers = {'Accept': 'application/orcid+json',
'Authorization': 'Bearer %s' % access_token}
return self._search(query, method, start, rows, search_field, headers,
self._endpoint_member)
def search_member_generator(self, query, method="lucene",
search_field="orcid-bio", pagination=10):
"""Search the ORCID database with a generator.
The generator will yield every result.
Parameters
----------
:param query: string
Query in line with the chosen method.
:param method: string
One of 'lucene', 'edismax', 'dismax'
:param search_field: string
            Scope used for searching. The default value allows searching
            everywhere.
:param pagination: integer
            How many results should be fetched with one request.
"""
access_token = self. \
_get_access_token_from_orcid('/read-public')
headers = {'Accept': 'application/orcid+json',
'Authorization': 'Bearer %s' % access_token}
index = 0
while True:
paginated_result = self._search(query, method, index, pagination,
search_field, headers,
self._endpoint_member)
if not paginated_result['orcid-search-results'][
'orcid-search-result']:
return
for result in paginated_result['orcid-search-results'][
'orcid-search-result']:
yield result
index += pagination
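    # Illustrative sketch (not part of the original module): iterating over
    # every member-API search result; ``api`` stands for an instance of this
    # class and is a hypothetical name.
    #
    #     for result in api.search_member_generator('family-name:Smith',
    #                                               pagination=20):
    #         ...  # each ``result`` is one 'orcid-search-result' entry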
def update_record(self, orcid_id, token, request_type, data, put_code):
"""Add a record to a profile.
Parameters
----------
:param orcid_id: string
Id of the author.
:param token: string
Token received from OAuth 2 3-legged authorization.
:param request_type: string
One of 'activities', 'education', 'employment', 'funding',
'peer-review', 'work'.
        :param data: dict
            The record in a Python-friendly (dict) format.
        :param put_code: string
            The id of the record. It can be retrieved with a read_record_*
            method, where it appears as 'put-code'.
"""
self._update_activities(orcid_id, token, requests.put, request_type,
data, put_code)
def get_login_url(self, scope, redirect_uri, state=None,
family_names=None, given_names=None, email=None,
lang=None, show_login=None):
"""Return a URL for a user to login/register with ORCID.
Parameters
----------
:param scope: string or iterable of strings
The scope(s) of the authorization request.
:param redirect_uri: string
The URI to which the user's browser should be redirected after the
login.
:param state: string
An arbitrary token to prevent CSRF. See the OAuth 2 docs for
details.
:param family_names: string
The user's family name, used to fill the registration form.
:param given_names: string
The user's given name, used to fill the registration form.
:param email: string
The user's email address, used to fill the sign-in or registration
form.
:param lang: string
The language in which to display the authorization page.
:param show_login: bool
Determines whether the log-in or registration form will be shown by
default.
Returns
-------
:returns: string
The URL ready to be offered as a link to the user.
"""
if not isinstance(scope, string_types):
scope = " ".join(sorted(set(scope)))
data = [("client_id", self._key), ("scope", scope),
("response_type", "code"), ("redirect_uri", redirect_uri)]
if state:
data.append(("state", state))
if family_names:
data.append(("family_names", family_names))
if given_names:
data.append(("given_names", given_names))
if email:
data.append(("email", email))
if lang:
data.append(("lang", lang))
if show_login is not None:
data.append(("show_login", "true" if show_login else "false"))
return self._login_or_register_endpoint + "?" + urlencode(data)
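    # Illustrative sketch (not part of the original module): building a login
    # URL that requests two scopes; ``api`` is a hypothetical instance of this
    # class and the redirect URI is a placeholder.
    #
    #     url = api.get_login_url(['/authenticate', '/activities/update'],
    #                             'https://example.org/orcid/callback',
    #                             email='user@example.org')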
def _authenticate(self, user_id, password, redirect_uri, session, scope):
response = session.post(self._auth_url,
data={'userId': user_id, 'password': password})
response.raise_for_status()
response = session.get('https://sandbox.orcid.org/oauth/' +
'authorize?client_id=' + self._key +
'&response_type=code&scope=' + scope +
'&redirect_uri=' + redirect_uri)
response.raise_for_status()
session.close()
json_dict = {
"clientId": self._key,
"redirectUri": redirect_uri,
"scope": scope,
"responseType": "code",
"approved": "true",
"persistentTokenEnabled": "true"
}
headers = {
'Accept': 'text/plain',
'Content-Type': 'application/json;charset=UTF-8'
}
response = session.post(self._authorize_url,
data=json.dumps(json_dict),
headers=headers
)
response.raise_for_status()
session.close()
uri = json.loads(response.text)['redirectUri']['value']
authorization_code = uri[uri.rfind('=') + 1:]
        return self.get_token_from_authorization_code(authorization_code,
                                                      redirect_uri)
def _get_access_token_from_orcid(self, scope):
payload = {'client_id': self._key,
'client_secret': self._secret,
'scope': scope,
'grant_type': 'client_credentials'
}
url = "%s/oauth/token" % self._endpoint_member
headers = {'Accept': 'application/json'}
response = requests.post(url, data=payload, headers=headers)
response.raise_for_status()
return response.json()['access_token']
def _get_member_info(self, orcid_id, request_type, put_code):
access_token = self. \
_get_access_token_from_orcid('/activities/read-limited')
request_url = '%s/%s/%s' % (self._endpoint_member + VERSION,
orcid_id, request_type)
if put_code:
request_url += '/%s' % put_code
headers = {'Accept': 'application/orcid+json',
'Authorization': 'Bearer %s' % access_token}
return requests.get(request_url, headers=headers)
def _update_activities(self, orcid_id, token, method, request_type,
data=None, put_code=None):
url = "%s/%s/%s" % (self._endpoint_member + VERSION, orcid_id,
request_type)
if put_code:
url += ('/%s' % put_code)
if data:
data['put-code'] = put_code
headers = {'Accept': 'application/orcid+json',
'Content-Type': 'application/orcid+json',
'Authorization': 'Bearer ' + token}
if method == requests.delete:
response = method(url, headers=headers)
        else:
            json_data = json.dumps(data)  # serialized record sent as JSON
            response = method(url, json_data, headers=headers)
response.raise_for_status()
if 'location' in response.headers:
# Return the new put-code
return response.headers['location'].split('/')[-1]
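# ---------------------------------------------------------------------------
# Illustrative end-to-end sketch (assumptions: the class above is the ORCID
# member-API client, here called ``MemberAPI``, constructed with an
# institution key and secret; neither the class name nor the constructor is
# shown in this module, so both are hypothetical).
#
#     api = MemberAPI('institution-key', 'institution-secret')
#
#     # 3-legged OAuth: send the user to the login URL, then exchange the
#     # authorization code returned to the redirect URI for an access token.
#     url = api.get_login_url('/activities/update',
#                             'https://example.org/orcid/callback')
#     token = api.get_token_from_authorization_code(
#         authorization_code, 'https://example.org/orcid/callback')
#
#     # Read, update and remove records through the member API.
#     record = api.read_record_member(orcid_id, 'work', put_code)
#     api.update_record(orcid_id, token['access_token'], 'work', data, put_code)
#     api.remove_record(orcid_id, token['access_token'], 'work', put_code)
# ---------------------------------------------------------------------------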
|
|
from mpl_toolkits.basemap import Basemap, cm
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import LogNorm
from matplotlib.colors import Normalize
from osgeo import gdalconst
from PIL import Image, ImageFont, ImageDraw
import numpy as np
import matplotlib.pyplot as plt
import gdal
import sys
import cmocean  # colormaps used when the plot_era40/plot_spwd options are enabled
#write_data=True
#lat1=-75.5
#lon1=124.
#lat2=-75.
#lon2=121.
lon1_bm2=-135
lat1_bm2=-48.458667
lon2_bm2=45
lat2_bm2=-48.458667
##
plot_era40=False
plot_spwd=False
plot_cry=False
#Setting RadarLines directory
RLDir=sys.argv[1]
if RLDir[-1]!='/':
RLDir=RLDir+'/'
execfile(RLDir+'parameters-Maps.py')
#Reading isochrones' ages
readarray=np.loadtxt(RLDir+'ages.txt')
iso_age=np.concatenate((np.array([0]),readarray[:,0]))
#Running model for each radar line
if run_model:
for i,RLlabel in enumerate(list_RL+list_RL_extra):
directory=RLDir+RLlabel
sys.argv=['AgeModel.py',directory]
execfile('AgeModel.py')
plt.close("all")
#Reading data for each radar line
for i,RLlabel in enumerate(list_RL):
directory=RLDir+RLlabel
accu_array1=np.loadtxt(directory+'/a.txt')
botage_array1=np.loadtxt(directory+'/agebottom.txt')
m_array1=np.loadtxt(directory+'/m.txt')
G0_array1=np.loadtxt(directory+'/G0.txt')
pprime_array1=np.loadtxt(directory+'/pprime.txt')
hor_array1=np.loadtxt(directory+'/agehorizons.txt')
if i==0:
accu_array=accu_array1
botage_array=botage_array1
m_array=m_array1
G0_array=G0_array1
pprime_array=pprime_array1
hor_array=hor_array1
else:
accu_array=np.concatenate((accu_array,accu_array1))
botage_array=np.concatenate((botage_array,botage_array1))
#save individual lines to highlight in maps
if directory==RLDir+'VCD_JKB2g_DVD01a/':
dvd01a_array=np.loadtxt(directory+'/agebottom.txt')
if directory==RLDir+'OIA_JKB2n_Y77a/':
y77a_array=np.loadtxt(directory+'/agebottom.txt')
m_array=np.concatenate((m_array,m_array1))
G0_array=np.concatenate((G0_array,G0_array1))
pprime_array=np.concatenate((pprime_array,pprime_array1))
hor_array=np.concatenate((hor_array,hor_array1))
#Reading data for extra radar lines
for i,RLlabel in enumerate(list_RL_extra):
directory=RLDir+RLlabel
botage_array1=np.loadtxt(directory+'/agebottom.txt')
m_array1=np.loadtxt(directory+'/m.txt')
G0_array1=np.loadtxt(directory+'/G0.txt')
pprime_array1=np.loadtxt(directory+'/pprime.txt')
botage_array=np.concatenate((botage_array,botage_array1))
m_array=np.concatenate((m_array,m_array1))
G0_array=np.concatenate((G0_array,G0_array1))
pprime_array=np.concatenate((pprime_array,pprime_array1))
#Importing tif files
def readRasterBandAsArray(filename, bandnum):
raster = gdal.Open(filename, gdalconst.GA_ReadOnly)
rasterBand = raster.GetRasterBand(bandnum)
rasterBandArray = rasterBand.ReadAsArray(0, 0, raster.RasterXSize, raster.RasterYSize).astype(np.float)
rasterBandNoDataValue = rasterBand.GetNoDataValue()
if rasterBandNoDataValue is not None:
rasterBandArray[rasterBandArray == rasterBandNoDataValue] = np.nan
return rasterBandArray
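# Example (sketch): read band 1 of a GeoTIFF into a float array with the
# nodata value replaced by NaN (the file name below is illustrative only).
#
#     bed = readRasterBandAsArray(RLDir + 'bedmap2/bedmap2_bed.tif', 1)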
#
#list_maps=['accu-steady']
list_maps=['bare-bed','radar-lines','melting','melting-sigma','Height-Above-Bed-0.8Myr','Height-Above-Bed-1Myr','Height-Above-Bed-1.2Myr','Height-Above-Bed-1.5Myr','bottom-age','min-bottom-age','age-100m','age-150m','age-200m','age-250m','resolution-1Myr','resolution-1.2Myr','resolution-1.5Myr', 'geothermal-heat-flux','geothermal-heat-flux-sigma','pprime','pprime-sigma','accu-sigma','accu-steady']
list_length=len(list_maps)
for i in range(nbiso):
list_maps.append('accu-layer'+ "%02i"%(i+1) +'_'+str(int(iso_age[i]/1000.))+'-'+str(int(iso_age[i+1]/1000.))+'kyr' )
for i in range(nbhor):
list_maps.append('age-hor'+"%02i"%(i+1))
for i,MapLabel in enumerate(list_maps):
print MapLabel+' map'
fig=plt.figure(MapLabel,figsize=(21/2.54,21/2.54))
plt.title(MapLabel, y=1.05)
# map0 = Basemap(projection='spstere', lat_ts=-71, boundinglat=-59.996849, lon_0=180, rsphere=(6378137.00,6356752.3142))
map0 = Basemap(projection='stere', lat_ts=-71, lat_0=-90, lon_0=180, llcrnrlon=lon1_bm2,llcrnrlat=lat1_bm2, urcrnrlon=lon2_bm2,urcrnrlat=lat2_bm2, rsphere=(6378137.00,6356752.3142))
# lon,lat=map0(-3333500, 0, inverse=True)
# print lat
# print map0(0,-60.)
# map0 = Basemap(projection='spstere', lat_ts=-71, boundinglat=lat, lon_0=180, rsphere=(6378137.00,6356752.3142))
# urcrnrlon,urcrnrlat=map0(6667000, 6667000, inverse=True)
# print llcrnrlon,llcrnrlat,urcrnrlon,urcrnrlat
# map0 = Basemap(projection='stere', lat_ts=-71, lat_0=-90, lon_0=180, llcrnrlat=llcrnrlat, llcrnrlon=llcrnrlon, urcrnrlat=urcrnrlat, urcrnrlon=urcrnrlon, rsphere=(6378137.00,6356752.3142))
map1 = Basemap(projection='stere', lat_ts=-71, lat_0=-90, lon_0=180, llcrnrlat=lat1, llcrnrlon=lon1, urcrnrlat=lat2, urcrnrlon=lon2, rsphere=(6378137.00,6356752.3142))
#map1 = Basemap(projection='spstere', boundinglat=-60, lon_0=180, llcrnrx=-4.5e6, llcrnry=-2.3e6, urcrnrx=-5e6, urcrnry=-2.8e6)
#m = Basemap(projection='stere', lat_0=-75, lon_0=123., width=1e6, height=1e6)
#m.drawcoastlines()
#m.fillcontinents(color='white',lake_color='aqua')
#m.drawmapboundary(fill_color='aqua')
map1.drawparallels(np.arange(-90.,81.,1.), labels=[True, False, False, True], dashes=[1, 5], color='0.5')
map1.drawmeridians(np.arange(-180.,180.,2.), latmax=85., labels=[False, True, True, False], dashes=[1, 5], color='0.5')
map1.drawmapscale(lon1-1.2, lat1+0.2, lon1, lat1, 50, yoffset=10., barstyle='simple')
ax = plt.axes()
##Draw bed topography
#raster = gdal.Open('bedmap2/bedmap2_bed.txt')
#band = raster.GetRasterBand(1)
#array = band.ReadAsArray()
#array=np.where(array==-9999,np.nan,array)
#map1.imshow(array[::-1,:])
#map1.colorbar()
##Draw bedmap2 surface contours
raster2 = gdal.Open(RLDir+'bedmap2/bedmap2_surface.txt')
band2 = raster2.GetRasterBand(1)
array2 = band2.ReadAsArray()
array2=np.where(array2==-9999,np.nan,array2)
zz=array2[::-1,:]
x = np.linspace(0, map0.urcrnrx, array2.shape[1])
y = np.linspace(0, map0.urcrnry, array2.shape[0])
# print map0.urcrnrx,map0.urcrnry
# x = np.linspace(0, -6667000, array2.shape[1])
# y = np.linspace(0, -6667000, array2.shape[0])
x1,y1=map0(lon1,lat1)
x2,y2=map0(lon2,lat2)
x=x-x1
y=y-y1
xx, yy = np.meshgrid(x, y)
    if MapLabel[:4]!='accu' and MapLabel!='bare-bed':
levels=np.concatenate(( np.arange(3150, 3260, 10),np.arange(3260,3270, 2) ))
else:
levels=np.concatenate(( np.arange(3150, 3240, 4),np.arange(3240,3270, 2) ))
cs=map1.contour(xx,yy, zz, colors='red', levels=levels, alpha=0.7) #color=0.5, new=0.6, a=0.25 or 0.7
# plt.clabel(cs, inline=1, fontsize=10,fmt='%1.0f')
##Draw cryosat surface elevations
surf_cryosat=readRasterBandAsArray(RLDir+'bedmap2/cryosat-dem_clipped.tif',1)
zz=surf_cryosat[::-1,::-1]
latmax=-75.391976
latmin=-74.026265
lonmax=132.993563
lonmin=115.141755
hmin,vmin=map1(lonmin,latmin)
hmax,vmax=map1(lonmax,latmax)
extent=(hmin, hmax, vmin, vmax)
x = np.linspace(hmin, hmax, surf_cryosat.shape[1])
y = np.linspace(vmin, vmax, surf_cryosat.shape[0])
xx, yy = np.meshgrid(x, y)
# plt.imshow(surf_cryosat, origin='upper', cmap=cmocean.cm.gray, extent=extent, norm=Normalize(vmin=3050, vmax=3300)) #'terrain', 0.4, -1000, 900
# cb1=plt.colorbar(orientation='horizontal', shrink=0.7, pad=0)
# cb1.set_label('Cryosat-2 surface elevation (m)')
    if MapLabel[:4]!='accu' and MapLabel!='bare-bed':
levels=np.concatenate(( np.arange(3150, 3260, 10),np.arange(3260,3270, 2) ))
else:
levels=np.concatenate(( np.arange(3150, 3260, 4),np.arange(3260,3270, 2) ))
cs=map1.contour(xx,yy,zz[::-1,:], origin='upper', colors='0.2',levels=levels, alpha=0.4)
# plt.clabel(cs, inline=1, fontsize=10)
# import pdb; pdb.set_trace()
# cs2=map1.contour(xx,yy,zz,levels[1:10],linewidths=2)
##Draw bedrock map and contours
raster2 = gdal.Open(RLDir+'bedmap2/bedmap2_bed.txt')
band2 = raster2.GetRasterBand(1)
array2 = band2.ReadAsArray()
array2=np.where(array2==-9999,np.nan,array2)
zz=array2[::-1,:]
x = np.linspace(0, map0.urcrnrx, array2.shape[1])
y = np.linspace(0, map0.urcrnry, array2.shape[0])
x1,y1=map0(lon1,lat1)
x2,y2=map0(lon2,lat2)
x=x-x1
y=y-y1
xx, yy = np.meshgrid(x, y)
levels=np.arange(-1000., 900., 100.)
plt.imshow(zz[::-1,:], extent=[max(x),min(x),max(y),min(y)], cmap='terrain', norm=Normalize(vmin=-700, vmax=600), alpha=0.4) #'terrain', 0.4, -1000, 900
# levels=np.arange(-900, 900, 50)
# cs=map1.contour(xx,yy,zz, colors='0.1', levels=levels, alpha=0.4) #color=0.5, a=0.25
# x1_bm2,y1_bm2=map1(lon1_bm2,lat1_bm2)
# x2_bm2,y2_bm2=map1(lon2_bm2,lat2_bm2)
# print 'x1_bm2: ', x1_bm2
# print 'x2_bm2: ', x2_bm2
# x1=map1.llcrnrx
# print 'x1: ',x1
# y1=map1.llcrnry
# x2=map1.urcrnrx
# y2=map1.urcrnry
# plt.imshow(zz, cmap='terrain', extent=[x1_bm2,x2_bm2,y1_bm2,y2_bm2])
# map1.llcrnrx=x1
# map1.llcrnry=y1
# map1.urcrnrx=x2
# map1.urcrnry=y2
# from matplotlib.colors import LightSource
# ls = LightSource(azdeg = 90, altdeg = 20)
# rgb = ls.shade(zz, plt.cm.terrain)
# im = map1.imshow(rgb, cmap='terrain', alpha=0.25)
# cs=map1.contour(xx,yy, zz, colors='m', levels=levels, alpha=0.25)
# plt.clabel(cs, inline=1, fontsize=10,fmt='%1.0f')
###Draw OIA refined bedrock
# Bed_BlobA_Geoid4=readRasterBandAsArray(RLDir+'bedmap2/Bed_BlobA_Geoid4.tif',1)
## hmin=1298450.
## hmax=1391550.
## vmin=-840950.
## vmax=-888950.
## lonmin,latmin=map0(hmin,vmin, inverse=True)
## lonmax,latmax=map0(hmax,vmax, inverse=True)
# latmax=-75.1164861
# latmin=-75.5905194
# lonmax=121.1456639
# lonmin=124.3964778
# hmin,vmin=map1(lonmin,latmin)
# hmax,vmax=map1(lonmax,latmax)
# extent=(hmin, hmax, vmin, vmax)
# plt.imshow(2000*np.ones(np.shape(Bed_BlobA_Geoid4)), cmap='terrain', extent=extent,norm=Normalize(vmin=-1000, vmax=900))
# plt.imshow(Bed_BlobA_Geoid4, origin='upper', cmap='terrain', extent=extent,norm=Normalize(vmin=-1000, vmax=900), alpha=0.4) #terrain, 0.4, -1000, 900
## levels=np.arange(-900, 900, 50)
## cs=map1.contour(xx,yy,zz, colors='0.1',levels=levels, alpha=0.4)
##Draw compiled radar refined bedrock
# bed_compiled_Duncan=readRasterBandAsArray(RLDir+'bedmap2/compiled_oia_bed_Duncan.tif',1)
# # hmin=1298450.
# # hmax=1391550.
# # vmin=-840950.
# # vmax=-888950.
# # lonmin,latmin=map0(hmin,vmin, inverse=True)
# # lonmax,latmax=map0(hmax,vmax, inverse=True)
# latmax=-74.842827
# latmin=-75.623262
# lonmax=118.874831
# lonmin=127.247618
# hmin,vmin=map1(lonmin,latmin)
# hmax,vmax=map1(lonmax,latmax)
# extent=(hmin, hmax, vmin, vmax)
# bed_compiled_Duncan_val = np.ma.array(bed_compiled_Duncan, mask=np.isnan(bed_compiled_Duncan))
# plt.imshow(2000+0*(np.empty_like(bed_compiled_Duncan_val)), cmap='terrain',extent=extent,norm=Normalize(vmin=-700,vmax=600))
# plt.imshow(bed_compiled_Duncan_val, origin='upper', cmap='terrain', extent=extent,norm=Normalize(vmin=-700, vmax=600), alpha=0.4) #'terrain', 0.4, -1000, 900
# # levels=np.arange(-900, 900, 50)
# # cs=map1.contour(xx,yy,zz, colors='0.1',levels=levels, alpha=0.4)
if plot_era40==False and plot_spwd==False:
# Plot box around the refined bed
xborders=np.array([hmin,hmax,hmax,hmin,hmin])
yborders=np.array([vmin,vmin,vmax,vmax,vmin])
plt.plot(xborders,yborders,color='k',linestyle='dashed',alpha=0.4)
# Draw color bar
cb0=plt.colorbar(orientation='horizontal', shrink=0.7, pad=0, alpha=0.05)
cb0.set_label('Bedrock elevation (m)')
#Draw continent's contour
#raster3 = gdal.Open('bedmap2/bedmap2_icemask_grounded_and_shelves.txt')
#band3 = raster3.GetRasterBand(1)
#array3 = band3.ReadAsArray()
#x = np.linspace(0, map1.urcrnrx, array3.shape[1])
#y = np.linspace(0, map1.urcrnry, array3.shape[0])
#xx, yy = np.meshgrid(x, y)
#map1.contour(xx,yy, array3[::-1,:], colors='k')
if plot_era40==True:
#Draw ERA40 detrended present accu
ERA40=readRasterBandAsArray(RLDir+'bedmap2/accu-ERA40mixed_Cat5.tif',1)
latmax=-75.3335278
latmin=-74.8885111
lonmax=126.3942028
lonmin=119.9579389
hmin,vmin=map1(lonmin,latmin)
hmax,vmax=map1(lonmax,latmax)
extent=(hmax, hmin, vmin, vmax)
norm = Normalize(vmin=21.,vmax=41.)
# norm = Normalize(vmin=10.,vmax=30.)
# plt.imshow(20*np.ones(np.shape(ERA40)), cmap='seismic_r', extent=extent,norm=Normalize(vmin=-1000, vmax=900))
plt.imshow(ERA40, origin='lower', cmap=cmocean.cm.thermal, norm=norm, extent=extent, alpha=1) #'seismic_r'
# map1.scatter(x,y, c=accu*1000*0.917, marker='o', lw=4, edgecolor='', s=4, norm=norm, cmap='seismic_r')
xborders=np.array([hmin,hmax,hmax,hmin,hmin])
yborders=np.array([vmin,vmin,vmax,vmax,vmin])
# plt.plot(xborders,yborders,color='k',linestyle='dashed',alpha=0.4)
cb1=plt.colorbar(orientation='horizontal', shrink=0.7, pad=0)
cb1.set_label('ERA40 accumulation rate (mm-we/yr)')
if plot_spwd==True:
#Draw ERA40 detrended present accu
SPWD=readRasterBandAsArray(RLDir+'bedmap2/spwd_dc_Max.tif',1)
# hmin=972360.329
# hmax=1747360.329
# vmin=-1299430.751
# vmax=-515430.751
# lonmin,latmin=map0(hmin,vmin, inverse=True)
# lonmax,latmax=map0(hmax,vmax, inverse=True)
latmax=-75.1432556
latmin=-73.3463278
lonmax=143.192594
lonmin=106.434836
hmin,vmin=map1(lonmin,latmin)
hmax,vmax=map1(lonmax,latmax)
extent=(hmax, hmin, vmin, vmax)
norm = Normalize(vmin=-1.5,vmax=1.5)
# plt.imshow(20*np.ones(np.shape(SPWD)), cmap=cmocean.cm.gray, extent=extent, norm=norm)
plt.imshow(SPWD, origin='lower', cmap=cmocean.cm.balance, extent=extent, norm=norm, alpha=1) #cmocean.cm.dense_r
# map1.scatter(x,y, c=accu*1000*0.917, marker='o', lw=4, edgecolor='', s=4, norm=norm, cmap='seismic_r')
xborders=np.array([hmin,hmax,hmax,hmin,hmin])
yborders=np.array([vmin,vmin,vmax,vmax,vmin])
# plt.plot(xborders,yborders,color='k',linestyle='dashed',alpha=0.4)
cb1=plt.colorbar(orientation='horizontal', shrink=0.7, pad=0)
cb1.set_label('SPWD (m/km)')
#Draw Bedmap2 surface curvature y (Cat dataset)
# cry=readRasterBandAsArray(RLDir+'bedmap2/curvature_bm2_cry_Cat3.tif',1)
# latmax=-75.9745299
# latmin=-74.2942778
# lonmax=119.9816394
# lonmin=126.3691982
# hmin,vmin=map1(lonmin,latmin)
# hmax,vmax=map1(lonmax,latmax)
# extent=(hmax, hmin, vmin, vmax)
## norm = Normalize(vmin=21.,vmax=41.)
# # norm = Normalize(vmin=10.,vmax=30.)
# # plt.imshow(20*np.ones(np.shape(ERA40)), cmap='seismic_r', extent=extent,norm=Normalize(vmin=-1000, vmax=900))
# plt.imshow(cry, origin='lower', cmap='Greys', extent=extent, alpha=1)
# # map1.scatter(x,y, c=accu*1000*0.917, marker='o', lw=4, edgecolor='', s=4, norm=norm, cmap='seismic_r')
# xborders=np.array([hmin,hmax,hmax,hmin,hmin])
# yborders=np.array([vmin,vmin,vmax,vmax,vmin])
# # plt.plot(xborders,yborders,color='k',linestyle='dashed',alpha=0.4)
# cb1=plt.colorbar(orientation='horizontal', shrink=0.7, pad=0)
# cb1.set_label('Curvature in y')
levels='auto'
if MapLabel=='bare-bed':
if plot_era40==True:
cblabel='Bedrock elevation (m)'
if plot_spwd==True:
cblabel='SPWD (m/km)'
else:
cblabel='Bedrock elevation (km)'
if MapLabel=='radar-lines':
LON=botage_array[:,0]
LAT=botage_array[:,1]
x,y=map1(LON,LAT)
map1.scatter(x,y, c='b', marker='o', lw=0., edgecolor='', s=dotsize)
#highlight lines of interest
LON=dvd01a_array[:,0]
LAT=dvd01a_array[:,1]
x,y=map1(LON,LAT)
highlight=['#d05048']
map1.scatter(x,y, c='r', marker='o',lw=0., edgecolor='', s=7)
LON=y77a_array[:,0]
LAT=y77a_array[:,1]
x,y=map1(LON,LAT)
map1.scatter(x,y, c='r', marker='o',lw=0., edgecolor='', s=7)
#Add text for paper
ax2 = plt.axes()
ax2.text(0.485,0.38,'A', color='red', fontweight='bold', transform=ax2.transAxes)
ax2.text(0.48,0.96,"A'", color='red', fontweight='bold', transform=ax2.transAxes)
#bbox=dict(facecolor='white',edgecolor='red',alpha=0.6)
ax2.text(0.19,0.57,"B", color='red', fontweight='bold', transform=ax2.transAxes)
ax2.text(0.67,0.63,"B'", color='red', fontweight='bold', transform=ax2.transAxes)
ax2.text(0.35,0.28,'Ridge',color='black',fontweight='normal',transform=ax2.transAxes,rotation=-32)
ax2.text(0.35,0.37,'Concordia Subglacial Trench',color='black',fontweight='normal',transform=ax2.transAxes,rotation=-32)
# ax2.text(0.15,0.67,'LDC',color='black',fontweight='normal',transform=ax2.transAxes,backgroundcolor='white')
if MapLabel=='bottom-age':
LON=botage_array[:,0]
LAT=botage_array[:,1]
botage=botage_array[:,4]
x,y=map1(LON,LAT)
norm = LogNorm(vmin=0.7,vmax=5.)
map1.scatter(x,y, c=botage/1e6, marker='o', lw=0., edgecolor='', norm = norm, s=dotsize)
cblabel='Bottom age (Myr)'
levels=np.array([0.7, 0.8, 0.9, 1.0, 1.2, 1.4, 1.6, 2, 2.5, 3, 3.5, 4, 5])
# output=np.transpose(np.vstack((LON,LAT,botage)))
# with open(RLDir+'agebottom.txt','w') as f:
# f.write('#LON\tLAT\tbottom age (yr)\n')
# np.savetxt(f,output, delimiter="\t")
if MapLabel=='min-bottom-age':
LON=botage_array[:,0]
LAT=botage_array[:,1]
minbotage=botage_array[:,5]
x,y=map1(LON,LAT)
norm = LogNorm(vmin=0.7,vmax=5.)
map1.scatter(x,y, c=minbotage/1e6, marker='o', lw=0., edgecolor='', norm = norm, s=dotsize)
cblabel='Minimum bottom age (Myr)'
levels=np.array([0.7, 0.8, 0.9, 1.0, 1.2, 1.4, 1.6, 2, 2.5, 3, 3.5, 4, 5])
# output=np.transpose(np.vstack((LON,LAT,minbotage)))
# with open(RLDir+'minagebottom.txt','w') as f:
# f.write('#LON\tLAT\tmin bottom age (yr)\n')
# np.savetxt(f,output, delimiter="\t")
if MapLabel=='age-100m':
LON=botage_array[:,0]
LAT=botage_array[:,1]
botage=botage_array[:,6]
x,y=map1(LON,LAT)
norm = LogNorm(vmin=0.7,vmax=5.)
map1.scatter(x,y, c=botage/1e6, marker='o', lw=0., edgecolor='', norm = norm, s=dotsize)
cblabel='Age (Myr)'
levels=np.array([0.7, 0.8, 0.9, 1.0, 1.2, 1.4, 1.6, 2, 2.5, 3, 3.5, 4, 5])
if MapLabel=='age-150m':
LON=botage_array[:,0]
LAT=botage_array[:,1]
botage=botage_array[:,7]
x,y=map1(LON,LAT)
norm = LogNorm(vmin=0.7,vmax=5.)
map1.scatter(x,y, c=botage/1e6, marker='o', lw=0., edgecolor='', norm = norm, s=dotsize)
cblabel='Age (Myr)'
levels=np.array([0.7, 0.8, 0.9, 1.0, 1.2, 1.4, 1.6, 2, 2.5, 3, 3.5, 4, 5])
if MapLabel=='age-200m':
LON=botage_array[:,0]
LAT=botage_array[:,1]
botage=botage_array[:,8]
x,y=map1(LON,LAT)
norm = LogNorm(vmin=0.7,vmax=5.)
map1.scatter(x,y, c=botage/1e6, marker='o', lw=0., edgecolor='', norm = norm, s=dotsize)
cblabel='Age (Myr)'
levels=np.array([0.7, 0.8, 0.9, 1.0, 1.2, 1.4, 1.6, 2, 2.5, 3, 3.5, 4, 5])
if MapLabel=='age-250m':
LON=botage_array[:,0]
LAT=botage_array[:,1]
botage=botage_array[:,9]
x,y=map1(LON,LAT)
norm = LogNorm(vmin=0.7,vmax=5.)
map1.scatter(x,y, c=botage/1e6, marker='o', lw=0., edgecolor='', norm = norm, s=dotsize)
cblabel='Age (Myr)'
levels=np.array([0.7, 0.8, 0.9, 1.0, 1.2, 1.4, 1.6, 2, 2.5, 3, 3.5, 4, 5])
if MapLabel=='resolution-1Myr':
LON=botage_array[:,0]
LAT=botage_array[:,1]
resolution=botage_array[:,10]
x,y=map1(LON,LAT)
norm = LogNorm(vmin=1.,vmax=20.)
map1.scatter(x,y, c=resolution/1e3, marker='o', lw=0., edgecolor='', norm = norm, s=dotsize)
cblabel='Resolution at 1Myr (kyr/m)'
levels=np.array([1., 2., 4., 6., 8., 10., 20., 40.])
# output=np.transpose(np.vstack((LON,LAT,resolution/1e3)))
# with open(RLDir+'resolution1Myr.txt','w') as f:
# f.write('#LON\tLAT\tresolution (kyr/m)\n')
# np.savetxt(f,output, delimiter="\t")
if MapLabel=='resolution-1.2Myr':
LON=botage_array[:,0]
LAT=botage_array[:,1]
resolution=botage_array[:,11]
        x,y=map1(LON,LAT)
        norm = LogNorm(vmin=1.,vmax=20.)  # same scale as the 1Myr resolution map
        map1.scatter(x,y, c=resolution/1e3, marker='o', lw=0., edgecolor='', norm = norm, s=dotsize)
cblabel='Resolution at 1.2Myr (kyr/m)'
levels=np.array([1., 2., 4., 6., 8., 10., 20., 40.])
# output=np.transpose(np.vstack((LON,LAT,resolution/1e3)))
# with open(RLDir+'resolution1.2Myr.txt','w') as f:
# f.write('#LON\tLAT\tresolution (kyr/m)\n')
# np.savetxt(f,output, delimiter="\t")
if MapLabel=='resolution-1.5Myr':
LON=botage_array[:,0]
LAT=botage_array[:,1]
resolution=botage_array[:,12]
        x,y=map1(LON,LAT)
        norm = LogNorm(vmin=1.,vmax=20.)  # same scale as the 1Myr resolution map
        map1.scatter(x,y, c=resolution/1e3, marker='o', lw=0., edgecolor='', norm = norm, s=dotsize)
cblabel='Resolution at 1.5Myr (kyr/m)'
levels=np.array([1., 2., 4., 6., 8., 10., 20., 40.])
# output=np.transpose(np.vstack((LON,LAT,resolution/1e3)))
# with open(RLDir+'resolution1.5Myr.txt','w') as f:
# f.write('#LON\tLAT\tresolution (kyr/m)\n')
# np.savetxt(f,output, delimiter="\t")
if MapLabel=='Height-Above-Bed-0.8Myr':
LON=botage_array[:,0]
LAT=botage_array[:,1]
height=botage_array[:,14]
x,y=map1(LON,LAT)
res=map1.scatter(x,y, c=height, marker='o', lw=0., edgecolor='', s=dotsize)
cblabel='Height above bed (m)'
if MapLabel=='Height-Above-Bed-1Myr':
LON=botage_array[:,0]
LAT=botage_array[:,1]
height=botage_array[:,15]
x,y=map1(LON,LAT)
res=map1.scatter(x,y, c=height, marker='o', lw=0., edgecolor='', s=dotsize)
        cblabel='Height above bed (m)'
if MapLabel=='Height-Above-Bed-1.2Myr':
LON=botage_array[:,0]
LAT=botage_array[:,1]
height=botage_array[:,16]
x,y=map1(LON,LAT)
res=map1.scatter(x,y, c=height, marker='o', lw=0., edgecolor='', s=dotsize)
cblabel='Height above bed (m)'
if MapLabel=='Height-Above-Bed-1.5Myr':
LON=botage_array[:,0]
LAT=botage_array[:,1]
height=botage_array[:,17]
x,y=map1(LON,LAT)
res=map1.scatter(x,y, c=height, marker='o', lw=0., edgecolor='', s=dotsize)
cblabel='Height above bed (m)'
# levels=np.array([1., 2., 4., 6., 8., 10., 20., 40.])
# cb.set_ticks(levels)
# cb.set_ticklabels(levels)
elif MapLabel=='melting':
LON=m_array[:,0]
LAT=m_array[:,1]
melting=m_array[:,3]
x,y=map1(LON,LAT)
norm = Normalize(vmin=0.,vmax=5.)
map1.scatter(x,y, c=melting*1e3, marker='o', lw=0., edgecolor='', s=dotsize, norm=norm)
cblabel='Melting (mm/yr)'
# output=np.transpose(np.vstack((LON,LAT,melting*1e3)))
# with open(RLDir+'m.txt','w') as f:
# f.write('#LON\tLAT\tmelting (mm/yr)\n')
# np.savetxt(f,output, delimiter="\t")
elif MapLabel=='melting-sigma':
LON=m_array[:,0]
LAT=m_array[:,1]
sigma_melting=m_array[:,4]
x,y=map1(LON,LAT)
norm = Normalize(vmin=0.,vmax=1.)
map1.scatter(x,y, c=sigma_melting*1e3, marker='o', lw=0., edgecolor='', s=dotsize, norm=norm)
cblabel='$\sigma$ Melting (mm/yr)'
elif MapLabel=='geothermal-heat-flux':
LON=G0_array[:,0]
LAT=G0_array[:,1]
G0=G0_array[:,3]
x,y=map1(LON,LAT)
map1.scatter(x,y, c=G0*1e3, marker='o', lw=0., edgecolor='', s=dotsize)
cblabel='G0 (mW/m$^2$)'
elif MapLabel=='geothermal-heat-flux-sigma':
LON=G0_array[:,0]
LAT=G0_array[:,1]
sigma_G0=G0_array[:,4]
x,y=map1(LON,LAT)
map1.scatter(x,y, c=sigma_G0*1e3, marker='o', lw=0., edgecolor='', s=dotsize)
cblabel='$\sigma_{G0}$ (mW/m$^2$)'
elif MapLabel=='pprime':
LON=pprime_array[:,0]
LAT=pprime_array[:,1]
pprime=pprime_array[:,3]
x,y=map1(LON,LAT)
# levels=np.arange(-1,3.1, 0.1)
norm = Normalize(vmin=-1.,vmax=3.)
map1.scatter(x,y, c=pprime, marker='o', lw=0., edgecolor='', s=dotsize, norm=norm)
cblabel='pprime'
# cb.set_ticks(levels)
# output=np.transpose(np.vstack((LON,LAT,pprime)))
# with open(RLDir+'p.txt','w') as f:
# f.write('#LON\tLAT\tpprime\n')
# np.savetxt(f,output, delimiter="\t")
elif MapLabel=='pprime-sigma':
LON=pprime_array[:,0]
LAT=pprime_array[:,1]
sigma_pprime=pprime_array[:,4]
x,y=map1(LON,LAT)
norm = Normalize(vmin=0.,vmax=1.)
map1.scatter(x,y, c=sigma_pprime, marker='o', lw=0., edgecolor='', s=dotsize, norm=norm)
cblabel='$\sigma$ pprime'
elif i>=list_length-2 and i<list_length+nbiso:
LON=accu_array[:,0]
LAT=accu_array[:,1]
x,y=map1(LON,LAT)
norm = Normalize(vmin=12.,vmax=22.) #10-30; 12-22 for newest
if MapLabel=='accu-sigma':
accu=accu_array[:,4]
norm = Normalize(vmin=0.,vmax=1.)
elif MapLabel=='accu-steady':
if plot_era40==True:
accu=accu_array[:,3]/0.65 #divide by 0.65 to go from steady-state to present day (computed by Fred from AICC12)
else:
accu=accu_array[:,3]
# output=np.transpose(np.vstack((LON,LAT,accu*100)))
# with open(RLDir+'a.txt','w') as f:
# f.write('#LON\tLAT\taccu (cm/yr)\n')
# np.savetxt(f,output, delimiter="\t")
else:
accu=accu_array[:,i-list_length+5]
if plot_era40==True:
map1.scatter(x,y, c=accu*1000*0.917, marker='o', lw=4, edgecolor='', s=4, vmin=21, vmax=41, cmap=cmocean.cm.thermal) #if use steady-state accu modified to get present day acc, 'seismic_r', vmin=21, vmax=41
else:
map1.scatter(x,y, c=accu*1000*0.917, marker='o', lw=4, edgecolor='', s=4, norm=norm)
# accu_low = np.ma.masked_where((accu*1000*0.917)>17,accu)
# accu_high = np.ma.masked_where((accu*1000*0.917)<=17,accu)
# map1.scatter(x,y, c=accu_high*1000*0.917, marker='o', lw=4, edgecolor='', s=4, vmin=-18, vmax=22, cmap='Reds')
# map1.scatter(x,y, c=accu_low*1000*0.917, marker='o', lw=4, edgecolor='', s=4, norm=norm , cmap=cmocean.cm.thermal) #'seismic_r',cmocean.cm.dense_r, 'seismic_r', norm=norm
cblabel='accu (mm-we/yr)'
elif i>=list_length+nbiso:
LON=hor_array[:,0]
LAT=hor_array[:,1]
x,y=map1(LON,LAT)
age=hor_array[:,i-(list_length+nbiso)+3]
map1.scatter(x,y, c=age/1000., marker='o', lw=0., edgecolor='', s=dotsize)
cblabel='age (kyr B1950)'
    if MapLabel!='radar-lines':
        cb=plt.colorbar(orientation='horizontal', shrink=0.7, pad=0.1)
        cb.set_label(cblabel)
        if levels!='auto':
cb.set_ticks(levels)
cb.set_ticklabels(levels)
if is_drill:
xdrill,ydrill=map1(lon_drill,lat_drill)
map1.scatter(xdrill,ydrill, marker='*', c='y', edgecolor='k', s=70)
plt.tight_layout()
pp=PdfPages(RLDir+MapLabel+'.pdf')
pp.savefig(plt.figure(MapLabel))
pp.close()
plt.savefig(RLDir+MapLabel+'.'+output_format, format=output_format, bbox_inches='tight')
plt.close(fig)
plt.show()
|
|
from __future__ import division, print_function, absolute_import
import warnings
from numpy import finfo, sign, sqrt
from . import _zeros
_iter = 100
_xtol = 2e-12
_rtol = 4 * finfo(float).eps
__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth']
CONVERGED = 'converged'
SIGNERR = 'sign error'
CONVERR = 'convergence error'
flag_map = {0: CONVERGED, -1: SIGNERR, -2: CONVERR}
class RootResults(object):
""" Represents the root finding result.
Attributes
----------
root : float
Estimated root location.
iterations : int
Number of iterations needed to find the root.
function_calls : int
Number of times the function was called.
converged : bool
True if the routine converged.
flag : str
Description of the cause of termination.
"""
def __init__(self, root, iterations, function_calls, flag):
self.root = root
self.iterations = iterations
self.function_calls = function_calls
self.converged = flag == 0
try:
self.flag = flag_map[flag]
except KeyError:
self.flag = 'unknown error %d' % (flag,)
def __repr__(self):
attrs = ['converged', 'flag', 'function_calls',
'iterations', 'root']
m = max(map(len, attrs)) + 1
return '\n'.join([a.rjust(m) + ': ' + repr(getattr(self, a))
for a in attrs])
def results_c(full_output, r):
if full_output:
x, funcalls, iterations, flag = r
results = RootResults(root=x,
iterations=iterations,
function_calls=funcalls,
flag=flag)
return x, results
else:
return r
# Newton-Raphson method
def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
fprime2=None):
"""
Find a zero using the Newton-Raphson or secant method.
Find a zero of the function `func` given a nearby starting point `x0`.
The Newton-Raphson method is used if the derivative `fprime` of `func`
is provided, otherwise the secant method is used. If the second order
    derivative `fprime2` of `func` is provided, parabolic Halley's method
is used.
Parameters
----------
func : function
The function whose zero is wanted. It must be a function of a
single variable of the form f(x,a,b,c...), where a,b,c... are extra
arguments that can be passed in the `args` parameter.
x0 : float
An initial estimate of the zero that should be somewhere near the
actual zero.
fprime : function, optional
The derivative of the function when available and convenient. If it
is None (default), then the secant method is used.
args : tuple, optional
Extra arguments to be used in the function call.
tol : float, optional
The allowable error of the zero value.
maxiter : int, optional
Maximum number of iterations.
fprime2 : function, optional
The second order derivative of the function when available and
convenient. If it is None (default), then the normal Newton-Raphson
or the secant method is used. If it is given, parabolic Halley's
method is used.
Returns
-------
zero : float
Estimated location where function is zero.
See Also
--------
brentq, brenth, ridder, bisect
fsolve : find zeroes in n dimensions.
Notes
-----
The convergence rate of the Newton-Raphson method is quadratic,
the Halley method is cubic, and the secant method is
sub-quadratic. This means that if the function is well behaved
the actual error in the estimated zero is approximately the square
(cube for Halley) of the requested tolerance up to roundoff
error. However, the stopping criterion used here is the step size
and there is no guarantee that a zero has been found. Consequently
the result should be verified. Safer algorithms are brentq,
brenth, ridder, and bisect, but they all require that the root
first be bracketed in an interval where the function changes
sign. The brentq algorithm is recommended for general use in one
dimensional problems when such an interval has been found.
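    Examples
    --------
    Find the positive root of ``x**2 - 2`` (i.e. ``sqrt(2)``), first with the
    secant method and then with an explicit derivative:

    >>> f = lambda x: x**2 - 2
    >>> round(newton(f, 1.5), 6)
    1.414214
    >>> round(newton(f, 1.5, fprime=lambda x: 2*x), 6)
    1.414214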
"""
if tol <= 0:
raise ValueError("tol too small (%g <= 0)" % tol)
if maxiter < 1:
raise ValueError("maxiter must be greater than 0")
if fprime is not None:
        # Newton-Raphson method
# Multiply by 1.0 to convert to floating point. We don't use float(x0)
# so it still works if x0 is complex.
p0 = 1.0 * x0
fder2 = 0
for iter in range(maxiter):
myargs = (p0,) + args
fder = fprime(*myargs)
if fder == 0:
msg = "derivative was zero."
warnings.warn(msg, RuntimeWarning)
return p0
fval = func(*myargs)
if fprime2 is not None:
fder2 = fprime2(*myargs)
if fder2 == 0:
# Newton step
p = p0 - fval / fder
else:
# Parabolic Halley's method
discr = fder ** 2 - 2 * fval * fder2
if discr < 0:
p = p0 - fder / fder2
else:
p = p0 - 2 * fval / (fder + sign(fder) * sqrt(discr))
if abs(p - p0) < tol:
return p
p0 = p
else:
# Secant method
p0 = x0
if x0 >= 0:
p1 = x0 * (1 + 1e-4) + 1e-4
else:
p1 = x0 * (1 + 1e-4) - 1e-4
q0 = func(*((p0,) + args))
q1 = func(*((p1,) + args))
for iter in range(maxiter):
if q1 == q0:
if p1 != p0:
msg = "Tolerance of %s reached" % (p1 - p0)
warnings.warn(msg, RuntimeWarning)
return (p1 + p0) / 2.0
else:
p = p1 - q1 * (p1 - p0) / (q1 - q0)
if abs(p - p1) < tol:
return p
p0 = p1
q0 = q1
p1 = p
q1 = func(*((p1,) + args))
msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
raise RuntimeError(msg)
def bisect(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
Find root of a function within an interval.
Basic bisection routine to find a zero of the function `f` between the
arguments `a` and `b`. `f(a)` and `f(b)` cannot have the same signs.
Slow but sure.
Parameters
----------
f : function
Python function returning a number. `f` must be continuous, and
f(a) and f(b) must have opposite signs.
a : number
One end of the bracketing interval [a,b].
b : number
The other end of the bracketing interval [a,b].
xtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative.
rtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter cannot be smaller than its default value of
``4*np.finfo(float).eps``.
maxiter : number, optional
if convergence is not achieved in `maxiter` iterations, an error is
raised. Must be >= 0.
args : tuple, optional
containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where x is the root, and r is
a `RootResults` object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : RootResults (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
See Also
--------
brentq, brenth, bisect, newton
fixed_point : scalar fixed-point finder
fsolve : n-dimensional root-finding
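    Examples
    --------
    Find ``sqrt(2)`` as the root of ``x**2 - 2`` on the sign-changing
    interval ``[0, 2]``:

    >>> f = lambda x: x**2 - 2
    >>> round(bisect(f, 0, 2), 6)
    1.414214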
"""
if not isinstance(args, tuple):
args = (args,)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
return results_c(full_output, r)
def ridder(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
Find a root of a function in an interval.
Parameters
----------
f : function
Python function returning a number. f must be continuous, and f(a) and
f(b) must have opposite signs.
a : number
One end of the bracketing interval [a,b].
b : number
The other end of the bracketing interval [a,b].
xtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative.
rtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter cannot be smaller than its default value of
``4*np.finfo(float).eps``.
maxiter : number, optional
if convergence is not achieved in maxiter iterations, an error is
raised. Must be >= 0.
args : tuple, optional
containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a RootResults object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : RootResults (present if ``full_output = True``)
Object containing information about the convergence.
In particular, ``r.converged`` is True if the routine converged.
See Also
--------
brentq, brenth, bisect, newton : one-dimensional root-finding
fixed_point : scalar fixed-point finder
Notes
-----
Uses [Ridders1979]_ method to find a zero of the function `f` between the
arguments `a` and `b`. Ridders' method is faster than bisection, but not
    generally as fast as the Brent routines. [Ridders1979]_ provides the
classic description and source of the algorithm. A description can also be
found in any recent edition of Numerical Recipes.
The routine used here diverges slightly from standard presentations in
order to be a bit more careful of tolerance.
References
----------
.. [Ridders1979]
Ridders, C. F. J. "A New Algorithm for Computing a
Single Root of a Real Continuous Function."
IEEE Trans. Circuits Systems 26, 979-980, 1979.
"""
if not isinstance(args, tuple):
args = (args,)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
return results_c(full_output, r)
def brentq(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""
Find a root of a function in a bracketing interval using Brent's method.
Uses the classic Brent's method to find a zero of the function `f` on
the sign changing interval [a , b]. Generally considered the best of the
rootfinding routines here. It is a safe version of the secant method that
uses inverse quadratic extrapolation. Brent's method combines root
bracketing, interval bisection, and inverse quadratic interpolation. It is
sometimes known as the van Wijngaarden-Dekker-Brent method. Brent (1973)
claims convergence is guaranteed for functions computable within [a,b].
[Brent1973]_ provides the classic description of the algorithm. Another
description can be found in a recent edition of Numerical Recipes, including
[PressEtal1992]_. Another description is at
http://mathworld.wolfram.com/BrentsMethod.html. It should be easy to
understand the algorithm just by reading our code. Our code diverges a bit
from standard presentations: we choose a different formula for the
extrapolation step.
Parameters
----------
f : function
Python function returning a number. The function :math:`f`
must be continuous, and :math:`f(a)` and :math:`f(b)` must
have opposite signs.
a : number
One end of the bracketing interval :math:`[a, b]`.
b : number
The other end of the bracketing interval :math:`[a, b]`.
xtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative. For nice functions, Brent's
        method will often satisfy the above condition with ``xtol/2``
and ``rtol/2``. [Brent1973]_
rtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter cannot be smaller than its default value of
``4*np.finfo(float).eps``. For nice functions, Brent's
        method will often satisfy the above condition with ``xtol/2``
and ``rtol/2``. [Brent1973]_
maxiter : number, optional
if convergence is not achieved in maxiter iterations, an error is
raised. Must be >= 0.
args : tuple, optional
containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a RootResults object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : RootResults (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
See Also
--------
multivariate local optimizers
`fmin`, `fmin_powell`, `fmin_cg`, `fmin_bfgs`, `fmin_ncg`
nonlinear least squares minimizer
`leastsq`
constrained multivariate optimizers
`fmin_l_bfgs_b`, `fmin_tnc`, `fmin_cobyla`
global optimizers
`basinhopping`, `brute`, `differential_evolution`
local scalar minimizers
`fminbound`, `brent`, `golden`, `bracket`
n-dimensional root-finding
`fsolve`
one-dimensional root-finding
`brenth`, `ridder`, `bisect`, `newton`
scalar fixed-point finder
`fixed_point`
Notes
-----
`f` must be continuous. f(a) and f(b) must have opposite signs.
References
----------
.. [Brent1973]
Brent, R. P.,
*Algorithms for Minimization Without Derivatives*.
Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.
.. [PressEtal1992]
Press, W. H.; Flannery, B. P.; Teukolsky, S. A.; and Vetterling, W. T.
*Numerical Recipes in FORTRAN: The Art of Scientific Computing*, 2nd ed.
Cambridge, England: Cambridge University Press, pp. 352-355, 1992.
Section 9.3: "Van Wijngaarden-Dekker-Brent Method."
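    Examples
    --------
    Find ``sqrt(2)`` as the root of ``x**2 - 2`` bracketed by ``[0, 2]``:

    >>> f = lambda x: x**2 - 2
    >>> round(brentq(f, 0, 2), 6)
    1.414214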
"""
if not isinstance(args, tuple):
args = (args,)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
return results_c(full_output, r)
def brenth(f, a, b, args=(),
xtol=_xtol, rtol=_rtol, maxiter=_iter,
full_output=False, disp=True):
"""Find root of f in [a,b].
A variation on the classic Brent routine to find a zero of the function f
between the arguments a and b that uses hyperbolic extrapolation instead of
inverse quadratic extrapolation. There was a paper back in the 1980's ...
f(a) and f(b) cannot have the same signs. Generally on a par with the
brent routine, but not as heavily tested. It is a safe version of the
secant method that uses hyperbolic extrapolation. The version here is by
Chuck Harris.
Parameters
----------
f : function
Python function returning a number. f must be continuous, and f(a) and
f(b) must have opposite signs.
a : number
One end of the bracketing interval [a,b].
b : number
The other end of the bracketing interval [a,b].
xtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter must be nonnegative. As with `brentq`, for nice
functions the method will often satisfy the above condition
        with ``xtol/2`` and ``rtol/2``.
rtol : number, optional
The computed root ``x0`` will satisfy ``np.allclose(x, x0,
atol=xtol, rtol=rtol)``, where ``x`` is the exact root. The
parameter cannot be smaller than its default value of
``4*np.finfo(float).eps``. As with `brentq`, for nice functions
        the method will often satisfy the above condition with
``xtol/2`` and ``rtol/2``.
maxiter : number, optional
if convergence is not achieved in maxiter iterations, an error is
raised. Must be >= 0.
args : tuple, optional
containing extra arguments for the function `f`.
        `f` is called by ``f(x, *args)``.
full_output : bool, optional
If `full_output` is False, the root is returned. If `full_output` is
True, the return value is ``(x, r)``, where `x` is the root, and `r` is
a RootResults object.
disp : bool, optional
If True, raise RuntimeError if the algorithm didn't converge.
Returns
-------
x0 : float
Zero of `f` between `a` and `b`.
r : RootResults (present if ``full_output = True``)
Object containing information about the convergence. In particular,
``r.converged`` is True if the routine converged.
See Also
--------
fmin, fmin_powell, fmin_cg,
fmin_bfgs, fmin_ncg : multivariate local optimizers
leastsq : nonlinear least squares minimizer
fmin_l_bfgs_b, fmin_tnc, fmin_cobyla : constrained multivariate optimizers
basinhopping, differential_evolution, brute : global optimizers
fminbound, brent, golden, bracket : local scalar minimizers
fsolve : n-dimensional root-finding
brentq, brenth, ridder, bisect, newton : one-dimensional root-finding
fixed_point : scalar fixed-point finder
"""
if not isinstance(args, tuple):
args = (args,)
if xtol <= 0:
raise ValueError("xtol too small (%g <= 0)" % xtol)
if rtol < _rtol:
raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
r = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args, full_output, disp)
return results_c(full_output, r)
|
|
import curses
from resources.digits import bigDigits,bigDigitsIndexes
NumericWidth = 142
class windowManager:
def __init__(self,stdscr):
curses.curs_set(0)
self.initializeColors()
self.initializeWindows(stdscr)
self.resizeWindows()
        self.blinking = False
def initializeColors(self):
if curses.can_change_color():
curses.init_color(1,100,100,100) #color 1 is grey
curses.init_pair(1,curses.COLOR_CYAN,1) #timer
curses.init_pair(2,curses.COLOR_WHITE,1) # background
curses.init_pair(3,1,curses.COLOR_CYAN) # scramble
else:
curses.init_pair(1,curses.COLOR_WHITE,curses.COLOR_BLACK)
curses.init_pair(2,curses.COLOR_WHITE,curses.COLOR_BLACK)
def initializeWindows(self,stdscr):
self.mainScreen = stdscr
self.winTimer = curses.newwin(1,1,0,0)
self.winLog = curses.newwin(1,1,0,0)
self.winOptions = curses.newwin(1,1,0,0)
self.winScramble = curses.newwin(1,1,0,0)
self.winStats = curses.newwin(1,1,0,0)
def resizeWindows(self):
(maxY,maxX) = self.mainScreen.getmaxyx()
self.mainScreen.bkgd(' ',curses.color_pair(1))
self.mainScreen.refresh()
        if maxX > NumericWidth:
self.winTimer.mvwin(1,int((maxX-NumericWidth)/2))
self.winTimer.resize(16,NumericWidth+1)
self.winTimer.bkgd(' ',curses.color_pair(1))
self.winScramble.mvwin(17,0)
self.winScramble.resize(3,maxX)
self.winScramble.bkgd(' ',curses.color_pair(1))
self.winOptions.mvwin(21,0)
self.winOptions.resize(7,maxX)
self.winOptions.bkgd(' ',curses.color_pair(1))
self.winLog.mvwin(30,2)
self.winLog.resize(30,60)
self.winLog.bkgd(' ',curses.color_pair(1))
else:
raise ValueError('toosmall')
curses.doupdate()
def centerTime(self):
(maxY,maxX) = self.mainScreen.getmaxyx()
self.winTimer.mvwin(int((maxY-16)/3),int((maxX-NumericWidth)/3))
self.mainScreen.bkgd(' ',curses.color_pair(1))
self.mainScreen.refresh()
def showScramble(self,scramble):
#self.winScramble.erase()
(maxY,maxX)=self.winScramble.getmaxyx()
startXCoord = int((maxX-len(scramble))/2)
startYCoord = maxY-1
self.winScramble.erase()
self.winScramble.border()
self.winScramble.addstr(1,startXCoord,scramble)
self.winScramble.refresh()
def showLog(self,dataObj):
self.winLog.clear()
self.winLog.border()
line = 1
for i in dataObj:
stringToWrite = str(i[1])+ ". "
time=i[0]
            if time is None:
stringToWrite += " DNF"
else:
                mins = int(time / 60)
                secs = time % 60
                timeToWrite=""
                if mins > 0:
                    timeToWrite += str(mins) + ":"
                    timeToWrite += "{:0>5.2f}".format(secs)
                else:
                    timeToWrite += "{0:.2f}".format(secs)
stringToWrite += timeToWrite.rjust(8)
if i[2]:
stringToWrite+="+"
self.winLog.addstr(line,2,stringToWrite)
line +=1
self.winLog.refresh()
def showSessions(self,names,current):
self.winOptions.clear()
self.winOptions.border()
self.winOptions.addstr(4,1,"(Q)uit , (P)lus 2 , (D)NF , (E)rase Session , (R)emove Time, (space) Start")
column = 10
for curNum,curName in sorted(names.items()):
attributes = curses.A_NORMAL
if curNum == str(current):
attributes = curses.A_REVERSE
strToWrite = '{:^30}'.format(curNum +'. ' + curName)
self.winOptions.addstr(2,column,strToWrite,attributes)
column += len(strToWrite)
self.winOptions.refresh()
def ask(self,question,context):
if question == 'add':
strToWrite = "Do you want to create a new session? (y/n): "
self.winOptions.clear()
self.winOptions.border()
self.winOptions.addstr(2,7,strToWrite)
self.winOptions.refresh()
response = self.winOptions.getkey()
if response.lower() == 'y':
curses.echo()
curses.curs_set(1)
self.winOptions.addstr(" Name: ")
seshName = self.winOptions.getstr()
curses.curs_set(0)
curses.noecho()
return seshName
else:
return None
if question == 'removeSession':
strToWrite = "Do you want to delete this session and all of its times? (y/n): "
self.winOptions.clear()
self.winOptions.border()
self.winOptions.addstr(2,7,strToWrite)
self.winOptions.refresh()
response = self.winOptions.getkey()
if response.lower() == 'y':
return True
else:
return False
def drawTime(self,time,positive):
if not positive:
            if int(time) in (2, 3, 4):
if not self.blinking:
if (int(time*10) % 10) == 1:
self.mainScreen.bkgd(' ',curses.color_pair(3))
self.mainScreen.refresh()
self.blinking = not self.blinking
if self.blinking:
if (int(time*10) % 10) == 0:
self.mainScreen.bkgd(' ',curses.color_pair(1))
self.mainScreen.refresh()
self.blinking = not self.blinking
            if int(time) in (0, 1):
if not self.blinking:
if (int(time*10) % 4) == 2:
self.mainScreen.bkgd(' ',curses.color_pair(3))
self.mainScreen.refresh()
self.blinking = not self.blinking
if self.blinking:
if (int(time*10) % 4) == 0:
self.mainScreen.bkgd(' ',curses.color_pair(1))
self.mainScreen.refresh()
self.blinking = not self.blinking
if positive and self.blinking:
self.mainScreen.bkgd(' ',curses.color_pair(1))
self.mainScreen.refresh()
self.blinking = not self.blinking
digits = self.secondsToDigits(time)
i=0
for digitsLine in bigDigits:
lineToWrite = ""
lineToWrite += self.fetchDigitChunk(digitsLine,digits['tenmins'],time>600) #tens place of mins
lineToWrite += self.fetchDigitChunk(digitsLine,digits['minutes'],time>60) #singles of mins
lineToWrite += self.fetchDigitChunk(digitsLine,11,time>60) # add colon
lineToWrite += self.fetchDigitChunk(digitsLine,digits['tensPlace'],time>10) # add tensPlace
lineToWrite += self.fetchDigitChunk(digitsLine,digits['onesPlace'],True) # add onesPlace
lineToWrite += self.fetchDigitChunk(digitsLine,10,True) # add decimal
lineToWrite += self.fetchDigitChunk(digitsLine,digits['tenths'],True) # add tenths
lineToWrite += self.fetchDigitChunk(digitsLine,digits['hundredths'],positive) # add hundredths
indentation = (NumericWidth - len(lineToWrite))//2
self.winTimer.addstr(i,indentation,lineToWrite)
i += 1
def secondsToDigits(self,time):
timeDigits = {}
timeDigits['tenmins'] = int(time/600)
timeDigits['minutes'] = int(time/60) % 10
seconds = time%60
timeDigits['tensPlace'] = int(seconds/10)
timeDigits['onesPlace'] = int(seconds%10)
jiffies = seconds % 1
timeDigits['tenths'] = int(jiffies*10)
timeDigits['hundredths'] = int(jiffies*100 % 10)
return timeDigits
def fetchDigitChunk(self,line,number,show):
# 10 gets . 11 get :
if show:
return line[bigDigitsIndexes[number]:bigDigitsIndexes[number+1]]
else:
size = bigDigitsIndexes[number+1]-bigDigitsIndexes[number]
space = ""
for i in range(0,size):
space += " "
return space
def getKey(self):
return self.winTimer.getkey() # wait for ch input from user
def getCh(self):
return self.winTimer.getch() # wait for ch input from user
def noDelayOn(self,onSwitch):
self.winTimer.nodelay(onSwitch)
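# Illustrative usage sketch (assumes a curses entry point; ``main`` below is a
# hypothetical wrapper and not part of this module):
#
#     def main(stdscr):
#         wm = windowManager(stdscr)
#         wm.showScramble("R U R' U' R' F R2 U' R' U' R U R' F'")
#         wm.drawTime(12.34, True)
#         wm.getKey()
#
#     curses.wrapper(main)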
|
|
'''Skype settings.
'''
import weakref
import sys
from utils import *
class ISettings(object):
'''Represents Skype settings. Access using L{ISkype.Settings<skype.ISkype.Settings>}.
'''
def __init__(self, Skype):
'''__init__.
@param Skype: Skype
@type Skype: L{ISkype}
'''
self._SkypeRef = weakref.ref(Skype)
def Avatar(self, Id=1, Set=None):
'''Sets user avatar picture from file.
@param Id: Optional avatar Id.
@type Id: int
@param Set: New avatar file name.
@type Set: unicode
@deprecated: Use L{LoadAvatarFromFile} instead.
'''
from warnings import warn
warn('ISettings.Avatar: Use ISettings.LoadAvatarFromFile instead.', DeprecationWarning, stacklevel=2)
if Set is None:
raise TypeError('Argument \'Set\' is mandatory!')
self.LoadAvatarFromFile(Set, Id)
def LoadAvatarFromFile(self, Filename, AvatarId=1):
'''Loads user avatar picture from file.
@param Filename: Name of the avatar file.
@type Filename: unicode
@param AvatarId: Optional avatar Id.
@type AvatarId: int
'''
s = 'AVATAR %s %s' % (AvatarId, Filename)
self._Skype._DoCommand('SET %s' % s, s)
def ResetIdleTimer(self):
'''Reset Skype idle timer.
'''
self._Skype._DoCommand('RESETIDLETIMER')
def RingTone(self, Id=1, Set=None):
'''Returns/sets a ringtone.
@param Id: Ringtone Id
@type Id: int
@param Set: Path to new ringtone or None if the current path should be queried.
@type Set: unicode
@return: Current path if Set=None, None otherwise.
@rtype: unicode or None
'''
return self._Skype._Property('RINGTONE', Id, '', Set)
def RingToneStatus(self, Id=1, Set=None):
'''Enables/disables a ringtone.
@param Id: Ringtone Id
@type Id: int
@param Set: True/False if the ringtone should be enabled/disabled or None if the current
status should be queried.
@type Set: bool
@return: Current status if Set=None, None otherwise.
@rtype: bool
'''
if Set is None:
return self._Skype._Property('RINGTONE', Id, 'STATUS') == 'ON'
return self._Skype._Property('RINGTONE', Id, 'STATUS', cndexp(Set, 'ON', 'OFF'))
def SaveAvatarToFile(self, Filename, AvatarId=1):
'''Saves user avatar picture to file.
@param Filename: Destination path.
@type Filename: unicode
@param AvatarId: Avatar Id
@type AvatarId: int
'''
s = 'AVATAR %s %s' % (AvatarId, Filename)
self._Skype._DoCommand('GET %s' % s, s)
def _Get_Skype(self):
skype = self._SkypeRef()
if skype:
return skype
        raise Exception('Skype4Py: the underlying Skype object is no longer available')
_Skype = property(_Get_Skype)
def _GetAEC(self):
return self._Skype.Variable('AEC') == 'ON'
def _SetAEC(self, value):
self._Skype.Variable('AEC', cndexp(value, 'ON', 'OFF'))
AEC = property(_GetAEC, _SetAEC,
doc='''Automatic echo cancellation state.
@type: bool
@warning: Starting with Skype for Windows 3.6, this property has no effect.
It can still be set for backwards compatibility reasons.
''')
def _GetAGC(self):
return self._Skype.Variable('AGC') == 'ON'
def _SetAGC(self, value):
self._Skype.Variable('AGC', cndexp(value, 'ON', 'OFF'))
AGC = property(_GetAGC, _SetAGC,
doc='''Automatic gain control state.
@type: bool
@warning: Starting with Skype for Windows 3.6, this property has no effect.
It can still be set for backwards compatibility reasons.
''')
def _GetAudioIn(self):
return self._Skype.Variable('AUDIO_IN')
def _SetAudioIn(self, value):
self._Skype.Variable('AUDIO_IN', value)
AudioIn = property(_GetAudioIn, _SetAudioIn,
doc='''Name of an audio input device.
@type: unicode
''')
def _GetAudioOut(self):
return self._Skype.Variable('AUDIO_OUT')
def _SetAudioOut(self, value):
self._Skype.Variable('AUDIO_OUT', value)
AudioOut = property(_GetAudioOut, _SetAudioOut,
doc='''Name of an audio output device.
@type: unicode
''')
def _GetAutoAway(self):
return self._Skype.Variable('AUTOAWAY') == 'ON'
def _SetAutoAway(self, value):
self._Skype.Variable('AUTOAWAY', cndexp(value, 'ON', 'OFF'))
AutoAway = property(_GetAutoAway, _SetAutoAway,
doc='''Auto away status.
@type: bool
''')
def _GetLanguage(self):
return self._Skype.Variable('UI_LANGUAGE')
def _SetLanguage(self, value):
self._Skype.Variable('UI_LANGUAGE', value)
Language = property(_GetLanguage, _SetLanguage,
doc='''Language of the Skype client as an ISO code.
@type: unicode
''')
def _GetPCSpeaker(self):
return self._Skype.Variable('PCSPEAKER') == 'ON'
def _SetPCSpeaker(self, value):
self._Skype.Variable('PCSPEAKER', cndexp(value, 'ON', 'OFF'))
PCSpeaker = property(_GetPCSpeaker, _SetPCSpeaker,
doc='''PCSpeaker status.
@type: bool
''')
def _GetRinger(self):
return self._Skype.Variable('RINGER')
def _SetRinger(self, value):
self._Skype.Variable('RINGER', value)
Ringer = property(_GetRinger, _SetRinger,
doc='''Name of a ringer device.
@type: unicode
''')
def _GetVideoIn(self):
return self._Skype.Variable('VIDEO_IN')
def _SetVideoIn(self, value):
self._Skype.Variable('VIDEO_IN', value)
VideoIn = property(_GetVideoIn, _SetVideoIn,
doc='''Name of a video input device.
@type: unicode
''')
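# Usage sketch (assumes the standard Skype4Py entry point; not part of this
# module and untested here):
#
#   import Skype4Py
#   skype = Skype4Py.Skype()
#   skype.Attach()                           # requires a running Skype client
#   skype.Settings.AutoAway = False          # toggle a boolean setting
#   print skype.Settings.Language            # read the UI language ISO code
#   skype.Settings.LoadAvatarFromFile(u'avatar.jpg')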
|
|
from django.db import models
import reversion
# Create your models here.
@reversion.register()
class Story(models.Model):
title = models.CharField(max_length=1024)
shortname = models.CharField(max_length=64)
def __str__(self):
return self.title
class StoryBased(models.Model):
story = models.ForeignKey(Story, on_delete=models.CASCADE)
class Meta:
abstract = True
class TimeFrame(models.Model):
tf_name = models.CharField(max_length=512)
tf_start = models.DateTimeField(null=True)
tf_end = models.DateTimeField(null=True)
def __str__(self):
return self.tf_name
class FramedInTime(models.Model):
timeframe = models.ForeignKey(
TimeFrame,
related_name='%(class)s_fit_related',
on_delete=models.PROTECT)
class Meta:
abstract = True
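# Django substitutes %(class)s with the lower-cased name of the concrete model,
# so Character (defined below) gets the reverse accessor
# TimeFrame.character_fit_related and Scene gets TimeFrame.scene_fit_related.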
class Annotated(models.Model):
    '''
    An abstract class which describes an annotated object.
    As such, an Annotated object has a "notes" attribute holding free-form text content.
    '''
notes = models.TextField()
class Meta:
abstract = True
class Described(models.Model):
    '''
    A described object carries a free-form description; models typically
    combine it with Annotated, which provides the attached notes.
    '''
description = models.TextField()
class Meta:
abstract = True
class Statused(models.Model):
STATUS_LIST = (
(1, "Started"),
(2, "Draft"),
(3, "Work In Progress"),
(4, "Beta"),
(5, "Done")
)
status = models.IntegerField(choices=STATUS_LIST)
class Meta:
abstract = True
@reversion.register()
class Attribute(StoryBased):
att_key = models.CharField(max_length=256)
att_value = models.CharField(max_length=2048)
@reversion.register()
class Chapter(Annotated, Described):
chap_no = models.IntegerField()
chap_title = models.CharField(max_length=256)
def __str__(self):
return self.chap_title
@reversion.register()
class Gender(StoryBased):
gender_name = models.CharField(max_length=32)
def __str__(self):
return self.gender_name
@reversion.register()
class Idea(StoryBased, Annotated, Statused):
idea_name = models.CharField(max_length=1024)
def __str__(self):
return self.idea_name
@reversion.register()
class Occupation(StoryBased, Annotated, Described):
occ_name = models.CharField(max_length=1024)
def __str__(self):
return self.occ_name
@reversion.register()
class Character(StoryBased, Annotated, Described, FramedInTime):
chara_whole_name = models.CharField(max_length=512)
chara_nickname = models.CharField(max_length=32)
chara_gender = models.ForeignKey(Gender, on_delete=models.PROTECT)
def __str__(self):
return self.chara_whole_name
@reversion.register()
class Location(StoryBased, Annotated, Described, FramedInTime):
loc_name = models.CharField(max_length=128)
loc_latitude = models.DecimalField(
max_digits=9, decimal_places=6, null=True)
loc_longitude = models.DecimalField(max_digits=9,
decimal_places=6,
null=True)
loc_altitude = models.IntegerField(null=True)
def __str__(self):
return self.loc_name
@reversion.register()
class Artifact(StoryBased, Annotated, Described):
art_name = models.CharField(max_length=512)
def __str__(self):
return self.art_name
@reversion.register()
class Strand(StoryBased, Annotated, Described):
strand_name = models.CharField(max_length=512)
def __str__(self):
return self.strand_name
@reversion.register()
class Scene(StoryBased, Annotated, Described, FramedInTime, Statused):
scene_title = models.CharField(max_length=512)
# scene_location = models.ForeignKey(Location, on_delete=models.PROTECT)
scene_mainstrand = models.ForeignKey(Strand, on_delete=models.PROTECT)
def __str__(self):
return self.scene_title
@reversion.register()
class SceneFrame(models.Model):
    '''
    This one is a bit tricky: some time frames are best described by a
    start scene and an end scene. That is what a SceneFrame represents.
    '''
sf_name = models.CharField(max_length=512)
sf_start_scene = models.ForeignKey(
Scene, related_name='sc_frame_start_related', on_delete=models.CASCADE)
sf_end_scene = models.ForeignKey(
Scene, related_name='sc_frame_end_related', on_delete=models.CASCADE)
@reversion.register()
class Part(Annotated):
part_name = models.CharField(max_length=512)
part_number = models.IntegerField()
'''
Associations
'''
@reversion.register()
class CharacterOccupation(Annotated, FramedInTime):
cocc_character = models.ForeignKey(Character, on_delete=models.CASCADE)
cocc_occupation = models.ForeignKey(Occupation, on_delete=models.CASCADE)
@reversion.register()
class Relationship(Annotated, Described):
rel_char1 = models.ForeignKey(Character,
related_name="char1_related",
on_delete=models.CASCADE)
rel_char2 = models.ForeignKey(Character,
related_name="char2_related",
on_delete=models.CASCADE)
rel_scene_frame = models.ForeignKey(SceneFrame,
null=True,
on_delete=models.SET_NULL)
@reversion.register()
class SceneCharacter(models.Model):
scene = models.ForeignKey(Scene, on_delete=models.CASCADE)
character = models.ForeignKey(Character, on_delete=models.CASCADE)
@reversion.register()
class SceneStrand(models.Model):
scene = models.ForeignKey(Scene, on_delete=models.CASCADE)
strand = models.ForeignKey(Strand, on_delete=models.CASCADE)
@reversion.register()
class SceneLocation(models.Model):
scene = models.ForeignKey(Scene, on_delete=models.CASCADE)
location = models.ForeignKey(Location, on_delete=models.CASCADE)
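# ORM usage sketch (hypothetical data, shown only for illustration):
#
#   story = Story.objects.create(title="The Long Voyage", shortname="voyage")
#   strand = Strand.objects.create(story=story, strand_name="Main plot",
#                                  notes="", description="")
#   Scene.objects.filter(story=story, scene_mainstrand=strand, status=3)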
|
|
from telegram.ext import Updater, MessageHandler, Filters, CommandHandler
from .permissioncommandhandler import PermissionCommandHandler
from .users import UserManager
from .conversations import ConversationManager, ConversationHandler
from .chats import ChatManager
from .blockdispatcher import BlockDispatcher
from threading import Thread
from functools import partial
import redis
import argparse
import logging
import configparser
class NPTelegramBot(object):
FLAGS = ["admin", "def_edit", "user_flags"]
def __init__(self, config):
self.logger = logging.getLogger(__name__)
self.config = config
if "token" not in config:
print("Cannot load token!")
raise RuntimeError()
tg_token = config["token"]
if "redis_host" in config:
redis_args = {}
redis_args["host"] = config["redis_host"]
redis_args["db"] = config["redis_db_num"]
if "redis_port" in config:
redis_args["port"] = config["redis_port"]
if "redis_password" in config:
redis_args["password"] = config["redis_password"]
self.store = redis.StrictRedis(decode_responses=True,
**redis_args)
else:
print("No backing store specified in config file!")
raise RuntimeError()
self.conversations = ConversationManager()
self.users = UserManager(self.store)
self.chats = ChatManager(self.store)
self.chats.add_join_filter(self.chats.block_filter)
self.thread = None
self.updater = Updater(token=tg_token)
self.dispatcher = BlockDispatcher(self.updater, self.users)
@staticmethod
def parse_cli_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config", dest="config",
help="Configuration File to use")
parser.add_argument("-b", "--bot", dest="bot",
help="Bot name from configuration file to use")
args = parser.parse_args()
if not args.config:
parser.print_help()
raise RuntimeError("Config file argument required!")
if not args.bot:
parser.print_help()
raise RuntimeError("Bot name argument required!")
        config = configparser.ConfigParser()
        try:
            if not config.read(args.config):
                raise RuntimeError("Cannot read config file!")
        except configparser.Error:
            raise RuntimeError("Cannot parse config file!")
if args.bot not in config.sections():
raise RuntimeError("Bot {0} not in config file!".format(args.bot))
return config[args.bot]
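    # Example configuration file (a sketch; the section name must match the
    # -b/--bot argument and the keys mirror what __init__ reads):
    #
    #   [mybot]
    #   token = 123456:ABC-DEF...
    #   redis_host = localhost
    #   redis_db_num = 0
    #   redis_port = 6379
    #   redis_password = secret
    #   webhook_url = https://example.com/mybot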
def setup_commands(self):
# Make sure the message handlers are in different groups so they are
# always run
self.dispatcher.add_handler(MessageHandler([Filters.text],
self.handle_message),
group=1)
self.dispatcher.add_handler(MessageHandler([Filters.status_update],
self.chats.process_status_update),
group=2)
        # Default commands. These all require a private message by default, just
        # so they don't accidentally spam groups.
self.dispatcher.add_handler(PermissionCommandHandler('start',
[self.require_privmsg],
self.handle_help))
self.dispatcher.add_handler(PermissionCommandHandler('help',
[self.require_privmsg],
self.handle_help))
self.dispatcher.add_handler(PermissionCommandHandler('settings',
[self.require_privmsg],
self.handle_help))
self.dispatcher.add_handler(CommandHandler('cancel',
self.conversations.cancel))
self.dispatcher.add_handler(PermissionCommandHandler('userregister',
[self.require_privmsg],
self.users.register))
self.dispatcher.add_handler(ConversationHandler('useraddflag',
[self.require_privmsg,
partial(self.require_flag, flag="admin")],
self.conversations,
self.users.add_flag))
self.dispatcher.add_handler(ConversationHandler('userrmflag',
[self.require_privmsg,
partial(self.require_flag, flag="admin")],
self.conversations,
self.users.remove_flag))
self.dispatcher.add_handler(ConversationHandler('userblock',
[self.require_privmsg,
partial(self.require_flag, flag="admin")],
self.conversations,
self.users.block_user))
self.dispatcher.add_handler(ConversationHandler('groupbroadcast',
[self.require_privmsg,
partial(self.require_flag, flag="admin")],
self.conversations,
self.chats.broadcast))
self.dispatcher.add_handler(PermissionCommandHandler('grouplist',
[self.require_privmsg,
partial(self.require_flag, flag="admin")],
self.chats.list_known_chats))
self.dispatcher.add_handler(ConversationHandler('groupleave',
[self.require_privmsg,
partial(self.require_flag, flag="admin")],
self.conversations,
self.chats.leave_chat))
self.dispatcher.add_handler(ConversationHandler('groupblock',
[self.require_privmsg,
partial(self.require_flag, flag="admin")],
self.conversations,
partial(self.chats.leave_chat, block=True)))
# self.dispatcher.add_handler(PermissionCommandHandler('outputcommands',
# [self.require_privmsg,
# partial(self.require_flag, flag="admin")],
# self.output_commands))
# On errors, just print to console and hope someone sees it
self.dispatcher.add_error_handler(self.handle_error)
def handle_help(self, bot, update):
help_text = ["Hi! I'm an NP Telegram Bot! If I'm displaying this message, it means whoever wrote me didn't override the handle_help function. They should do that!"]
bot.sendMessage(update.message.chat.id,
"\n".join(help_text),
parse_mode="HTML",
disable_web_page_preview=True)
def handle_error(self, bot, update, error):
# TODO Add ability for bot to message owner with stack traces
        self.logger.warning("Exception thrown! %s", error)
def try_register(self, bot, update):
user_id = update.message.from_user.id
if not self.users.is_valid_user(user_id):
self.users.register(bot, update)
# Always returns true, as running any command will mean the user is
# registered. We just want to make sure they're in the DB so flags can
# be added if needed.
return True
    # When used with PermissionCommandHandler, this function must be curried
    # (e.g. via functools.partial) with the flag we want to check for.
def require_flag(self, bot, update, flag):
user_id = update.message.from_user.id
if not self.users.is_valid_user(user_id) or not self.users.has_flag(user_id, flag):
bot.sendMessage(update.message.chat.id,
text="You do not have the required permissions to run this command.")
return False
return True
def require_privmsg(self, bot, update):
if update.message.chat.id < 0:
bot.sendMessage(update.message.chat.id,
                            reply_to_message_id=update.message.message_id,
text="Please message that command to me. Only the following commands are allowed in public chats:\n- /def")
return False
return True
def output_commands(self, bot, update):
command_str = ""
for m in self.modules:
command_str += m.commands() + "\n"
bot.sendMessage(update.message.chat.id,
text=command_str)
def handle_message(self, bot, update):
# Ignore messages from groups
if update.message.chat.id < 0:
return
if self.conversations.check(bot, update):
return
self.try_register(bot, update)
self.handle_help(bot, update)
def start_webhook_thread(self):
if "webhook_url" not in self.config:
print("No webhook URL to bind to!")
raise RuntimeError()
self.updater.bot.setWebhook(webhook_url=self.config["webhook_url"])
# Steal the queue from the updater.
self.update_queue = self.updater.update_queue
self.thread = Thread(target=self.dispatcher.start, name='dispatcher')
self.thread.start()
def add_webhook_update(self, update):
self.update_queue.put(update)
def start_loop(self):
self.updater.start_polling()
self.updater.idle()
def shutdown(self):
if self.thread:
self.thread.join(1)
def create_webhook_bot(config):
bot = NPTelegramBot(config)
bot.setup_commands()
bot.start_webhook_thread()
return bot
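# Minimal polling-mode entry point, sketched from the helpers above (webhook
# deployments should use create_webhook_bot() instead); assumes the config file
# layout expected by parse_cli_arguments().
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    bot_config = NPTelegramBot.parse_cli_arguments()
    polling_bot = NPTelegramBot(bot_config)
    polling_bot.setup_commands()
    try:
        polling_bot.start_loop()
    finally:
        polling_bot.shutdown()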
|
|
import numpy as np
import mxnet as mx
import math
from mxnet.test_utils import *
# Common
def test_lr_wd_mult():
data = mx.sym.Variable('data')
bias = mx.sym.Variable('fc1_bias', lr_mult=1.0)
fc1 = mx.sym.FullyConnected(data=data, bias=bias, name='fc1', num_hidden=10, lr_mult=0)
fc2 = mx.sym.FullyConnected(data=fc1, name='fc2', num_hidden=10, wd_mult=0.5)
mod = mx.mod.Module(symbol=fc2, label_names=None, context=default_context())
mod.bind(data_shapes=[('data', (5,10))])
mod.init_params(initializer=mx.init.Uniform(1.0))
mod.init_optimizer(optimizer_params={'learning_rate': 1.0})
args1, _ = mod.get_params()
args1 = {k: v.asnumpy() for k, v in args1.items()}
mod.forward(mx.io.DataBatch(data=[mx.random.uniform(low=-1.0, high=1.0, shape=(5,10))], label=None), is_train=True)
mod.backward(mod.get_outputs())
mod.update()
args2, _ = mod.get_params()
args2 = {k: v.asnumpy() for k, v in args2.items()}
assert mod._optimizer.lr_mult == {'fc1_bias': 1.0, 'fc1_weight': 0.0}
assert mod._optimizer.wd_mult == {'fc2_bias': 0.5, 'fc2_weight': 0.5, 'fc1_bias': 0.0}
assert mx.test_utils.almost_equal(args1['fc1_weight'], args2['fc1_weight'], 1e-10)
assert not mx.test_utils.almost_equal(args1['fc1_bias'], args2['fc1_bias'], 1e-1)
assert not mx.test_utils.almost_equal(args1['fc2_weight'], args2['fc2_weight'], 1e-1)
def compare_optimizer(opt1, opt2, shape, dtype):
w1 = mx.random.uniform(shape=shape, ctx=default_context(), dtype=dtype)
g1 = mx.random.uniform(shape=shape, ctx=default_context(), dtype=dtype)
w2 = w1.copyto(default_context())
g2 = g1.copyto(default_context())
state1 = opt1.create_state(0, w1)
state2 = opt2.create_state(0, w2)
if state1 is not None and state2 is not None:
for s1, s2, in zip(state1, state2):
if s1 is not None or s2 is not None:
assert(same(s1.asnumpy(), s2.asnumpy()))
opt1.update(0, w1, g1, state1)
opt2.update(0, w2, g2, state2)
if state1 is not None and state2 is not None:
for s1, s2, in zip(state1, state2):
if s1 is not None or s2 is not None:
assert_almost_equal(s1.asnumpy(), s2.asnumpy(), rtol=1e-4, atol=1e-5)
assert_almost_equal(w1.asnumpy(), w2.asnumpy(), rtol=1e-4, atol=1e-5)
# SGD
class PySGD(mx.optimizer.Optimizer):
"""python reference implemenation of sgd"""
def __init__(self, learning_rate=0.01, momentum=0.0, multi_precision=False, **kwargs):
super(PySGD, self).__init__(learning_rate=learning_rate, **kwargs)
self.momentum = momentum
self.multi_precision = multi_precision
def create_state(self, index, weight):
"""Create additional optimizer state: momentum
Parameters
----------
weight : NDArray
The weight data
"""
momentum = None
weight_master_copy = None
do_multi_precision = self.multi_precision and weight.dtype == np.float16
if do_multi_precision:
if self.momentum != 0.0:
momentum = mx.nd.zeros(weight.shape, weight.context, dtype=np.float32)
                weight_master_copy = mx.nd.array(weight, ctx=weight.context, dtype=np.float32)
return (momentum, weight_master_copy)
else:
if self.momentum != 0.0:
momentum = mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)
return momentum
def update(self, index, weight, grad, state):
"""Update the parameters.
Parameters
----------
index : int
            A unique integer key used to index the parameters
weight : NDArray
weight ndarray
grad : NDArray
grad ndarray
state : NDArray or other objects returned by init_state
The auxiliary state used in optimization.
"""
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
use_multi_precision = isinstance(state, list) or isinstance(state, tuple)
if not use_multi_precision:
if self.momentum == 0.0:
if self.clip_gradient is not None:
weight[:] = ((1 - lr*wd)*weight -
lr*mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
else:
weight[:] = (1 - lr*wd)*weight - lr*self.rescale_grad*grad
else:
mom = state
if self.clip_gradient is not None:
mom[:] = (self.momentum*mom - lr*wd*weight -
lr*mx.nd.clip(grad*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
weight += mom
else:
mom[:] = self.momentum*mom - lr*wd*weight - lr*self.rescale_grad*grad
weight += mom
else:
            grad32 = mx.nd.array(grad, ctx=grad.context, dtype=np.float32)
mom = state[0]
weight32 = state[1]
if self.momentum == 0.0:
if self.clip_gradient is not None:
weight32[:] = ((1 - lr*wd)*weight32 -
lr*mx.nd.clip(grad32*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
else:
weight32[:] = (1 - lr*wd)*weight32 - lr*self.rescale_grad*grad32
else:
if self.clip_gradient is not None:
mom[:] = (self.momentum*mom - lr*wd*weight32 -
lr*mx.nd.clip(grad32*self.rescale_grad, -self.clip_gradient, self.clip_gradient))
weight32 += mom
else:
mom[:] = self.momentum*mom - lr*wd*weight32 - lr*self.rescale_grad*grad32
weight32 += mom
tmp = weight32.astype(weight.dtype)
tmp.copyto(weight)
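# The reference update implemented above (ignoring gradient clipping) is plain
# SGD with momentum and weight decay:
#   mom    = momentum * mom - lr * (wd * weight + rescale_grad * grad)
#   weight = weight + mom
# and, when momentum == 0:
#   weight = (1 - lr * wd) * weight - lr * rescale_grad * grad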
def test_sgd():
mx.random.seed(0)
opt1 = PySGD
opt2 = mx.optimizer.SGD
shape = (3, 4, 5)
mom_options = [{}, {'momentum': 0.9}]
cg_options = [{}, {'clip_gradient': 0.4}, {'clip_gradient': 0.5}]
rg_options = [{}, {'rescale_grad': 0.14}, {'rescale_grad': 0.8}]
wd_options = [{}, {'wd': 0.03}, {'wd': 0.05}, {'wd': 0.07}]
mp_options = [{}, {'multi_precision': False}, {'multi_precision': True}]
for dtype in [np.float16, np.float32, np.float64]:
for mom_option in mom_options:
for cg_option in cg_options:
for rg_option in rg_options:
for wd_option in wd_options:
for mp_option in mp_options:
kwarg = {}
kwarg.update(mom_option)
kwarg.update(cg_option)
kwarg.update(rg_option)
kwarg.update(wd_option)
kwarg.update(mp_option)
if (dtype == np.float16 and
('multi_precision' not in kwarg or
not kwarg['multi_precision'])):
continue
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, dtype)
# ADAM
class PyAdam(mx.optimizer.Optimizer):
"""python reference implemenation of adam"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
decay_factor=(1 - 1e-8), **kwargs):
super(PyAdam, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.decay_factor = decay_factor
def create_state(self, index, weight):
"""Create additional optimizer state: mean, variance
Parameters
----------
weight : NDArray
The weight data
"""
return (mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype), # mean
mx.nd.zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance
def update(self, index, weight, grad, state):
"""Update the parameters.
Parameters
----------
index : int
            A unique integer key used to index the parameters
weight : NDArray
weight ndarray
grad : NDArray
grad ndarray
state : NDArray or other objects returned by init_state
The auxiliary state used in optimization.
"""
lr = self._get_lr(index)
self._update_count(index)
t = self._index_update_count[index]
mean, variance = state
wd = self._get_wd(index)
grad = grad * self.rescale_grad + wd * weight
if self.clip_gradient is not None:
mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient, out=grad)
mean *= self.beta1
mean += grad * (1. - self.beta1)
variance *= self.beta2
variance += (1 - self.beta2) * mx.nd.square(grad, out=grad)
coef1 = 1. - self.beta1**t
coef2 = 1. - self.beta2**t
lr *= math.sqrt(coef2)/coef1
weight -= lr*mean/(mx.nd.sqrt(variance) + self.epsilon)
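# The lr rescaling above is Adam's bias correction: with coef1 = 1 - beta1**t
# and coef2 = 1 - beta2**t, the effective step is
#   lr_t   = lr * sqrt(coef2) / coef1
#   weight = weight - lr_t * mean / (sqrt(variance) + epsilon)
# matching Kingma & Ba (2014) up to where epsilon enters the denominator.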
def test_adam():
mx.random.seed(0)
opt1 = PyAdam
opt2 = mx.optimizer.Adam
shape = (3, 4, 5)
kwargs = [{},
{'clip_gradient': 0.5},
{'clip_gradient': 0.4, 'rescale_grad': 0.14},
{'rescale_grad': 0.8},
{'clip_gradient': 0.5, 'wd': 0.07},
{'clip_gradient': 0.4, 'rescale_grad': 0.14, 'wd': 0.03},
{'rescale_grad': 0.8, 'wd': 0.05}]
for kwarg in kwargs:
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, np.float32)
# RMSProp
class PyRMSProp(mx.optimizer.Optimizer):
"""RMSProp optimizer of Tieleman & Hinton, 2012,
For centered=False, the code follows the version in
http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf by
Tieleman & Hinton, 2012
For centered=True, the code follows the version in
http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013.
Parameters
----------
learning_rate : float, optional
Step size.
Default value is set to 0.001.
gamma1: float, optional
decay factor of moving average for gradient, gradient^2.
Default value is set to 0.9.
gamma2: float, optional
"momentum" factor.
        Default value is set to 0.9.
Only used if centered=True
epsilon : float, optional
Default value is set to 1e-8.
centered : boolean, optional
        Use Graves' or Tieleman & Hinton's version of RMSProp
wd : float, optional
        L2 regularization coefficient added to all the weights
rescale_grad : float, optional
rescaling factor of gradient.
clip_gradient : float, optional
clip gradient in range [-clip_gradient, clip_gradient]
clip_weights : float, optional
clip weights in range [-clip_weights, clip_weights]
"""
def __init__(self, learning_rate=0.001, gamma1=0.9, gamma2=0.9,
epsilon=1e-8, centered=False, clip_weights=None, **kwargs):
super(PyRMSProp, self).__init__(learning_rate=learning_rate, **kwargs)
self.centered = centered
self.gamma1 = gamma1
self.gamma2 = gamma2
self.epsilon = epsilon
self.clip_weights = clip_weights
def create_state(self, index, weight):
"""Create additional optimizer state.
For centered=False: n
For centered=True: n, g, delta
Parameters
----------
weight : NDArray
The weight data
"""
if self.centered:
return (mx.nd.zeros(weight.shape, weight.context), # n
mx.nd.zeros(weight.shape, weight.context), # g
mx.nd.zeros(weight.shape, weight.context)) # delta
else:
return (mx.nd.zeros(weight.shape, weight.context), ) # n
def update(self, index, weight, grad, state):
"""Update the parameters.
Parameters
----------
index : int
            A unique integer key used to index the parameters
weight : NDArray
weight ndarray
grad : NDArray
grad ndarray
state : NDArray or other objects returned by init_state
The auxiliary state used in optimization.
"""
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
grad = grad * self.rescale_grad + wd * weight
if not self.centered:
(n, ) = state
if self.clip_gradient is not None:
grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)
n[:] = (1 - self.gamma1) * (grad * grad) + self.gamma1 * n
weight[:] -= lr * grad/(mx.nd.sqrt(n + self.epsilon))
else:
n, g, delta = state
if self.clip_gradient is not None:
grad = mx.nd.clip(grad, -self.clip_gradient, self.clip_gradient)
n[:] = (1 - self.gamma1) * (grad * grad) + self.gamma1 * n
g[:] = (1 - self.gamma1) * grad + self.gamma1 * g
delta[:] = (self.gamma2) * delta - lr * grad/(mx.nd.sqrt(n - g*g + self.epsilon))
weight[:] += delta
if self.clip_weights:
mx.ndarray.clip(weight, -self.clip_weights, self.clip_weights, out=weight)
def test_rms():
mx.random.seed(0)
opt1 = PyRMSProp
opt2 = mx.optimizer.RMSProp
shape = (3, 4, 5)
kwargs = [{},
{'clip_gradient': 0.5},
{'clip_gradient': 0.4, 'rescale_grad': 0.14},
{'rescale_grad': 0.8},
{'clip_gradient': 0.5, 'wd': 0.07},
{'clip_gradient': 0.4, 'rescale_grad': 0.14, 'wd': 0.03},
{'rescale_grad': 0.8, 'wd': 0.05},
{'centered': True},
{'clip_gradient': 0.5, 'centered': True},
{'clip_gradient': 0.4, 'rescale_grad': 0.14, 'centered': True},
{'rescale_grad': 0.8, 'centered': True},
{'clip_gradient': 0.5, 'wd': 0.07, 'centered': True},
{'clip_gradient': 0.4, 'rescale_grad': 0.14, 'wd': 0.03, 'centered': True},
{'rescale_grad': 0.8, 'wd': 0.05, 'centered': True},
{'clip_gradient': 0.5, 'clip_weights': 0.01},
{'clip_gradient': 0.4, 'rescale_grad': 0.14, 'clip_weights': 0.01},
{'rescale_grad': 0.8, 'clip_weights': 0.01},
{'clip_gradient': 0.5, 'wd': 0.07, 'clip_weights': 0.01},
{'clip_gradient': 0.4, 'rescale_grad': 0.14, 'wd': 0.03, 'clip_weights': 0.01},
{'rescale_grad': 0.8, 'wd': 0.05, 'clip_weights': 0.01},
{'centered': True, 'clip_weights': 0.01},
{'clip_gradient': 0.5, 'centered': True, 'clip_weights': 0.01},
{'clip_gradient': 0.4, 'rescale_grad': 0.14, 'centered': True, 'clip_weights': 0.01},
{'rescale_grad': 0.8, 'centered': True, 'clip_weights': 0.01},
{'clip_gradient': 0.5, 'wd': 0.07, 'centered': True, 'clip_weights': 0.01},
{'clip_gradient': 0.4, 'rescale_grad': 0.14, 'wd': 0.03, 'centered': True, 'clip_weights': 0.01},
{'rescale_grad': 0.8, 'wd': 0.05, 'centered': True, 'clip_weights': 0.01}]
for kwarg in kwargs:
compare_optimizer(opt1(**kwarg), opt2(**kwarg), shape, np.float32)
if __name__ == '__main__':
test_adam()
test_rms()
test_sgd()
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2011 - 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import itertools
import time
import eventlet
import greenlet
from oslo.config import cfg
import six
from keystone.openstack.common import excutils
from keystone.openstack.common.gettextutils import _ # noqa
from keystone.openstack.common import importutils
from keystone.openstack.common import jsonutils
from keystone.openstack.common import log as logging
from keystone.openstack.common.rpc import amqp as rpc_amqp
from keystone.openstack.common.rpc import common as rpc_common
qpid_codec = importutils.try_import("qpid.codec010")
qpid_messaging = importutils.try_import("qpid.messaging")
qpid_exceptions = importutils.try_import("qpid.messaging.exceptions")
LOG = logging.getLogger(__name__)
qpid_opts = [
cfg.StrOpt('qpid_hostname',
default='localhost',
help='Qpid broker hostname'),
cfg.IntOpt('qpid_port',
default=5672,
help='Qpid broker port'),
cfg.ListOpt('qpid_hosts',
default=['$qpid_hostname:$qpid_port'],
help='Qpid HA cluster host:port pairs'),
cfg.StrOpt('qpid_username',
default='',
help='Username for qpid connection'),
cfg.StrOpt('qpid_password',
default='',
help='Password for qpid connection',
secret=True),
cfg.StrOpt('qpid_sasl_mechanisms',
default='',
help='Space separated list of SASL mechanisms to use for auth'),
cfg.IntOpt('qpid_heartbeat',
default=60,
help='Seconds between connection keepalive heartbeats'),
cfg.StrOpt('qpid_protocol',
default='tcp',
help="Transport to use, either 'tcp' or 'ssl'"),
cfg.BoolOpt('qpid_tcp_nodelay',
default=True,
help='Disable Nagle algorithm'),
# NOTE(russellb) If any additional versions are added (beyond 1 and 2),
# this file could probably use some additional refactoring so that the
# differences between each version are split into different classes.
cfg.IntOpt('qpid_topology_version',
default=1,
help="The qpid topology version to use. Version 1 is what "
"was originally used by impl_qpid. Version 2 includes "
"some backwards-incompatible changes that allow broker "
"federation to work. Users should update to version 2 "
"when they are able to take everything down, as it "
"requires a clean break."),
]
cfg.CONF.register_opts(qpid_opts)
JSON_CONTENT_TYPE = 'application/json; charset=utf8'
def raise_invalid_topology_version(conf=None):
    # The call sites below invoke this helper without arguments, so fall back
    # to the globally registered configuration when no conf is passed in.
    if conf is None:
        conf = cfg.CONF
    msg = (_("Invalid value for qpid_topology_version: %d") %
           conf.qpid_topology_version)
    LOG.error(msg)
    raise Exception(msg)
class ConsumerBase(object):
"""Consumer base class."""
def __init__(self, conf, session, callback, node_name, node_opts,
link_name, link_opts):
"""Declare a queue on an amqp session.
'session' is the amqp session to use
'callback' is the callback to call when messages are received
'node_name' is the first part of the Qpid address string, before ';'
'node_opts' will be applied to the "x-declare" section of "node"
in the address string.
'link_name' goes into the "name" field of the "link" in the address
string
'link_opts' will be applied to the "x-declare" section of "link"
in the address string.
"""
self.callback = callback
self.receiver = None
self.session = None
if conf.qpid_topology_version == 1:
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": True,
"auto-delete": True,
},
},
"link": {
"durable": True,
"x-declare": {
"durable": False,
"auto-delete": True,
"exclusive": False,
},
},
}
addr_opts["node"]["x-declare"].update(node_opts)
elif conf.qpid_topology_version == 2:
addr_opts = {
"link": {
"x-declare": {
"auto-delete": True,
"exclusive": False,
},
},
}
else:
raise_invalid_topology_version()
addr_opts["link"]["x-declare"].update(link_opts)
if link_name:
addr_opts["link"]["name"] = link_name
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
self.connect(session)
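    # For qpid_topology_version == 1 the resulting self.address ends up looking
    # roughly like (a sketch; exact values depend on node_opts/link_opts):
    #   myexchange/mytopic ; {"create": "always", "node": {"type": "topic", ...},
    #                         "link": {"durable": true, "name": "mytopic", ...}}
    # i.e. the node name, a semicolon, then the JSON-encoded address options.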
def connect(self, session):
"""Declare the receiver on connect."""
self._declare_receiver(session)
def reconnect(self, session):
"""Re-declare the receiver after a qpid reconnect."""
self._declare_receiver(session)
def _declare_receiver(self, session):
self.session = session
self.receiver = session.receiver(self.address)
self.receiver.capacity = 1
def _unpack_json_msg(self, msg):
"""Load the JSON data in msg if msg.content_type indicates that it
is necessary. Put the loaded data back into msg.content and
update msg.content_type appropriately.
A Qpid Message containing a dict will have a content_type of
'amqp/map', whereas one containing a string that needs to be converted
back from JSON will have a content_type of JSON_CONTENT_TYPE.
:param msg: a Qpid Message object
:returns: None
"""
if msg.content_type == JSON_CONTENT_TYPE:
msg.content = jsonutils.loads(msg.content)
msg.content_type = 'amqp/map'
def consume(self):
"""Fetch the message and pass it to the callback object."""
message = self.receiver.fetch()
try:
self._unpack_json_msg(message)
msg = rpc_common.deserialize_msg(message.content)
self.callback(msg)
except Exception:
LOG.exception(_("Failed to process message... skipping it."))
finally:
# TODO(sandy): Need support for optional ack_on_error.
self.session.acknowledge(message)
def get_receiver(self):
return self.receiver
def get_node_name(self):
return self.address.split(';')[0]
class DirectConsumer(ConsumerBase):
"""Queue/consumer class for 'direct'."""
def __init__(self, conf, session, msg_id, callback):
"""Init a 'direct' queue.
'session' is the amqp session to use
'msg_id' is the msg_id to listen on
'callback' is the callback to call when messages are received
"""
link_opts = {
"auto-delete": conf.amqp_auto_delete,
"exclusive": True,
"durable": conf.amqp_durable_queues,
}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (msg_id, msg_id)
node_opts = {"type": "direct"}
link_name = msg_id
elif conf.qpid_topology_version == 2:
node_name = "amq.direct/%s" % msg_id
node_opts = {}
link_name = None
else:
raise_invalid_topology_version()
super(DirectConsumer, self).__init__(conf, session, callback,
node_name, node_opts, link_name,
link_opts)
class TopicConsumer(ConsumerBase):
"""Consumer class for 'topic'."""
def __init__(self, conf, session, topic, callback, name=None,
exchange_name=None):
"""Init a 'topic' queue.
:param session: the amqp session to use
:param topic: is the topic to listen on
:paramtype topic: str
:param callback: the callback to call when messages are received
:param name: optional queue name, defaults to topic
"""
exchange_name = exchange_name or rpc_amqp.get_control_exchange(conf)
link_opts = {
"auto-delete": conf.amqp_auto_delete,
"durable": conf.amqp_durable_queues,
}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
raise_invalid_topology_version()
super(TopicConsumer, self).__init__(conf, session, callback, node_name,
{}, name or topic, link_opts)
class FanoutConsumer(ConsumerBase):
"""Consumer class for 'fanout'."""
def __init__(self, conf, session, topic, callback):
"""Init a 'fanout' queue.
'session' is the amqp session to use
'topic' is the topic to listen on
'callback' is the callback to call when messages are received
"""
self.conf = conf
link_opts = {"exclusive": True}
if conf.qpid_topology_version == 1:
node_name = "%s_fanout" % topic
node_opts = {"durable": False, "type": "fanout"}
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/fanout/%s" % topic
node_opts = {}
else:
raise_invalid_topology_version()
super(FanoutConsumer, self).__init__(conf, session, callback,
node_name, node_opts, None,
link_opts)
class Publisher(object):
"""Base Publisher class."""
def __init__(self, conf, session, node_name, node_opts=None):
"""Init the Publisher class with the exchange_name, routing_key,
and other options
"""
self.sender = None
self.session = session
if conf.qpid_topology_version == 1:
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": False,
# auto-delete isn't implemented for exchanges in qpid,
# but put in here anyway
"auto-delete": True,
},
},
}
if node_opts:
addr_opts["node"]["x-declare"].update(node_opts)
self.address = "%s ; %s" % (node_name, jsonutils.dumps(addr_opts))
elif conf.qpid_topology_version == 2:
self.address = node_name
else:
raise_invalid_topology_version()
self.reconnect(session)
def reconnect(self, session):
"""Re-establish the Sender after a reconnection."""
self.sender = session.sender(self.address)
def _pack_json_msg(self, msg):
"""Qpid cannot serialize dicts containing strings longer than 65535
characters. This function dumps the message content to a JSON
string, which Qpid is able to handle.
:param msg: May be either a Qpid Message object or a bare dict.
:returns: A Qpid Message with its content field JSON encoded.
"""
try:
msg.content = jsonutils.dumps(msg.content)
except AttributeError:
# Need to have a Qpid message so we can set the content_type.
msg = qpid_messaging.Message(jsonutils.dumps(msg))
msg.content_type = JSON_CONTENT_TYPE
return msg
def send(self, msg):
"""Send a message."""
try:
# Check if Qpid can encode the message
check_msg = msg
if not hasattr(check_msg, 'content_type'):
check_msg = qpid_messaging.Message(msg)
content_type = check_msg.content_type
enc, dec = qpid_messaging.message.get_codec(content_type)
enc(check_msg.content)
except qpid_codec.CodecException:
# This means the message couldn't be serialized as a dict.
msg = self._pack_json_msg(msg)
self.sender.send(msg)
class DirectPublisher(Publisher):
"""Publisher class for 'direct'."""
def __init__(self, conf, session, msg_id):
"""Init a 'direct' publisher."""
if conf.qpid_topology_version == 1:
node_name = msg_id
node_opts = {"type": "direct"}
elif conf.qpid_topology_version == 2:
node_name = "amq.direct/%s" % msg_id
node_opts = {}
else:
raise_invalid_topology_version()
super(DirectPublisher, self).__init__(conf, session, node_name,
node_opts)
class TopicPublisher(Publisher):
"""Publisher class for 'topic'."""
def __init__(self, conf, session, topic):
"""Init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
raise_invalid_topology_version()
super(TopicPublisher, self).__init__(conf, session, node_name)
class FanoutPublisher(Publisher):
"""Publisher class for 'fanout'."""
def __init__(self, conf, session, topic):
"""Init a 'fanout' publisher.
"""
if conf.qpid_topology_version == 1:
node_name = "%s_fanout" % topic
node_opts = {"type": "fanout"}
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/fanout/%s" % topic
node_opts = {}
else:
raise_invalid_topology_version()
super(FanoutPublisher, self).__init__(conf, session, node_name,
node_opts)
class NotifyPublisher(Publisher):
"""Publisher class for notifications."""
def __init__(self, conf, session, topic):
"""Init a 'topic' publisher.
"""
exchange_name = rpc_amqp.get_control_exchange(conf)
node_opts = {"durable": True}
if conf.qpid_topology_version == 1:
node_name = "%s/%s" % (exchange_name, topic)
elif conf.qpid_topology_version == 2:
node_name = "amq.topic/topic/%s/%s" % (exchange_name, topic)
else:
raise_invalid_topology_version()
super(NotifyPublisher, self).__init__(conf, session, node_name,
node_opts)
class Connection(object):
"""Connection object."""
pool = None
def __init__(self, conf, server_params=None):
if not qpid_messaging:
raise ImportError("Failed to import qpid.messaging")
self.session = None
self.consumers = {}
self.consumer_thread = None
self.proxy_callbacks = []
self.conf = conf
if server_params and 'hostname' in server_params:
# NOTE(russellb) This enables support for cast_to_server.
server_params['qpid_hosts'] = [
'%s:%d' % (server_params['hostname'],
server_params.get('port', 5672))
]
params = {
'qpid_hosts': self.conf.qpid_hosts,
'username': self.conf.qpid_username,
'password': self.conf.qpid_password,
}
params.update(server_params or {})
self.brokers = params['qpid_hosts']
self.username = params['username']
self.password = params['password']
self.connection_create(self.brokers[0])
self.reconnect()
def connection_create(self, broker):
# Create the connection - this does not open the connection
self.connection = qpid_messaging.Connection(broker)
# Check if flags are set and if so set them for the connection
# before we call open
self.connection.username = self.username
self.connection.password = self.password
self.connection.sasl_mechanisms = self.conf.qpid_sasl_mechanisms
# Reconnection is done by self.reconnect()
self.connection.reconnect = False
self.connection.heartbeat = self.conf.qpid_heartbeat
self.connection.transport = self.conf.qpid_protocol
self.connection.tcp_nodelay = self.conf.qpid_tcp_nodelay
def _register_consumer(self, consumer):
self.consumers[str(consumer.get_receiver())] = consumer
def _lookup_consumer(self, receiver):
return self.consumers[str(receiver)]
def reconnect(self):
"""Handles reconnecting and re-establishing sessions and queues."""
attempt = 0
delay = 1
while True:
# Close the session if necessary
if self.connection.opened():
try:
self.connection.close()
except qpid_exceptions.ConnectionError:
pass
broker = self.brokers[attempt % len(self.brokers)]
attempt += 1
try:
self.connection_create(broker)
self.connection.open()
except qpid_exceptions.ConnectionError as e:
msg_dict = dict(e=e, delay=delay)
msg = _("Unable to connect to AMQP server: %(e)s. "
"Sleeping %(delay)s seconds") % msg_dict
LOG.error(msg)
time.sleep(delay)
delay = min(2 * delay, 60)
else:
LOG.info(_('Connected to AMQP server on %s'), broker)
break
self.session = self.connection.session()
if self.consumers:
consumers = self.consumers
self.consumers = {}
for consumer in six.itervalues(consumers):
consumer.reconnect(self.session)
self._register_consumer(consumer)
LOG.debug(_("Re-established AMQP queues"))
def ensure(self, error_callback, method, *args, **kwargs):
while True:
try:
return method(*args, **kwargs)
except (qpid_exceptions.Empty,
qpid_exceptions.ConnectionError) as e:
if error_callback:
error_callback(e)
self.reconnect()
def close(self):
"""Close/release this connection."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
try:
self.connection.close()
except Exception:
# NOTE(dripton) Logging exceptions that happen during cleanup just
# causes confusion; there's really nothing useful we can do with
# them.
pass
self.connection = None
def reset(self):
"""Reset a connection so it can be used again."""
self.cancel_consumer_thread()
self.wait_on_proxy_callbacks()
self.session.close()
self.session = self.connection.session()
self.consumers = {}
def declare_consumer(self, consumer_cls, topic, callback):
"""Create a Consumer using the class that was passed in and
add it to our list of consumers
"""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.error(_("Failed to declare consumer for topic '%(topic)s': "
"%(err_str)s") % log_info)
def _declare_consumer():
consumer = consumer_cls(self.conf, self.session, topic, callback)
self._register_consumer(consumer)
return consumer
return self.ensure(_connect_error, _declare_consumer)
def iterconsume(self, limit=None, timeout=None):
"""Return an iterator that will consume from all queues/consumers."""
def _error_callback(exc):
if isinstance(exc, qpid_exceptions.Empty):
LOG.debug(_('Timed out waiting for RPC response: %s') %
str(exc))
raise rpc_common.Timeout()
else:
LOG.exception(_('Failed to consume message from queue: %s') %
str(exc))
def _consume():
nxt_receiver = self.session.next_receiver(timeout=timeout)
try:
self._lookup_consumer(nxt_receiver).consume()
except Exception:
LOG.exception(_("Error processing message. Skipping it."))
for iteration in itertools.count(0):
if limit and iteration >= limit:
raise StopIteration
yield self.ensure(_error_callback, _consume)
def cancel_consumer_thread(self):
"""Cancel a consumer thread."""
if self.consumer_thread is not None:
self.consumer_thread.kill()
try:
self.consumer_thread.wait()
except greenlet.GreenletExit:
pass
self.consumer_thread = None
def wait_on_proxy_callbacks(self):
"""Wait for all proxy callback threads to exit."""
for proxy_cb in self.proxy_callbacks:
proxy_cb.wait()
def publisher_send(self, cls, topic, msg):
"""Send to a publisher based on the publisher class."""
def _connect_error(exc):
log_info = {'topic': topic, 'err_str': str(exc)}
LOG.exception(_("Failed to publish message to topic "
"'%(topic)s': %(err_str)s") % log_info)
def _publisher_send():
publisher = cls(self.conf, self.session, topic)
publisher.send(msg)
return self.ensure(_connect_error, _publisher_send)
def declare_direct_consumer(self, topic, callback):
"""Create a 'direct' queue.
In nova's use, this is generally a msg_id queue used for
responses for call/multicall
"""
self.declare_consumer(DirectConsumer, topic, callback)
def declare_topic_consumer(self, topic, callback=None, queue_name=None,
exchange_name=None):
"""Create a 'topic' consumer."""
self.declare_consumer(functools.partial(TopicConsumer,
name=queue_name,
exchange_name=exchange_name,
),
topic, callback)
def declare_fanout_consumer(self, topic, callback):
"""Create a 'fanout' consumer."""
self.declare_consumer(FanoutConsumer, topic, callback)
def direct_send(self, msg_id, msg):
"""Send a 'direct' message."""
self.publisher_send(DirectPublisher, msg_id, msg)
def topic_send(self, topic, msg, timeout=None):
"""Send a 'topic' message."""
#
# We want to create a message with attributes, e.g. a TTL. We
# don't really need to keep 'msg' in its JSON format any longer
# so let's create an actual qpid message here and get some
# value-add on the go.
#
# WARNING: Request timeout happens to be in the same units as
# qpid's TTL (seconds). If this changes in the future, then this
# will need to be altered accordingly.
#
qpid_message = qpid_messaging.Message(content=msg, ttl=timeout)
self.publisher_send(TopicPublisher, topic, qpid_message)
def fanout_send(self, topic, msg):
"""Send a 'fanout' message."""
self.publisher_send(FanoutPublisher, topic, msg)
def notify_send(self, topic, msg, **kwargs):
"""Send a notify message on a topic."""
self.publisher_send(NotifyPublisher, topic, msg)
def consume(self, limit=None):
"""Consume from all queues/consumers."""
it = self.iterconsume(limit=limit)
while True:
try:
six.next(it)
except StopIteration:
return
def consume_in_thread(self):
"""Consumer from all queues/consumers in a greenthread."""
@excutils.forever_retry_uncaught_exceptions
def _consumer_thread():
try:
self.consume()
except greenlet.GreenletExit:
return
if self.consumer_thread is None:
self.consumer_thread = eventlet.spawn(_consumer_thread)
return self.consumer_thread
def create_consumer(self, topic, proxy, fanout=False):
"""Create a consumer that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
if fanout:
consumer = FanoutConsumer(self.conf, self.session, topic, proxy_cb)
else:
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb)
self._register_consumer(consumer)
return consumer
def create_worker(self, topic, proxy, pool_name):
"""Create a worker that calls a method in a proxy object."""
proxy_cb = rpc_amqp.ProxyCallback(
self.conf, proxy,
rpc_amqp.get_connection_pool(self.conf, Connection))
self.proxy_callbacks.append(proxy_cb)
consumer = TopicConsumer(self.conf, self.session, topic, proxy_cb,
name=pool_name)
self._register_consumer(consumer)
return consumer
def join_consumer_pool(self, callback, pool_name, topic,
exchange_name=None, ack_on_error=True):
"""Register as a member of a group of consumers for a given topic from
the specified exchange.
Exactly one member of a given pool will receive each message.
A message will be delivered to multiple pools, if more than
one is created.
"""
callback_wrapper = rpc_amqp.CallbackWrapper(
conf=self.conf,
callback=callback,
connection_pool=rpc_amqp.get_connection_pool(self.conf,
Connection),
wait_for_consumers=not ack_on_error
)
self.proxy_callbacks.append(callback_wrapper)
consumer = TopicConsumer(conf=self.conf,
session=self.session,
topic=topic,
callback=callback_wrapper,
name=pool_name,
exchange_name=exchange_name)
self._register_consumer(consumer)
return consumer
def create_connection(conf, new=True):
"""Create a connection."""
return rpc_amqp.create_connection(
conf, new,
rpc_amqp.get_connection_pool(conf, Connection))
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
return rpc_amqp.multicall(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
return rpc_amqp.call(
conf, context, topic, msg, timeout,
rpc_amqp.get_connection_pool(conf, Connection))
def cast(conf, context, topic, msg):
"""Sends a message on a topic without waiting for a response."""
return rpc_amqp.cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast(conf, context, topic, msg):
"""Sends a message on a fanout exchange without waiting for a response."""
return rpc_amqp.fanout_cast(
conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a topic to a specific server."""
return rpc_amqp.cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def fanout_cast_to_server(conf, context, server_params, topic, msg):
"""Sends a message on a fanout exchange to a specific server."""
return rpc_amqp.fanout_cast_to_server(
conf, context, server_params, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection))
def notify(conf, context, topic, msg, envelope):
"""Sends a notification event on a topic."""
return rpc_amqp.notify(conf, context, topic, msg,
rpc_amqp.get_connection_pool(conf, Connection),
envelope)
def cleanup():
return rpc_amqp.cleanup(Connection.pool)
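# Usage sketch (hypothetical topic, callback and context objects, shown only
# for illustration); the module-level helpers mirror the rest of
# openstack.common.rpc:
#
#   conn = create_connection(cfg.CONF)
#   conn.declare_topic_consumer('my_topic', callback=my_callback)
#   conn.consume_in_thread()
#   cast(cfg.CONF, context, 'my_topic', {'method': 'do_work', 'args': {}})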
|
|
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections, wait=1):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(wait)
polcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
    with open(os.path.join(datadir, "bitcoin.conf"), 'w') as f:
        f.write("regtest=1\n")
        f.write("rpcuser=rt\n")
        f.write("rpcpassword=rt\n")
        f.write("port="+str(p2p_port(n))+"\n")
        f.write("rpcport="+str(rpc_port(n))+"\n")
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
polcoind and polcoin-cli must be in search path.
"""
if (not os.path.isdir(os.path.join("cache","node0"))
or not os.path.isdir(os.path.join("cache","node1"))
or not os.path.isdir(os.path.join("cache","node2"))
or not os.path.isdir(os.path.join("cache","node3"))):
#find and delete old cache directories if any exist
for i in range(4):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
devnull = open(os.devnull, "w")
# Create cache directories, run polcoinds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("polcoind", "polcoind"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
polcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: polcoind started, calling polcoin-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("BITCOINCLI", "polcoin-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: polcoin-cli -rpcwait getblockcount completed"
devnull.close()
rpcs = []
for i in range(4):
        url = "http://rt:rt@127.0.0.1:%d"%(rpc_port(i),)
        try:
            rpcs.append(AuthServiceProxy(url))
        except Exception:
            sys.stderr.write("Error connecting to "+url+"\n")
            sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_polcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
    match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
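# Example (derived from the parsing above): _rpchost_to_args("[::1]:19001")
# returns ['-rpcconnect=::1', '-rpcport=19001'], while a bare host such as
# "127.0.0.1" yields just ['-rpcconnect=127.0.0.1'].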
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a polcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("polcoind", "polcoind")
args = [ binary, "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
polcoind_processes[i] = subprocess.Popen(args)
devnull = open(os.devnull, "w")
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: polcoind started, calling polcoin-cli -rpcwait getblockcount"
subprocess.check_call([ os.getenv("BITCOINCLI", "polcoin-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: calling polcoin-cli -rpcwait getblockcount returned"
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
if timewait is not None:
proxy = AuthServiceProxy(url, timeout=timewait)
else:
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple polcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
if binary is None: binary = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
polcoind_processes[i].wait()
del polcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_polcoinds():
# Wait for all polcoinds to cleanly exit
for polcoind in polcoind_processes.values():
polcoind.wait()
polcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
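# Illustrative sketch (assumes the data directories under test_dir were already
# prepared, e.g. by the cache-copying code above): the usual pattern for wiring
# up a small regtest network with the helpers in this file.
def _example_two_node_network(test_dir):
    nodes = start_nodes(2, test_dir)
    connect_nodes_bi(nodes, 0, 1)
    return nodes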
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
    assert confirmations_required >= 0
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
    Create & broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
|
|
from __future__ import absolute_import, unicode_literals
import json
import mock
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from wagtail.contrib.wagtailapi import signal_handlers
from wagtail.wagtailimages import get_image_model
class TestImageListing(TestCase):
fixtures = ['demosite.json']
def get_response(self, **params):
return self.client.get(reverse('wagtailapi_v1:images:listing'), params)
def get_image_id_list(self, content):
return [page['id'] for page in content['images']]
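    # Illustrative sketch (ids and titles are invented): the listing payload
    # shape that the assertions below rely on.
    #
    #   {
    #       "meta": {"total_count": 12},
    #       "images": [
    #           {"id": 5,
    #            "meta": {"type": "wagtailimages.Image",
    #                     "detail_url": "http://localhost/api/v1/images/5/"},
    #            "title": "James Joyce"},
    #           ...
    #       ]
    #   }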
# BASIC TESTS
def test_basic(self):
response = self.get_response()
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Check that the meta section is there
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
# Check that the total count is there and correct
self.assertIn('total_count', content['meta'])
self.assertIsInstance(content['meta']['total_count'], int)
self.assertEqual(content['meta']['total_count'], get_image_model().objects.count())
# Check that the images section is there
self.assertIn('images', content)
self.assertIsInstance(content['images'], list)
# Check that each image has a meta section with type and detail_url attributes
for image in content['images']:
self.assertIn('meta', image)
self.assertIsInstance(image['meta'], dict)
self.assertEqual(set(image['meta'].keys()), {'type', 'detail_url'})
# Type should always be wagtailimages.Image
self.assertEqual(image['meta']['type'], 'wagtailimages.Image')
# Check detail url
self.assertEqual(image['meta']['detail_url'], 'http://localhost/api/v1/images/%d/' % image['id'])
# EXTRA FIELDS
def test_extra_fields_default(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
for image in content['images']:
self.assertEqual(set(image.keys()), {'id', 'meta', 'title'})
def test_extra_fields(self):
response = self.get_response(fields='title,width,height')
content = json.loads(response.content.decode('UTF-8'))
for image in content['images']:
self.assertEqual(set(image.keys()), {'id', 'meta', 'title', 'width', 'height'})
def test_extra_fields_tags(self):
response = self.get_response(fields='tags')
content = json.loads(response.content.decode('UTF-8'))
for image in content['images']:
self.assertEqual(set(image.keys()), {'id', 'meta', 'tags'})
self.assertIsInstance(image['tags'], list)
def test_extra_fields_which_are_not_in_api_fields_gives_error(self):
response = self.get_response(fields='uploaded_by_user')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: uploaded_by_user"})
def test_extra_fields_unknown_field_gives_error(self):
response = self.get_response(fields='123,title,abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: 123, abc"})
# FILTERING
def test_filtering_exact_filter(self):
response = self.get_response(title='James Joyce')
content = json.loads(response.content.decode('UTF-8'))
image_id_list = self.get_image_id_list(content)
self.assertEqual(image_id_list, [5])
def test_filtering_on_id(self):
response = self.get_response(id=10)
content = json.loads(response.content.decode('UTF-8'))
image_id_list = self.get_image_id_list(content)
self.assertEqual(image_id_list, [10])
def test_filtering_tags(self):
get_image_model().objects.get(id=6).tags.add('test')
response = self.get_response(tags='test')
content = json.loads(response.content.decode('UTF-8'))
image_id_list = self.get_image_id_list(content)
self.assertEqual(image_id_list, [6])
def test_filtering_unknown_field_gives_error(self):
response = self.get_response(not_a_field='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "query parameter is not an operation or a recognised field: not_a_field"})
# ORDERING
def test_ordering_by_title(self):
response = self.get_response(order='title')
content = json.loads(response.content.decode('UTF-8'))
image_id_list = self.get_image_id_list(content)
self.assertEqual(image_id_list, [6, 15, 13, 5, 10, 11, 8, 7, 4, 14, 12, 9])
def test_ordering_by_title_backwards(self):
response = self.get_response(order='-title')
content = json.loads(response.content.decode('UTF-8'))
image_id_list = self.get_image_id_list(content)
self.assertEqual(image_id_list, [9, 12, 14, 4, 7, 8, 11, 10, 5, 13, 15, 6])
def test_ordering_by_random(self):
response_1 = self.get_response(order='random')
content_1 = json.loads(response_1.content.decode('UTF-8'))
image_id_list_1 = self.get_image_id_list(content_1)
response_2 = self.get_response(order='random')
content_2 = json.loads(response_2.content.decode('UTF-8'))
image_id_list_2 = self.get_image_id_list(content_2)
self.assertNotEqual(image_id_list_1, image_id_list_2)
def test_ordering_by_random_backwards_gives_error(self):
response = self.get_response(order='-random')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "cannot order by 'random' (unknown field)"})
def test_ordering_by_random_with_offset_gives_error(self):
response = self.get_response(order='random', offset=10)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "random ordering with offset is not supported"})
def test_ordering_by_unknown_field_gives_error(self):
response = self.get_response(order='not_a_field')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "cannot order by 'not_a_field' (unknown field)"})
# LIMIT
def test_limit_only_two_results_returned(self):
response = self.get_response(limit=2)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(len(content['images']), 2)
def test_limit_total_count(self):
response = self.get_response(limit=2)
content = json.loads(response.content.decode('UTF-8'))
# The total count must not be affected by "limit"
self.assertEqual(content['meta']['total_count'], get_image_model().objects.count())
def test_limit_not_integer_gives_error(self):
response = self.get_response(limit='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "limit must be a positive integer"})
def test_limit_too_high_gives_error(self):
response = self.get_response(limit=1000)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "limit cannot be higher than 20"})
@override_settings(WAGTAILAPI_LIMIT_MAX=10)
def test_limit_maximum_can_be_changed(self):
response = self.get_response(limit=20)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "limit cannot be higher than 10"})
@override_settings(WAGTAILAPI_LIMIT_MAX=2)
def test_limit_default_changes_with_max(self):
# The default limit is 20. If WAGTAILAPI_LIMIT_MAX is less than that,
# the default should change accordingly.
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(len(content['images']), 2)
# OFFSET
def test_offset_10_usually_appears_7th_in_list(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
image_id_list = self.get_image_id_list(content)
self.assertEqual(image_id_list.index(10), 6)
def test_offset_10_moves_after_offset(self):
response = self.get_response(offset=4)
content = json.loads(response.content.decode('UTF-8'))
image_id_list = self.get_image_id_list(content)
self.assertEqual(image_id_list.index(10), 2)
def test_offset_total_count(self):
response = self.get_response(offset=10)
content = json.loads(response.content.decode('UTF-8'))
# The total count must not be affected by "offset"
self.assertEqual(content['meta']['total_count'], get_image_model().objects.count())
def test_offset_not_integer_gives_error(self):
response = self.get_response(offset='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "offset must be a positive integer"})
# SEARCH
def test_search_for_james_joyce(self):
response = self.get_response(search='james')
content = json.loads(response.content.decode('UTF-8'))
image_id_list = self.get_image_id_list(content)
self.assertEqual(set(image_id_list), set([5]))
def test_search_when_ordering_gives_error(self):
response = self.get_response(search='james', order='title')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "ordering with a search query is not supported"})
@override_settings(WAGTAILAPI_SEARCH_ENABLED=False)
def test_search_when_disabled_gives_error(self):
response = self.get_response(search='james')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "search is disabled"})
def test_search_when_filtering_by_tag_gives_error(self):
response = self.get_response(search='james', tags='wagtail')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "filtering by tag with a search query is not supported"})
class TestImageDetail(TestCase):
fixtures = ['demosite.json']
def get_response(self, image_id, **params):
return self.client.get(reverse('wagtailapi_v1:images:detail', args=(image_id, )), params)
def test_basic(self):
response = self.get_response(5)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Check the id field
self.assertIn('id', content)
self.assertEqual(content['id'], 5)
# Check that the meta section is there
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
# Check the meta type
self.assertIn('type', content['meta'])
self.assertEqual(content['meta']['type'], 'wagtailimages.Image')
# Check the meta detail_url
self.assertIn('detail_url', content['meta'])
self.assertEqual(content['meta']['detail_url'], 'http://localhost/api/v1/images/5/')
# Check the title field
self.assertIn('title', content)
self.assertEqual(content['title'], "James Joyce")
# Check the width and height fields
self.assertIn('width', content)
self.assertIn('height', content)
self.assertEqual(content['width'], 500)
self.assertEqual(content['height'], 392)
# Check the tags field
self.assertIn('tags', content)
self.assertEqual(content['tags'], [])
def test_tags(self):
image = get_image_model().objects.get(id=5)
image.tags.add('hello')
image.tags.add('world')
response = self.get_response(5)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('tags', content)
self.assertEqual(content['tags'], ['hello', 'world'])
@override_settings(
WAGTAILFRONTENDCACHE={
'varnish': {
'BACKEND': 'wagtail.contrib.wagtailfrontendcache.backends.HTTPBackend',
'LOCATION': 'http://localhost:8000',
},
},
WAGTAILAPI_BASE_URL='http://api.example.com',
)
@mock.patch('wagtail.contrib.wagtailfrontendcache.backends.HTTPBackend.purge')
class TestImageCacheInvalidation(TestCase):
fixtures = ['demosite.json']
@classmethod
def setUpClass(cls):
super(TestImageCacheInvalidation, cls).setUpClass()
signal_handlers.register_signal_handlers()
@classmethod
def tearDownClass(cls):
super(TestImageCacheInvalidation, cls).tearDownClass()
signal_handlers.unregister_signal_handlers()
def test_resave_image_purges(self, purge):
get_image_model().objects.get(id=5).save()
purge.assert_any_call('http://api.example.com/api/v1/images/5/')
def test_delete_image_purges(self, purge):
get_image_model().objects.get(id=5).delete()
purge.assert_any_call('http://api.example.com/api/v1/images/5/')
|
|
import sqlite3
import shutil
import glob
import json
import time
import logging
import os
from os.path import join
from itertools import chain
from .get_subreddit_submissions import GetSubredditSubmissions
# utility
from .general_utility \
import slugify, convert_to_readable_time, move_file, \
get_file_extension, shorten_file_path_if_needed, get_datetime_now
from .manage_subreddit_last_id import history_log, process_subreddit_last_id
from colorama import init as colorama_init
from colorama import Fore, Style
from gallery_dl import config as gallery_dl_config
# database
from turbo_palm_tree.database_manager.tpt_database import TPTDatabaseManager
try:
from .image_match_manager import ImageMatchManager
except ImportError:
# image-match is not installed
print("Note: image-match module is not installed.")
# Exceptions
from imgur_downloader.imgurdownloader import (
FileExistsException,
ImgurException)
from urllib.error import HTTPError
from ssl import SSLError
from gallery_dl.exception import NoExtractorError
from turbo_palm_tree.utility.exception import TurboPalmTreeException
# downloaders
from imgur_downloader import ImgurDownloader
from gallery_dl.job import DownloadJob # gfycat, but supports many
from turbo_palm_tree.downloaders.deviantart import download_deviantart_url
colorama_init()
class DownloadSubredditSubmissions(GetSubredditSubmissions):
"""Downloads subreddit submissions, deletes older reposts/duplicate images,
& stores data of each download in db
.. todo:: Make logging log to its own separate file
"""
OS_MAX_PATH_LENGTH = 260
def __init__(self, subreddit, path, sort_type, limit, previous_id=None,
debug=False, disable_db=False, disable_im=False):
# call constructor of GetSubredditSubmissions class passing args
super().__init__(subreddit, path, sort_type, limit, previous_id, debug)
self.log = logging.getLogger('DownloadSubredditSubmissions')
self.Exceptions = (FileExistsException, FileExistsError,
ImgurException, HTTPError, ValueError,
SSLError, NoExtractorError, TurboPalmTreeException)
self.disable_im = disable_im
if not self.disable_im:
# elastic search variables
self.es_index, self.es_doc_type = 'tpt_images', 'image'
# object used to add, search and compare images in elasticsearch
# for duplicate deletion
self.im = ImageMatchManager(index=self.es_index,
doc_type=self.es_doc_type,
distance_cutoff=0.40)
self.disable_db = disable_db
if not self.disable_db:
# get db manager object for inserting and saving data to db
try:
self.db = TPTDatabaseManager()
except sqlite3.OperationalError as e:
self.log.error("{}: {}".format(e.__class__.__name__, str(e)))
self.disable_db = True
# used to check if url ends with any of these
self.image_extensions = ('.png', '.jpg', '.jpeg', '.gif')
video_extensions = ('.webm', '.mp4')
self.media_extensions = tuple(chain(self.image_extensions,
video_extensions))
# prevent gallery-dl module from printing to std output
gallery_dl_config.set(("output",), "mode", "null")
def download(self):
"""Download media from submissions"""
continue_downloading = True
        # `limit` keeps the originally requested amount; self.limit shrinks as
        # batches of submissions are downloaded
limit = self.limit
# counters to keep track of how many submissions downloaded & more
download_count, error_count, skip_count = 0, 0, 0
        # load the last submission id we downloaded from, or create a new history file for it
log_filename = '._history.txt'
log_data, prev_id = process_subreddit_last_id(
subreddit=self.subreddit, sort_type=self.sort_type,
dir=self.path, log_file=log_filename, verbose=True)
if not self.previous_id:
self.set_previous_id(prev_id)
# ensures the amount of submissions downloaded from is equal to limit
while continue_downloading:
errors, skips = 0, 0
# get submissions (dict containing info) & use data to download
submissions = self.get_submissions_info()
for submission in submissions:
url = submission['url']
title = submission['title']
# makes an assumption that len(file_extension) <= 5
_, filename = shorten_file_path_if_needed(
slugify(title),
max_length=self.OS_MAX_PATH_LENGTH - len(self.path) - 5)
dl_directory = submission['dl_directory']
submission_id = submission['id']
                # if filename is '' or None, use the current datetime as the filename
if not filename:
_, filename = shorten_file_path_if_needed(
get_datetime_now(),
                        max_length=(self.OS_MAX_PATH_LENGTH
                                    - len(self.path) - 5))
# if an entire imgur album was downloaded,
# filenames will be stored here
final_filenames = []
self.log.info('Attempting to save {} as {}'
.format(url, dl_directory))
# check domain and call corresponding downloader
# download functions or methods
try:
if 'imgur.com' in url:
imgur = ImgurDownloader(imgur_url=url,
dir_download=self.path,
file_name=filename,
delete_dne=True,
debug=False)
final_filenames, skipped = imgur.save_images()
if len(final_filenames) == 1:
filename = final_filenames[0]
dl_directory = os.path.join(
os.path.dirname(dl_directory), filename)
elif 'deviantart.com' in url:
download_deviantart_url(url, dl_directory)
else:
job = DownloadJob(url)
job.run()
# text submission on a subreddit
if job.pathfmt is None:
raise TurboPalmTreeException(
'No path for gallery-dl DownloadJob\n'
'\turl = {}'.format(url))
dl_directory = os.path.abspath(job.pathfmt.path)
dl_directory = move_file(
dl_directory,
join(self.path,
filename + get_file_extension(dl_directory)))
print('downloaded: {title}; {url}'
.format(title=filename, url=url))
                    # get the file creation time if the file exists, else just use the time now
if dl_directory and os.path.exists(dl_directory):
creation_time = os.path.getctime(dl_directory)
else:
creation_time = time.time()
if not self.disable_im:
metadata = {'source_url': url,
'creation_time': creation_time}
# add img, locate & delete older duplicates
self.im.delete_duplicates(dl_directory, metadata=metadata)
if not self.disable_db:
                        # add the download date to the dict, then insert the data into the database
submission['download_date'] = convert_to_readable_time(
creation_time)
self.db.insert(submission)
except self.Exceptions as e:
msg = '{}: {}'.format(type(e).__name__, e.args)
self.log.warning(msg)
print(Fore.RED + msg + Style.RESET_ALL)
errors += 1
except KeyboardInterrupt:
msg = 'KeyboardInterrupt caught, exiting program'
self.log.info(msg)
print(msg)
continue_downloading = False
break
# update previous id downloaded
            if 'submission_id' in locals():
self.set_previous_id(submission_id)
# update count of media successfully downloaded
download_count += self.limit - errors - skips
error_count += errors
skip_count += skips
# update attribute limit which is used when getting submissions
if download_count < limit:
self.set_limit(limit - download_count)
elif download_count >= limit or not continue_downloading:
                if 'submission_id' in locals():
log_data[self.subreddit][self.sort_type]['last-id'] = \
submission_id
history_log(self.path, log_filename, 'write', log_data)
continue_downloading = False
# continue_downloading is false
if not self.disable_db:
self.db.close()
self._cleanup_files()
print("{}{} errors occured".format(Fore.YELLOW, error_count))
print("{}Downloaded from {} submissions from {}/{}{reset}"
.format(Fore.GREEN, download_count, self.subreddit,
self.sort_type, reset=Style.RESET_ALL))
@staticmethod
def _cleanup_files():
"""Remove gallery-dl folder if it's there"""
for path in glob.glob(os.path.join(os.getcwd(), '*')):
if os.path.basename(path) == 'gallery-dl':
try:
shutil.rmtree(path)
except OSError:
pass
break
@staticmethod
def write_to_file(path=os.path.join(os.getcwd(), str(int(time.time()))),
data=None):
"""
:param path: path (including filename) of file that's to be written to
:param data: data that gets written in file
"""
if not data:
return
with open(path, 'w') as f:
f.write(json.dumps(data))
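# Illustrative sketch (the constructor arguments below are assumptions based on
# the signature above, not taken from project documentation): a minimal driver
# that downloads the 10 newest submissions of a subreddit into ./downloads.
def _example_download_subreddit():
    downloader = DownloadSubredditSubmissions(
        subreddit='earthporn', path='downloads', sort_type='new',
        limit=10, disable_db=True, disable_im=True)
    downloader.download()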
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for converting parsed doc content into markdown pages."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def build_md_page(page_info):
"""Given a PageInfo object, return markdown for the page.
Args:
page_info: must be a `parser.FunctionPageInfo`, `parser.ClassPageInfo`, or
`parser.ModulePageInfo`
Returns:
Markdown for the page
Raises:
ValueError: if `page_info` is an instance of an unrecognized class
"""
if page_info.for_function():
return _build_function_page(page_info)
if page_info.for_class():
return _build_class_page(page_info)
if page_info.for_module():
return _build_module_page(page_info)
raise ValueError('Unknown Page Info Type: %s' % type(page_info))
def _build_function_page(page_info):
"""Given a FunctionPageInfo object Return the page as an md string."""
parts = ['# %s\n\n' % page_info.full_name]
if page_info.aliases:
parts.extend('### `%s`\n' % name
for name in page_info.aliases)
parts.append('\n')
if page_info.signature is not None:
parts.append(_build_signature(page_info))
if page_info.defined_in:
parts.append('\n\n')
parts.append(str(page_info.defined_in))
parts.append(page_info.guides)
parts.append(page_info.doc.docstring)
parts.append(_build_function_details(page_info.doc.function_details))
parts.append(_build_compatibility(page_info.doc.compatibility))
return ''.join(parts)
def _build_class_page(page_info):
"""Given a ClassPageInfo object Return the page as an md string."""
parts = ['# {page_info.full_name}\n\n'.format(page_info=page_info)]
if page_info.aliases:
parts.extend('### `class %s`\n' % name for name in page_info.aliases)
parts.append('\n')
if page_info.defined_in is not None:
parts.append('\n\n')
parts.append(str(page_info.defined_in))
parts.append(page_info.guides)
parts.append(page_info.doc.docstring)
parts.append(_build_function_details(page_info.doc.function_details))
assert not page_info.doc.compatibility
parts.append('\n\n')
if page_info.classes:
parts.append('## Child Classes\n')
link_template = ('[`class {class_info.short_name}`]'
'({class_info.url})\n\n')
class_links = sorted(
link_template.format(class_info=class_info)
for class_info in page_info.classes)
parts.extend(class_links)
if page_info.properties:
parts.append('## Properties\n\n')
for prop_info in sorted(page_info.properties):
h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
parts.append(h3.format(short_name=prop_info.short_name))
parts.append(prop_info.doc.docstring)
parts.append(_build_function_details(prop_info.doc.function_details))
assert not prop_info.doc.compatibility
parts.append('\n\n')
parts.append('\n\n')
if page_info.methods:
parts.append('## Methods\n\n')
# Sort the methods list, but make sure constructors come first.
constructors = ['__init__', '__new__']
inits = [method for method in page_info.methods
if method.short_name in constructors]
others = [method for method in page_info.methods
if method.short_name not in constructors]
for method_info in sorted(inits) + sorted(others):
h3 = ('<h3 id="{short_name}">'
'<code>{short_name}</code>'
'</h3>\n\n')
parts.append(h3.format(**method_info.__dict__))
if method_info.signature is not None:
parts.append(_build_signature(method_info))
parts.append(method_info.doc.docstring)
parts.append(_build_function_details(method_info.doc.function_details))
parts.append(_build_compatibility(method_info.doc.compatibility))
parts.append('\n\n')
parts.append('\n\n')
if page_info.other_members:
parts.append('## Class Members\n\n')
# TODO(markdaoust): Document the value of the members,
# at least for basic types.
h3 = '<h3 id="{short_name}"><code>{short_name}</code></h3>\n\n'
others_member_headings = (h3.format(short_name=info.short_name)
for info in sorted(page_info.other_members))
parts.extend(others_member_headings)
return ''.join(parts)
def _build_module_page(page_info):
"""Given a ClassPageInfo object Return the page as an md string."""
parts = ['# Module: {full_name}\n\n'.format(full_name=page_info.full_name)]
if page_info.aliases:
parts.extend('### Module `%s`\n' % name for name in page_info.aliases)
parts.append('\n')
if page_info.defined_in is not None:
parts.append('\n\n')
parts.append(str(page_info.defined_in))
parts.append(page_info.doc.docstring)
parts.append('\n\n')
if page_info.modules:
parts.append('## Modules\n\n')
template = '[`{short_name}`]({url}) module'
for item in page_info.modules:
parts.append(template.format(**item.__dict__))
if item.doc.brief:
parts.append(': ' + item.doc.brief)
parts.append('\n\n')
if page_info.classes:
parts.append('## Classes\n\n')
template = '[`class {short_name}`]({url})'
for item in page_info.classes:
parts.append(template.format(**item.__dict__))
if item.doc.brief:
parts.append(': ' + item.doc.brief)
parts.append('\n\n')
if page_info.functions:
parts.append('## Functions\n\n')
template = '[`{short_name}(...)`]({url})'
for item in page_info.functions:
parts.append(template.format(**item.__dict__))
if item.doc.brief:
parts.append(': ' + item.doc.brief)
parts.append('\n\n')
if page_info.other_members:
# TODO(markdaoust): Document the value of the members,
# at least for basic types.
parts.append('## Other Members\n\n')
for item in page_info.other_members:
parts.append('`{short_name}`\n\n'.format(**item.__dict__))
return ''.join(parts)
def _build_signature(obj_info):
"""Returns a md code block showing the function signature."""
signature_template = '\n'.join([
'``` python',
'{name}({sig})',
'```\n\n'])
if not obj_info.signature:
sig = ''
elif len(obj_info.signature) == 1:
sig = obj_info.signature[0]
else:
sig = ',\n'.join(' %s' % sig_item for sig_item in obj_info.signature)
sig = '\n'+sig+'\n'
return signature_template.format(name=obj_info.short_name, sig=sig)
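# Illustrative sketch (PageInfoStub is a stand-in, not a real parser class):
# roughly how _build_signature() renders a multi-argument signature.
def _example_build_signature():
  import collections
  stub_cls = collections.namedtuple('PageInfoStub', ['short_name', 'signature'])
  stub = stub_cls(short_name='my_fn', signature=['x', 'name=None'])
  # Returns:
  # ``` python
  # my_fn(
  #     x,
  #     name=None
  # )
  # ```
  return _build_signature(stub)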
def _build_compatibility(compatibility):
"""Return the compatibility section as an md string."""
parts = []
sorted_keys = sorted(compatibility.keys())
for key in sorted_keys:
value = compatibility[key]
parts.append('\n\n#### %s compatibility\n%s\n' % (key, value))
return ''.join(parts)
def _build_function_details(function_details):
"""Return the function details section as an md string."""
parts = []
for detail in function_details:
sub = []
sub.append('#### ' + detail.keyword + ':\n\n')
sub.append(detail.header)
for key, value in detail.items:
sub.append('* <b>`%s`</b>:%s' % (key, value))
parts.append(''.join(sub))
return '\n'.join(parts)
|
|
"""
Django settings for the appointment_booking_drchrono project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = os.environ.get('SECRET_KEY', 'k$s+jts3d$349yo&ojfqo1wvs!f##2w!p&h$4&qd$uz_5&a7%q')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
DEVELOPMENT = os.environ.get('DEVELOPMENT', False)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = DEVELOPMENT
ALLOWED_HOSTS = []
# Application definition
SITE_ID = 1
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'autocomplete_light',
'jinja2',
'pipeline',
'social.apps.django_app.default',
'robots',
#'test_without_migrations',
'accounts',
'appointments',
'drchronoAPI',
'pipeline_jinja2',
'social_auth_drchrono',
'utilities',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.gzip.GZipMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'pipeline_jinja2.middleware.MinifyHTMLMiddleware',
)
ROOT_URLCONF = 'appointment_booking_drchrono.urls'
WSGI_APPLICATION = 'appointment_booking_drchrono.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
if DEVELOPMENT:
POSTGIS_VERSION = (2, 1, 4)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
DEFAULT_FROM_EMAIL = 'testing@example.com'
STATIC_URL = '/static/'
STATICFILES_STORAGE = 'pipeline.storage.PipelineStorage'
MEDIA_URL = '/media/'
    PIPELINE_COMPILERS = ('pipeline.compilers.sass.SASSCompiler',)
PIPELINE_CSS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_CSS = {
'sass': {
'source_filenames': (
'stylesheets/theme.scss',
),
'output_filename': 'stylesheets/style.min.css',
'extra_context': {
'media': 'screen',
},
},
}
else:
# TODO: add production settings
pass
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': os.environ.get('DB_NAME', 'appointmentsDB'),
'OPTIONS': {
'options': '-c search_path=gis,public,pg_catalog'
},
'USER': os.environ.get('DS_USERNAME', 'postgres'),
'PASSWORD': os.environ.get('DS_PASSWORD', 'devDatabase'),
'HOST': os.environ.get('DS_HOSTNAME', 'localhost'),
'PORT': os.environ.get('DS_PORT', ''),
'ATOMIC_REQUESTS': True,
}
}
SITE_ID = 1
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
TIME_ZONE = 'UTC'
USE_TZ = True
MEDIA_ROOT = os.path.join(BASE_DIR, 'appointment_booking_drchrono/media')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = os.path.join( BASE_DIR, 'appointment_booking_drchrono/staticfiles/' )
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join( BASE_DIR, 'appointment_booking_drchrono/static' ),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
# Templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'DIRS': [os.path.join(os.path.dirname(__file__), 'templates').replace('\\','/'),],
'APP_DIRS': True,
'OPTIONS': {
'environment' : 'appointment_booking_drchrono.jinja2.environment',
}
},
]
FILE_UPLOAD_HANDLERS = (
"django.core.files.uploadhandler.MemoryFileUploadHandler",
"django.core.files.uploadhandler.TemporaryFileUploadHandler",
)
AUTHENTICATION_BACKENDS = (
'social_auth_drchrono.backends.drchronoOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.csrf',
'django.core.context_processors.request',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
# override user defaults
ABSOLUTE_URL_OVERRIDES = {
'auth.user': lambda u: "/redirect/%s/" % u.username,
}
PIPELINE_ENABLED = True
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.yuglify.YuglifyCompressor'
PIPELINE_JS = {
'scripts': {
'source_filenames': (
'scripts/includes/jquery-1.11.0.min.js',
'scripts/includes/fastclick.js',
'scripts/includes/foundation.min.js',
'scripts/includes/foundation-datepicker.js',
'scripts/includes/jquery.floatThead.js',
'scripts/appointments.js',
),
'output_filename': 'scripts/scripts.min.js',
'extra_context': {
'async': True,
},
}
}
ROBOTS_SITEMAP_URLS = [
'http://www.composerscouch.com/sitemap.xml',
]
try:
from accounts.settings import *
except ImportError:
pass
|
|
'''
Helper functions
'''
from datetime import datetime as dt
from datetime import timedelta as td
import re
from InfluxThreader import InfluxThreader
import traceback
def getInfluxStr(**args):
    # 'rsp' is optional: some log types (e.g. gomezthread) carry no response value
    response = args.get('rsp')
hostname = args['hst']
system = args['stm']
nanodate = args['ndate']
responsedb = args['measure']
if args['log'] == "DB":
operation = args['op']
table = args['tbl']
JSON = {
"measurement" : responsedb,
"tags": {
"host": hostname,
"system": system,
"operation" : operation,
"table": table
},
"time": nanodate,
"fields": {
"value": int(response)
}
}
elif args['log'] == "VODS":
function = args['func']
category = args['cat']
JSON = {
"measurement" : responsedb,
"tags": {
"host": hostname,
"function": function,
"system": system,
"category": category
},
"time": nanodate,
"fields": {
"value": int(response)
}
}
elif args['log'] == 'gomezthread':
cipher = args['cipher']
protocol = args['proto']
platform = args['pform']
version = args['v']
JSON = {
"measurement" : responsedb,
"tags": {
"host": hostname,
"system": system,
"cipher": cipher,
"protocol": protocol,
"platform": platform,
"version": version
},
"time": nanodate,
"fields": {
"value": int(1)
}
}
elif args['log'] == 'gomezqueue':
function = args['func']
JSON = {
"measurement" : responsedb,
"tags": {
"host" : hostname,
"system": system,
"function": function
},
"time": nanodate,
"fields": {
"value": int(response)
}
}
else:
function = args['func']
JSON = {
"measurement" : responsedb,
"tags": {
"host": hostname,
"system": system,
"function": function
},
"time": nanodate,
"fields": {
"value": int(response)
}
}
#print JSON
return JSON
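# Illustrative sketch (host, system and values are invented): the kind of point
# getInfluxStr() builds for a "DB" log entry.
def exampleDbPoint():
    return getInfluxStr(measure='dbresponses', rsp='42', hst='app-prod-01',
                        stm='prod', ndate='2017-01-01T00:00:00.000000Z',
                        op='select', tbl='users', log='DB')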
def getFunction(line, log):
line = line.replace('[', '(')
if "call" in line.split(log)[1]:
function = re.sub(' ', '', line.split(log)[1].split("call")[0].split("(")[0])
else:
function = re.sub(' ', '', line.split(log)[2].split("call")[0].split("(")[0])
return function
def getDbStuff(line):
dbOps = {'insert': 'into', 'select': 'from', 'delete': 'from', 'update': 'update'}
for ops in dbOps.keys():
if ops in line.lower():
operation = ops
if dbOps[ops] in line.lower():
table = line.lower().split(dbOps[ops])[1].split()[0].split("(")[0].split(")")[0]
else:
table = line.lower().split('insert')[1].split()[0].split("(")[0].split(")")[0]
break
return operation, table
def convertDate(dateStr):
dateFormat = "%Y-%m-%d %H:%M:%S,%f"
influxFormat = "%Y-%m-%dT%H:%M:%S.%fZ"
myDate = dateStr[1:-1]
influxDate = (dt.strptime(myDate, dateFormat) - td(hours=2)).strftime(influxFormat)
return influxDate
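# Illustrative sketch (the timestamp is invented; note the hard-coded two hour
# offset above): how a bracketed log timestamp becomes an Influx timestamp.
def exampleConvertDate():
    # returns '2017-05-02T12:33:21.123000Z'
    return convertDate('[2017-05-02 14:33:21,123]')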
def getTheLine(obj, measure, line, queue=None, node=None):
try:
response = re.sub('ms', '', line.rsplit(None, 1)[-1])
    except Exception:
        response = None
hostname = line.split()[3]
system = hostname.split('-')[1]
splitStr = "MSG" if "_MSG " in line else "TIME"
nanodate = re.sub('\n', '', convertDate(line.split(splitStr)[1].split("DEBUG")[0]))
if measure == 'gomezthread':
cipher = re.search('Cipher Suite\[(.+?)\]', line).group(1)
protocol = re.search('Protocol Version\[(.+?)\]', line).group(1)
version = re.search('Message Version\[(.+?)\]', line).group(1)
platform = re.search('Device Platform\[(.+?)\]', line).group(1)
influx = getInfluxStr(measure=measure, hst=hostname, stm=system, ndate=nanodate, cipher=cipher,proto=protocol, v=version, pform=platform, log=measure)
return influx
elif measure == 'gomezqueue':
function = 'n' + node + "." + queue.split()[0]
try:
response = re.findall(r"\['?([0-9]+)'?\]", line.rsplit(None, 1)[-1])[0]
except:
response = int(1)
influx = getInfluxStr(measure=measure, rsp=response, hst=hostname, stm=system, ndate=nanodate, func=function, log=measure)
return influx
else:
for log in obj:
if log in line:
if log == "DB":
operation, table = getDbStuff(line)
influx = getInfluxStr(measure=measure[log], rsp=response, hst=hostname, stm=system, ndate=nanodate, op=operation, tbl=table, log=log)
return influx
elif log == "VODS" and 'XAVIER' not in line:
function = getFunction(line, log)
category = function.split('.')[0][:3].lower()
influx = getInfluxStr(measure=measure[log], rsp=response, hst=hostname, stm=system, ndate=nanodate, cat=category, func=function, log=log)
return influx
else:
function = getFunction(line, log)
influx = getInfluxStr(measure=measure[log], rsp=response, hst=hostname, stm=system, ndate=nanodate, func=function, log=log)
return influx
def processlines(obj, dbhost, dbport):
#logs = [XAVIER, VODS, etc]
#measure = {VODS: vodsresponses, CYCLOPS: cyclopsresponses, etc}
logs = obj.logs
measure = obj.dbDict
dataList = []
for line in obj.full:
try:
sendToInflux = getTheLine(logs, measure, line)
dataList.append(sendToInflux)
except Exception as e:
print "ERROR in processlines(): %s\n" % e
print "--- %s" % line
#traceback.print_exc(file=sys.stdout)
#raise e
pass
try:
threading = InfluxThreader(dbhost, dbport, dataList)
except Exception as e:
print "ERROR in processlines().threader: %s" % e
def file_len(fname):
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def chunkyList(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
|
|
ground_truth = (
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"open dock drawer",
(
"dock-body",
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","dock-body")),
),
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"press dock toggle",
(
2.000000,
"dock-body_5_2",
2.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"move object",
(
"c1",
"right",
(),
(),
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","c1")),
),
"press dock toggle",
(
1.000000,
"dock-body_7_2",
2.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","c1")),
),
"move object",
(
"c3",
"dock-body_5_1",
(
(0.999870, 0.016083, 0.000838, ),
(-0.016084, 0.999871, 0.000255, ),
(-0.000833, -0.000268, 1.000000, ),
),
(
(-0.408389, ),
(-0.041364, ),
(0.133196, ),
),
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","c1")),
),
"move object",
(
"c1",
"dock-body_7_1",
(
(0.999948, -0.010235, 0.000229, ),
(0.010236, 0.999948, -0.000412, ),
(-0.000225, 0.000414, 1.000000, ),
),
(
(-0.455108, ),
(0.035197, ),
(0.106850, ),
),
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"press dock toggle",
(
2.000000,
"dock-body_7_2",
1.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"press dock toggle",
(
2.000000,
"dock-body_5_2",
1.000000,
)
),
(
(
("workspace", "Workspace"),
("table", "Block"),
("dock-case", "DockCase"),
("dock-case_1", "Block"),
("dock-body", "DockDrawer"),
("dock-body_2", "DockFrontPanel"),
("dock-body_2_1", "Prism"),
("dock-body_2_2", "Block"),
("dock-body_2_3", "Block"),
("dock-body_4", "DockHandle"),
("dock-body_4_1", "Prism"),
("dock-body_4_2", "Prism"),
("dock-body_5", "DockModule"),
("dock-body_5_1", "DockSlot"),
("c1", "Cartridge"),
("dock-body_5_2", "DockSwitch"),
("dock-body_5_3", "DockLED"),
("dock-body_6", "DockModule"),
("dock-body_6_1", "DockSlot"),
("dock-body_6_2", "DockSwitch"),
("dock-body_6_3", "DockLED"),
("dock-body_7", "DockModule"),
("dock-body_7_1", "DockSlot"),
("c3", "Cartridge"),
("dock-body_7_2", "DockSwitch"),
("dock-body_7_3", "DockLED"),
("dock-body_8", "DockModule"),
("dock-body_8_1", "DockSlot"),
("dock-body_8_2", "DockSwitch"),
("dock-body_8_3", "DockLED"),
("dock-case_2", "Block"),
("dock-case_3", "Block"),
("dock-case_4", "Block"),
("dock-case_5", "Prism"),
("dock-case_6", "Block"),
("c5", "Cartridge"),
("discard-bin", "Block"),
("gripping", ("nothing","nothing")),
),
"close dock drawer",
(
"dock-body",
)
),
)
|
|
# Copyright 2019 Nexenta by DDN, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import posixpath
from oslo_log import log
from oslo_utils import units
from manila.common import constants as common
from manila import exception
from manila.i18n import _
from manila.share import driver
from manila.share.drivers.nexenta.ns5 import jsonrpc
from manila.share.drivers.nexenta import options
from manila.share.drivers.nexenta import utils
VERSION = '1.1'
LOG = log.getLogger(__name__)
ZFS_MULTIPLIER = 1.1 # ZFS quotas do not take metadata into account.
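# Illustrative sketch (example figure, not used by the driver): the referenced
# quota that create_share() below requests for a share of a given size in GiB,
# e.g. a 10 GiB share gets int(10 * units.Gi * 1.1) == 11811160064 bytes
# (roughly 11 GiB) so metadata overhead does not eat into requested capacity.
def _example_quota_size(share_size_gb=10):
    return int(share_size_gb * units.Gi * ZFS_MULTIPLIER)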
class NexentaNasDriver(driver.ShareDriver):
"""Nexenta Share Driver.
Executes commands relating to Shares.
API version history:
1.0 - Initial version.
1.1 - Failover support.
- Unshare filesystem completely after last securityContext
is removed.
- Moved all http/url code to jsonrpc.
- Manage existing support.
- Revert to snapshot support.
"""
driver_prefix = 'nexenta'
def __init__(self, *args, **kwargs):
"""Do initialization."""
LOG.debug('Initializing Nexenta driver.')
super(NexentaNasDriver, self).__init__(False, *args, **kwargs)
self.configuration = kwargs.get('configuration')
if self.configuration:
self.configuration.append_config_values(
options.nexenta_connection_opts)
self.configuration.append_config_values(
options.nexenta_nfs_opts)
self.configuration.append_config_values(
options.nexenta_dataset_opts)
else:
raise exception.BadConfigurationException(
reason=_('Nexenta configuration missing.'))
self.nef = None
self.verify_ssl = self.configuration.nexenta_ssl_cert_verify
self.nas_host = self.configuration.nexenta_nas_host
self.nef_port = self.configuration.nexenta_rest_port
self.nef_user = self.configuration.nexenta_user
self.nef_password = self.configuration.nexenta_password
self.pool_name = self.configuration.nexenta_pool
self.parent_fs = self.configuration.nexenta_folder
self.nfs_mount_point_base = self.configuration.nexenta_mount_point_base
self.dataset_compression = (
self.configuration.nexenta_dataset_compression)
self.provisioned_capacity = 0
@property
def storage_protocol(self):
protocol = ''
if self.configuration.nexenta_nfs:
protocol = 'NFS'
else:
msg = _('At least 1 storage protocol must be enabled.')
raise exception.NexentaException(msg)
return protocol
@property
def root_path(self):
return posixpath.join(self.pool_name, self.parent_fs)
@property
def share_backend_name(self):
if not hasattr(self, '_share_backend_name'):
self._share_backend_name = None
if self.configuration:
self._share_backend_name = self.configuration.safe_get(
'share_backend_name')
if not self._share_backend_name:
self._share_backend_name = 'NexentaStor5'
return self._share_backend_name
def do_setup(self, context):
self.nef = jsonrpc.NefProxy(self.storage_protocol,
self.root_path,
self.configuration)
def check_for_setup_error(self):
"""Check root filesystem, NFS service and NFS share."""
filesystem = self.nef.filesystems.get(self.root_path)
if filesystem['mountPoint'] == 'none':
message = (_('NFS root filesystem %(path)s has no mount point')
% {'path': self.root_path})
raise jsonrpc.NefException(code='ENOENT', message=message)
if not filesystem['isMounted']:
message = (_('NFS root filesystem %(path)s is not mounted')
% {'path': filesystem['mountPoint']})
raise jsonrpc.NefException(code='ENOTDIR', message=message)
payload = {}
if filesystem['nonBlockingMandatoryMode']:
payload['nonBlockingMandatoryMode'] = False
if filesystem['smartCompression']:
payload['smartCompression'] = False
if payload:
self.nef.filesystems.set(self.root_path, payload)
service = self.nef.services.get('nfs')
if service['state'] != 'online':
message = (_('NFS server service is not online: %(state)s')
% {'state': service['state']})
raise jsonrpc.NefException(code='ESRCH', message=message)
self._get_provisioned_capacity()
def _get_provisioned_capacity(self):
payload = {'fields': 'referencedQuotaSize'}
self.provisioned_capacity += self.nef.filesystems.get(
self.root_path, payload)['referencedQuotaSize'] / units.Gi
def ensure_share(self, context, share, share_server=None):
pass
def create_share(self, context, share, share_server=None):
"""Create a share."""
LOG.debug('Creating share: %s.', self._get_share_name(share))
dataset_path = self._get_dataset_path(share)
size = int(share['size'] * units.Gi * ZFS_MULTIPLIER)
payload = {
'recordSize': self.configuration.nexenta_dataset_record_size,
'compressionMode': self.dataset_compression,
'path': dataset_path,
'referencedQuotaSize': size,
'nonBlockingMandatoryMode': False
}
if not self.configuration.nexenta_thin_provisioning:
payload['referencedReservationSize'] = size
self.nef.filesystems.create(payload)
try:
mount_path = self._mount_filesystem(share)
except jsonrpc.NefException as create_error:
try:
payload = {'force': True}
self.nef.filesystems.delete(dataset_path, payload)
except jsonrpc.NefException as delete_error:
LOG.debug('Failed to delete share %(path)s: %(error)s',
{'path': dataset_path, 'error': delete_error})
raise create_error
self.provisioned_capacity += share['size']
location = {
'path': mount_path,
'id': self._get_share_name(share)
}
return [location]
def _mount_filesystem(self, share):
"""Ensure that filesystem is activated and mounted on the host."""
dataset_path = self._get_dataset_path(share)
payload = {'fields': 'mountPoint,isMounted'}
filesystem = self.nef.filesystems.get(dataset_path, payload)
if filesystem['mountPoint'] == 'none':
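# A mount point of 'none' means the filesystem has not been activated on
# this appliance yet (e.g. it is a replication target after a failover), so
# activate it through the HPR service before re-reading its mount point.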
payload = {'datasetName': dataset_path}
self.nef.hpr.activate(payload)
filesystem = self.nef.filesystems.get(dataset_path, payload)
elif not filesystem['isMounted']:
self.nef.filesystems.mount(dataset_path)
return '%s:%s' % (self.nas_host, filesystem['mountPoint'])
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None, parent_share=None):
"""Is called to create share from snapshot."""
snapshot_path = self._get_snapshot_path(snapshot)
LOG.debug('Creating share from snapshot %s.', snapshot_path)
clone_path = self._get_dataset_path(share)
size = int(share['size'] * units.Gi * ZFS_MULTIPLIER)
payload = {
'targetPath': clone_path,
'referencedQuotaSize': size,
'recordSize': self.configuration.nexenta_dataset_record_size,
'compressionMode': self.dataset_compression,
'nonBlockingMandatoryMode': False
}
if not self.configuration.nexenta_thin_provisioning:
payload['referencedReservationSize'] = size
self.nef.snapshots.clone(snapshot_path, payload)
self._remount_filesystem(clone_path)
self.provisioned_capacity += share['size']
try:
mount_path = self._mount_filesystem(share)
except jsonrpc.NefException as create_error:
try:
payload = {'force': True}
self.nef.filesystems.delete(clone_path, payload)
except jsonrpc.NefException as delete_error:
LOG.debug('Failed to delete share %(path)s: %(error)s',
{'path': clone_path, 'error': delete_error})
raise create_error
location = {
'path': mount_path,
'id': self._get_share_name(share)
}
return [location]
def _remount_filesystem(self, clone_path):
"""Workaround for NEF bug: cloned share has offline NFS status"""
self.nef.filesystems.unmount(clone_path)
self.nef.filesystems.mount(clone_path)
def _get_dataset_path(self, share):
share_name = self._get_share_name(share)
return posixpath.join(self.root_path, share_name)
def _get_share_name(self, share):
"""Get share name with share name prefix."""
return ('%(prefix)s%(share_id)s' % {
'prefix': self.configuration.nexenta_share_name_prefix,
'share_id': share['share_id']})
def _get_snapshot_path(self, snapshot):
"""Return ZFS snapshot path for the snapshot."""
snapshot_id = (
snapshot['snapshot_id'] or snapshot['share_group_snapshot_id'])
share = snapshot.get('share') or snapshot.get('share_instance')
fs_path = self._get_dataset_path(share)
return '%s@snapshot-%s' % (fs_path, snapshot_id)
def delete_share(self, context, share, share_server=None):
"""Delete a share."""
LOG.debug('Deleting share: %s.', self._get_share_name(share))
share_path = self._get_dataset_path(share)
delete_payload = {'force': True, 'snapshots': True}
try:
self.nef.filesystems.delete(share_path, delete_payload)
except jsonrpc.NefException as error:
if error.code != 'EEXIST':
raise error
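# An EEXIST error from NEF means the filesystem still has snapshots with
# clones and cannot be destroyed yet: promote the clone of the most recent
# such snapshot so the snapshots move to it, then retry the delete below.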
snapshots_tree = {}
snapshots_payload = {'parent': share_path, 'fields': 'path'}
snapshots = self.nef.snapshots.list(snapshots_payload)
for snapshot in snapshots:
clones_payload = {'fields': 'clones,creationTxg'}
data = self.nef.snapshots.get(snapshot['path'], clones_payload)
if data['clones']:
snapshots_tree[data['creationTxg']] = data['clones'][0]
if snapshots_tree:
clone_path = snapshots_tree[max(snapshots_tree)]
self.nef.filesystems.promote(clone_path)
self.nef.filesystems.delete(share_path, delete_payload)
self.provisioned_capacity -= share['size']
def extend_share(self, share, new_size, share_server=None):
"""Extends a share."""
LOG.debug(
'Extending share: %(name)s to %(size)sG.', {
'name': self._get_share_name(share), 'size': new_size})
self._set_quota(share, new_size)
if not self.configuration.nexenta_thin_provisioning:
self._set_reservation(share, new_size)
self.provisioned_capacity += (new_size - share['size'])
def shrink_share(self, share, new_size, share_server=None):
"""Shrinks size of existing share."""
LOG.debug(
'Shrinking share: %(name)s to %(size)sG.', {
'name': self._get_share_name(share), 'size': new_size})
share_path = self._get_dataset_path(share)
share_data = self.nef.filesystems.get(share_path)
used = share_data['bytesUsedBySelf'] / units.Gi
if used > new_size:
raise exception.ShareShrinkingPossibleDataLoss(
share_id=self._get_share_name(share))
if not self.configuration.nexenta_thin_provisioning:
self._set_reservation(share, new_size)
self._set_quota(share, new_size)
self.provisioned_capacity -= (share['size'] - new_size)
def create_snapshot(self, context, snapshot, share_server=None):
"""Create a snapshot."""
snapshot_path = self._get_snapshot_path(snapshot)
LOG.debug('Creating snapshot: %s.', snapshot_path)
payload = {'path': snapshot_path}
self.nef.snapshots.create(payload)
def delete_snapshot(self, context, snapshot, share_server=None):
"""Deletes a snapshot.
:param snapshot: snapshot reference
"""
snapshot_path = self._get_snapshot_path(snapshot)
LOG.debug('Deleting snapshot: %s.', snapshot_path)
payload = {'defer': True}
self.nef.snapshots.delete(snapshot_path, payload)
def revert_to_snapshot(self, context, snapshot, share_access_rules,
snapshot_access_rules, share_server=None):
"""Reverts a share (in place) to the specified snapshot.
Does not delete the share snapshot. The share and snapshot must both
be 'available' for the restore to be attempted. The snapshot must be
the most recent one taken by Manila; the API layer performs this check
so the driver doesn't have to.
The share must be reverted in place to the contents of the snapshot.
Application admins should quiesce or otherwise prepare the application
for the shared file system contents to change suddenly.
:param context: Current context
:param snapshot: The snapshot to be restored
:param share_access_rules: List of all access rules for the affected
share
:param snapshot_access_rules: List of all access rules for the affected
snapshot
:param share_server: Optional -- Share server model or None
"""
snapshot_path = self._get_snapshot_path(snapshot).split('@')[1]
LOG.debug('Reverting to snapshot: %s.', snapshot_path)
share_path = self._get_dataset_path(snapshot['share'])
payload = {'snapshot': snapshot_path}
self.nef.filesystems.rollback(share_path, payload)
def manage_existing(self, share, driver_options):
"""Brings an existing share under Manila management.
If the provided share is not valid, then raise a
ManageInvalidShare exception, specifying a reason for the failure.
If the provided share is not in a state that can be managed, such as
being replicated on the backend, the driver *MUST* raise
ManageInvalidShare exception with an appropriate message.
The share has a share_type, and the driver can inspect that and
compare against the properties of the referenced backend share.
If they are incompatible, raise a
ManageExistingShareTypeMismatch, specifying a reason for the failure.
:param share: Share model
:param driver_options: Driver-specific options provided by admin.
:return: share_update dictionary with required key 'size',
which should contain size of the share.
"""
LOG.debug('Manage share %s.', self._get_share_name(share))
export_path = share['export_locations'][0]['path']
# check that filesystem with provided export exists.
fs_path = export_path.split(':/')[1]
fs_data = self.nef.filesystems.get(fs_path)
if not fs_data:
# wrong export path, raise exception.
msg = _('Share %s does not exist on Nexenta Store appliance, '
'cannot manage.') % export_path
raise exception.NexentaException(msg)
# get dataset properties.
if fs_data['referencedQuotaSize']:
size = (fs_data['referencedQuotaSize'] / units.Gi) + 1
else:
size = fs_data['bytesReferenced'] / units.Gi + 1
# rename filesystem on appliance to correlate with manila ID.
new_path = '%s/%s' % (self.root_path, self._get_share_name(share))
self.nef.filesystems.rename(fs_path, {'newPath': new_path})
# make sure quotas and reservations are correct.
if not self.configuration.nexenta_thin_provisioning:
self._set_reservation(share, size)
self._set_quota(share, size)
return {'size': size, 'export_locations': [{
'path': '%s:/%s' % (self.nas_host, new_path)
}]}
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
"""Update access rules for given share.
Using access_rules list for both adding and deleting rules.
:param context: The `context.RequestContext` object for the request
:param share: Share that will have its access rules updated.
:param access_rules: All access rules for given share. This list
is enough to update the access rules for given share.
:param add_rules: Empty List or List of access rules which should be
added. access_rules already contains these rules. Not used by this
driver.
:param delete_rules: Empty List or List of access rules which should be
removed. access_rules doesn't contain these rules. Not used by
this driver.
:param share_server: Data structure with share server information.
Not used by this driver.
"""
LOG.debug('Updating access to share %(id)s with following access '
'rules: %(rules)s', {
'id': self._get_share_name(share),
'rules': [(
rule.get('access_type'), rule.get('access_level'),
rule.get('access_to')) for rule in access_rules]})
rw_list = []
ro_list = []
update_dict = {}
if share['share_proto'] == 'NFS':
for rule in access_rules:
if rule['access_type'].lower() != 'ip':
msg = _(
'Only IP access control type is supported for NFS.')
LOG.warning(msg)
update_dict[rule['access_id']] = {
'state': 'error',
}
else:
update_dict[rule['access_id']] = {
'state': 'active',
}
if rule['access_level'] == common.ACCESS_LEVEL_RW:
rw_list.append(rule['access_to'])
else:
ro_list.append(rule['access_to'])
self._update_nfs_access(share, rw_list, ro_list)
return update_dict
def _update_nfs_access(self, share, rw_list, ro_list):
# Define allowed security context types to be able to tell whether
# the 'security_contexts' dict contains any rules at all
context_types = {'none', 'root', 'readOnlyList', 'readWriteList'}
security_contexts = {'securityModes': ['sys']}
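# The payload sent to NEF ends up shaped like, for example:
# {'securityContexts': [{'securityModes': ['sys'],
#   'readWriteList': [{'allow': True, 'etype': 'network',
#                      'entity': '10.0.0.0', 'mask': 24}]}]}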
def add_sc(addr_list, sc_type):
if sc_type not in context_types:
return
rule_list = []
for addr in addr_list:
address_mask = addr.strip().split('/', 1)
address = address_mask[0]
ls = {"allow": True, "etype": "fqdn", "entity": address}
if len(address_mask) == 2:
mask = int(address_mask[1])
if 0 <= mask < 31:
ls['mask'] = mask
ls['etype'] = 'network'
rule_list.append(ls)
# Context type with no addresses will result in an API error
if rule_list:
security_contexts[sc_type] = rule_list
add_sc(rw_list, 'readWriteList')
add_sc(ro_list, 'readOnlyList')
payload = {'securityContexts': [security_contexts]}
share_path = self._get_dataset_path(share)
if self.nef.nfs.list({'filesystem': share_path}):
if not set(security_contexts.keys()) & context_types:
self.nef.nfs.delete(share_path)
else:
self.nef.nfs.set(share_path, payload)
else:
payload['filesystem'] = share_path
self.nef.nfs.create(payload)
payload = {
'flags': ['file_inherit', 'dir_inherit'],
'permissions': ['full_set'],
'principal': 'everyone@',
'type': 'allow'
}
self.nef.filesystems.acl(share_path, payload)
def _set_quota(self, share, new_size):
quota = int(new_size * units.Gi * ZFS_MULTIPLIER)
share_path = self._get_dataset_path(share)
payload = {'referencedQuotaSize': quota}
LOG.debug('Setting quota for dataset %s.', share_path)
self.nef.filesystems.set(share_path, payload)
def _set_reservation(self, share, new_size):
res_size = int(new_size * units.Gi * ZFS_MULTIPLIER)
share_path = self._get_dataset_path(share)
payload = {'referencedReservationSize': res_size}
self.nef.filesystems.set(share_path, payload)
def _update_share_stats(self, data=None):
super(NexentaNasDriver, self)._update_share_stats()
total, free, allocated = self._get_capacity_info()
compression = self.dataset_compression != 'off'
data = {
'vendor_name': 'Nexenta',
'storage_protocol': self.storage_protocol,
'share_backend_name': self.share_backend_name,
'nfs_mount_point_base': self.nfs_mount_point_base,
'driver_version': VERSION,
'snapshot_support': True,
'create_share_from_snapshot_support': True,
'revert_to_snapshot_support': True,
'pools': [{
'pool_name': self.pool_name,
'compression': compression,
'total_capacity_gb': total,
'free_capacity_gb': free,
'reserved_percentage': (
self.configuration.reserved_share_percentage),
'reserved_snapshot_percentage':
(self.configuration.reserved_share_from_snapshot_percentage
or self.configuration.reserved_share_percentage),
'max_over_subscription_ratio': (
self.configuration.safe_get(
'max_over_subscription_ratio')),
'thin_provisioning':
self.configuration.nexenta_thin_provisioning,
'provisioned_capacity_gb': self.provisioned_capacity,
}],
}
self._stats.update(data)
def _get_capacity_info(self):
"""Calculate available space on the NFS share."""
data = self.nef.filesystems.get(self.root_path)
free = int(utils.bytes_to_gb(data['bytesAvailable']))
allocated = int(utils.bytes_to_gb(data['bytesUsed']))
total = free + allocated
return total, free, allocated
|
|
#!/usr/bin/python
###############################################################################################################
# This library is for using the Jetduino with Scratch
# http://www.NeuroRoboticTech.com/Projects/Jetduino
# History
# ------------------------------------------------
# Author Date Comments
# Karan 29 June 15 Initial Authoring
# Cofer 01 Feb 16 Modified for use with the Jetduino
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Jetduino for the Jetson TK1/TX1: an open source platform for connecting
Grove Sensors to the Jetson embedded supercomputers.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
'''
#
# Based on the BrickPi Scratch Library written by Jaikrishna
#
# The Python program acts as the Bridge between Scratch & Jetduino and must be running for the Scratch program to run.
##############################################################################################################
'''
import scratch,sys,threading,math
import jetduino
import time
en_jetduino=1
en_debug=1
en_ir_sensor=0  # set to 1 once the IR receiver has been initialized below
try:
s = scratch.Scratch()
if s.connected:
print "Jetduino Scratch: Connected to Scratch successfully"
#else:
#sys.exit(0)
except scratch.ScratchError:
print "Jetduino Scratch: Scratch is either not opened or remote sensor connections aren't enabled"
#sys.exit(0)
class myThread (threading.Thread):
def __init__(self, threadID, name, counter):
threading.Thread.__init__(self)
self.threadID = threadID
self.name = name
self.counter = counter
def run(self):
while running:
time.sleep(.2) # sleep for 200 ms
thread1 = myThread(1, "Thread-1", 1) #Setup and start the thread
thread1.setDaemon(True)
analog_sensors=['analogRead','rotary','sound','light']
digitalInp=['button']
digitalOp=['led','relay']
pwm=['LEDPower','buzzer','analogWrite']
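# Scratch broadcasts are matched by prefix followed by a port number and,
# where applicable, a value: e.g. "rotary0"/"light1" (analog read), "button2"
# (digital read), "led3on"/"relay4off" (digital write, single-digit port),
# "analogWrite5200"/"buzzer6128" (PWM, single-digit port then value), plus the
# explicit commands handled below: "setInput<pin>", "setOutput<pin>",
# "digitalRead<pin>", "digitalWriteHigh<pin>", "digitalWriteLow<pin>",
# "temp<pin>", "humidity<pin>", "distance<pin>", "lcdcolRRGGBB",
# "lcdtxt<text>", "READ_IR" and "TAKE_PICTURE".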
def match_sensors(msg,lst):
for i,e in enumerate(lst):
if msg[:len(e)].lower()==e.lower():
return i
return -1
try:
s.broadcast('READY')
except NameError:
print "Jetduino Scratch: Unable to Broadcast"
while True:
try:
m = s.receive()
while m[0] == 'sensor-update' :
m = s.receive()
msg = m[1]
if en_debug:
print "Rx:",msg
if msg == 'SETUP' :
print "Setting up sensors done"
elif msg == 'START' :
running = True
if thread1.is_alive() == False:
thread1.start()
print "Service Started"
elif match_sensors(msg,analog_sensors) >=0:
if en_jetduino:
s_no=match_sensors(msg,analog_sensors)
sens=analog_sensors[s_no]
port=int(msg[len(sens):])
a_read=jetduino.analogRead(port)
s.sensorupdate({sens:a_read})
if en_debug:
print msg
print sens +'op:'+ str(a_read)
elif msg[:8].lower()=="setInput".lower():
if en_jetduino:
port=int(msg[8:])
jetduino.pinMode(port,"INPUT")
if en_debug:
print msg
elif msg[:9].lower()=="setOutput".lower():
if en_jetduino:
port=int(msg[9:])
jetduino.pinMode(port,"OUTPUT")
if en_debug:
print msg
elif msg[:11].lower()=="digitalRead".lower():
if en_jetduino:
port=int(msg[11:])
d_read=jetduino.digitalRead(port)
s.sensorupdate({'digitalRead':d_read})
if en_debug:
print msg
print "Digital Reading: " + str(d_read)
elif match_sensors(msg,digitalInp) >=0:
if en_jetduino:
s_no=match_sensors(msg,digitalInp)
sens=digitalInp[s_no]
port=int(msg[len(sens):])
sens += str(port)
jetduino.pinMode(port,"INPUT")
d_read=jetduino.digitalRead(port)
s.sensorupdate({sens:d_read})
if en_debug:
print msg,
print sens +' output:'+ str(d_read)
elif msg[:16].lower()=="digitalWriteHigh".lower():
if en_jetduino:
port=int(msg[16:])
jetduino.digitalWrite(port,1)
if en_debug:
print msg
elif msg[:15].lower()=="digitalWriteLow".lower():
if en_jetduino:
port=int(msg[15:])
jetduino.digitalWrite(port,0)
if en_debug:
print msg
elif match_sensors(msg,pwm) >=0:
if en_jetduino:
s_no=match_sensors(msg,pwm)
sens=pwm[s_no]
l=len(sens)
port=int(msg[l:l+1])
power=int(msg[l+1:])
jetduino.pinMode(port,"OUTPUT")
jetduino.analogWrite(port,power)
if en_debug:
print msg
elif match_sensors(msg,digitalOp) >=0:
if en_jetduino:
s_no=match_sensors(msg,digitalOp)
sens=digitalOp[s_no]
l=len(sens)
port=int(msg[l:l+1])
state=msg[l+1:]
jetduino.pinMode(port,"OUTPUT")
if state=='on':
jetduino.digitalWrite(port,1)
else:
jetduino.digitalWrite(port,0)
if en_debug:
print msg
elif msg[:4].lower()=="temp".lower():
if en_jetduino:
port=int(msg[4:])
[temp,humidity] = jetduino.dht(port,0)
s.sensorupdate({'temp':temp})
if en_debug:
print msg
print "temp: ",temp
elif msg[:8].lower()=="humidity".lower():
if en_jetduino:
port=int(msg[8:])
[temp,humidity] = jetduino.dht(port,0)
s.sensorupdate({'humidity':humidity})
if en_debug:
print msg
print "humidity:",humidity
elif msg[:8].lower()=="distance".lower():
if en_jetduino:
port=int(msg[8:])
dist=jetduino.ultrasonicRead(port)
s.sensorupdate({'distance':dist})
if en_debug:
print msg
print "distance=",dist
elif msg[:3].lower()=="lcd".lower():
if en_jetduino:
if en_debug:
print msg[:3], msg[3:6],msg[6:]
import grove_rgb_lcd
if msg[3:6].lower() == "col".lower():
rgb = []
for i in range(0,6,2):
rgb.append(int(msg[6:][i:i+2],16)) # convert from one hex string to three ints
if en_debug:
print "colours are:",rgb[0],rgb[1],rgb[2]
grove_rgb_lcd.setRGB(rgb[0],rgb[1],rgb[2])
elif msg[3:6].lower() == "txt".lower():
txt = msg[6:]
grove_rgb_lcd.setText(txt)
else:
pass
if en_debug:
print msg
elif msg[:10].lower()=="analogRead".lower():
if en_jetduino:
port=int(msg[10:])
a_read=jetduino.analogRead(port)
s.sensorupdate({'analogRead':a_read})
if en_debug:
print msg
print "Analog Reading: " + str(a_read)
elif msg.lower()=="READ_IR".lower():
print "READ_IR!"
if en_ir_sensor==0:
import lirc
sockid = lirc.init("keyes", blocking = False)
en_ir_sensor=1
try:
read_ir= lirc.nextcode() # press 1
if len(read_ir) !=0:
print read_ir[0]
except:
if en_debug:
e = sys.exc_info()[1]
print "Error reading IR sensor: " + str(read_ir)
if en_debug:
print "IR Recv Reading: " + str(read_ir)
if en_jetduino:
if len(read_ir) !=0:
s.sensorupdate({'read_ir':read_ir[0]})
else:
s.sensorupdate({'read_ir':""})
elif msg.lower()=="TAKE_PICTURE".lower():
print "TAKE_PICTURE!"
try:
from subprocess import call
import datetime
cmd_start="raspistill -o /home/pi/Desktop/img_"
cmd_end=".jpg -w 640 -h 480 -t 1"
dt=str(datetime.datetime.now())
dt=dt.replace(' ','_',10)
call ([cmd_start+dt+cmd_end], shell=True)
print "Picture Taken"
except:
if en_debug:
e = sys.exc_info()[1]
print "Error taking picture"
s.sensorupdate({'camera':"Error"})
s.sensorupdate({'camera':"Picture Taken"})
else:
if en_debug:
print "Ignoring: ",msg
except KeyboardInterrupt:
running= False
print "Jetduino Scratch: Disconnected from Scratch"
break
except (scratch.scratch.ScratchConnectionError,NameError) as e:
while True:
#thread1.join(0)
print "Jetduino Scratch: Scratch connection error, Retrying"
time.sleep(5)
try:
s = scratch.Scratch()
s.broadcast('READY')
print "Jetduino Scratch: Connected to Scratch successfully"
break
except scratch.ScratchError:
print "Jetduino Scratch: Scratch is either not opened or remote sensor connections aren't enabled\n..............................\n"
except:
e = sys.exc_info()[0]
print "Jetduino Scratch: Error %s" % e
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
============================================================
Using ``inherits`` and ``requires`` to ease parameter pain
============================================================
Most luigi plumbers will find themselves in an awkward task parameter situation
at some point or another. Consider the following "parameter explosion"
problem:
.. code-block:: python
class TaskA(luigi.ExternalTask):
param_a = luigi.Parameter()
def output(self):
return luigi.LocalTarget('/tmp/log-{t.param_a}'.format(t=self))
class TaskB(luigi.Task):
param_b = luigi.Parameter()
param_a = luigi.Parameter()
def requires(self):
return TaskA(param_a=self.param_a)
class TaskC(luigi.Task):
param_c = luigi.Parameter()
param_b = luigi.Parameter()
param_a = luigi.Parameter()
def requires(self):
return TaskB(param_b=self.param_b, param_a=self.param_a)
In work flows requiring many tasks to be chained together in this manner,
parameter handling can spiral out of control. Each downstream task becomes
more burdensome than the last. Refactoring becomes more difficult. There
are several ways one might try and avoid the problem.
**Approach 1**: Parameters via command line or config instead of ``requires``.
.. code-block:: python
class TaskA(luigi.ExternalTask):
param_a = luigi.Parameter()
def output(self):
return luigi.LocalTarget('/tmp/log-{t.param_a}'.format(t=self))
class TaskB(luigi.Task):
param_b = luigi.Parameter()
def requires(self):
return TaskA()
class TaskC(luigi.Task):
param_c = luigi.Parameter()
def requires(self):
return TaskB()
Then run in the shell like so:
.. code-block:: bash
luigi --module my_tasks TaskC --param-c foo --TaskB-param-b bar --TaskA-param-a baz
Repetitive parameters have been eliminated, but at the cost of making the job's
command line interface slightly clunkier. Often this is a reasonable
trade-off.
But parameters can't always be refactored out of every class. Downstream
tasks might also need to use some of those parameters. For example,
if ``TaskC`` needs to use ``param_a`` too, then ``param_a`` would still need
to be repeated.
**Approach 2**: Use a common parameter class
.. code-block:: python
class Params(luigi.Config):
param_c = luigi.Parameter()
param_b = luigi.Parameter()
param_a = luigi.Parameter()
class TaskA(Params, luigi.ExternalTask):
def output(self):
return luigi.LocalTarget('/tmp/log-{t.param_a}'.format(t=self))
class TaskB(Params):
def requires(self):
return TaskA()
class TaskC(Params):
def requires(self):
return TaskB()
This looks great at first glance, but a couple of issues lurk. Now ``TaskA``
and ``TaskB`` have unnecessary significant parameters. Significant parameters
help define the identity of a task. Identical tasks are prevented from
running at the same time by the central planner. This helps preserve the
idempotent and atomic nature of luigi tasks. Unnecessary significant task
parameters confuse a task's identity. Under the right circumstances, task
identity confusion could lead to that task running when it shouldn't, or
failing to run when it should.
This approach should only be used when all of the parameters of the config
class are significant (or all insignificant) for all of its subclasses.
And wait a second... there's a bug in the above code. See it?
``TaskA`` won't behave as an ``ExternalTask`` because the parent classes are
specified in the wrong order. This contrived example is easy to fix (by
swapping the ordering of the parents of ``TaskA``), but real world cases can be
more difficult to both spot and fix. Inheriting from multiple classes
derived from ``luigi.Task`` should be undertaken with caution and avoided
where possible.
**Approach 3**: Use ``inherits`` and ``requires``
The ``inherits`` class decorator in this module copies parameters (and
nothing else) from one task class to another, and avoids direct pythonic
inheritance.
.. code-block:: python
import luigi
from luigi.util import inherits
class TaskA(luigi.ExternalTask):
param_a = luigi.Parameter()
def output(self):
return luigi.LocalTarget('/tmp/log-{t.param_a}'.format(t=self))
@inherits(TaskA)
class TaskB(luigi.Task):
param_b = luigi.Parameter()
def requires(self):
t = self.clone(TaskA) # or t = self.clone_parent()
# Wait... what's this clone thingy do?
#
# Pass it a task class. It calls that task. And when it does, it
# supplies all parameters (and only those parameters) common to
# the caller and callee!
#
# The call to clone is equivalent to the following (note the
# fact that clone avoids passing param_b).
#
# return TaskA(param_a=self.param_a)
return t
@inherits(TaskB)
class TaskC(luigi.Task):
param_c = luigi.Parameter()
def requires(self):
return self.clone(TaskB)
This totally eliminates the need to repeat parameters, avoids inheritance
issues, and keeps the task command line interface as simple (as it can be,
anyway). Refactoring task parameters is also much easier.
The ``requires`` helper function can reduce this pattern even further. It
does everything ``inherits`` does, and also attaches a ``requires`` method
to your task (still all without pythonic inheritance).
But how does it know how to invoke the upstream task? It uses ``clone``
behind the scenes!
.. code-block:: python
import luigi
from luigi.util import inherits, requires
class TaskA(luigi.ExternalTask):
param_a = luigi.Parameter()
def output(self):
return luigi.LocalTarget('/tmp/log-{t.param_a}'.format(t=self))
@requires(TaskA)
class TaskB(luigi.Task):
param_b = luigi.Parameter()
# The class decorator does this for me!
# def requires(self):
# return self.clone(TaskA)
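Chaining works the same way; as a quick sketch in the same spirit, ``TaskC``
then only needs to declare its own parameter:
.. code-block:: python
@requires(TaskB)
class TaskC(luigi.Task):
param_c = luigi.Parameter()
# param_a, param_b and requires() are all supplied by the decorator.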
Use these helper functions effectively to avoid unnecessary
repetition and dodge a few potentially nasty workflow pitfalls at the same
time. Brilliant!
"""
import datetime
import functools
import logging
from luigi import six
from luigi import task
from luigi import parameter
if six.PY3:
xrange = range
logger = logging.getLogger('luigi-interface')
def common_params(task_instance, task_cls):
"""
Grab all the values in task_instance that are found in task_cls.
"""
if not isinstance(task_cls, task.Register):
raise TypeError("task_cls must be an uninstantiated Task")
task_instance_param_names = dict(task_instance.get_params()).keys()
task_cls_params_dict = dict(task_cls.get_params())
task_cls_param_names = task_cls_params_dict.keys()
common_param_names = set(task_instance_param_names).intersection(set(task_cls_param_names))
common_param_vals = [(key, task_cls_params_dict[key]) for key in common_param_names]
common_kwargs = dict((key, task_instance.param_kwargs[key]) for key in common_param_names)
vals = dict(task_instance.get_param_values(common_param_vals, [], common_kwargs))
return vals
def task_wraps(P):
# In order to make the behavior of a wrapper class nicer, we set the name of the
# new class to the wrapped class, and copy over the docstring and module as well.
# This makes it possible to pickle the wrapped class etc.
# Btw, this is a slight abuse of functools.wraps. It's meant to be used only for
# functions, but it works for classes too, if you pass updated=[]
return functools.wraps(P, updated=[])
class inherits(object):
"""
Task inheritance.
Usage:
.. code-block:: python
class AnotherTask(luigi.Task):
n = luigi.IntParameter()
# ...
@inherits(AnotherTask)
class MyTask(luigi.Task):
def requires(self):
return self.clone_parent()
def run(self):
print self.n # this will be defined
# ...
"""
def __init__(self, task_to_inherit):
super(inherits, self).__init__()
self.task_to_inherit = task_to_inherit
def __call__(self, task_that_inherits):
for param_name, param_obj in self.task_to_inherit.get_params():
if not hasattr(task_that_inherits, param_name):
setattr(task_that_inherits, param_name, param_obj)
# Modify task_that_inherits by subclassing it and adding methods
@task_wraps(task_that_inherits)
class Wrapped(task_that_inherits):
def clone_parent(_self, **args):
return _self.clone(cls=self.task_to_inherit, **args)
return Wrapped
class requires(object):
"""
Same as @inherits, but also auto-defines the requires method.
"""
def __init__(self, task_to_require):
super(requires, self).__init__()
self.inherit_decorator = inherits(task_to_require)
def __call__(self, task_that_requires):
task_that_requires = self.inherit_decorator(task_that_requires)
# Modify task_that_requires by subclassing it and adding methods
@task_wraps(task_that_requires)
class Wrapped(task_that_requires):
def requires(_self):
return _self.clone_parent()
return Wrapped
class copies(object):
"""
Auto-copies a task.
Usage:
.. code-block:: python
@copies(MyTask)
class CopyOfMyTask(luigi.Task):
def output(self):
return LocalTarget(self.date.strftime('/var/xyz/report-%Y-%m-%d'))
"""
def __init__(self, task_to_copy):
super(copies, self).__init__()
self.requires_decorator = requires(task_to_copy)
def __call__(self, task_that_copies):
task_that_copies = self.requires_decorator(task_that_copies)
# Modify task_that_copies by subclassing it and adding methods
@task_wraps(task_that_copies)
class Wrapped(task_that_copies):
def run(_self):
i, o = _self.input(), _self.output()
f = o.open('w') # TODO: assert that i, o are Target objects and not complex datastructures
for line in i.open('r'):
f.write(line)
f.close()
return Wrapped
def delegates(task_that_delegates):
""" Lets a task call methods on subtask(s).
The way this works is that the subtask is run as a part of the task, but
the task itself doesn't have to care about the requirements of the subtasks.
The subtask doesn't exist from the scheduler's point of view, and
its dependencies are instead required by the main task.
Example:
.. code-block:: python
class PowersOfN(luigi.Task):
n = luigi.IntParameter()
def f(self, x): return x ** self.n
@delegates
class T(luigi.Task):
def subtasks(self): return PowersOfN(5)
def run(self): print self.subtasks().f(42)
"""
if not hasattr(task_that_delegates, 'subtasks'):
# This method can (optionally) define a couple of delegate tasks that
# will be accessible as interfaces, meaning that the task can access
# those tasks and run methods defined on them, etc
raise AttributeError('%s needs to implement the method "subtasks"' % task_that_delegates)
@task_wraps(task_that_delegates)
class Wrapped(task_that_delegates):
def deps(self):
# Overrides method in base class
return task.flatten(self.requires()) + task.flatten([t.deps() for t in task.flatten(self.subtasks())])
def run(self):
for t in task.flatten(self.subtasks()):
t.run()
task_that_delegates.run(self)
return Wrapped
def previous(task):
"""
Return a previous Task of the same family.
By default checks if this task family only has one non-global parameter and if
it is a DateParameter, DateHourParameter or DateIntervalParameter in which case
it returns with the time decremented by 1 (hour, day or interval)
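For example, if the only such parameter is a DateParameter with value
2016-01-02, the returned clone has the value 2016-01-01.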
"""
params = task.get_params()
previous_params = {}
previous_date_params = {}
for param_name, param_obj in params:
param_value = getattr(task, param_name)
if isinstance(param_obj, parameter.DateParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(days=1)
elif isinstance(param_obj, parameter.DateSecondParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(seconds=1)
elif isinstance(param_obj, parameter.DateMinuteParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(minutes=1)
elif isinstance(param_obj, parameter.DateHourParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(hours=1)
elif isinstance(param_obj, parameter.DateIntervalParameter):
previous_date_params[param_name] = param_value.prev()
else:
previous_params[param_name] = param_value
previous_params.update(previous_date_params)
if len(previous_date_params) == 0:
raise NotImplementedError("No task parameter - can't determine previous task")
elif len(previous_date_params) > 1:
raise NotImplementedError("Too many date-related task parameters - can't determine previous task")
else:
return task.clone(**previous_params)
def get_previous_completed(task, max_steps=10):
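"""
Walk back through previous() at most max_steps times and return the first
task found whose complete() is true, or None if no such task is found.
"""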
prev = task
for _ in xrange(max_steps):
prev = previous(prev)
logger.debug("Checking if %s is complete", prev)
if prev.complete():
return prev
return None
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import mxnet as mx
import numpy as np
import sys
import os
current_working_directory = os.getcwd()
sys.path.append(current_working_directory + "/..")
sys.path.append(current_working_directory + "/../converter/")
import _mxnet_converter as mxnet_converter
from collections import namedtuple
def _mxnet_remove_batch(input_data):
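"""Strip the leading batch dimension from each input blob before it is fed to the CoreML model."""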
for blob in input_data:
input_data[blob] = np.reshape(input_data[blob], input_data[blob].shape[1:])
return input_data
def _get_mxnet_module(net, input_shape, mode, label_names, input_names=None):
""" Given a symbolic graph, input shape and the initialization mode,
returns an MXNet module.
"""
mx.random.seed(1993)
mod = mx.mod.Module(
symbol=net,
context=mx.cpu(),
label_names=label_names
)
mod.bind(
for_training=False,
data_shapes=[('data', input_shape)],
label_shapes=input_names
)
if mode == 'random':
mod.init_params(
initializer=mx.init.Uniform(scale=.1)
)
elif mode == 'zeros':
mod.init_params(
initializer=mx.init.Zero()
)
elif mode == 'ones':
mod.init_params(
initializer=mx.init.One()
)
else:
raise KeyError("%s is not a valid initialization mode" % mode)
return mod
class SingleLayerTest(unittest.TestCase):
"""
Unit test class for testing whether the converter is able to convert individual layers or not.
In order to do so, it converts the model, generates predictions with both CoreML and MXNet, and checks that they are the same.
"""
def _test_mxnet_model(self, net, input_shape, mode, class_labels=None, coreml_mode=None, label_names=None, delta=1e-3,
pre_processing_args=None):
""" Helper method that convert the CoreML model into CoreML and compares the predictions over random data.
Parameters
----------
net: MXNet Symbol Graph
The graph that we'll be converting into CoreML.
input_shape: tuple of ints
The shape of input data. Generally of the format (batch-size, channels, height, width)
mode: (random|zeros|ones)
The mode to use in order to set the parameters (weights and biases).
label_names: list of strings
The names of the output labels. Default: None
delta: float
The maximum difference b/w predictions of MXNet and CoreML that is tolerable.
"""
mod = _get_mxnet_module(net, input_shape, mode, label_names)
# Generate some dummy data
input_data = {'data': np.random.uniform(-10., 10., input_shape)}
Batch = namedtuple('Batch', ['data'])
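# Module.forward expects a DataBatch-like object; a namedtuple with a
# 'data' field is sufficient for inference here.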
mod.forward(Batch([mx.nd.array(input_data['data'])]))
mxnet_preds = mod.get_outputs()[0].asnumpy().flatten()
# Get predictions from coreml
coreml_model = mxnet_converter.convert(
model=mod,
class_labels=class_labels,
mode=coreml_mode,
input_shape={'data': input_shape},
preprocessor_args=pre_processing_args
)
coreml_preds = coreml_model.predict(_mxnet_remove_batch(input_data)).values()[0].flatten()
# Check prediction accuracy
self.assertEquals(len(mxnet_preds), len(coreml_preds))
for i in range(len(mxnet_preds)):
self.assertAlmostEquals(mxnet_preds[i], coreml_preds[i], delta = delta)
def test_tiny_inner_product_zero_input(self):
np.random.seed(1988)
input_shape = (1, 10)
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
self._test_mxnet_model(net, input_shape=input_shape, mode='zeros')
def test_really_tiny_inner_product_ones_input(self):
np.random.seed(1988)
input_shape = (1, 1)
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=1)
self._test_mxnet_model(net, input_shape=input_shape, mode='ones')
def test_really_tiny_2_inner_product_ones_input(self):
np.random.seed(1988)
input_shape = (1, 1)
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
self._test_mxnet_model(net, input_shape=input_shape, mode='ones')
def test_tiny_inner_product_ones_input(self):
np.random.seed(1988)
input_shape = (1, 10)
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
self._test_mxnet_model(net, input_shape=input_shape, mode='ones')
def test_tiny_inner_product_random_input(self):
np.random.seed(1988)
input_shape = (1, 10)
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_softmax_random_input(self):
np.random.seed(1988)
input_shape = (1, 10)
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
net = mx.sym.SoftmaxOutput(net, name='softmax')
self._test_mxnet_model(net, input_shape=input_shape, mode='random', label_names=['softmax_label'])
def test_tiny_relu_activation_random_input(self):
np.random.seed(1988)
input_shape = (1, 10)
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
net = mx.sym.Activation(net, name='relu1', act_type="relu")
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_sigmoid_activation_random_input(self):
np.random.seed(1988)
input_shape = (1, 10)
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
net = mx.sym.Activation(net, name='sigmoid1', act_type="sigmoid")
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_tanh_activation_random_input(self):
np.random.seed(1988)
input_shape = (1, 10)
# Define a model
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
net = mx.sym.Activation(net, name='tanh1', act_type="tanh")
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_really_tiny_conv_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (1, 1)
stride = (1, 1)
pad = (0, 0)
# Define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_conv_ones_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
# Define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='ones')
def test_tiny_conv_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_asym_conv_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5, 3)
stride = (1, 1)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_asym_conv_random_asym_input(self):
np.random.seed(1988)
input_shape = (1, 1, 28, 18)
num_filter = 16
kernel = (5, 3)
stride = (1, 1)
pad = (0, 0)
dilate = (1, 1)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1',
dilate=dilate)
net = mx.sym.Activation(net, name='tanh', act_type="tanh")
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_conv_valid_pooling_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (2, 2)
stride = (2, 2)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
net = mx.symbol.Pooling(
data=net,
kernel=kernel,
stride=stride,
pad=pad,
name='pool_1',
pool_type='avg',
pooling_convention='valid'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_conv_pooling_full_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (2, 2)
stride = (2, 2)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
net = mx.symbol.Pooling(
data=net,
kernel=kernel,
stride=stride,
pad=pad,
name='pool_1',
pool_type='avg',
pooling_convention='full'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_conv_pooling_full_random_input_with_padding(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 2
kernel = (2, 2)
stride = (2, 2)
pad = (1, 1)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
net = mx.symbol.Pooling(
data=net,
kernel=kernel,
stride=stride,
pad=pad,
name='pool_1',
pool_type='avg',
pooling_convention='full'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_really_tiny_conv_random_3d_input(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 1
kernel = (1, 1)
stride = (1, 1)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_really_tiny_conv_random_input_multi_filter(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 64
kernel = (1, 1)
stride = (1, 1)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_conv_random_3d_input(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 1
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_conv_random_input_multi_filter(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 64
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_conv_random(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 64
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_flatten(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 64
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
net = mx.sym.Flatten(data=net, name='flatten1')
net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
net = mx.sym.SoftmaxOutput(net, name='softmax')
self._test_mxnet_model(net, input_shape=input_shape, mode='random', label_names=['softmax_label'])
def test_transpose(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 64
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
net = mx.sym.Variable('data')
net = mx.sym.transpose(data=net, name='transpose', axes=(0, 1, 2, 3))
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_reshape(self):
np.random.seed(1988)
input_shape = (1, 8)
net = mx.sym.Variable('data')
net = mx.sym.reshape(data=net, shape=(1, 2, 2, 2))
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_synset_random_input(self):
np.random.seed(1989)
input_shape = (1, 10)
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
net = mx.sym.SoftmaxOutput(net, name='softmax')
mod = _get_mxnet_module(net,
input_shape=input_shape,
mode='random',
label_names=['softmax_label'])
# Generate some dummy data
input_data = np.random.uniform(-0.1, 0.1, input_shape)
Batch = namedtuple('Batch', ['data'])
mod.forward(Batch([mx.nd.array(input_data)]))
kwargs = {'input_shape': {'data': input_shape}}
# Get predictions from coreml
coreml_model = mxnet_converter.convert(
model=mod,
class_labels=['Category1','Category2','Category3','Category4','Category5'],
mode='classifier',
**kwargs
)
prediction = coreml_model.predict(_mxnet_remove_batch({'data': input_data}))
self.assertEqual(prediction['classLabel'], 'Category3')
def test_really_tiny_deconv_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (1, 1)
stride = (1, 1)
pad = (0, 0)
# Define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_deconv_ones_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
# Define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='ones')
def test_tiny_deconv_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
# Define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_asym_deconv_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5, 3)
stride = (1, 1)
pad = (0, 0)
# Define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_asym_deconv_random_asym_input(self):
np.random.seed(1988)
input_shape = (1, 1, 28, 18)
num_filter = 16
kernel = (5, 3)
stride = (1, 1)
pad = (0, 0)
dilate = (1, 1)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
dilate=dilate,
name='deconv_1'
)
net = mx.sym.Activation(net, name = 'tanh', act_type = "tanh")
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_deconv_pooling_random_input(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 1
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
net = mx.symbol.Pooling(
data=net,
kernel=kernel,
stride=stride,
pad=pad,
name='pool_1',
pool_type='max'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_really_tiny_deconv_random_3d_input(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 1
kernel = (1, 1)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_really_tiny_deconv_random_input_multi_filter(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 64
kernel = (1, 1)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_deconv_random_3d_input(self):
np.random.seed(1988)
input_shape = (1, 3, 10, 10)
num_filter = 1
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_tiny_deconv_random_input_multi_filter(self):
np.random.seed(1988)
input_shape = (1, 1, 10, 10)
num_filter = 64
kernel = (5, 5)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
name='deconv_1'
)
# Test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_deconv_random(self):
np.random.seed(1988)
input_shape = (1, 10, 4, 4)
num_filter = 3
kernel = (2, 2)
stride = (1, 1)
pad = (0, 0)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
no_bias=False,
name='deconv_1'
)
# test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_deconv_random_output_shape(self):
np.random.seed(1988)
input_shape = (1, 10, 4, 4)
num_filter = 3
kernel = (2, 2)
stride = (1, 1)
pad = (0, 0)
target_shape = (5, 5)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
no_bias=False,
target_shape=target_shape,
name='deconv_1'
)
# test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_deconv_random_padding(self):
np.random.seed(1988)
input_shape = (1, 10, 9, 9)
num_filter = 3
kernel = (3, 3)
stride = (3, 3)
pad = (2, 2)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
no_bias=False,
name='deconv_1')
# test the mxnet model
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_conv_random_padding_odd(self):
np.random.seed(1988)
input_shape = (1, 10, 6, 6)
num_filter = 3
kernel = (5, 5)
stride = (1, 1)
pad = (3, 3)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
no_bias=False,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_conv_random_padding_even(self):
np.random.seed(1988)
input_shape = (1, 10, 6, 6)
num_filter = 3
kernel = (5, 5)
stride = (1, 1)
pad = (2, 2)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Convolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
no_bias=False,
name='conv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_deconv_random_all_inputs(self):
np.random.seed(1988)
input_shape = (1, 10, 5, 5)
num_filter = 3
kernel = (3, 3)
stride = (2, 2)
pad = (1, 1)
dilate = (1, 1)
target_shape = (11, 11)
# define a model
net = mx.sym.Variable('data')
net = mx.symbol.Deconvolution(
data=net,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
no_bias=False,
target_shape=target_shape,
dilate=dilate,
name='deconv_1'
)
self._test_mxnet_model(net, input_shape=input_shape, mode='random')
def test_batch_norm(self):
np.random.seed(1988)
input_shape = (1, 1, 2, 3)
net = mx.sym.Variable('data')
gamma = mx.sym.Variable('gamma')
beta = mx.sym.Variable('beta')
moving_mean = mx.sym.Variable('moving_mean')
moving_var = mx.sym.Variable('moving_var')
net = mx.symbol.BatchNorm(
data=net,
gamma=gamma,
beta=beta,
moving_mean=moving_mean,
moving_var=moving_var,
use_global_stats=True,
name='batch_norm_1')
self._test_mxnet_model(net, input_shape=input_shape, mode='random', delta=1e-2)
def test_batch_norm_no_global_stats(self):
""" This test should throw an exception since converter doesn't support
conversion of MXNet models that use local batch stats (i.e.
use_global_stats=False). The reason for this is CoreML doesn't support
local batch stats.
"""
np.random.seed(1988)
input_shape = (1, 1, 2, 3)
net = mx.sym.Variable('data')
gamma = mx.sym.Variable('gamma')
beta = mx.sym.Variable('beta')
moving_mean = mx.sym.Variable('moving_mean')
moving_var = mx.sym.Variable('moving_var')
net = mx.symbol.BatchNorm(
data=net,
gamma=gamma,
beta=beta,
moving_mean=moving_mean,
moving_var=moving_var,
use_global_stats=False,
name='batch_norm_1')
self._test_mxnet_model(net, input_shape=input_shape, mode='random', delta=1e-2)
def test_pre_processing_args(self):
np.random.seed(1988)
input_shape = (1, 10)
net = mx.sym.Variable('data')
net = mx.sym.FullyConnected(data=net, name='fc1', num_hidden=5)
net = mx.sym.SoftmaxOutput(net, name='softmax')
self._test_mxnet_model(net, input_shape=input_shape, mode='random', label_names=['softmax_label'],
pre_processing_args={'red_bias':0, 'blue_bias':0, 'green_bias':0, 'image_scale':1})
# TODO test_concat
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(SingleLayerTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
from itertools import chain
from subprocess import Popen as Popen, STDOUT, PIPE
from tempfile import TemporaryFile
from testfixtures.compat import basestring, PY3, zip_longest
from testfixtures.utils import extend_docstring
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
class PopenBehaviour(object):
"""
An object representing the behaviour of a :class:`MockPopen` when
simulating a particular command.
"""
def __init__(self, stdout=b'', stderr=b'', returncode=0, pid=1234,
poll_count=3):
self.stdout = stdout
self.stderr = stderr
self.returncode = returncode
self.pid = pid
self.poll_count = poll_count
class MockPopen(object):
"""
A specialised mock for testing use of :class:`subprocess.Popen`.
An instance of this class can be used in place of the
:class:`subprocess.Popen` and is often inserted where it's needed using
:func:`mock.patch` or a :class:`Replacer`.
"""
default_behaviour = None
def __init__(self):
self.commands = {}
self.mock = mock = Mock()
self.mock.Popen.side_effect = self.Popen
mock.Popen_instance = Mock(spec=Popen)
inst = mock.Popen.return_value = mock.Popen_instance
inst.communicate.side_effect = self.communicate
inst.wait.side_effect = self.wait
inst.send_signal.side_effect = self.send_signal
inst.terminate.side_effect = self.terminate
inst.kill.side_effect = self.kill
inst.poll.side_effect = self.poll
if PY3:
def __enter__(self):
return inst
inst.__enter__ = __enter__
def __exit__(self, exc_type, exc_val, exc_tb):
inst.wait()
for stream in inst.stdout, inst.stderr:
stream.close()
inst.__exit__ = __exit__
def _resolve_behaviour(self, stdout, stderr, returncode,
pid, poll_count, behaviour):
if behaviour is None:
return PopenBehaviour(
stdout, stderr, returncode, pid, poll_count
)
else:
return behaviour
def set_command(self, command, stdout=b'', stderr=b'', returncode=0,
pid=1234, poll_count=3, behaviour=None):
"""
Set the behaviour of this mock when it is used to simulate the
specified command.
:param command: A string representing the command to be simulated.
        :param behaviour: If supplied, this must be either a
            :class:`PopenBehaviour` instance or a callable that takes the
            ``command`` string representing the command to be simulated and
            the ``stdin`` for that command and returns a
            :class:`PopenBehaviour` instance.
"""
self.commands[command] = self._resolve_behaviour(
stdout, stderr, returncode, pid, poll_count, behaviour
)
def set_default(self, stdout=b'', stderr=b'', returncode=0,
pid=1234, poll_count=3, behaviour=None):
"""
Set the behaviour of this mock when it is used to simulate commands
        that have no explicit behaviour specified using
        :meth:`~MockPopen.set_command`.
If supplied, ``behaviour`` must be either a :class:`PopenBehaviour`
instance or a callable that takes the ``command`` string representing
the command to be simulated and the ``stdin`` for that command and
returns a :class:`PopenBehaviour` instance.
"""
self.default_behaviour = self._resolve_behaviour(
stdout, stderr, returncode, pid, poll_count, behaviour
)
def __call__(self, *args, **kw):
return self.mock.Popen(*args, **kw)
def Popen(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False, cwd=None,
env=None, universal_newlines=False,
startupinfo=None, creationflags=0, restore_signals=True,
start_new_session=False, pass_fds=(), encoding=None, errors=None):
if isinstance(args, basestring):
cmd = args
else:
cmd = ' '.join(args)
behaviour = self.commands.get(cmd, self.default_behaviour)
if behaviour is None:
raise KeyError('Nothing specified for command %r' % cmd)
if callable(behaviour):
behaviour = behaviour(command=cmd, stdin=stdin)
self.returncode = behaviour.returncode
stdout_value = behaviour.stdout
stderr_value = behaviour.stderr
if stderr == STDOUT:
line_iterator = chain.from_iterable(zip_longest(
stdout_value.splitlines(True),
stderr_value.splitlines(True)
))
stdout_value = b''.join(l for l in line_iterator if l)
stderr_value = None
self.poll_count = behaviour.poll_count
for name, option, mock_value in (
('stdout', stdout, stdout_value),
('stderr', stderr, stderr_value)
):
value = None
if option is PIPE:
value = TemporaryFile()
value.write(mock_value)
value.flush()
value.seek(0)
setattr(self.mock.Popen_instance, name, value)
if stdin == PIPE:
self.mock.Popen_instance.stdin = Mock()
self.mock.Popen_instance.pid = behaviour.pid
self.mock.Popen_instance.returncode = None
return self.mock.Popen_instance
def wait(self):
"Simulate calls to :meth:`subprocess.Popen.wait`"
self.mock.Popen_instance.returncode = self.returncode
return self.returncode
def communicate(self, input=None):
"Simulate calls to :meth:`subprocess.Popen.communicate`"
self.wait()
i = self.mock.Popen_instance
return (i.stdout and i.stdout.read(),
i.stderr and i.stderr.read())
def poll(self):
"Simulate calls to :meth:`subprocess.Popen.poll`"
while self.poll_count and self.mock.Popen_instance.returncode is None:
self.poll_count -= 1
return None
        # Calling wait() here is NOT how poll() behaves in reality: the real
        # poll() returns None until the child terminates, at which point it
        # sets and returns returncode itself. Delegating to wait() is simply
        # how this fixture sets returncode once poll_count polls have been
        # consumed.
return self.wait()
# These are here to check parameter types
def send_signal(self, signal):
"Simulate calls to :meth:`subprocess.Popen.send_signal`"
pass
def terminate(self):
"Simulate calls to :meth:`subprocess.Popen.terminate`"
pass
def kill(self):
"Simulate calls to :meth:`subprocess.Popen.kill`"
pass
set_command_params = """
:param stdout:
A string representing the simulated content written by the process
to the stdout pipe.
:param stderr:
A string representing the simulated content written by the process
to the stderr pipe.
:param returncode:
An integer representing the return code of the simulated process.
:param pid:
An integer representing the process identifier of the simulated
        process. This is useful if you have code that prints out the pids
of running processes.
:param poll_count:
Specifies the number of times :meth:`MockPopen.poll` can be
called before :attr:`MockPopen.returncode` is set and returned
by :meth:`MockPopen.poll`.
"""
# add the param docs, so we only have one copy of them!
extend_docstring(set_command_params,
[MockPopen.set_command, MockPopen.set_default])
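# Hedged usage sketch (not part of the original module): wires the pieces
# above together to show the typical MockPopen flow. The command string and
# its simulated output are illustrative only; in a real test the instance
# would usually be patched in for subprocess.Popen with mock.patch or a
# Replacer rather than called directly.
def _example_mock_popen_usage():
    mock_popen = MockPopen()
    # Declare what the simulated command should produce.
    mock_popen.set_command('svn ls -R foo', stdout=b'o', stderr=b'e',
                           returncode=0)
    # Calling the instance goes through MockPopen.Popen and returns the
    # shared Popen_instance mock configured above.
    process = mock_popen('svn ls -R foo', stdout=PIPE, stderr=PIPE)
    out, err = process.communicate()
    assert (out, err) == (b'o', b'e')
    assert process.returncode == 0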
|
|
"""
Tests the pysat Meta object and code.
"""
import pysat
import pandas as pds
from nose.tools import raises
import pysat.instruments.pysat_testing
import numpy as np
class TestBasics():
def setup(self):
"""Runs before every method to create a clean testing setup."""
self.meta = pysat.Meta()
self.testInst = pysat.Instrument('pysat', 'testing',
clean_level='clean')
def teardown(self):
"""Runs after every method to clean up previous testing."""
del self.testInst
del self.meta
@raises(ValueError)
def test_setting_nonpandas_metadata(self):
self.meta = pysat.Meta(metadata='Not a Panda')
def test_inst_data_assign_meta_default(self):
self.testInst.load(2009, 1)
self.testInst['help'] = self.testInst['mlt']
assert self.testInst.meta['help', 'long_name'] == 'help'
assert self.testInst.meta['help', 'axis'] == 'help'
assert self.testInst.meta['help', 'label'] == 'help'
assert self.testInst.meta['help', 'notes'] == ''
assert np.isnan(self.testInst.meta['help', 'fill'])
assert np.isnan(self.testInst.meta['help', 'value_min'])
assert np.isnan(self.testInst.meta['help', 'value_max'])
assert self.testInst.meta['help', 'units'] == ''
assert self.testInst.meta['help', 'desc'] == ''
assert self.testInst.meta['help', 'scale'] == 'linear'
def test_inst_data_assign_meta(self):
self.testInst.load(2009, 1)
self.testInst['help'] = {'data': self.testInst['mlt'],
'units': 'V',
'long_name': 'The Doors'}
assert self.testInst.meta['help', 'long_name'] == 'The Doors'
assert self.testInst.meta['help', 'axis'] == 'help'
assert self.testInst.meta['help', 'label'] == 'help'
assert self.testInst.meta['help', 'notes'] == ''
assert np.isnan(self.testInst.meta['help', 'fill'])
assert np.isnan(self.testInst.meta['help', 'value_min'])
assert np.isnan(self.testInst.meta['help', 'value_max'])
assert self.testInst.meta['help', 'units'] == 'V'
assert self.testInst.meta['help', 'desc'] == ''
assert self.testInst.meta['help', 'scale'] == 'linear'
def test_inst_data_assign_meta_then_data(self):
self.testInst.load(2009, 1)
self.testInst['help'] = {'data': self.testInst['mlt'],
'units': 'V',
'long_name': 'The Doors'}
self.testInst['help'] = self.testInst['mlt']
assert self.testInst.meta['help', 'long_name'] == 'The Doors'
assert self.testInst.meta['help', 'axis'] == 'help'
assert self.testInst.meta['help', 'label'] == 'help'
assert self.testInst.meta['help', 'notes'] == ''
assert np.isnan(self.testInst.meta['help', 'fill'])
assert np.isnan(self.testInst.meta['help', 'value_min'])
assert np.isnan(self.testInst.meta['help', 'value_max'])
assert self.testInst.meta['help', 'units'] == 'V'
assert self.testInst.meta['help', 'desc'] == ''
assert self.testInst.meta['help', 'scale'] == 'linear'
def test_inst_ho_data_assign_no_meta_default(self):
self.testInst.load(2009, 1)
frame = pds.DataFrame({'dummy_frame1': np.arange(10),
'dummy_frame2': np.arange(10)},
columns=['dummy_frame1', 'dummy_frame2'])
self.testInst['help'] = [frame]*len(self.testInst.data.index)
assert 'dummy_frame1' in self.testInst.meta.ho_data['help']
assert 'dummy_frame2' in self.testInst.meta.ho_data['help']
assert 'dummy_frame1' in self.testInst.meta['help']['children']
assert 'dummy_frame2' in self.testInst.meta['help']['children']
assert self.testInst.meta['help']['children'].has_attr('units')
assert self.testInst.meta['help']['children'].has_attr('desc')
def test_inst_ho_data_assign_meta_default(self):
self.testInst.load(2009, 1)
frame = pds.DataFrame({'dummy_frame1': np.arange(10),
'dummy_frame2': np.arange(10)},
columns=['dummy_frame1', 'dummy_frame2'])
self.testInst['help'] = {'data': [frame]*len(self.testInst.data.index),
'units': 'V',
'long_name': 'The Doors'}
assert self.testInst.meta['help', 'long_name'] == 'The Doors'
assert 'dummy_frame1' in self.testInst.meta.ho_data['help']
assert 'dummy_frame2' in self.testInst.meta.ho_data['help']
assert 'dummy_frame1' in self.testInst.meta['help']['children']
assert 'dummy_frame2' in self.testInst.meta['help']['children']
assert self.testInst.meta['help']['children'].has_attr('units')
assert self.testInst.meta['help']['children'].has_attr('desc')
def test_inst_ho_data_assign_meta(self):
self.testInst.load(2009, 1)
frame = pds.DataFrame({'dummy_frame1': np.arange(10),
'dummy_frame2': np.arange(10)},
columns=['dummy_frame1', 'dummy_frame2'])
meta = pysat.Meta()
meta['dummy_frame1'] = {'units': 'A'}
meta['dummy_frame2'] = {'desc': 'nothing'}
self.testInst['help'] = {'data': [frame]*len(self.testInst.data.index),
'units': 'V',
'long_name': 'The Doors',
'meta': meta}
assert self.testInst.meta['help', 'long_name'] == 'The Doors'
assert 'dummy_frame1' in self.testInst.meta.ho_data['help']
assert 'dummy_frame2' in self.testInst.meta.ho_data['help']
assert 'dummy_frame1' in self.testInst.meta['help']['children']
assert 'dummy_frame2' in self.testInst.meta['help']['children']
assert self.testInst.meta['help']['children'].has_attr('units')
assert self.testInst.meta['help']['children'].has_attr('desc')
assert self.testInst.meta['help']['children']['dummy_frame1',
'units'] == 'A'
assert self.testInst.meta['help']['children']['dummy_frame1',
'desc'] == ''
assert self.testInst.meta['help']['children']['dummy_frame2',
'desc'] == 'nothing'
def test_inst_ho_data_assign_meta_then_data(self):
self.testInst.load(2009, 1)
frame = pds.DataFrame({'dummy_frame1': np.arange(10),
'dummy_frame2': np.arange(10)},
columns=['dummy_frame1', 'dummy_frame2'])
meta = pysat.Meta()
meta['dummy_frame1'] = {'units': 'A'}
meta['dummy_frame2'] = {'desc': 'nothing'}
print('Setting original data')
self.testInst['help'] = {'data': [frame]*len(self.testInst.data.index),
'units': 'V',
'long_name': 'The Doors',
'meta': meta}
self.testInst['help'] = [frame]*len(self.testInst.data.index)
assert self.testInst.meta['help', 'long_name'] == 'The Doors'
assert 'dummy_frame1' in self.testInst.meta.ho_data['help']
assert 'dummy_frame2' in self.testInst.meta.ho_data['help']
assert 'dummy_frame1' in self.testInst.meta['help']['children']
assert 'dummy_frame2' in self.testInst.meta['help']['children']
assert self.testInst.meta['help']['children'].has_attr('units')
assert self.testInst.meta['help']['children'].has_attr('desc')
assert self.testInst.meta['help']['children']['dummy_frame1',
'units'] == 'A'
assert self.testInst.meta['help']['children']['dummy_frame1',
'desc'] == ''
assert self.testInst.meta['help']['children']['dummy_frame2',
'desc'] == 'nothing'
def test_inst_ho_data_assign_meta_different_labels(self):
self.testInst.load(2009, 1)
frame = pds.DataFrame({'dummy_frame1': np.arange(10),
'dummy_frame2': np.arange(10)},
columns=['dummy_frame1', 'dummy_frame2'])
meta = pysat.Meta(units_label='blah', desc_label='whoknew')
meta['dummy_frame1'] = {'blah': 'A'}
meta['dummy_frame2'] = {'whoknew': 'nothing'}
self.testInst['help'] = {'data': [frame]*len(self.testInst.data.index),
'units': 'V',
'long_name': 'The Doors',
'meta': meta}
assert self.testInst.meta['help', 'long_name'] == 'The Doors'
assert 'dummy_frame1' in self.testInst.meta.ho_data['help']
assert 'dummy_frame2' in self.testInst.meta.ho_data['help']
assert 'dummy_frame1' in self.testInst.meta['help']['children']
assert 'dummy_frame2' in self.testInst.meta['help']['children']
assert self.testInst.meta['help']['children'].has_attr('units')
assert self.testInst.meta['help']['children'].has_attr('desc')
assert self.testInst.meta['help']['children']['dummy_frame1',
'units'] == 'A'
assert self.testInst.meta['help']['children']['dummy_frame1',
'desc'] == ''
assert self.testInst.meta['help']['children']['dummy_frame2',
'desc'] == 'nothing'
def test_inst_assign_from_meta(self):
self.testInst.load(2009, 1)
self.testInst['help'] = self.testInst['mlt']
self.testInst['help2'] = self.testInst['mlt']
self.testInst.meta['help2'] = self.testInst.meta['help']
assert self.testInst.meta['help2', 'long_name'] == 'help'
assert self.testInst.meta['help2', 'axis'] == 'help'
assert self.testInst.meta['help2', 'label'] == 'help'
assert self.testInst.meta['help2', 'notes'] == ''
assert np.isnan(self.testInst.meta['help2', 'fill'])
assert np.isnan(self.testInst.meta['help2', 'value_min'])
assert np.isnan(self.testInst.meta['help2', 'value_max'])
assert self.testInst.meta['help2', 'units'] == ''
assert self.testInst.meta['help2', 'desc'] == ''
assert self.testInst.meta['help2', 'scale'] == 'linear'
assert 'children' not in self.testInst.meta.data.columns
assert 'help2' not in self.testInst.meta.keys_nD()
def test_inst_assign_from_meta_w_ho(self):
self.testInst.load(2009, 1)
frame = pds.DataFrame({'dummy_frame1': np.arange(10),
'dummy_frame2': np.arange(10)},
columns=['dummy_frame1', 'dummy_frame2'])
meta = pysat.Meta()
meta['dummy_frame1'] = {'units': 'A'}
meta['dummy_frame2'] = {'desc': 'nothing'}
self.testInst['help'] = {'data': [frame]*len(self.testInst.data.index),
'units': 'V',
'long_name': 'The Doors',
'meta': meta}
self.testInst['help2'] = self.testInst['help']
self.testInst.meta['help2'] = self.testInst.meta['help']
assert self.testInst.meta['help'].children['dummy_frame1',
'units'] == 'A'
assert self.testInst.meta['help2', 'long_name'] == 'The Doors'
assert 'dummy_frame1' in self.testInst.meta.ho_data['help2']
assert 'dummy_frame2' in self.testInst.meta.ho_data['help2']
assert 'dummy_frame1' in self.testInst.meta['help2']['children']
assert 'dummy_frame2' in self.testInst.meta['help2']['children']
assert self.testInst.meta['help2']['children'].has_attr('units')
assert self.testInst.meta['help2']['children'].has_attr('desc')
assert self.testInst.meta['help2']['children']['dummy_frame1',
'desc'] == ''
assert self.testInst.meta['help2']['children']['dummy_frame2',
'desc'] == 'nothing'
assert 'children' not in self.testInst.meta.data.columns
def test_inst_assign_from_meta_w_ho_then_update(self):
self.testInst.load(2009, 1)
frame = pds.DataFrame({'dummy_frame1': np.arange(10),
'dummy_frame2': np.arange(10)},
columns=['dummy_frame1', 'dummy_frame2'])
meta = pysat.Meta()
meta['dummy_frame1'] = {'units': 'A'}
meta['dummy_frame2'] = {'desc': 'nothing'}
self.testInst['help'] = {'data': [frame]*len(self.testInst.data.index),
'units': 'V',
'name': 'The Doors',
'meta': meta}
self.testInst['help2'] = self.testInst['help']
self.testInst.meta['help2'] = self.testInst.meta['help']
new_meta = self.testInst.meta['help2'].children
new_meta['dummy_frame1'] = {'units': 'Amps',
'desc': 'something',
'label': 'John Wick',
'axis': 'Reeves',
}
self.testInst.meta['help2'] = new_meta
self.testInst.meta['help2'] = {'label': 'The Doors Return'}
# print('yoyo: ', self.testInst.meta['help']['children']
# ['dummy_frame1', 'units'])
assert self.testInst.meta['help']['children']['dummy_frame1',
'units'] == 'A'
assert self.testInst.meta['help2', 'name'] == 'The Doors'
assert self.testInst.meta['help2', 'label'] == 'The Doors Return'
assert 'dummy_frame1' in self.testInst.meta.ho_data['help2']
assert 'dummy_frame2' in self.testInst.meta.ho_data['help2']
assert 'dummy_frame1' in self.testInst.meta['help2']['children']
assert 'dummy_frame2' in self.testInst.meta['help2']['children']
assert self.testInst.meta['help2']['children'].has_attr('units')
assert self.testInst.meta['help2']['children'].has_attr('desc')
assert self.testInst.meta['help2']['children']['dummy_frame1',
'desc'] == 'something'
assert self.testInst.meta['help2']['children']['dummy_frame2',
'desc'] == 'nothing'
assert self.testInst.meta['help2']['children']['dummy_frame1',
'units'] == 'Amps'
assert self.testInst.meta['help2']['children']['dummy_frame1',
'label'] == 'John Wick'
assert self.testInst.meta['help2']['children']['dummy_frame1',
'axis'] == 'Reeves'
assert 'children' not in self.testInst.meta.data.columns
def test_repr_call_runs(self):
self.testInst.meta['hi'] = {'units': 'yoyo', 'long_name': 'hello'}
print(self.testInst.meta)
assert True
def test_repr_call_runs_with_higher_order_data(self):
self.meta['param1'] = {'units': 'blank', 'long_name': u'parameter1',
'custom1': 14, 'custom2': np.NaN,
'custom3': 14.5, 'custom4': u'hello'}
self.testInst.meta['param0'] = {'units': 'basic',
'long_name': 'parameter0',
self.testInst.meta.fill_label: '10',
'CUSTOM4': 143}
self.testInst.meta['kiwi'] = self.meta
print(self.testInst.meta)
assert True
def test_basic_pops(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew',
'value_min': 0, 'value_max': 1}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo', 'fill': 1,
'value_min': 0, 'value_max': 1}
# create then assign higher order meta data
meta2 = pysat.Meta(name_label='long_name')
meta2['new31'] = {'units': 'hey3', 'long_name': 'crew_brew', 'fill': 1,
'value_min': 0, 'value_max': 1}
self.meta['new3'] = meta2
aa = self.meta.pop('new3')
assert np.all(aa['children'] == meta2)
# ensure lower metadata created when ho data assigned
assert aa['units'] == ''
assert aa['long_name'] == 'new3'
m1 = self.meta['new2']
m2 = self.meta.pop('new2')
assert m1['children'] is None
assert m2['children'] is None
for key in m1.index:
if key not in ['children']:
assert m1[key] == m2[key]
# make sure both have the same indexes
assert np.all(m1.index == m2.index)
@raises(KeyError)
def test_basic_pops_w_bad_key(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew',
'value_min': 0, 'value_max': 1}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo', 'fill': 1,
'value_min': 0, 'value_max': 1}
_ = self.meta.pop('new4')
@raises(KeyError)
def test_basic_getitem_w_bad_key_string(self):
self.meta['new4']
@raises(NotImplementedError)
def test_basic_getitem_w_integer(self):
self.meta[1]
def test_basic_equality(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo', 'fill': np.NaN}
# ensure things are the same
meta2 = self.meta.copy()
assert (meta2 == self.meta)
# different way to create meta object
meta3 = pysat.Meta()
meta3['new1'] = self.meta['new1']
meta3['new2'] = self.meta['new2']
assert (meta3 == self.meta)
# make sure differences matter
self.meta['new2'] = {'fill': 1}
assert not (meta2 == self.meta)
def test_basic_concat(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
meta2 = pysat.Meta()
meta2['new3'] = {'units': 'hey3', 'long_name': 'crew_brew'}
self.meta = self.meta.concat(meta2)
assert (self.meta['new3'].units == 'hey3')
@raises(RuntimeError)
def test_concat_w_name_collision_strict(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
meta2 = pysat.Meta()
meta2['new2'] = {'units': 'hey2', 'long_name': 'crew_brew'}
meta2['new3'] = {'units': 'hey3', 'long_name': 'crew_brew'}
self.meta = self.meta.concat(meta2, strict=True)
def test_basic_concat_w_ho(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
meta2 = pysat.Meta()
meta2['new3'] = {'units': 'hey3', 'long_name': 'crew_brew'}
meta3 = pysat.Meta()
meta3['new41'] = {'units': 'hey4', 'long_name': 'crew_brew',
'bob_level': 'max'}
meta2['new4'] = meta3
self.meta = self.meta.concat(meta2)
assert (self.meta['new3'].units == 'hey3')
assert (self.meta['new4'].children['new41'].units == 'hey4')
@raises(RuntimeError)
def test_basic_concat_w_ho_collision_strict(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
meta2 = pysat.Meta()
meta2['new31'] = {'units': 'hey3', 'long_name': 'crew_brew'}
self.meta['new3'] = meta2
meta3 = pysat.Meta()
meta3['new31'] = {'units': 'hey4', 'long_name': 'crew_brew',
'bob_level': 'max'}
meta2['new3'] = meta3
self.meta = self.meta.concat(meta2, strict=True)
def test_basic_concat_w_ho_collision_not_strict(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
meta2 = pysat.Meta()
meta2['new3'] = {'units': 'hey3', 'long_name': 'crew_brew'}
meta3 = pysat.Meta()
meta3['new41'] = {'units': 'hey4', 'long_name': 'crew_brew',
'bob_level': 'max'}
meta2['new3'] = meta3
self.meta = self.meta.concat(meta2, strict=False)
assert self.meta['new3'].children['new41'].units == 'hey4'
assert self.meta['new3'].children['new41'].bob_level == 'max'
assert self.meta['new2'].units == 'hey'
def test_basic_concat_w_ho_collisions_not_strict(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
meta2 = pysat.Meta()
meta2['new31'] = {'units': 'hey3', 'long_name': 'crew_brew'}
self.meta['new3'] = meta2
meta3 = pysat.Meta()
meta3['new31'] = {'units': 'hey4', 'long_name': 'crew_brew',
'bob_level': 'max'}
meta2['new3'] = meta3
self.meta = self.meta.concat(meta2, strict=False)
assert self.meta['new3'].children['new31'].units == 'hey4'
assert self.meta['new3'].children['new31'].bob_level == 'max'
assert self.meta['new2'].units == 'hey'
def test_basic_meta_assignment(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
assert (self.meta['new'].units == 'hey')
assert (self.meta['new'].long_name == 'boo')
def test_basic_meta_assignment_w_Series(self):
self.meta['new'] = pds.Series({'units': 'hey', 'long_name': 'boo'})
assert (self.meta['new'].units == 'hey')
assert (self.meta['new'].long_name == 'boo')
def test_multiple_meta_assignment(self):
self.meta[['new', 'new2']] = {'units': ['hey', 'hey2'],
'long_name': ['boo', 'boo2']}
assert self.meta['new'].units == 'hey'
assert self.meta['new'].long_name == 'boo'
assert self.meta['new2'].units == 'hey2'
assert self.meta['new2'].long_name == 'boo2'
def test_multiple_meta_retrieval(self):
self.meta[['new', 'new2']] = {'units': ['hey', 'hey2'],
'long_name': ['boo', 'boo2']}
self.meta[['new', 'new2']]
        self.meta[['new', 'new2'], :]
        self.meta[:, 'units']
        self.meta['new', ('units', 'long_name')]
def test_multiple_meta_ho_data_retrieval(self):
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta[['higher', 'lower']] = {'meta': [meta, None],
'units': [None, 'boo'],
'long_name': [None, 'boohoo']}
assert self.meta['lower'].units == 'boo'
assert self.meta['lower'].long_name == 'boohoo'
assert self.meta['higher'].children == meta
        self.meta['higher', ('axis', 'scale')]
@raises(ValueError)
def test_multiple_meta_assignment_error(self):
self.meta[['new', 'new2']] = {'units': ['hey', 'hey2'],
'long_name': ['boo']}
def test_replace_meta_units(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new'] = {'units': 'yep'}
assert (self.meta['new'].units == 'yep')
assert (self.meta['new'].long_name == 'boo')
def test_replace_meta_long_name(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new'] = {'long_name': 'yep'}
assert (self.meta['new'].units == 'hey')
assert (self.meta['new'].long_name == 'yep')
def test_add_additional_metadata_types(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
assert (self.meta['new'].units == 'hey')
assert (self.meta['new'].long_name == 'boo')
assert (self.meta['new'].description == 'boohoo')
def test_add_meta_then_add_additional_metadata_types(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'crew'}
self.meta['new'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
assert self.meta['new'].units == 'hey'
assert self.meta['new'].long_name == 'boo'
assert self.meta['new'].description == 'boohoo'
def test_add_meta_with_custom_then_add_additional_metadata_types(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'crew',
'description': 'boohoo'}
self.meta['new'] = {'units': 'hey2', 'long_name': 'boo'}
self.meta['new2'] = {'units': 'heyy', 'long_name': 'hoo'}
self.meta['new3'] = {'units': 'hey3', 'long_name': 'crew3',
'description': 'boohoo3'}
assert self.meta['new'].units == 'hey2'
assert self.meta['new'].long_name == 'boo'
assert self.meta['new'].description == 'boohoo'
assert self.meta['new3'].description == 'boohoo3'
assert self.meta['new2'].long_name == 'hoo'
def test_add_meta_then_add_different_additional_metadata_types(self):
self.meta['new1'] = {'units': 'hey1', 'long_name': 'crew'}
self.meta['new2'] = {'units': 'hey', 'long_name': 'boo',
'description': 'boohoo'}
assert self.meta['new2'].units == 'hey'
assert self.meta['new2'].long_name == 'boo'
assert self.meta['new2'].description == 'boohoo'
assert self.meta['new1'].units == 'hey1'
assert self.meta['new1'].long_name == 'crew'
assert np.isnan(self.meta['new1'].description)
def test_add_meta_then_partially_add_additional_metadata_types(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'crew'}
self.meta['new'] = {'long_name': 'boo', 'description': 'boohoo'}
assert self.meta['new'].units == 'hey'
assert self.meta['new'].long_name == 'boo'
assert self.meta['new'].description == 'boohoo'
def test_meta_equality(self):
assert self.testInst.meta == self.testInst.meta
def test_false_meta_equality(self):
assert not (self.testInst.meta == self.testInst)
def test_equality_with_higher_order_meta(self):
self.meta = pysat.Meta()
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta['higher'] = meta
meta2 = pysat.Meta()
meta2['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta2['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
meta3 = pysat.Meta()
meta3['higher'] = meta2
assert meta3 == self.meta
assert self.meta == meta3
def test_inequality_with_higher_order_meta(self):
self.meta = pysat.Meta()
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo', 'radn': 'raiden'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta['higher'] = meta
meta2 = pysat.Meta()
meta2['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta2['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
meta3 = pysat.Meta()
meta3['higher'] = meta2
assert not (meta3 == self.meta)
assert not (self.meta == meta3)
def test_inequality_with_higher_order_meta2(self):
self.meta = pysat.Meta()
meta = pysat.Meta()
meta['dm'] = {'units': 'hey2', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta['higher'] = meta
meta2 = pysat.Meta()
meta2['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta2['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
meta3 = pysat.Meta()
meta3['higher'] = meta2
assert not (meta3 == self.meta)
assert not (self.meta == meta3)
def test_inequality_with_higher_order_meta3(self):
self.meta = pysat.Meta()
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta['higher'] = meta
self.meta['lower'] = {'units': 'yoyooy'}
meta2 = pysat.Meta()
meta2['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta2['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
meta3 = pysat.Meta()
meta3['higher'] = meta2
assert not (meta3 == self.meta)
assert not (self.meta == meta3)
def test_assign_higher_order_meta(self):
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta['higher'] = meta
def test_assign_higher_order_meta_from_dict(self):
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta['higher'] = {'meta': meta}
def test_assign_higher_order_meta_from_dict_correct(self):
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta['higher'] = {'meta': meta}
assert self.meta['higher'].children == meta
def test_assign_higher_order_meta_from_dict_w_multiple(self):
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta[['higher', 'lower']] = {'meta': [meta, None],
'units': [None, 'boo'],
'long_name': [None, 'boohoo']}
assert self.meta['lower'].units == 'boo'
assert self.meta['lower'].long_name == 'boohoo'
assert self.meta['higher'].children == meta
def test_assign_higher_order_meta_from_dict_w_multiple_2(self):
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta[['higher', 'lower', 'lower2']] = \
{'meta': [meta, None, meta],
'units': [None, 'boo', None],
'long_name': [None, 'boohoo', None]}
assert self.meta['lower'].units == 'boo'
assert self.meta['lower'].long_name == 'boohoo'
assert self.meta['higher'].children == meta
def test_create_new_metadata_from_old(self):
meta = pysat.Meta()
meta['dm'] = {'units': 'hey', 'long_name': 'boo'}
meta['rpa'] = {'units': 'crazy', 'long_name': 'boo_whoo'}
self.meta[['higher', 'lower', 'lower2']] = \
{'meta': [meta, None, meta],
'units': [None, 'boo', None],
'long_name': [None, 'boohoo', None],
'fill': [1, 1, 1],
'value_min': [0, 0, 0],
'value_max': [1, 1, 1]}
meta2 = pysat.Meta(metadata=self.meta.data)
m1 = meta2['lower']
m2 = self.meta['lower']
assert m1['children'] is None
assert m2['children'] is None
for key in m1.index:
if key not in ['children']:
assert m1[key] == m2[key]
# make sure both have the same indexes
assert np.all(m1.index == m2.index)
# command below doesn't work because 'children' is None
# assert np.all(meta2['lower'] == self.meta['lower'])
def test_replace_meta_units_list(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = {'units': 'hey2', 'long_name': 'boo2'}
self.meta[['new2', 'new']] = {'units': ['yeppers', 'yep']}
assert self.meta['new'].units == 'yep'
assert self.meta['new'].long_name == 'boo'
assert self.meta['new2'].units == 'yeppers'
assert self.meta['new2'].long_name == 'boo2'
def test_meta_repr_functions(self):
self.testInst.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.testInst.meta['new2'] = {'units': 'hey2', 'long_name': 'boo2'}
print(self.testInst.meta)
# if it doesn't produce an error, we presume it works
# how do you test a print??
assert True
def test_meta_csv_load(self):
import os
name = os.path.join(pysat.__path__[0], 'tests', 'cindi_ivm_meta.txt')
mdata = pysat.Meta.from_csv(name=name, na_values=[], # index_col=2,
keep_default_na=False,
col_names=['name', 'long_name', 'idx',
'units', 'description'])
check = []
# print(mdata['yrdoy'])
check.append(mdata['yrdoy'].long_name == 'Date')
check.append(mdata['unit_mer_z'].long_name ==
'Unit Vector - Meridional Dir - S/C z')
check.append(mdata['iv_mer'].description ==
'Constructed using IGRF mag field.')
assert np.all(check)
# assign multiple values to default
def test_multiple_input_names_null_value(self):
self.meta[['test1', 'test2']] = {}
check1 = self.meta['test1', 'units'] == ''
check2 = self.meta['test2', 'long_name'] == 'test2'
assert check1 & check2
def test_multiple_input_names_null_value_preexisting_values(self):
self.meta[['test1', 'test2']] = {'units': ['degrees', 'hams'],
'long_name': ['testing', 'further']}
self.meta[['test1', 'test2']] = {}
check1 = self.meta['test1', 'units'] == 'degrees'
check2 = self.meta['test2', 'long_name'] == 'further'
assert check1 & check2
# test behaviors related to case changes, 'units' vs 'Units'
def test_assign_Units(self):
self.meta = pysat.Meta(units_label='Units', name_label='Long_Name')
self.meta['new'] = {'Units': 'hey', 'Long_Name': 'boo'}
self.meta['new2'] = {'Units': 'hey2', 'Long_Name': 'boo2'}
assert ((self.meta['new'].Units == 'hey') &
(self.meta['new'].Long_Name == 'boo') &
(self.meta['new2'].Units == 'hey2') &
(self.meta['new2'].Long_Name == 'boo2'))
@raises(AttributeError)
def test_assign_Units_no_units(self):
self.meta = pysat.Meta(units_label='Units', name_label='Long_Name')
self.meta['new'] = {'Units': 'hey', 'Long_Name': 'boo'}
self.meta['new2'] = {'Units': 'hey2', 'Long_Name': 'boo2'}
self.meta['new'].units
def test_get_Units_wrong_case(self):
self.meta = pysat.Meta(units_label='Units', name_label='Long_Name')
self.meta['new'] = {'Units': 'hey', 'Long_Name': 'boo'}
self.meta['new2'] = {'Units': 'hey2', 'Long_Name': 'boo2'}
assert ((self.meta['new', 'units'] == 'hey') &
(self.meta['new', 'long_name'] == 'boo') &
(self.meta['new2', 'units'] == 'hey2') &
(self.meta['new2', 'long_name'] == 'boo2'))
def test_set_Units_wrong_case(self):
self.meta = pysat.Meta(units_label='Units', name_label='Long_Name')
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = {'units': 'hey2', 'long_name': 'boo2'}
assert self.meta['new'].Units == 'hey'
assert self.meta['new'].Long_Name == 'boo'
assert self.meta['new2'].Units == 'hey2'
assert self.meta['new2'].Long_Name == 'boo2'
def test_repeated_set_Units_wrong_case(self):
self.meta = pysat.Meta(units_label='Units', name_label='Long_Name')
for i in np.arange(10):
self.meta['new'] = {'units': 'hey%d' % i, 'long_name': 'boo%d' % i}
self.meta['new_%d' % i] = {'units': 'hey%d' % i,
'long_name': 'boo%d' % i}
for i in np.arange(10):
self.meta['new_5'] = {'units': 'hey%d' % i,
'long_name': 'boo%d' % i}
self.meta['new_%d' % i] = {'units': 'heyhey%d' % i,
'long_name': 'booboo%d' % i}
assert self.meta['new'].Units == 'hey9'
assert self.meta['new'].Long_Name == 'boo9'
assert self.meta['new_9'].Units == 'heyhey9'
assert self.meta['new_9'].Long_Name == 'booboo9'
assert self.meta['new_5'].Units == 'hey9'
assert self.meta['new_5'].Long_Name == 'boo9'
def test_change_Units_and_Name_case(self):
self.meta = pysat.Meta(units_label='units', name_label='long_name')
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = {'units': 'hey2', 'long_name': 'boo2'}
self.meta.units_label = 'Units'
self.meta.name_label = 'Long_Name'
assert ((self.meta['new'].Units == 'hey') &
(self.meta['new'].Long_Name == 'boo') &
(self.meta['new2'].Units == 'hey2') &
(self.meta['new2'].Long_Name == 'boo2'))
def test_change_Units_and_Name_case_w_ho(self):
self.meta = pysat.Meta(units_label='units', name_label='long_Name')
meta2 = pysat.Meta(units_label='units', name_label='long_Name')
meta2['new21'] = {'units': 'hey2', 'long_name': 'boo2'}
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = meta2
self.meta.units_label = 'Units'
self.meta.name_label = 'Long_Name'
assert ((self.meta['new'].Units == 'hey') &
(self.meta['new'].Long_Name == 'boo') &
(self.meta['new2'].children['new21'].Units == 'hey2') &
(self.meta['new2'].children['new21'].Long_Name == 'boo2'))
@raises(AttributeError)
def test_change_Units_and_Name_case_w_ho_wrong_case(self):
self.meta = pysat.Meta(units_label='units', name_label='long_Name')
meta2 = pysat.Meta(units_label='units', name_label='long_Name')
meta2['new21'] = {'units': 'hey2', 'long_name': 'boo2'}
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = meta2
self.meta.units_label = 'Units'
self.meta.name_label = 'Long_Name'
assert ((self.meta['new'].units == 'hey') &
(self.meta['new'].long_name == 'boo') &
(self.meta['new2'].children['new21'].units == 'hey2') &
(self.meta['new2'].children['new21'].long_name == 'boo2'))
def test_contains_case_insensitive(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = {'units': 'hey2', 'long_name': 'boo2'}
assert ('new2' in self.meta)
assert ('NEW2' in self.meta)
def test_contains_case_insensitive_w_ho(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
meta2 = pysat.Meta()
meta2['new21'] = {'units': 'hey2', 'long_name': 'boo2'}
self.meta['new2'] = meta2
assert ('new2' in self.meta)
assert ('NEW2' in self.meta)
assert not ('new21' in self.meta)
assert not ('NEW21' in self.meta)
def test_get_variable_name_case_preservation(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['NEW2'] = {'units': 'hey2', 'long_name': 'boo2'}
assert ('NEW2' == self.meta.var_case_name('new2'))
assert ('NEW2' == self.meta.var_case_name('nEw2'))
assert ('NEW2' == self.meta.var_case_name('neW2'))
assert ('NEW2' == self.meta.var_case_name('NEW2'))
def test_get_attribute_name_case_preservation(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['NEW2'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta['new'] = {'yoyoyo': 'YOLO'}
assert (self.meta['new', 'yoyoyo'] == 'YOLO')
assert (self.meta['new', 'YoYoYO'] == 'YOLO')
assert (self.meta['new2', 'yoyoyo'] == 'yolo')
assert (self.meta['new2', 'YoYoYO'] == 'yolo')
def test_get_attribute_name_case_preservation_w_higher_order(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
meta2 = pysat.Meta()
meta2['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta['NEW2'] = meta2
self.meta['new'] = {'yoyoyo': 'YOLO'}
assert (self.meta.attr_case_name('YoYoYo') == 'YoYoYO')
assert (self.meta['new', 'yoyoyo'] == 'YOLO')
assert (self.meta['new', 'YoYoYO'] == 'YOLO')
assert (self.meta['new2'].children['new21', 'yoyoyo'] == 'yolo')
assert (self.meta['new2'].children['new21', 'YoYoYO'] == 'yolo')
assert (self.meta['new2'].children.attr_case_name('YoYoYo') ==
'YoYoYO')
def test_get_attribute_name_case_preservation_w_higher_order_2(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
meta2 = pysat.Meta()
meta2['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta['NEW2'] = meta2
self.meta['NEW'] = {'yoyoyo': 'YOLO'}
assert (self.meta.attr_case_name('YoYoYo') == 'YoYoYO')
assert (self.meta['new', 'yoyoyo'] == 'YOLO')
assert (self.meta['NEW', 'YoYoYO'] == 'YOLO')
assert (self.meta['new2'].children['new21', 'yoyoyo'] == 'yolo')
assert (self.meta['new2'].children['new21', 'YoYoYO'] == 'yolo')
assert (self.meta['new2'].children.attr_case_name('YoYoYo') ==
'YoYoYO')
def test_get_attribute_name_case_preservation_w_higher_order_reverse_order(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
meta2 = pysat.Meta()
meta2['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta['new'] = {'yoyoyo': 'YOLO'}
self.meta['NEW2'] = meta2
assert (self.meta.attr_case_name('YoYoYo') == 'yoyoyo')
assert (self.meta['new', 'yoyoyo'] == 'YOLO')
assert (self.meta['new', 'YoYoYO'] == 'YOLO')
assert (self.meta['new2'].children['new21', 'yoyoyo'] == 'yolo')
assert (self.meta['new2'].children['new21', 'YoYoYO'] == 'yolo')
assert (self.meta['new2'].children.attr_case_name('YoYoYo') ==
'yoyoyo')
def test_has_attr_name_case_preservation_w_higher_order_reverse_order(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
meta2 = pysat.Meta()
meta2['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta['new'] = {'yoyoyo': 'YOLO'}
self.meta['NEW2'] = meta2
assert (self.meta.has_attr('YoYoYo'))
assert (self.meta.has_attr('yoyoyo'))
assert not (self.meta.has_attr('YoYoYyo'))
def test_has_attr_name_case_preservation_w_higher_order(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
meta2 = pysat.Meta()
meta2['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta['NEW2'] = meta2
assert not (self.meta.has_attr('YoYoYo'))
assert not (self.meta.has_attr('yoyoyo'))
assert not (self.meta.has_attr('YoYoYyo'))
    # check that case is preserved, while access remains case-insensitive
def test_replace_meta_units_list_weird_case(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['new2'] = {'units': 'hey2', 'long_name': 'boo2'}
self.meta[['NEW2', 'new']] = {'units': ['yeppers', 'yep']}
assert (self.meta['new'].units == 'yep')
assert (self.meta['new'].long_name == 'boo')
assert (self.meta['new2'].units == 'yeppers')
assert (self.meta['new2'].long_name == 'boo2')
# Test the attribute transfer function
def test_transfer_attributes_to_instrument(self):
self.meta.new_attribute = 'hello'
self.meta._yo_yo = 'yo yo'
self.meta.date = None
self.meta.transfer_attributes_to_instrument(self.testInst)
assert self.testInst.new_attribute == 'hello'
    # ensure attributes with leading underscores are not transferred
@raises(AttributeError)
def test_transfer_attributes_to_instrument_leading_(self):
self.meta.new_attribute = 'hello'
self.meta._yo_yo = 'yo yo'
self.meta.date = None
self.meta.transfer_attributes_to_instrument(self.testInst)
self.testInst._yo_yo == 'yo yo'
    # ensure attributes with leading underscores are not transferred
@raises(AttributeError)
def test_transfer_attributes_to_instrument_leading__(self):
self.meta.new_attribute = 'hello'
self.meta.__yo_yo = 'yo yo'
self.meta.date = None
self.meta.transfer_attributes_to_instrument(self.testInst)
self.testInst.__yo_yo == 'yo yo'
@raises(RuntimeError)
def test_transfer_attributes_to_instrument_strict_names(self):
self.meta.new_attribute = 'hello'
self.meta._yo_yo = 'yo yo'
self.meta.jojo_beans = 'yep!'
self.meta.name = 'Failure!'
self.meta.date = 'yo yo2'
self.testInst.load(2009, 1)
self.testInst.jojo_beans = 'nope!'
self.meta.transfer_attributes_to_instrument(self.testInst,
strict_names=True)
def test_merge_meta(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
meta2 = pysat.Meta()
meta2['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta.merge(meta2)
assert (self.meta['new'].units == 'hey')
assert (self.meta['new'].long_name == 'boo')
assert (self.meta['NEW21'].units == 'hey2')
assert (self.meta['NEW21'].long_name == 'boo2')
assert (self.meta['NEW21'].YoYoYO == 'yolo')
def test_drop_meta(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta.drop(['new'])
assert not ('new' in self.meta.data.index)
assert (self.meta['NEW21'].units == 'hey2')
assert (self.meta['NEW21'].long_name == 'boo2')
assert (self.meta['NEW21'].YoYoYO == 'yolo')
def test_keep_meta(self):
self.meta['new'] = {'units': 'hey', 'long_name': 'boo'}
self.meta['NEW21'] = {'units': 'hey2', 'long_name': 'boo2',
'YoYoYO': 'yolo'}
self.meta.keep(['new21'])
assert not ('new' in self.meta.data.index)
assert (self.meta['NEW21'].units == 'hey2')
assert (self.meta['NEW21'].long_name == 'boo2')
assert (self.meta['NEW21'].YoYoYO == 'yolo')
|
|
# noqa: D100, D101, D102
import datetime as dt
from pathlib import Path
from urllib.parse import urlparse
import dateutil.parser
from owslib.wps import ComplexDataInput
from ..utils import is_file, sanitize
def filter_case_insensitive(names, complete_list):
"""Filter a sequence of process names into a `known` and `unknown` list."""
contained = []
missing = []
complete_list_lower = set(map(str.lower, complete_list))
if isinstance(names, str):
names = [
names,
]
for name in names:
if name.lower() in complete_list_lower:
contained.append(name)
else:
missing.append(name)
return contained, missing
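def _example_filter_case_insensitive():
    # Hedged example (not part of the original module): matching is
    # case-insensitive and a single name is accepted as well as a sequence.
    # The process names used here are illustrative only.
    contained, missing = filter_case_insensitive("Subset",
                                                 ["subset", "average"])
    assert contained == ["Subset"] and missing == []
    contained, missing = filter_case_insensitive(["subset", "plot"],
                                                 ["subset", "average"])
    assert contained == ["subset"] and missing == ["plot"]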
def pretty_repr(obj, linebreaks=True):
"""Output pretty repr for an Output.
Parameters
----------
obj : any type
linebreaks : bool
If True, split attributes with linebreaks
"""
class_name = obj.__class__.__name__
try:
obj = obj._asdict() # convert namedtuple to dict
except AttributeError:
pass
try:
items = list(obj.items())
except AttributeError:
try:
items = list(obj.__dict__.items())
except AttributeError:
return repr(obj)
attributes = []
indent = " " if linebreaks else ""
for key, value in items:
value = pretty_repr(value, linebreaks=False)
attributes.append(
"{indent}{key}={value}".format(indent=indent, key=key, value=value)
)
attribute_joiner = ",\n" if linebreaks else ", "
attributes = attribute_joiner.join(attributes)
joiner = "\n" if linebreaks else ""
return joiner.join([class_name + "(", attributes, ")"])
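def _example_pretty_repr():
    # Hedged example (not part of the original module): pretty_repr works on
    # anything dict-like, including namedtuples, and indents one attribute
    # per line when linebreaks=True.
    from collections import namedtuple
    Point = namedtuple("Point", ["x", "y"])
    assert pretty_repr(Point(x=1, y=2)) == "Point(\n    x=1,\n    y=2\n)"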
def build_wps_client_doc(wps, processes):
"""Create WPSClient docstring.
Parameters
----------
wps : owslib.wps.WebProcessingService
processes : Dict[str, owslib.wps.Process]
Returns
-------
str
The formatted docstring for this WPSClient
"""
doc = [wps.identification.abstract, "", "Processes", "---------", ""]
for process_name, process in list(processes.items()):
sanitized_name = sanitize(process_name)
description = "{name}\n {abstract}".format(
name=sanitized_name, abstract=process.abstract or "(No description)"
)
doc.append(description)
doc.append("")
if not processes:
doc.append("There aren't any available processes.")
doc.append("\n")
return "\n".join(doc)
def build_process_doc(process):
"""Create docstring from process metadata."""
doc = [process.abstract or "", ""]
# Inputs
if process.dataInputs:
doc.append("Parameters")
doc.append("----------")
for i in process.dataInputs:
doc.append("{} : {}".format(sanitize(i.identifier), format_type(i)))
doc.append(" {}".format(i.abstract or i.title))
# if i.metadata:
# doc[-1] += " ({})".format(', '.join(['`{} <{}>`_'.format(m.title, m.href) for m in i.metadata]))
doc.append("")
# Outputs
if process.processOutputs:
doc.append("Returns")
doc.append("-------")
for i in process.processOutputs:
doc.append("{} : {}".format(sanitize(i.identifier), format_type(i)))
doc.append(" {}".format(i.abstract or i.title))
doc.append("")
return "\n".join(doc)
def format_type(obj):
"""Create docstring entry for input parameter from an OWSlib object."""
nmax = 10
doc = ""
try:
if getattr(obj, "allowedValues", None):
av = ", ".join(["'{}'".format(i) for i in obj.allowedValues[:nmax]])
if len(obj.allowedValues) > nmax:
av += ", ..."
doc += "{" + av + "}"
if getattr(obj, "dataType", None):
doc += obj.dataType
if getattr(obj, "supportedValues", None):
doc += ", ".join(
[
":mimetype:`{}`".format(getattr(f, "mimeType", f))
for f in obj.supportedValues
]
)
if getattr(obj, "crss", None):
crss = ", ".join(obj.crss[:nmax])
if len(obj.crss) > nmax:
crss += ", ..."
doc += "[" + crss + "]"
if getattr(obj, "minOccurs", None) and obj.minOccurs == 0:
doc += ", optional"
if getattr(obj, "default", None):
doc += ", default:{}".format(obj.defaultValue)
if getattr(obj, "uoms", None):
doc += ", units:[{}]".format(", ".join([u.uom for u in obj.uoms]))
except Exception as e:
raise type(e)("{} (in {} docstring)".format(e, obj.identifier))
return doc
def is_embedded_in_request(url, value):
"""Whether or not to encode the value as raw data content.
Returns True if
- value is a file:/// URI or a local path
- value is a File-like instance
- url is not localhost
- value is a File object
- value is already the string content
"""
if hasattr(value, "read"): # File-like
return True
u = urlparse(url)
if isinstance(value, Path): # pathlib.Path
p = value
scheme = "file"
else: # String-like
v = urlparse(value)
p = Path(v.path)
scheme = v.scheme
if scheme == "file": # Explicit link to file
if is_file(p):
return "localhost" not in u.netloc
else:
raise IOError(
"{} should be a local file but was not found on disk.".format(value)
)
elif scheme == "": # Could be a local path or just a string
if is_file(p):
return "localhost" not in u.netloc
else:
return True
else: # Other URL (http, https, ftp, ...)
return False
def to_owslib(value, data_type, encoding=None, mimetype=None, schema=None):
"""Convert value into OWSlib objects."""
# owslib only accepts literaldata, complexdata and boundingboxdata
if data_type == "ComplexData":
return ComplexDataInput(
value, encoding=encoding, mimeType=mimetype, schema=schema
)
if data_type == "BoundingBoxData":
# TODO: return BoundingBoxDataInput(data=value, crs=crs, dimensions=2)
return value
else: # LiteralData
return str(value)
def from_owslib(value, data_type):
"""Convert a string into another data type."""
if value is None:
return None
if "string" in data_type:
pass
elif "integer" in data_type:
value = int(value)
elif "float" in data_type:
value = float(value)
elif "boolean" in data_type:
value = bool(value)
elif "dateTime" in data_type:
value = dateutil.parser.parse(value)
elif "time" in data_type:
value = dateutil.parser.parse(value).time()
elif "date" in data_type:
value = dateutil.parser.parse(value).date()
elif "angle" in data_type:
value = float(value)
elif "ComplexData" in data_type:
value = ComplexDataInput(value)
elif "BoundingBoxData" in data_type:
pass
# value = BoundingBoxDataInput(value)
return value
def py_type(data_type):
"""Return the python data type matching the WPS dataType."""
if data_type is None:
return None
if "string" in data_type:
return str
elif "integer" in data_type:
return int
elif "float" in data_type:
return float
elif "boolean" in data_type:
return bool
elif "dateTime" in data_type:
return dt.datetime
elif "time" in data_type:
return dt.time
elif "date" in data_type:
return dt.date
elif "angle" in data_type:
return float
elif "ComplexData" in data_type:
return str
elif "BoundingBoxData" in data_type:
return str
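def _example_type_conversions():
    # Hedged example (not part of the original module): from_owslib() turns
    # the string values returned by a WPS into native Python objects, and
    # py_type() reports the matching Python type for a WPS dataType.
    assert from_owslib("42", "integer") == 42
    assert from_owslib("1.5", "float") == 1.5
    assert from_owslib("2001-01-01", "date") == dt.date(2001, 1, 1)
    assert py_type("integer") is int
    assert py_type("dateTime") is dt.datetime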
def extend_instance(obj, cls):
"""Apply mixins to a class instance after creation."""
base_cls = obj.__class__
base_cls_name = obj.__class__.__name__
obj.__class__ = type(base_cls_name, (cls, base_cls), {})
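def _example_extend_instance():
    # Hedged example (not part of the original module): after the call, the
    # instance keeps its original behaviour but also exposes the mixin's
    # methods. The classes below are illustrative only.
    class Base:
        def hello(self):
            return "hello"
    class ShoutMixin:
        def shout(self):
            return self.hello().upper()
    obj = Base()
    extend_instance(obj, ShoutMixin)
    assert obj.hello() == "hello"
    assert obj.shout() == "HELLO"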
def add_output_format(output_dictionary, output_identifier, as_ref=None, mimetype=None):
"""Add an output format to an already existing dictionary.
Parameters
----------
output_dictionary: dict
The dictionary (created with create_output_dictionary()) to which this
output format will be added.
output_identifier: str
Identifier of the output.
as_ref: True, False or None
Determines if this output will be returned as a reference or not.
None for process default.
mimetype: str or None
        If the process supports multiple MIME types, the desired one can be
        specified with this argument.
None for process default.
"""
output_dictionary[output_identifier] = {
"as_ref": as_ref,
"mimetype": mimetype,
}
def create_output_dictionary(output_identifier, as_ref=None, mimetype=None):
"""Create an output format dictionary.
Parameters
----------
output_identifier: str
Identifier of the output.
as_ref: True, False or None
Determines if this output will be returned as a reference or not.
None for process default.
mimetype: str or None
        If the process supports multiple MIME types, the desired one can be
        specified with this argument.
None for process default.
Returns
-------
output_dictionary: dict
"""
output_dictionary = {
output_identifier: {
"as_ref": as_ref,
"mimetype": mimetype,
}
}
return output_dictionary
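def _example_output_dictionary():
    # Hedged example (not part of the original module): build a format
    # dictionary for one output, then register a second output on it. The
    # output identifiers and MIME type are illustrative only.
    formats = create_output_dictionary("output", as_ref=True,
                                       mimetype="application/x-netcdf")
    add_output_format(formats, "output_log", as_ref=False)
    assert formats == {
        "output": {"as_ref": True, "mimetype": "application/x-netcdf"},
        "output_log": {"as_ref": False, "mimetype": None},
    }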
|
|
import uuid
import logging
from os import path
from datetime import datetime
from indra.statements import Agent, Statement, stmts_from_json
from indra.assemblers.html import HtmlAssembler
from indra.util.statement_presentation import group_and_sort_statements, \
make_string_from_relation_key, StmtGroup, make_standard_stats
from bioagents.settings import IMAGE_DIR, TIMESTAMP_PICS
from kqml.cl_json import CLJsonConverter
logging.basicConfig(format='%(levelname)s: %(name)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger('Bioagents')
from indra.assemblers.english import EnglishAssembler
from kqml import KQMLModule, KQMLPerformative, KQMLList, KQMLString
class BioagentException(Exception):
pass
class Bioagent(KQMLModule):
"""Abstract class for bioagents."""
name = "Generic Bioagent (Should probably be overwritten)"
tasks = []
converter = CLJsonConverter(token_bools=True)
def __init__(self, **kwargs):
super(Bioagent, self).__init__(name=self.name, **kwargs)
self.my_log_file = self._add_log_file()
for task in self.tasks:
self.subscribe_request(task)
self.ready()
self.start()
logger.info("%s has started and is ready." % self.name)
return
@classmethod
def _add_log_file(cls):
log_file_name = '%s.log' % cls.name
handler = logging.FileHandler(log_file_name)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(levelname)s: '
'%(name)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
return log_file_name
@classmethod
def get_agent(cls, cl_agent):
"""Get an agent from the kqml cl-json representation (KQMLList)."""
agent_json = cls.converter.cl_to_json(cl_agent)
if isinstance(agent_json, list):
return [ensure_agent_type(Agent._from_json(agj))
for agj in agent_json]
else:
return ensure_agent_type(Agent._from_json(agent_json))
@classmethod
def get_statement(cls, cl_statement):
"""Get an INDRA Statement from cl-json"""
stmt_json = cls.converter.cl_to_json(cl_statement)
if not stmt_json:
return None
elif isinstance(stmt_json, list):
return stmts_from_json(stmt_json)
else:
return Statement._from_json(stmt_json)
@classmethod
def make_cljson(cls, entity):
"""Convert an Agent or a Statement into cljson.
`entity` is expected to have a method `to_json` which returns valid
json.
"""
# Regularize the input to plain JSON
if isinstance(entity, list):
entity_json = [e.to_json() if hasattr(e, 'to_json')
else e # assumed to be a list or a dict.
for e in entity]
elif hasattr(entity, 'to_json'):
entity_json = entity.to_json()
else: # Assumed to be a jsonifiable dict.
entity_json = entity.copy()
return cls.converter.cl_from_json(entity_json)
def receive_tell(self, msg, content):
tell_content = content[0].to_string().upper()
if tell_content == 'START-CONVERSATION':
logger.info('%s resetting' % self.name)
def receive_reply(self, msg, content):
pass
def receive_request(self, msg, content):
"""Handle request messages and respond.
If a "request" message is received, decode the task and the content
and call the appropriate function to prepare the response. A reply
message is then sent back.
"""
try:
content = msg.get('content')
task = content.head().upper()
logger.info("%s received request with task: %s" % (self.name, task))
except Exception as e:
logger.error('Could not get task string from request.')
logger.error(e)
reply_content = self.make_failure('INVALID_REQUEST')
return self.reply_with_content(msg, reply_content)
if task in self.tasks:
reply_content = self._respond_to(task, content)
else:
logger.error('Could not perform task.')
logger.error("Task %s not found in %s." %
(task, str(self.tasks)))
reply_content = self.make_failure('UNKNOWN_TASK')
return self.reply_with_content(msg, reply_content)
def _respond_to(self, task, content):
"""Get the method to responsd to the task indicated by task."""
resp_name = "respond_" + task.replace('-', '_').lower()
try:
resp = getattr(self, resp_name)
logger.info("%s will perform task %s with method %s."
% (self.name, task, resp_name))
except AttributeError:
logger.error("Tried to execute unimplemented task.")
logger.error("Did not find response method %s." % resp_name)
return self.make_failure('INVALID_TASK')
try:
reply_content = resp(content)
return reply_content
except BioagentException:
raise
except Exception as e:
logger.error('Could not perform response to %s' % task)
logger.exception(e)
return self.make_failure('INTERNAL_FAILURE', description=str(e))
def reply_with_content(self, msg, reply_content):
"""A wrapper around the reply method from KQMLModule."""
reply_msg = KQMLPerformative('reply')
reply_msg.set('content', reply_content)
self.reply(msg, reply_msg)
return
def tell(self, content):
"""Send a tell message."""
msg = KQMLPerformative('tell')
msg.set('content', content)
return self.send(msg)
def request(self, content):
"""Send a request message."""
msg = KQMLPerformative('request')
msg.set('content', content)
return self.send(msg)
def error_reply(self, msg, comment):
if not self.testing:
return KQMLModule.error_reply(self, msg, comment)
else:
return msg, comment
@staticmethod
def make_failure(reason=None, description=None):
msg = KQMLList('FAILURE')
if reason:
msg.set('reason', reason)
if description:
msg.sets('description', description)
return msg
def send_null_provenance(self, stmt, for_what, reason=''):
"""Send out that no provenance could be found for a given Statement."""
content_fmt = ('<h4>No supporting evidence found for {statement} from '
'{cause}{reason}.</h4>')
content = KQMLList('add-provenance')
stmt_txt = EnglishAssembler([stmt]).make_model()
content.sets('html', content_fmt.format(statement=stmt_txt,
cause=for_what, reason=reason))
return self.tell(content)
def send_provenance_for_stmts(self, stmt_list, for_what, limit=50,
ev_counts=None, source_counts=None):
"""Send out a provenance tell for a list of INDRA Statements.
The message is used to provide evidence supporting a conclusion.
"""
logger.info("Sending provenance for %d statements for \"%s\"."
% (len(stmt_list), for_what))
title = "Supporting evidence for %s" % for_what
content_fmt = '<h4>%s (max %s):</h4>\n%s<hr>'
evidence_html = self._make_report_cols_html(stmt_list, limit=limit,
ev_counts=ev_counts,
source_counts=source_counts,
title=title)
content = KQMLList('add-provenance')
content.sets('html', content_fmt % (title, limit, evidence_html))
return self.tell(content)
@staticmethod
def _make_evidence_html(stmts, ev_counts=None, source_counts=None,
title='Results from the INDRA database'):
"Make html from a set of statements."
ha = HtmlAssembler(stmts, db_rest_url='https://db.indra.bio', title=title,
ev_counts=ev_counts, source_counts=source_counts)
return ha.make_model()
@staticmethod
def _stash_evidence_html(html):
"""Make html for a set of statements, return a link to the file.
The PROVENANCE_LOCATION environment variable determines where the content
is stored. The variable is divided into colon-separated fields; the first
field indicates whether the file is stored locally or on s3, being either
"file" or "s3" respectively.
If the html will be stored locally, the next and last division should
be a path (absolute would be best) to the location where html files
will be stored. For example:
file:/home/myname/projects/cwc-integ/provenance
If the html will be stored on s3, the next division should be the
bucket, and the last division should be a prefix to a "directory" where
the html files will be stored. For example:
s3:cwc-stuff:bob/provenance
The default is:
file:{this directory}/../../../provenance
which should land in the cwc-integ directory. If the directory does not
yet exist, it will be created.
"""
# Get the provenance location.
from os import environ, mkdir
loc = environ.get('PROVENANCE_LOCATION')
if loc is None:
this_dir = path.dirname(path.abspath(__file__))
rel = path.join(*([this_dir] + 3*[path.pardir] + ['provenance']))
loc = 'file:' + path.abspath(rel)
logger.info("Using provenance location: \"%s\"" % loc)
# Save the file.
method = loc.split(':')[0]
fname = '%s.html' % uuid.uuid4()
if method == 'file':
prov_path = loc.split(':')[1]
if not path.exists(prov_path):
mkdir(prov_path)
fpath = path.join(prov_path, fname)
with open(fpath, 'w') as f:
f.write(html)
link = fpath
elif method == 's3':
bucket = loc.split(':')[1]
prefix = loc.split(':')[2]
# Get a connection to s3.
import boto3
from botocore import UNSIGNED
from botocore.client import Config
s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
key = prefix + fname
link = 'https://s3.amazonaws.com/%s/%s' % (bucket, key)
s3.put_object(Bucket=bucket, Key=key, Body=html.encode('utf-8'),
ContentType='text/html')
else:
logger.error('Invalid PROVENANCE_LOCATION: "%s". HTML not saved.'
% loc)
link = None
return link
def say(self, message):
"""Say something to the user."""
if message:
msg = KQMLList('say')
msg.append(KQMLString(message))
self.request(msg)
def _make_report_cols_html(self, stmt_list, limit=5, ev_counts=None,
source_counts=None, **kwargs):
"""Make columns listing the support given by the statement list."""
if not stmt_list:
return "No statements found."
def href(ref, text):
return '<a href=%s target="_blank">%s</a>' % (ref, text)
# Build the list of relevant statements and count their prevalence.
stmt_stats = make_standard_stats(ev_counts=ev_counts,
source_counts=source_counts)
sorted_groups = group_and_sort_statements(stmt_list,
custom_stats=stmt_stats,
grouping_level='relation')
# Build the html.
lines = []
for _, rel_key, _, metrics in sorted_groups[:limit]:
count = metrics['ev_count']
line = '<li>%s %s</li>' % (make_string_from_relation_key(rel_key),
'(%d)' % count)
lines.append(line)
# Build the overall html.
list_html = '<ul>%s</ul>' % ('\n'.join(lines))
html = self._make_evidence_html(stmt_list, ev_counts=ev_counts,
source_counts=source_counts, **kwargs)
link = self._stash_evidence_html(html)
if link is None:
link_html = 'I could not generate the full list.'
elif link.startswith('http'):
link_html = href(link, 'Here') + ' is the full list.'
else:
link_html = 'Here: %s is the full list.' % link
return list_html + '\n' + link_html
@staticmethod
def make_resolve_family_failure(family_agent):
gene_agents = get_genes_for_family(family_agent)
msg = Bioagent.make_failure(reason='FAMILY_NAME')
cl_family = Bioagent.make_cljson(family_agent)
cl_genes = Bioagent.make_cljson(gene_agents)
clarification = KQMLList('RESOLVE')
clarification.set('term', cl_family)
clarification.set('as', cl_genes)
msg.set('clarification', clarification)
return msg
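# Minimal subclass sketch illustrating the dispatch convention used by
# receive_request/_respond_to: a request with task ECHO-TEXT is routed to
# respond_echo_text. The task name, slot name, and reply layout here are
# assumptions for illustration only.
# class EchoAgent(Bioagent):
#     name = 'ECHO'
#     tasks = ['ECHO-TEXT']
#     def respond_echo_text(self, content):
#         reply = KQMLList('SUCCESS')
#         reply.sets('text', content.gets('text'))
#         return reply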
def get_img_path(img_name):
"""Get a full path for the given image name.
The name will also have a timestamp added if settings.TIMESTAMP_PICS is
True. The timestamp, if applied, is prepended to the name.
"""
if TIMESTAMP_PICS:
date_str = datetime.now().strftime('%Y%m%d%H%M%S')
img_name = '%s_%s' % (date_str, img_name)
return path.join(IMAGE_DIR, img_name)
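# Example (sketch): with TIMESTAMP_PICS enabled, a call to
# get_img_path('network.png') made at 2024-01-01 12:00:00 would return
# path.join(IMAGE_DIR, '20240101120000_network.png'); the file name and
# time are illustrative assumptions.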
def infer_agent_type(agent):
if 'FPLX' in agent.db_refs:
return 'ONT::PROTEIN-FAMILY'
elif 'HGNC' in agent.db_refs or 'UP' in agent.db_refs:
return 'ONT::GENE-PROTEIN'
elif 'CHEBI' in agent.db_refs or 'PUBCHEM' in agent.db_refs:
return 'ONT::PHARMACOLOGIC-SUBSTANCE'
elif 'GO' in agent.db_refs or 'MESH' in agent.db_refs:
return 'ONT::BIOLOGICAL-PROCESS'
return None
def add_agent_type(agent):
if agent is None:
return None
inferred_type = infer_agent_type(agent)
if inferred_type:
agent.db_refs['TYPE'] = inferred_type
return agent
def ensure_agent_type(agent):
if agent is None:
return None
if 'TYPE' not in agent.db_refs.keys():
return add_agent_type(agent)
else:
return agent
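# Example (sketch; the agent name and HGNC identifier are illustrative
# assumptions): an agent grounded to HGNC is tagged as a gene/protein.
# braf = ensure_agent_type(Agent('BRAF', db_refs={'HGNC': '1097'}))
# braf.db_refs['TYPE']  # -> 'ONT::GENE-PROTEIN'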
def get_genes_for_family(family_agent):
"""Return agents corresponding to specific genes in a given family agent"""
from indra.ontology.bio import bio_ontology
from indra.ontology.standardize \
import standardize_agent_name
family_grounding = family_agent.db_refs.get('FPLX')
if not family_grounding:
return []
children = bio_ontology.get_children('FPLX', family_grounding)
children = [c for c in children if c[0] == 'HGNC']
child_agents = []
for _, hgnc_id in children:
child_agent = Agent(None, db_refs={'HGNC': hgnc_id,
'TYPE': 'ONT::GENE-PROTEIN'})
standardize_agent_name(child_agent, standardize_refs=True)
child_agents.append(child_agent)
child_agents = sorted(child_agents, key=lambda x: x.name)
return child_agents
|
|
'''
BSD 3-Clause License
Copyright (c) 2019, Donald N. Bockoven III
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from __future__ import division
from numpy import sign
from numpy import zeros
import numpy as np
import math
def fixedendmomentsTimoshenko(theta_0,theta_L, L, E, I, G, kA, fed=[1,1]):
'''
Given the the start and end cross section rotations
Return the end moments that produce equal and opposite rotations
such that the net rotation at the member ends are 0 ie Fixed
Sign convention is clockwise moments are positive
[-Theta_0]   [(1/(kA*G*L))+(L/(3*E*I)) , (1/(kA*G*L))-(L/(6*E*I))]   [M_0]
[-Theta_L] = [(1/(kA*G*L))-(L/(6*E*I)) , (1/(kA*G*L))+(L/(3*E*I))] * [M_1]
Using Numpy Linear Algebra to solve the simultaneous equations
'''
if fed[0] == 1 and fed[1] == 1:
s = np.array([[-1.0*theta_0],[-1.0*theta_L]])
ems = np.array([[(1.0/(kA*G*L))+(L/(3.0*E*I)) , (1.0/(kA*G*L))-(L/(6.0*E*I))],
[(1.0/(kA*G*L))-(L/(6.0*E*I)) , (1.0/(kA*G*L))+(L/(3.0*E*I))]])
fem = np.linalg.solve(ems,s)
elif fed[0] == 1 and fed[1] == 0:
fel= (-1.0*theta_0) / ((1.0/(kA*G*L))+(L/(3.0*E*I)))
fem = np.array([[fel],[0]])
elif fed[0] == 0 and fed[1] == 1:
fer = (-1.0*theta_L) / ((1.0/(kA*G*L))+(L/(3.0*E*I)))
fem = np.array([[0],[fer]])
else:
fem = np.array([[0],[0]])
return fem
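# Minimal usage sketch, assuming kip and inch units and illustrative member
# properties: equal and opposite simple-span end rotations are converted to
# the fixed end moments that restrain them.
# fem = fixedendmomentsTimoshenko(theta_0=0.002, theta_L=-0.002, L=120.0,
#                                 E=29000.0, I=100.0, G=11200.0, kA=3.0)
# ML, MR = fem[0][0], fem[1][0]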
class TimoshenkoBeam:
def __init__(self, L, E, I, G, kA):
'''
Timoshenko General form equations for beam stiffness
and carry over factors
** Maintain consistent units among the inputs **
L = beam span
E = beam modulus of elasticity
I = beam second moment of area about the axis of bending
G = beam shear modulus
kA = beam shear area, typically the beam web area for steel W shapes
'''
self.L = L
self.E = E
self.I = I
self.G = G
self.kA = kA
def cof(self, fixed=[1,1]):
'''
carry over factor
g = 6EI / kAGL^2
'''
g = ((6*self.E*self.I) / (self.kA*self.G*self.L*self.L))
COF_fixed = (1-g) / (2+g)
COF = [i*COF_fixed for i in fixed]
return COF
def k(self, fixed=[1,1]):
'''
Stiffness factors
g = 6EI / kAGL^2
'''
g = ((6*self.E*self.I) / (self.kA*self.G*self.L*self.L))
K_farfixed = ((4*self.E*self.I) / self.L) * ((2+g)/(2*(1+(2*g))))
K_farpinned = ((3*self.E*self.I)/self.L) * (2/(2+g))
if fixed == [0,1]:
return [0,K_farpinned]
elif fixed == [1,0]:
return [K_farpinned,0]
elif fixed == [0,0]:
return [0,0]
else:
return [K_farfixed,K_farfixed]
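# Usage sketch, assuming kip and inch units and illustrative section
# properties for a steel beam:
# beam = TimoshenkoBeam(L=240.0, E=29000.0, I=518.0, G=11200.0, kA=5.0)
# beam.k(fixed=[1, 1])    # stiffness factors with both far ends fixed
# beam.cof(fixed=[1, 1])  # carry over factors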
class PointLoad:
def __init__(self, P, a, L, E, I, G, kA):
'''
Timoshenko General form equations for a simply
supported beam with an applied Point load anywhere
along the beam span.
Note unlike the Euler-Bernoulli beam formulas
the beam properties are needed as part of the
initial inputs.
** Maintain consistent units among the inputs **
P = load
a = load location from left end of beam
L = beam span
E = beam modulus of elasticity
I = beam second moment of area about the axis of bending
G = beam shear modulus
kA = beam shear area, typically the beam web area for steel W shapes
sign convention:
(+) positive loads are applied in the (-) negative y direction
(+) positive reactions are in the (+) positive y direction
'''
self.P = P
self.a = a
self.L = L
self.E = E
self.I = I
self.G = G
self.kA = kA
# b = L-a
self.b = self.L - self.a
self.kind = 'Point'
self.error = ''
if self.a > self.L:
self.error = 'Error a > l'
# Simple End Reactions from statics
# Sum V = 0 and Sum M = 0
self.rl = (self.P*self.b)/self.L
self.rr = (self.P*self.a)/self.L
'''
Integration constants
resulting from integration of the two formulas
M = -EI dtheta/dx
V = kAG (-theta + ddelta/dx)
Initial conditions:
delta = 0 at x=0 and x = L
Compatibility conditions:
theta = constant at x = a
delta = constant at x = a
'''
self.c1 = ((-1*self.P*math.pow(self.a,2)) / (2*self.E*self.I) +
((self.P*math.pow(self.a,3)) / (6*self.E*self.I*self.L)) +
((self.P*self.a*self.L) / (3*self.E*self.I)))
self.c2 = 0
self.c3 = (((self.P*math.pow(self.a,3))/(6*self.E*self.I*self.L)) +
((self.P*self.a*self.L)/(3*self.E*self.I)))
self.c4 = (((self.P*self.a)/(self.kA*self.G)) -
((self.P*math.pow(self.a,3))/(6*self.E*self.I)))
def chart_load(self, x_scale=0, y_scale=0, arrows=0):
'''
function returns x and y coordinate data to facilitate
chart plotting
y scaling is applied to the load value
x scaling only impacts the arrow head if arrows are
selected to be included
arrows simply places two diagonal lines at the load intersection with
the beam line.
'''
if arrows == 1:
arrow_height = (self.P/6.0)
#30 degree arrow
arrow_plus= (self.a+(arrow_height*math.tan(math.radians(30))))
arrow_minus= (self.a-(arrow_height*math.tan(math.radians(30))))
x=[arrow_minus,self.a,arrow_plus,self.a,self.a]
x = [i*x_scale for i in x]
y=[arrow_height,0,arrow_height,0,self.P]
y = [j*y_scale for j in y]
else:
x = [self.a*x_scale, self.a*x_scale]
y = [0,self.P*y_scale]
return x,y
def fef(self):
'''
fixed end forces
g = 6EI / kAGL^2
Sign convention:
(+) positive moments are clock-wise
'''
# redefine variables from self. to local to
# make formulas easier to read
P = self.P
a = self.a
b = self.b
L = self.L
g = ((6*self.E*self.I) / (self.kA*self.G*self.L*self.L))
ML = -1.0 * ((P*a*b*b) / (L*L))*((1+((L/b)*g))/(1+(2*g)))
MR = ((P*a*a*b) / (L*L))*((1+((L/a)*g))/(1+(2*g)))
# additional vertical reactions caused by fixed end moments
MRL = -1.0*(ML+MR)/L
MRR = -1.0*MRL
RL = self.rl + MRL
RR = self.rr + MRR
return [RL, ML, RR, MR]
def v(self,x):
'''
function takes an array of x locations along the beam and
returns the associated internal beam shear
'''
iters = len(x)
v=zeros(iters)
for i in range(0,iters):
if x[i] <= self.a:
if x[i] == 0 and self.a == 0:
v[i] = 0
else:
v[i] = self.rl
else:
v[i] = -1 * self.rr
return v
def m(self,x):
'''
function takes an array of x locations along the beam and
returns the associated internal beam moment
'''
iters = len(x)
m=zeros(iters)
for i in range(0,iters):
if x[i] <= self.a:
m[i] = self.rl * x[i]
else:
m[i] = (-1 * self.rr * x[i]) + (self.rr * self.L)
return m
def theta(self,x):
'''
function takes an array of x locations along the beam and
returns the associated internal rotation of the cross-section vertical
axis
'''
iters = len(x)
theta = zeros(iters)
for i in range(0,iters):
if x[i] <= self.a:
theta[i] = (((-1*self.rl*math.pow(x[i],2))/(2*self.E*self.I)) +
self.c1)
else:
theta[i] = (((self.P*self.a*math.pow(x[i],2)) / (2*self.E*self.I*self.L)) -
((self.P*self.a*x[i]) / (self.E*self.I)) +
self.c3)
return theta
def delta(self,x):
'''
function takes an array of x locations along the beam and
returns the associated beam vertical deflection
'''
iters = len(x)
delta = zeros(iters)
for i in range(0,iters):
if x[i] <= self.a:
delta[i] = (((self.rl*x[i])/(self.kA*self.G)) -
((self.rl*math.pow(x[i],3))/(6*self.E*self.I)) +
(self.c1*x[i]) + self.c2)
else:
delta[i] = (((-1.0*self.rr*x[i])/(self.kA*self.G)) +
((self.P*self.a*math.pow(x[i],3))/(6*self.E*self.I*self.L)) -
((self.P*self.a*math.pow(x[i],2))/(2*self.E*self.I)) +
(self.c3*x[i]) + self.c4)
return delta
'''
The below functions do the same as above with the exception that they only
accept a single x location and report a single result.
'''
def vx(self,x):
if x <= self.a:
if x == 0 and self.a == 0:
vx = 0
else:
vx = self.rl
else:
vx = -1 * self.rr
return vx
def mx(self,x):
if x <= self.a:
mx = self.rl * x
else:
mx = (-1 * self.rr * x) + (self.rr * self.L)
return mx
def thetax(self,x):
if x <= self.a:
thetax = (((-1*self.rl*math.pow(x,2))/(2*self.E*self.I)) +
self.c1)
else:
thetax = (((self.rr*math.pow(x,2)) / (2*self.E*self.I)) -
((self.rr*self.L*x) / (self.E*self.I)) +
self.c3)
return thetax
def deltax(self,x):
if x <= self.a:
deltax = (((self.rl*x)/(self.kA*self.G)) -
((self.rl*math.pow(x,3))/(6*self.E*self.I)) +
(self.c1*x) + self.c2)
else:
deltax = (((-1.0*self.rr*x)/(self.kA*self.G)) +
((self.P*self.a*math.pow(x,3))/(6*self.E*self.I*self.L)) -
((self.P*self.a*math.pow(x,2))/(2*self.E*self.I)) +
(self.c3*x) + self.c4)
return deltax
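# Usage sketch for PointLoad, assuming kip and inch units and illustrative
# member properties:
# pl = PointLoad(P=10.0, a=60.0, L=240.0, E=29000.0, I=518.0, G=11200.0, kA=5.0)
# pl.rl, pl.rr      # simple span reactions
# pl.fef()          # [RL, ML, RR, MR] with both ends fixed
# pl.deltax(120.0)  # vertical deflection at midspan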
class PointMoment:
def __init__(self, M, a, L, E, I, G, kA):
'''
Timoshenko General form equations for a simply
supported beam with an applied Point Moment anywhere
along the beam span.
Note unlike the Euler-Bernoulli beam formulas
the beam properties are needed as part of the
initial inputs.
** Maintain consistent units among the inputs **
M = moment
a = load location from left end of beam
L = beam span
E = beam modulus of elasticity
I = beam second moment of area about the axis of bending
G = beam shear modulus
kA = beam shear area, typically the beam web area for steel W shapes
sign convention:
(+) positive moments are applied clockwise
(+) positive reactions are in the (+) positive y direction
'''
self.M = M
self.a = a
self.L = L
self.E = E
self.I = I
self.G = G
self.kA = kA
# b = L-a
self.b = self.L - self.a
self.kind = 'Moment'
self.error = ''
if self.a > self.L:
self.error = 'Error a > l'
# Simple End Reactions from statics
# Sum V = 0 and Sum M = 0
self.rl = -1.0*self.M/self.L
self.rr = self.M/self.L
'''
Integration constants
resulting from integration of the two formulas
M = -EI dtheta/dx
V = kAG (-theta + ddelta/dx)
Initial conditions:
delta = 0 at x=0 and x = L
Compatibility conditions:
theta = constant at x = a
delta = constant at x = a
'''
self.c1 = (((-1.0*self.M*self.a) / (self.E*self.I)) +
((self.M*math.pow(self.a,2))/(2*self.E*self.I*self.L)) +
(self.M/(self.kA*self.G*self.L)) +
((self.M*self.L)/(3*self.E*self.I)))
self.c2 = 0
self.c3 = (((self.M*math.pow(self.a,2))/(2*self.E*self.I*self.L)) +
(self.M/(self.kA*self.G*self.L)) +
((self.M*self.L)/(3*self.E*self.I)))
self.c4 = (-1.0*self.M*math.pow(self.a,2))/(2*self.E*self.I)
def chart_load(self, x_scale=0, y_scale=0, arrows=0):
'''
function returns x and y coordinate data to facilitate
chart plotting
y scaling is applied to the load value
x scaling only impacts the arrow head if arrows are
selected to be included
arrows simply places two diagonal lines at the load intersection with
the beam line.
'''
x=[]
y=[]
r = (self.M/2.0)
# set radius as M/2 so when centered on beam line the moment symbol
# total height matches the moment value
if arrows == 1:
arrow_height = r/6.0
#30 degree arrow
arrow_minus= (arrow_height*math.tan(math.radians(30)))
if self.M < 0:
x = [self.a,self.a,self.a]
y = [r,0,-r]
xi=0
yi=0
for a in range(-90, 181):
xi = (self.a)+((r*math.cos(math.radians(a))))
yi = 0+((r*math.sin(math.radians(a))))
x.append(xi)
y.append(yi)
x.append(xi-arrow_minus)
y.append(yi+arrow_height)
x.append(xi)
y.append(yi)
x.append(xi+arrow_minus)
y.append(yi+arrow_height)
else:
x = [self.a-r,self.a,self.a+r, self.a+r-arrow_minus,self.a+r,self.a+r+arrow_minus,self.a+r]
y = [0,0,0,arrow_height,0,arrow_height,0]
xi=0
yi=0
for a in range(0,271):
xi = self.a+(r*math.cos(math.radians(a)))
yi = 0+(r*math.sin(math.radians(a)))
x.append(xi)
y.append(yi)
else:
if self.M < 0:
x = [self.a,self.a,self.a]
y = [r,0,-r]
xi=0
yi=0
for a in range(-90, 181):
xi = self.a+(r*math.cos(math.radians(a)))
yi = 0+(r*math.sin(math.radians(a)))
x.append(xi)
y.append(yi)
else:
x = [self.a-r,self.a,self.a+r]
y = [0,r,0]
xi=0
yi=0
for a in range(0,271):
xi = self.a+(r*math.cos(math.radians(a)))
yi = 0+(r*math.sin(math.radians(a)))
x.append(xi)
y.append(yi)
x = [i*x_scale for i in x]
y = [j*y_scale for j in y]
return x,y
def fef(self):
'''
fixed end forces
g = 6EI / kAGL^2
Sign convention:
(+) positive moments are clock-wise
'''
# redefine variables from self. to local to
# make formulas easier to read
M = self.M
a = self.a
b = self.b
L = self.L
g = ((6*self.E*self.I) / (self.kA*self.G*self.L*self.L))
ML = 0
MR = 0
# additional vertical reactions caused by fixed end moments
MRL = -1.0*(ML+MR)/L
MRR = -1.0*MRL
RL = self.rl + MRL
RR = self.rr + MRR
return [RL, ML, RR, MR]
def v(self,x):
'''
function takes an array of x locations along the beam and
returns the associated internal beam shear
'''
iters = len(x)
v=zeros(iters)
for i in range(0,iters):
v[i] = self.rl
return v
def m(self,x):
'''
function takes an array of x locations along the beam and
returns the associated internal beam moment
'''
iters = len(x)
m=zeros(iters)
for i in range(0,iters):
if x[i] <= self.a:
m[i] = self.rl * x[i]
else:
m[i] = self.rl * x[i] + self.M
return m
def theta(self,x):
'''
function takes an array of x locations along the beam and
returns the associated internal rotation of the cross-section vertical
axis
'''
iters = len(x)
theta = zeros(iters)
for i in range(0,iters):
if x[i] <= self.a:
theta[i] = ((self.M*math.pow(x[i],2))/(2*self.E*self.I*self.L)) + self.c1
else:
theta[i] = (((self.M*math.pow(x[i],2))/(2*self.E*self.I*self.L)) -
((self.M*x[i])/(self.E*self.I)) +
self.c3)
return theta
def delta(self,x):
'''
function takes an array of x locations along the beam and
returns the associated beam vertical deflection
'''
iters = len(x)
delta = zeros(iters)
for i in range(0,iters):
if x[i] <= self.a:
delta[i] = (((-1.0*self.M*x[i])/(self.kA*self.G*self.L))+
((self.M*math.pow(x[i],3))/(6*self.E*self.I*self.L))+
(self.c1*x[i])+
self.c2)
else:
delta[i] = (((-1.0*self.M*x[i])/(self.kA*self.G*self.L))+
((self.M*math.pow(x[i],3))/(6*self.E*self.I*self.L))-
((self.M*math.pow(x[i],2))/(2*self.E*self.I))+
(self.c3*x[i])+
self.c4)
return delta
'''
The below functions do the same as above with the exception that they only
accept a single x location and report a single result.
'''
def vx(self,x):
vx = self.rl
return vx
def mx(self,x):
if x <= self.a:
mx = self.rl * x
else:
mx = self.rl * x + self.M
return mx
def thetax(self,x):
if x <= self.a:
thetax = ((self.M*math.pow(x,2))/(2*self.E*self.I*self.L)) + self.c1
else:
thetax = (((self.M*math.pow(x,2))/(2*self.E*self.I*self.L)) -
((self.M*x)/(self.E*self.I)) +
self.c3)
return thetax
def deltax(self,x):
if x <= self.a:
deltax = (((-1.0*self.M*x)/(self.kA*self.G*self.L))+
((self.M*math.pow(x,3))/(6*self.E*self.I*self.L))+
(self.c1*x)+
self.c2)
else:
deltax = (((-1.0*self.M*x)/(self.kA*self.G*self.L))+
((self.M*math.pow(x,3))/(6*self.E*self.I*self.L))-
((self.M*math.pow(x,2))/(2*self.E*self.I))+
(self.c3*x)+
self.c4)
return deltax
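# Usage sketch for PointMoment, assuming kip and inch units and illustrative
# member properties:
# pm = PointMoment(M=50.0, a=120.0, L=240.0, E=29000.0, I=518.0,
#                  G=11200.0, kA=5.0)
# pm.rl, pm.rr                 # reaction couple resisting the applied moment
# pm.mx(119.9), pm.mx(120.1)   # internal moment just left and right of M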
class UniformLoad:
def __init__(self, w, a, b, L, E, I, G, kA):
'''
Timoshenko derivation for uniform loading
pin-pin single span beam
w - Load left end value
a - load start point from left end of beam
b - load end point from left end of beam
d - load width = b - a
L - beam span
E = beam modulus of elasticity
I = beam second moment of area about the axis of bending
G = beam shear modulus
kA = beam shear area, typically the beam web area for steel W shapes
sign convention:
(+) positive moments are applied clockwise
(+) positive reactions are in the (+) positive y direction
'''
self.w = w
self.a = a
self.b = b
self.L = L
self.E = E
self.I = I
self.G = G
self.kA = kA
d= b - a
self.d = d
'''
Reactions:
W = w*d
sum Fy = 0
--> RL + RR - W = 0
xbar = (a + d/2)
sum Mx,rl,cw+ = 0
--> -RR*L + W*(xbar) = 0
--> RR = W*(xbar) / L
RL = W - RR
'''
W = w*d
self.W = W
xbar = a + (d/2.0)
self.xbar = xbar
RR = (W*xbar) / L
RL = W - RR
self.RR = RR
self.RL = RL
#Boundary and Compatibility equation
#Lists of coefficients and lists of associated equalities
'''
Solve for Constants using boundary conditions and compatibility:
**Boundary @ x=0, V=RL:**
//c1 = RL
**Boundary @ x=L, V=-RR:**
//c3 = -RR
**Compatibility @ x=a, V=constant:**
c1 = -1*w*a + c2
//-c1 + c2 = w*a
**Boundary @ x=0, M=0:**
//c4 = 0
**Boundary @ x=L, M=0:**
//c3*L + c6 = 0
c6 = -c3*L
**Compatibility @ x=a, M=constant:**
c1*a = (-1*w*a^2)/2 + c2*a + c5
//c1*a - c2*a - c5 = (-1*w*a^2)/2
**Boundary @ x=0, delta=0:**
//c10 = 0
**Boundary @ x=L, delta=0:**
//0 = c3*L/kAG + (-c3*L^3)/(6*EI) - c6*L^2/2*EI + c9*L + c12
**Compatibility @ x=a, delta=constant:**
c1*a/kAG + (-c1*a^3)/(6*EI) - c4*a^2/2*EI + c7*a =
-1*w*a^2/2*kAG + c2*a/kAG + (w*a^4)/(24*EI) - (c2*a^3)/6*EI - c5*a^2/2*EI + c8*a + c11
//c1*a/kAG + (-c1*a^3)/(6*EI) - c2*a/kAG + (c2*a^3)/6*EI - c4*a^2/2*EI + c5*a^2/2*EI + c7*a - c8*a - c11 =
-1*w*a^2/2*kAG + (w*a^4)/(24*EI)
**Compatibility @ x=b, delta = constant:**
c3*b/kAG + (-c3*b^3)/(6*EI) - c6*b^2/2*EI + c9*b + c12 =
-1*w*b^2/2*kAG + c2*b/kAG + (w*b^4)/(24*EI) - (c2*b^3)/6*EI - c5*b^2/2*EI + c8*b + c11
//-c2*b/kAG + (c2*b^3)/6*EI + c3*b/kAG + (-c3*b^3)/(6*EI) + c5*b^2/2*EI - c6*b^2/2*EI - c8*b + c9*b - c11 + c12 =
-1*w1*b^2/2*kAG + (w1*b^4)/(24*EI)
**Compatibility @ x=a, theta=constant:**
(-c1*a^2)/(2*EI) - c4*a/EI + c7 = (w*a^3)/(6*EI) - (c2*a^2)/2*EI - c5*a/EI + c8
//(-c1*a^2)/(2*EI) + (c2*a^2)/2*EI - c4*a/EI + c5*a/EI + c7 - c8 = (w*a^3)/(6*EI)
**Compatibility @ x=b, theta = constant:**
(w*b^3)/(6*EI) - (c2*b^2)/2*EI - c5*b/EI + c8 = (-c3*b^2)/(2*EI) - c6*b/EI + c9
//(-c2*b^2)/2*EI + (c3*b^2)/(2*EI) - c5*b/EI + c6*b/EI + c8 - c9 = (-1*w*b^3)/(6*EI)
'''
bc1_coeff = [1,0,0,0,0,0,0,0,0,0,0,0]
bc1_eq = [RL]
bc2_coeff = [-1,1,0,0,0,0,0,0,0,0,0,0]
bc2_eq = [w*a]
bc3_coeff = [0,0,1,0,0,0,0,0,0,0,0,0]
bc3_eq = [-1*RR]
bc4_coeff = [0,0,0,1,0,0,0,0,0,0,0,0]
bc4_eq = [0]
bc5_coeff = [-1*a,a,0,0,1,0,0,0,0,0,0,0]
bc5_eq = [(w*math.pow(a,2))/2.0]
bc6_coeff = [0,0,L,0,0,1,0,0,0,0,0,0]
bc6_eq = [0]
bc7_coeff = [(-1*math.pow(a,2))/(2*E*I),(math.pow(a,2))/(2*E*I),0,0,a/(E*I),0,1,-1,0,0,0,0]
bc7_eq = [(w*math.pow(a,3))/(6*E*I)]
bc8_coeff = [0,
(math.pow(b,2))/(2*E*I),
(-1*math.pow(b,2))/(2*E*I),
0,
b/(E*I),
(-1*b)/(E*I),
0,
-1,
1,
0,
0,
0]
bc8_eq = [((w*math.pow(b,3))/(6*E*I))]
bc9_coeff = [0,0,(L/(kA*G)) + ((-1*math.pow(L,3))/(6*E*I)),0,0,-1*math.pow(L,2)/(2*E*I),0,0,L,0,0,1]
bc9_eq = [0]
bc10_coeff = [0,0,0,0,0,0,0,0,0,1,0,0]
bc10_eq = [0]
bc11_coeff = [(a/(kA*G)) + ((-1*math.pow(a,3))/(6*E*I)),
(-1*a/(kA*G)) + ((math.pow(a,3))/(6*E*I)),
0,
0,
(math.pow(a,2)/(2*E*I)),
0,
a,
-1*a,
0,
0,
-1,
0]
bc11_eq = [(-1*w*math.pow(a,2)/(2*kA*G)) + ((w*math.pow(a,4))/(24*E*I))]
bc12_coeff = [0,
(-1*b/(kA*G)) + ((math.pow(b,3))/(6*E*I)),
(b/(kA*G)) + ((-1*math.pow(b,3))/(6*E*I)),
0,
math.pow(b,2)/(2*E*I),
-1*math.pow(b,2)/(2*E*I),
0,
-1*b,
b,
0,
-1,
1]
bc12_eq = [((-1*w*math.pow(b,2))/(2*kA*G)) + ((w*math.pow(b,4))/(24*E*I))]
bceq = [bc1_coeff,bc2_coeff,bc3_coeff,bc4_coeff,bc5_coeff,bc6_coeff,bc7_coeff,bc8_coeff,bc9_coeff,bc10_coeff,bc11_coeff,bc12_coeff]
bcs = [bc1_eq,bc2_eq,bc3_eq,bc4_eq,bc5_eq,bc6_eq,bc7_eq,bc8_eq,bc9_eq,bc10_eq,bc11_eq,bc12_eq]
bceq = np.array(bceq)
bcs = np.array(bcs)
c = np.linalg.solve(bceq,bcs)
self.c = c
'''
Load Formulas:
0 < x < a:
w = 0
a < x < b:
w = -1*w
b < x < L:
w = 0
Shear Formulas:
w = dV/dx, therefore V = integral w dx
0 < x < a:
V = c1
a < x < b:
V = -1*w*x + c2
b < x < L:
V = c3
'''
def vx(self,x):
# redefine variables from self. to local to
# make formulas easier to read
w = self.w
a = self.a
c = self.c
b = self.b
L = self.L
if 0 <= x <= a:
v = c[0][0]
elif a < x <= b:
v = (-1*w*x) + c[1][0]
elif b < x <= L:
v = c[2][0]
else:
v = 0
return v
'''
Moment Formulas:
V = dM/dx, therefore M = integral V dx
0 < x < a:
M = c1*x + c4
a < x < b:
M = (-1*w*x^2)/2 + c2*x + c5
b < x < L:
M = c3*x + c6
'''
def mx(self,x):
# redefine variables from self. to local to
# make formulas easier to read
w = self.w
a = self.a
c = self.c
b = self.b
L = self.L
if 0 <= x <= a:
m = c[0][0]*x + c[3][0]
elif a < x <= b:
m = (((-1*w*math.pow(x,2))/2.0) +
c[1][0]*x +
c[4][0])
elif b < x <= L:
m = c[2][0]*x + c[5][0]
else:
m = 0
return m
'''
Timoshenko Relationship for Rotation, theta, and Deflection, delta
M = -E*I d theta/dx
V = kAG (-theta + d delta/dx)
Rotation Formulas:
theta = integral M/-EI dx
0 < x < a:
theta = (-c1*x^2)/(2*EI) - c4*x/EI + c7
a < x < b:
theta = (w1*x^3)/(6*EI) - (c2*x^2)/2*EI - c5*x/EI + c8
b < x < L:
theta = (-c3*x^2)/(2*EI) - c6*x/EI + c9
'''
def thetax(self,x):
# redefine variables from self. to local to
# make formulas easier to read
w = self.w
a = self.a
c = self.c
b = self.b
L = self.L
E = self.E
I = self.I
if 0 <= x <= a:
theta = (((-1*c[0][0]*math.pow(x,2))/(2.0*E*I)) -
((c[3][0]*x)/(E*I)) +
c[6][0])
elif a < x <= b:
theta = (((w*math.pow(x,3))/(6.0*E*I)) -
((c[1][0]*math.pow(x,2))/(2.0*E*I)) -
((c[4][0]*x)/(E*I)) +
c[7][0])
elif b < x <= L:
theta = (((-1*c[2][0]*math.pow(x,2))/(2.0*E*I)) -
((c[5][0]*x)/(E*I)) +
c[8][0])
else:
theta = 0
return theta
'''
Delta Formulas:
delta = integral V/kAG + theta dx
0 < x < a:
delta = c1*x/kAG + (-c1*x^3)/(6*EI) - c4*x^2/2*EI + c7*x + c10
a < x < b:
delta = -1*w1*x^2/2*kAG + c2*x/kAG + (w1*x^4)/(24*EI) - (c2*x^3)/6*EI - c5*x^2/2*EI + c8*x + c11
b < x < L:
delta = c3*x/kAG + (-c3*x^3)/(6*EI) - c6*x^2/2*EI + c9*x + c12
'''
def deltax(self,x):
# redefine variables from self. to local to
# make formulas easier to read
w = self.w
a = self.a
c = self.c
b = self.b
L = self.L
E = self.E
I = self.I
G = self.G
kA = self.kA
if 0 <= x <= a:
delta = (((c[0][0]*x)/(kA*G)) +
((-1*c[0][0]*math.pow(x,3))/(6.0*E*I)) -
((c[3][0]*math.pow(x,2))/(2.0*E*I)) +
(c[6][0]*x) +
c[9][0])
elif a < x <= b:
delta = (((-1*w*math.pow(x,2))/(2.0*kA*G)) +
((c[1][0]*x)/(kA*G)) +
((w*math.pow(x,4))/(24.0*E*I)) -
((c[1][0]*math.pow(x,3))/(6.0*E*I)) -
((c[4][0]*math.pow(x,2))/(2.0*E*I)) +
(c[7][0]*x) +
c[10][0])
elif b < x <= L:
delta = (((c[2][0]*x)/(kA*G)) +
((-1*c[2][0]*math.pow(x,3))/(6.0*E*I)) -
((c[5][0]*math.pow(x,2))/(2*E*I)) +
(c[8][0]*x) +
c[11][0])
else:
delta = 0
return delta
def fef(self):
L = self.L
E = self.E
I = self.I
G = self.G
kA = self.kA
fem = fixedendmomentsTimoshenko(self.thetax(0), self.thetax(L), L, E, I, G, kA, [1,1])
ML = fem[0][0]
MR = fem[1][0]
mo = PointMoment(ML,0,L,E,I,G,kA)
ml = PointMoment(MR,L,L,E,I,G,kA)
RL = self.RL + mo.rl + ml.rl
RR = self.RR + mo.rr + ml.rr
return [RL,ML,RR,MR]
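# Usage sketch for UniformLoad, assuming kip and inch units and an
# illustrative partial-span load:
# ul = UniformLoad(w=0.1, a=60.0, b=180.0, L=240.0, E=29000.0, I=518.0,
#                  G=11200.0, kA=5.0)
# ul.vx(0.0), ul.mx(120.0), ul.deltax(120.0)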
class VariableLoad:
def __init__(self, w1, w2, a, b, L, E, I, G, kA):
'''
Timoshenko derivation for trapezoidal/variable loading
pin-pin single span beam
w1 - Load left end value
w2 - load right end value
a - load start point from left end of beam
b - load end point from left end of beam
d - load width = b - a
s - slope of load = (w2 - w1) / d
L - beam span
E = beam modulus of elasticity
I = beam second moment of area about the axis of bending
G = beam shear modulus
kA = beam shear area, typically the beam web area for steel W shapes
sign convention:
(+) positive moments are applied clockwise
(+) positive reactions are in the (+) positive y direction
'''
self.w1 = w1
self.w2 = w2
self.a = a
self.b = b
self.L = L
self.E = E
self.I = I
self.G = G
self.kA = kA
d= b - a
self.d = d
s= (w2 - w1) / d
self.s = s
'''
Reactions:
W = (w1 + w2)*d*0.5
sum Fy = 0
--> RL + RR - W = 0
xbar = (d * ((2 * w2) + w1)) / (3 * (w2 + w1))
sum Mx,rl,cw+ = 0
--> -RR*L + W*(a+xbar) = 0
--> RR = W*(a+xbar) / L
RL = W - RR
'''
W = (w1 + w2)*d*0.5
self.W = W
xbar = (d * ((2 * w2) + w1)) / (3 * (w2 + w1))
self.xbar = xbar
RR = W*(a+xbar) / L
RL = W - RR
self.RR = RR
self.RL = RL
#Boundary and Compatibility equation
#Lists of coefficients and lists of associated equalities
'''
Solve for Constants using boundary conditions and compatibility:
**Boundary @ x=0, V=RL:**
//c1 = RL
**Boundary @ x=L, V=-RR:**
//c3 = -RR
**Compatibility @ x=a, V=constant:**
c1 = -1*w1*a - (s(a-a)^2)/2 + c2
//-c1 + c2 = w1*a
**Boundary @ x=0, M=0:**
//c4 = 0
**Boundary @ x=L, M=0:**
//c3*L + c6 = 0
c6 = -c3*L
**Compatibility @ x=a, M=constant:**
c1*a = (-1*w1*a^2)/2 - (s(a-a)^3)/6 + c2*a + c5
//-c1*a + c2*a + c5 = (w1*a^2)/2 + (s(a-a)^3)/6
**Boundary @ x=0, delta=0:**
//c10 = 0
**Boundary @ x=L, delta=0:**
//0 = c3*L/kAG + (-c3*L^3)/(6*EI) - c6*L^2/2*EI + c9*L + c12
**Compatibility @ x=a, delta=constant:**
c1*a/kAG + (-c1*a^3)/(6*EI) - c4*a^2/2*EI + c7*a =
-1*w1*a^2/2*kAG - (s(a-a)^3)/6*kAG + c2*a/kAG + (w1*a^4)/(24*EI) + (s(a-a)^5)/(120*EI) - (c2*a^3)/6*EI - c5*a^2/2*EI + c8*a + c11
//c1*a/kAG + (-c1*a^3)/(6*EI) - c2*a/kAG + (c2*a^3)/6*EI - c4*a^2/2*EI + c5*a^2/2*EI + c7*a - c8*a - c11 =
-1*w1*a^2/2*kAG + (w1*a^4)/(24*EI)
**Compatibility @ x=a, theta=constant:**
(-c1*a^2)/(2*EI) - c4*a/EI + c7 = (w1*a^3)/(6*EI) + (s(a-a)^4)/(24*EI) - (c2*a^2)/2*EI - c5*a/EI + c8
//(-c1*a^2)/(2*EI) + (c2*a^2)/2*EI - c4*a/EI + c5*a/EI + c7 - c8 = (w1*a^3)/(6*EI)
**Compatibility @ x=b, theta = constant:**
(w1*b^3)/(6*EI) + (s(b-a)^4)/(24*EI) - (c2*b^2)/2*EI - c5*b/EI + c8 = (-c3*b^2)/(2*EI) - c6*b/EI + c9
//(-c2*b^2)/2*EI + (c3*b^2)/(2*EI) - c5*b/EI + c6*b/EI + c8 - c9 = (-1*w1*b^3)/(6*EI) - (s(b-a)^4)/(24*EI)
**Compatibility @ x=b, delta = constant:**
c3*b/kAG + (-c3*b^3)/(6*EI) - c6*b^2/2*EI + c9*b + c12 =
-1*w1*b^2/2*kAG - (s(b-a)^3)/6*kAG + c2*b/kAG + (w1*b^4)/(24*EI) + (s(b-a)^5)/(120*EI) - (c2*b^3)/6*EI - c5*b^2/2*EI + c8*b + c11
//-c2*b/kAG + (c2*b^3)/6*EI + c3*b/kAG + (-c3*b^3)/(6*EI) + c5*b^2/2*EI - c6*b^2/2*EI - c8*b + c9*b - c11 + c12 = -1*w1*b^2/2*kAG - (s(b-a)^3)/6*kAG + (w1*b^4)/(24*EI) + (s(b-a)^5)/(120*EI)
Matrix formulation for constants:
// above indicates formula in matrix
c1 [1,0,0,0,0,0,0,0,0,0,0,0] [RL]
c2 [-1,1,0,0,0,0,0,0,0,0,0,0] [w1*a]
c3 [0,0,1,0,0,0,0,0,0,0,0,0] [-RR]
c4 [0,0,0,1,0,0,0,0,0,0,0,0] [0]
c5 [-a,a,0,0,1,0,0,0,0,0,0,0] [(w1*a^2)/2]
c6 [0,0,L,0,0,1,0,0,0,0,0,0] [0]
c7 [(-1*a^2)/(2*EI),(a^2)/2*EI,0,-1*a/EI,a/EI,0,1,-1,0,0,0,0] [(w1*a^3)/(6*EI)]
c8 [0,(-1*b^2)/2*EI,(b^2)/(2*EI),0,-1*b/EI,b/EI,0,1,-1,0,0,0] [(-1*w1*b^3)/(6*EI) - (s(b-a)^4)/(24*EI)]
c9 [0,0,L/kAG + (-1*L^3)/(6*EI),0,0,-1*L^2/2*EI,0,0,L,0,0,1] [0]
c10 [0,0,0,0,0,0,0,0,0,1,0,0] [0]
c11 [a/kAG + (-1*a^3)/(6*EI),-1*a/kAG + (a^3)/6*EI,0,-1*a^2/2*EI,a^2/2*EI,0,a,-a,0,0,-1,0] [-1*w1*a^2/2*kAG + (w1*a^4)/(24*EI)]
c12 [0,-1*b/kAG + (b^3)/6*EI,b/kAG + (-1*b^3)/(6*EI),0,b^2/2*EI,-1*b^2/2*EI,0,-1*b,b,0,-1,1] [-1*w1*b^2/2*kAG - (s(b-a)^3)/6*kAG + (w1*b^4)/(24*EI) + (s(b-a)^5)/(120*EI)]
'''
bc1_coeff = [1,0,0,0,0,0,0,0,0,0,0,0]
bc1_eq = [RL]
bc2_coeff = [-1,1,0,0,0,0,0,0,0,0,0,0]
bc2_eq = [w1*a]
bc3_coeff = [0,0,1,0,0,0,0,0,0,0,0,0]
bc3_eq = [-1*RR]
bc4_coeff = [0,0,0,1,0,0,0,0,0,0,0,0]
bc4_eq = [0]
bc5_coeff = [-1*a,a,0,0,1,0,0,0,0,0,0,0]
bc5_eq = [(w1*math.pow(a,2))/2.0]
bc6_coeff = [0,0,L,0,0,1,0,0,0,0,0,0]
bc6_eq = [0]
bc7_coeff = [(-1*math.pow(a,2))/(2*E*I),(math.pow(a,2))/(2*E*I),0,-1*a/(E*I),a/(E*I),0,1,-1,0,0,0,0]
bc7_eq = [(w1*math.pow(a,3))/(6*E*I)]
bc8_coeff = [0,
(-1*math.pow(b,2))/(2*E*I),
(math.pow(b,2))/(2*E*I),
0,
-1*b/(E*I),
b/(E*I),
0,
1,
-1,
0,
0,
0]
bc8_eq = [((-1*w1*math.pow(b,3))/(6*E*I)) - ((s*math.pow((b-a),4))/(24*E*I))]
bc9_coeff = [0,0,(L/(kA*G)) + ((-1*math.pow(L,3))/(6*E*I)),0,0,-1*math.pow(L,2)/(2*E*I),0,0,L,0,0,1]
bc9_eq = [0]
bc10_coeff = [0,0,0,0,0,0,0,0,0,1,0,0]
bc10_eq = [0]
bc11_coeff = [(a/(kA*G)) + ((-1*math.pow(a,3))/(6*E*I)),
(-1*a/(kA*G)) + ((math.pow(a,3))/(6*E*I)),
0,
((-1*math.pow(a,2))/(2*E*I)),
(math.pow(a,2)/(2*E*I)),
0,
a,
-1*a,
0,
0,
-1,
0]
bc11_eq = [(-1*w1*math.pow(a,2)/(2*kA*G)) + ((w1*math.pow(a,4))/(24*E*I))]
bc12_coeff = [0,
(-1*b/(kA*G)) + ((math.pow(b,3))/(6*E*I)),
(b/(kA*G)) + ((-1*math.pow(b,3))/(6*E*I)),
0,
math.pow(b,2)/(2*E*I),
-1*math.pow(b,2)/(2*E*I),
0,
-1*b,
b,
0,
-1,
1]
bc12_eq = [((-1*w1*math.pow(b,2))/(2*kA*G)) - ((s*math.pow((b-a),3))/(6*kA*G)) + ((w1*math.pow(b,4))/(24*E*I)) + ((s*math.pow((b-a),5))/(120*E*I))]
bceq = [bc1_coeff,bc2_coeff,bc3_coeff,bc4_coeff,bc5_coeff,bc6_coeff,bc7_coeff,bc8_coeff,bc9_coeff,bc10_coeff,bc11_coeff,bc12_coeff]
bcs = [bc1_eq,bc2_eq,bc3_eq,bc4_eq,bc5_eq,bc6_eq,bc7_eq,bc8_eq,bc9_eq,bc10_eq,bc11_eq,bc12_eq]
bceq = np.array(bceq)
bcs = np.array(bcs)
c = np.linalg.solve(bceq,bcs)
self.c = c
'''
Load Formulas:
0 < x < a:
w = 0
a < x < b:
w = -1*w1 - s(x-a)
b < x < L:
w = 0
Shear Formulas:
w = dV/dx, therefore V = integral w dx
0 < x < a:
V = c1
a < x < b:
V = -1*w1*x - (s(x-a)^2)/2 + c2
b < x < L:
V = c3
'''
def vx(self,x):
# redefine variables from self. to local to
# make formulas easier to read
w1 = self.w1
a = self.a
s = self.s
c = self.c
b = self.b
L = self.L
if 0 <= x <= a:
v = c[0][0]
elif a < x <= b:
v = (-1*w1*x) - ((s*math.pow((x-a),2))/2.0) + c[1][0]
elif b < x <= L:
v = c[2][0]
else:
v = 0
return v
'''
Moment Formulas:
V = dM/dx, therefore M = integral V dx
0 < x < a:
M = c1*x + c4
a < x < b:
M = (-1*w1*x^2)/2 - (s(x-a)^3)/6 + c2*x + c5
b < x < L:
M = c3*x + c6
'''
def mx(self,x):
# redefine variables from self. to local to
# make formulas easier to read
w1 = self.w1
a = self.a
s = self.s
c = self.c
b = self.b
L = self.L
if 0 <= x <= a:
m = c[0][0]*x + c[3][0]
elif a < x <= b:
m = (((-1*w1*math.pow(x,2))/2.0) -
((s*math.pow((x-a),3))/6.0) +
c[1][0]*x +
c[4][0])
elif b < x <= L:
m = c[2][0]*x + c[5][0]
else:
m = 0
return m
'''
Timoshenko Relationship for Rotation, theta, and Deflection, delta
M = -E*I d theta/dx
V = kAG (-theta + d delta/dx)
Rotation Formulas:
theta = integral M/-EI dx
0 < x < a:
theta = (-c1*x^2)/(2*EI) - c4*x/EI + c7
a < x < b:
theta = (w1*x^3)/(6*EI) + (s(x-a)^4)/(24*EI) - (c2*x^2)/2*EI - c5*x/EI + c8
b < x < L:
theta = (-c3*x^2)/(2*EI) - c6*x/EI + c9
'''
def thetax(self,x):
# redefine variables from self. to local to
# make formulas easier to read
w1 = self.w1
a = self.a
s = self.s
c = self.c
b = self.b
L = self.L
E = self.E
I = self.I
if 0 <= x <= a:
theta = (((-1*c[0][0]*math.pow(x,2))/(2.0*E*I)) -
((c[3][0]*x)/(E*I)) +
c[6][0])
elif a < x <= b:
theta = (((w1*math.pow(x,3))/(6.0*E*I)) +
((s*math.pow((x-a),4))/(24.0*E*I)) -
((c[1][0]*math.pow(x,2))/(2.0*E*I)) -
((c[4][0]*x)/(E*I)) +
c[7][0])
elif b < x <= L:
theta = (((-1*c[2][0]*math.pow(x,2))/(2.0*E*I)) -
((c[5][0]*x)/(E*I)) +
c[8][0])
else:
theta = 0
return theta
'''
Delta Formulas:
delta = integral V/kAG + theta dx
0 < x < a:
delta = c1*x/kAG + (-c1*x^3)/(6*EI) - c4*x^2/2*EI + c7*x + c10
a < x < b:
delta = -1*w1*x^2/2*kAG - (s(x-a)^3)/6*kAG + c2*x/kAG + (w1*x^4)/(24*EI) + (s(x-a)^5)/(120*EI) - (c2*x^3)/6*EI - c5*x^2/2*EI + c8*x + c11
b < x < L:
delta = c3*x/kAG + (-c3*x^3)/(6*EI) - c6*x^2/2*EI + c9*x + c12
'''
def deltax(self,x):
# redefine variables from self. to local to
# make formulas easier to read
w1 = self.w1
a = self.a
s = self.s
c = self.c
b = self.b
L = self.L
E = self.E
I = self.I
G = self.G
kA = self.kA
if 0 <= x <= a:
delta = (((c[0][0]*x)/(kA*G)) +
((-1*c[0][0]*math.pow(x,3))/(6.0*E*I)) -
((c[3][0]*math.pow(x,2))/(2.0*E*I)) +
(c[6][0]*x) +
c[9][0])
elif a < x <= b:
delta = (((-1*w1*math.pow(x,2))/(2.0*kA*G))
- ((s*math.pow((x-a),3))/(6.0*kA*G))
+ ((c[1][0]*x)/(kA*G))
+ ((w1*math.pow(x,4))/(24.0*E*I))
+ ((s*math.pow((x-a),5))/(120.0*E*I))
- ((c[1][0]*math.pow(x,3))/(6.0*E*I))
- ((c[4][0]*math.pow(x,2))/(2.0*E*I))
+ (c[7][0]*x)
+ c[10][0])
elif b < x <= L:
delta = (((c[2][0]*x)/(kA*G))
+ ((-1*c[2][0]*math.pow(x,3))/(6.0*E*I))
- ((c[5][0]*math.pow(x,2))/(2*E*I))
+ (c[8][0]*x)
+ c[11][0])
else:
delta = 0
return delta
def fef(self):
L = self.L
E = self.E
I = self.I
G = self.G
kA = self.kA
fem = fixedendmomentsTimoshenko(self.thetax(0), self.thetax(L), L, E, I, G, kA, [1,1])
ML = fem[0][0]
MR = fem[1][0]
mo = PointMoment(ML,0,L,E,I,G,kA)
ml = PointMoment(MR,L,L,E,I,G,kA)
RL = self.RL + mo.rl + ml.rl
RR = self.RR + mo.rr + ml.rr
return [RL,ML,RR,MR]
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to generate a list of feature maps based on image features.
Provides several feature map generators that can be used to build object
detection feature extractors.
Object detection feature extractors are usually built by stacking two
components: a base feature extractor such as Inception V3 and a feature map
generator.
Feature map generators build on the base feature extractors and produce a list
of final feature maps.
"""
import collections
import functools
import tensorflow as tf
from object_detection.utils import ops
slim = tf.contrib.slim
# Activation bound used for TPU v1. Activations will be clipped to
# [-ACTIVATION_BOUND, ACTIVATION_BOUND] when training with
# use_bounded_activations enabled.
ACTIVATION_BOUND = 6.0
def get_depth_fn(depth_multiplier, min_depth):
"""Builds a callable to compute depth (output channels) of conv filters.
Args:
depth_multiplier: a multiplier for the nominal depth.
min_depth: a lower bound on the depth of filters.
Returns:
A callable that takes in a nominal depth and returns the depth to use.
"""
def multiply_depth(depth):
new_depth = int(depth * depth_multiplier)
return max(new_depth, min_depth)
return multiply_depth
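# Example (sketch): with depth_multiplier=0.5 and min_depth=16,
# get_depth_fn(0.5, 16)(256) returns 128 while get_depth_fn(0.5, 16)(24)
# is clamped up to the minimum depth of 16.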
class KerasMultiResolutionFeatureMaps(tf.keras.Model):
"""Generates multi resolution feature maps from input image features.
A Keras model that generates multi-scale feature maps for detection as in the
SSD papers by Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1.
More specifically, when called on inputs it performs the following two tasks:
1) If a layer name is provided in the configuration, returns that layer as a
feature map.
2) If a layer name is left as an empty string, constructs a new feature map
based on the spatial shape and depth configuration. Note that the current
implementation only supports generating new layers using convolution of
stride 2 resulting in a spatial resolution reduction by a factor of 2.
By default convolution kernel size is set to 3, and it can be customized
by caller.
An example of the configuration for Inception V3:
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
When this feature generator object is called on input image_features:
Args:
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
def __init__(self,
feature_map_layout,
depth_multiplier,
min_depth,
insert_1x1_conv,
is_training,
conv_hyperparams,
freeze_batchnorm,
name=None):
"""Constructor.
Args:
feature_map_layout: Dictionary of specifications for the feature map
layouts in the following format (Inception V2/V3 respectively):
{
'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
or
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
If 'from_layer' is specified, the specified feature map is directly used
as a box predictor layer, and the layer_depth is directly inferred from
the feature map (instead of using the provided 'layer_depth' parameter).
In this case, our convention is to set 'layer_depth' to -1 for clarity.
Otherwise, if 'from_layer' is an empty string, then the box predictor
layer will be built from the previous layer using convolution
operations. Note that the current implementation only supports
generating new layers using convolutions of stride 2 (resulting in a
spatial resolution reduction by a factor of 2), and will be extended to
a more flexible design. Convolution kernel size is set to 3 by default,
and can be customized by 'conv_kernel_size' parameter (similarly,
'conv_kernel_size' should be set to -1 if 'from_layer' is specified).
The created convolution operation will be a normal 2D convolution by
default, and a depthwise convolution followed by 1x1 convolution if
'use_depthwise' is set to True.
depth_multiplier: Depth multiplier for convolutional layers.
min_depth: Minimum depth for convolutional layers.
insert_1x1_conv: A boolean indicating whether an additional 1x1
convolution should be inserted before shrinking the feature map.
is_training: Indicates whether the feature generator is in training mode.
conv_hyperparams: A `hyperparams_builder.KerasLayerHyperparams` object
containing hyperparameters for convolution ops.
freeze_batchnorm: Bool. Whether to freeze batch norm parameters during
training or not. When training with a small batch size (e.g. 1), it is
desirable to freeze batch norm update and use pretrained batch norm
params.
name: A string name scope to assign to the model. If 'None', Keras
will auto-generate one from the class name.
"""
super(KerasMultiResolutionFeatureMaps, self).__init__(name=name)
self.feature_map_layout = feature_map_layout
self.convolutions = []
depth_fn = get_depth_fn(depth_multiplier, min_depth)
base_from_layer = ''
use_explicit_padding = False
if 'use_explicit_padding' in feature_map_layout:
use_explicit_padding = feature_map_layout['use_explicit_padding']
use_depthwise = False
if 'use_depthwise' in feature_map_layout:
use_depthwise = feature_map_layout['use_depthwise']
for index, from_layer in enumerate(feature_map_layout['from_layer']):
net = []
layer_depth = feature_map_layout['layer_depth'][index]
conv_kernel_size = 3
if 'conv_kernel_size' in feature_map_layout:
conv_kernel_size = feature_map_layout['conv_kernel_size'][index]
if from_layer:
base_from_layer = from_layer
else:
if insert_1x1_conv:
layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
base_from_layer, index, depth_fn(layer_depth / 2))
net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth / 2),
[1, 1],
padding='SAME',
strides=1,
name=layer_name + '_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format(
base_from_layer, index, conv_kernel_size, conv_kernel_size,
depth_fn(layer_depth))
stride = 2
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
# We define this function here while capturing the value of
# conv_kernel_size, to avoid holding a reference to the loop variable
# conv_kernel_size inside of a lambda function
def fixed_padding(features, kernel_size=conv_kernel_size):
return ops.fixed_padding(features, kernel_size)
net.append(tf.keras.layers.Lambda(fixed_padding))
# TODO(rathodv): Add some utilities to simplify the creation of
# Depthwise & non-depthwise convolutions w/ normalization & activations
if use_depthwise:
net.append(tf.keras.layers.DepthwiseConv2D(
[conv_kernel_size, conv_kernel_size],
depth_multiplier=1,
padding=padding,
strides=stride,
name=layer_name + '_depthwise_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_depthwise_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name + '_depthwise'))
net.append(tf.keras.layers.Conv2D(depth_fn(layer_depth), [1, 1],
padding='SAME',
strides=1,
name=layer_name + '_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
else:
net.append(tf.keras.layers.Conv2D(
depth_fn(layer_depth),
[conv_kernel_size, conv_kernel_size],
padding=padding,
strides=stride,
name=layer_name + '_conv',
**conv_hyperparams.params()))
net.append(
conv_hyperparams.build_batch_norm(
training=(is_training and not freeze_batchnorm),
name=layer_name + '_batchnorm'))
net.append(
conv_hyperparams.build_activation_layer(
name=layer_name))
# Until certain bugs are fixed in checkpointable lists,
# this net must be appended only once it's been filled with layers
self.convolutions.append(net)
def call(self, image_features):
"""Generate the multi-resolution feature maps.
Executed when calling the `.__call__` method on input.
Args:
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
feature_maps = []
feature_map_keys = []
for index, from_layer in enumerate(self.feature_map_layout['from_layer']):
if from_layer:
feature_map = image_features[from_layer]
feature_map_keys.append(from_layer)
else:
feature_map = feature_maps[-1]
for layer in self.convolutions[index]:
feature_map = layer(feature_map)
layer_name = self.convolutions[index][-1].name
feature_map_keys.append(layer_name)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
def multi_resolution_feature_maps(feature_map_layout, depth_multiplier,
min_depth, insert_1x1_conv, image_features,
pool_residual=False):
"""Generates multi resolution feature maps from input image features.
Generates multi-scale feature maps for detection as in the SSD papers by
Liu et al: https://arxiv.org/pdf/1512.02325v2.pdf, See Sec 2.1.
More specifically, it performs the following two tasks:
1) If a layer name is provided in the configuration, returns that layer as a
feature map.
2) If a layer name is left as an empty string, constructs a new feature map
based on the spatial shape and depth configuration. Note that the current
implementation only supports generating new layers using convolution of
stride 2 resulting in a spatial resolution reduction by a factor of 2.
By default convolution kernel size is set to 3, and it can be customized
by caller.
An example of the configuration for Inception V3:
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
Args:
feature_map_layout: Dictionary of specifications for the feature map
layouts in the following format (Inception V2/V3 respectively):
{
'from_layer': ['Mixed_3c', 'Mixed_4c', 'Mixed_5c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
or
{
'from_layer': ['Mixed_5d', 'Mixed_6e', 'Mixed_7c', '', '', ''],
'layer_depth': [-1, -1, -1, 512, 256, 128]
}
If 'from_layer' is specified, the specified feature map is directly used
as a box predictor layer, and the layer_depth is directly inferred from the
feature map (instead of using the provided 'layer_depth' parameter). In
this case, our convention is to set 'layer_depth' to -1 for clarity.
Otherwise, if 'from_layer' is an empty string, then the box predictor
layer will be built from the previous layer using convolution operations.
Note that the current implementation only supports generating new layers
using convolutions of stride 2 (resulting in a spatial resolution
reduction by a factor of 2), and will be extended to a more flexible
design. Convolution kernel size is set to 3 by default, and can be
customized by 'conv_kernel_size' parameter (similarly, 'conv_kernel_size'
should be set to -1 if 'from_layer' is specified). The created convolution
operation will be a normal 2D convolution by default, and a depthwise
convolution followed by 1x1 convolution if 'use_depthwise' is set to True.
depth_multiplier: Depth multiplier for convolutional layers.
min_depth: Minimum depth for convolutional layers.
insert_1x1_conv: A boolean indicating whether an additional 1x1 convolution
should be inserted before shrinking the feature map.
image_features: A dictionary of handles to activation tensors from the
base feature extractor.
pool_residual: Whether to add an average pooling layer followed by a
residual connection between subsequent feature maps when the channel
depth match. For example, with option 'layer_depth': [-1, 512, 256, 256],
a pooling and residual layer is added between the third and fourth feature
map. This option is better used with Weight Shared Convolution Box
Predictor when all feature maps have the same channel depth to encourage
more consistent features across multi-scale feature maps.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
Raises:
ValueError: if the number entries in 'from_layer' and
'layer_depth' do not match.
ValueError: if the generated layer does not have the same resolution
as specified.
"""
depth_fn = get_depth_fn(depth_multiplier, min_depth)
feature_map_keys = []
feature_maps = []
base_from_layer = ''
use_explicit_padding = False
if 'use_explicit_padding' in feature_map_layout:
use_explicit_padding = feature_map_layout['use_explicit_padding']
use_depthwise = False
if 'use_depthwise' in feature_map_layout:
use_depthwise = feature_map_layout['use_depthwise']
for index, from_layer in enumerate(feature_map_layout['from_layer']):
layer_depth = feature_map_layout['layer_depth'][index]
conv_kernel_size = 3
if 'conv_kernel_size' in feature_map_layout:
conv_kernel_size = feature_map_layout['conv_kernel_size'][index]
if from_layer:
feature_map = image_features[from_layer]
base_from_layer = from_layer
feature_map_keys.append(from_layer)
else:
pre_layer = feature_maps[-1]
pre_layer_depth = pre_layer.get_shape().as_list()[3]
intermediate_layer = pre_layer
if insert_1x1_conv:
layer_name = '{}_1_Conv2d_{}_1x1_{}'.format(
base_from_layer, index, depth_fn(layer_depth / 2))
intermediate_layer = slim.conv2d(
pre_layer,
depth_fn(layer_depth / 2), [1, 1],
padding='SAME',
stride=1,
scope=layer_name)
layer_name = '{}_2_Conv2d_{}_{}x{}_s2_{}'.format(
base_from_layer, index, conv_kernel_size, conv_kernel_size,
depth_fn(layer_depth))
stride = 2
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
intermediate_layer = ops.fixed_padding(
intermediate_layer, conv_kernel_size)
if use_depthwise:
feature_map = slim.separable_conv2d(
intermediate_layer,
None, [conv_kernel_size, conv_kernel_size],
depth_multiplier=1,
padding=padding,
stride=stride,
scope=layer_name + '_depthwise')
feature_map = slim.conv2d(
feature_map,
depth_fn(layer_depth), [1, 1],
padding='SAME',
stride=1,
scope=layer_name)
if pool_residual and pre_layer_depth == depth_fn(layer_depth):
feature_map += slim.avg_pool2d(
pre_layer, [3, 3],
padding='SAME',
stride=2,
scope=layer_name + '_pool')
else:
feature_map = slim.conv2d(
intermediate_layer,
depth_fn(layer_depth), [conv_kernel_size, conv_kernel_size],
padding=padding,
stride=stride,
scope=layer_name)
feature_map_keys.append(layer_name)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
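# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, assumed SSD-style layout for multi_resolution_feature_maps; the
# layer names ('Conv2d_11_pointwise', 'Conv2d_13_pointwise') are hypothetical
# stand-ins for keys of `image_features` produced by a real base network.
#
#   feature_map_layout = {
#       'from_layer': ['Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', ''],
#       'layer_depth': [-1, -1, 512, 256],
#       'use_explicit_padding': False,
#       'use_depthwise': False,
#   }
#
# Entries with an empty 'from_layer' (and a positive 'layer_depth') are built
# on top of the previous map with a stride-2 convolution, as implemented above.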
def fpn_top_down_feature_maps(image_features,
depth,
use_depthwise=False,
use_explicit_padding=False,
use_bounded_activations=False,
scope=None,
use_native_resize_op=False):
"""Generates `top-down` feature maps for Feature Pyramid Networks.
See https://arxiv.org/abs/1612.03144 for details.
Args:
image_features: list of tuples of (tensor_name, image_feature_tensor).
Spatial resolutions of successive tensors must reduce exactly by a factor
of 2.
depth: depth of output feature maps.
use_depthwise: whether to use depthwise separable conv instead of regular
conv.
use_explicit_padding: whether to use explicit padding.
use_bounded_activations: Whether or not to clip activations to range
[-ACTIVATION_BOUND, ACTIVATION_BOUND]. Bounded activations better lend
themselves to quantized inference.
scope: A scope name to wrap this op under.
use_native_resize_op: If True, uses tf.image.resize_nearest_neighbor op for
the upsampling process instead of the reshape-and-broadcast implementation.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
"""
with tf.name_scope(scope, 'top_down'):
num_levels = len(image_features)
output_feature_maps_list = []
output_feature_map_keys = []
padding = 'VALID' if use_explicit_padding else 'SAME'
kernel_size = 3
with slim.arg_scope(
[slim.conv2d, slim.separable_conv2d], padding=padding, stride=1):
top_down = slim.conv2d(
image_features[-1][1],
depth, [1, 1], activation_fn=None, normalizer_fn=None,
scope='projection_%d' % num_levels)
if use_bounded_activations:
top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND,
ACTIVATION_BOUND)
output_feature_maps_list.append(top_down)
output_feature_map_keys.append(
'top_down_%s' % image_features[-1][0])
for level in reversed(range(num_levels - 1)):
if use_native_resize_op:
with tf.name_scope('nearest_neighbor_upsampling'):
top_down_shape = top_down.shape.as_list()
top_down = tf.image.resize_nearest_neighbor(
top_down, [top_down_shape[1] * 2, top_down_shape[2] * 2])
else:
top_down = ops.nearest_neighbor_upsampling(top_down, scale=2)
residual = slim.conv2d(
image_features[level][1], depth, [1, 1],
activation_fn=None, normalizer_fn=None,
scope='projection_%d' % (level + 1))
if use_bounded_activations:
residual = tf.clip_by_value(residual, -ACTIVATION_BOUND,
ACTIVATION_BOUND)
if use_explicit_padding:
# slice top_down to the same shape as residual
residual_shape = tf.shape(residual)
top_down = top_down[:, :residual_shape[1], :residual_shape[2], :]
top_down += residual
if use_bounded_activations:
top_down = tf.clip_by_value(top_down, -ACTIVATION_BOUND,
ACTIVATION_BOUND)
if use_depthwise:
conv_op = functools.partial(slim.separable_conv2d, depth_multiplier=1)
else:
conv_op = slim.conv2d
if use_explicit_padding:
top_down = ops.fixed_padding(top_down, kernel_size)
output_feature_maps_list.append(conv_op(
top_down,
depth, [kernel_size, kernel_size],
scope='smoothing_%d' % (level + 1)))
output_feature_map_keys.append('top_down_%s' % image_features[level][0])
return collections.OrderedDict(reversed(
list(zip(output_feature_map_keys, output_feature_maps_list))))
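# --- Illustrative sketch (an assumption, not the original ops implementation) ---
# One common way to realize the "reshape and broadcasting" upsampling that
# ops.nearest_neighbor_upsampling refers to above: insert singleton axes, tile
# them by the scale factor, and fold the factor back into the spatial dims.
#
#   def _nearest_neighbor_upsampling_sketch(x, scale=2):
#       # x has shape [batch, height, width, channels]
#       shape = tf.shape(x)
#       x = tf.reshape(x, [shape[0], shape[1], 1, shape[2], 1, shape[3]])
#       x = tf.tile(x, [1, 1, scale, 1, scale, 1])
#       return tf.reshape(
#           x, [shape[0], shape[1] * scale, shape[2] * scale, shape[3]])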
def pooling_pyramid_feature_maps(base_feature_map_depth, num_layers,
image_features, replace_pool_with_conv=False):
"""Generates pooling pyramid feature maps.
The pooling pyramid feature maps are motivated by
multi_resolution_feature_maps. The main differences are that it is simpler and
reduces the number of free parameters.
More specifically:
- Instead of using convolutions to shrink the feature map, it uses max
pooling, and therefore has no convolution parameters at all.
- By pooling features from the larger map down to a single cell, it generates
features in the same feature space.
- Instead of independently making box predictions from individual maps, it
shares the same classifier across different feature maps, thereby reducing
the "mis-calibration" across different scales.
See go/ppn-detection for more details.
Args:
base_feature_map_depth: Depth of the base feature before the max pooling.
num_layers: Number of layers used to make predictions. They are pooled
from the base feature.
image_features: A dictionary of handles to activation tensors from the
feature extractor.
replace_pool_with_conv: Whether or not to replace pooling operations with
convolutions in the PPN. Default is False.
Returns:
feature_maps: an OrderedDict mapping keys (feature map names) to
tensors where each tensor has shape [batch, height_i, width_i, depth_i].
Raises:
ValueError: if image_features does not contain exactly one entry
"""
if len(image_features) != 1:
raise ValueError('image_features should be a dictionary of length 1.')
image_features = image_features[list(image_features.keys())[0]]  # list() so this also works on Python 3 dict views
feature_map_keys = []
feature_maps = []
feature_map_key = 'Base_Conv2d_1x1_%d' % base_feature_map_depth
if base_feature_map_depth > 0:
image_features = slim.conv2d(
image_features,
base_feature_map_depth,
[1, 1], # kernel size
padding='SAME', stride=1, scope=feature_map_key)
# Add a 1x1 max-pooling node (a no op node) immediately after the conv2d for
# TPU v1 compatibility. Without the following dummy op, TPU runtime
# compiler will combine the convolution with one max-pooling below into a
# single cycle, so getting the conv2d feature becomes impossible.
image_features = slim.max_pool2d(
image_features, [1, 1], padding='SAME', stride=1, scope=feature_map_key)
feature_map_keys.append(feature_map_key)
feature_maps.append(image_features)
feature_map = image_features
if replace_pool_with_conv:
with slim.arg_scope([slim.conv2d], padding='SAME', stride=2):
for i in range(num_layers - 1):
feature_map_key = 'Conv2d_{}_3x3_s2_{}'.format(i,
base_feature_map_depth)
feature_map = slim.conv2d(
feature_map, base_feature_map_depth, [3, 3], scope=feature_map_key)
feature_map_keys.append(feature_map_key)
feature_maps.append(feature_map)
else:
with slim.arg_scope([slim.max_pool2d], padding='SAME', stride=2):
for i in range(num_layers - 1):
feature_map_key = 'MaxPool2d_%d_2x2' % i
feature_map = slim.max_pool2d(
feature_map, [2, 2], padding='SAME', scope=feature_map_key)
feature_map_keys.append(feature_map_key)
feature_maps.append(feature_map)
return collections.OrderedDict(
[(x, y) for (x, y) in zip(feature_map_keys, feature_maps)])
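# --- Illustrative sketch (assumed input size, not from the original module) ---
# With base_feature_map_depth=1024, num_layers=3, replace_pool_with_conv=False
# and a single 19x19 input feature map, the keys/shapes produced above would be
# roughly:
#   'Base_Conv2d_1x1_1024' -> [batch, 19, 19, 1024]
#   'MaxPool2d_0_2x2'      -> [batch, 10, 10, 1024]
#   'MaxPool2d_1_2x2'      -> [batch,  5,  5, 1024]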
|
|
from flask import (
render_template, request, send_from_directory,
abort, jsonify, Response, redirect, url_for
)
import os
import uuid
from six import string_types
import logging
log = logging.getLogger(__name__)
from .bbauth import check_read_authentication_and_create_client
from ... import resources
from ..app import bokeh_app
from ..models import user
from ..models import docs
from ..models import convenience as mconv
from ... import protocol
from ...exceptions import DataIntegrityException
from ..views import make_json
from ..crossdomain import crossdomain
from ..serverbb import prune
from ...templates import AUTOLOAD
from ...resources import Resources
@bokeh_app.route('/bokeh/ping')
def ping():
#test route, to know if the server is up
return "pong"
@bokeh_app.route('/bokeh/')
def index(*unused_all, **kwargs):
bokehuser = bokeh_app.current_user()
if not bokehuser:
return redirect(url_for('.login_get'))
return render_template('bokeh.html',
splitjs=bokeh_app.splitjs,
username=bokehuser.username,
title="Bokeh Documents for %s" % bokehuser.username
)
@bokeh_app.route('/')
def welcome(*unused_all, **kwargs):
return redirect(url_for('.index'))
@bokeh_app.route('/bokeh/favicon.ico')
def favicon():
return send_from_directory(os.path.join(bokeh_app.root_path, 'static'),
'favicon.ico', mimetype='image/x-icon')
def _makedoc(redisconn, u, title):
docid = str(uuid.uuid4())
if isinstance(u, string_types):
u = user.User.load(redisconn, u)
clientdoc = bokeh_app.backbone_storage.get_document(docid)
prune(clientdoc)
u.add_doc(docid, title)
doc = docs.new_doc(bokeh_app, docid,
title, clientdoc,
rw_users=[u.username])
u.save(redisconn)
bokeh_app.backbone_storage.store_document(clientdoc)
return doc
@bokeh_app.route('/bokeh/doc', methods=['POST'])
@bokeh_app.route('/bokeh/doc/', methods=['POST'])
def makedoc():
if request.json:
title = request.json['title']
else:
title = request.values['title']
bokehuser = bokeh_app.current_user()
try:
_makedoc(bokeh_app.servermodel_storage, bokehuser, title)
except DataIntegrityException as e:
return abort(409, e.message)
jsonstring = protocol.serialize_web(bokehuser.to_public_json())
msg = protocol.serialize_web({'msgtype' : 'docchange'})
bokeh_app.wsmanager.send("bokehuser:" + bokehuser.username, msg)
return make_json(jsonstring)
@bokeh_app.route('/bokeh/doc/<docid>', methods=['delete'])
@bokeh_app.route('/bokeh/doc/<docid>/', methods=['delete'])
def deletedoc(docid):
bokehuser = bokeh_app.current_user()
try:
bokehuser.remove_doc(docid)
bokehuser.save(bokeh_app.servermodel_storage)
except DataIntegrityException as e:
return abort(409, e.message)
jsonstring = protocol.serialize_web(bokehuser.to_public_json())
msg = protocol.serialize_web({'msgtype' : 'docchange'})
bokeh_app.wsmanager.send("bokehuser:" + bokehuser.username, msg)
return make_json(jsonstring)
@bokeh_app.route('/bokeh/getdocapikey/<docid>')
def get_doc_api_key(docid):
doc = docs.Doc.load(bokeh_app.servermodel_storage, docid)
if mconv.can_write_from_request(doc, request, bokeh_app):
return jsonify({'apikey' : doc.apikey})
elif mconv.can_read_from_request(doc, request, bokeh_app):
return jsonify({'readonlyapikey' : doc.readonlyapikey})
else:
return abort(401)
@bokeh_app.route('/bokeh/userinfo/', methods=['GET', 'OPTIONS'])
@crossdomain(origin="*", headers=['BOKEH-API-KEY', 'Continuum-Clientid'])
def get_user():
bokehuser = bokeh_app.current_user()
if not bokehuser:
abort(403)
content = protocol.serialize_web(bokehuser.to_public_json())
return make_json(content)
def _make_test_plot_file(username, userapikey, url):
lines = ["from bokeh import mpl",
"p = mpl.PlotClient(username='%s', serverloc='%s', userapikey='%s')" % (username, url, userapikey)]
return "\n".join(lines)
@bokeh_app.route('/bokeh/doc/<docid>/', methods=['GET', 'OPTIONS'])
@bokeh_app.route('/bokeh/bokehinfo/<docid>/', methods=['GET', 'OPTIONS'])
@crossdomain(origin="*", headers=['BOKEH-API-KEY', 'Continuum-Clientid'])
@check_read_authentication_and_create_client
def get_bokeh_info(docid):
return _get_bokeh_info(docid)
def _get_bokeh_info(docid):
doc = docs.Doc.load(bokeh_app.servermodel_storage, docid)
clientdoc = bokeh_app.backbone_storage.get_document(docid)
prune(clientdoc)
all_models = clientdoc._models.values()
log.info("num models: %s", len(all_models))
all_models = clientdoc.dump(*all_models)
returnval = {'plot_context_ref' : doc.plot_context_ref,
'docid' : docid,
'all_models' : all_models,
'apikey' : doc.apikey}
returnval = protocol.serialize_json(returnval)
#i don't think we need to set the header here...
result = make_json(returnval,
headers={"Access-Control-Allow-Origin": "*"})
return result
@bokeh_app.route('/bokeh/doc/<title>/show', methods=['GET', 'OPTIONS'])
@crossdomain(origin="*", headers=['BOKEH-API-KEY', 'Continuum-Clientid'])
def show_doc_by_title(title):
bokehuser = bokeh_app.current_user()
docs = [ doc for doc in bokehuser.docs if doc['title'] == title ]
doc = docs[0] if len(docs) != 0 else abort(404)
docid = doc['docid']
return render_template('show.html', title=title, docid=docid, splitjs=bokeh_app.splitjs)
@bokeh_app.route('/bokeh/doc/', methods=['GET', 'OPTIONS'])
@crossdomain(origin="*", headers=['BOKEH-API-KEY', 'Continuum-Clientid'])
def doc_by_title():
if request.json:
title = request.json['title']
else:
title = request.values['title']
bokehuser = bokeh_app.current_user()
docs = [doc for doc in bokehuser.docs if doc['title'] == title]
if len(docs) == 0:
try:
doc = _makedoc(bokeh_app.servermodel_storage, bokehuser, title)
docid = doc.docid
except DataIntegrityException as e:
return abort(409, e.message)
msg = protocol.serialize_web({'msgtype' : 'docchange'})
bokeh_app.wsmanager.send("bokehuser:" + bokehuser.username, msg)
else:
doc = docs[0]
docid = doc['docid']
return get_bokeh_info(docid)
# need to rethink public publishing
# @bokeh_app.route('/bokeh/publicbokehinfo/<docid>')
# def get_public_bokeh_info(docid):
# doc = docs.Doc.load(bokeh_app.servermodel_storage, docid)
# plot_context_ref = doc.plot_context_ref
# all_models = docs.prune_and_get_valid_models(bokeh_app.servermodel_storage,
# bokeh_app.collections,
# docid)
# public_models = [x for x in all_models if x.get('public', False)]
# if len(public_models) == 0:
# return False
# all_models_json = [x.to_broadcast_json() for x in all_models]
# returnval = {'plot_context_ref' : plot_context_ref,
# 'docid' : docid,
# 'all_models' : all_models_json,
# }
# returnval = protocol.serialize_web(returnval)
# #return returnval
# return (returnval, "200",
# {"Access-Control-Allow-Origin": "*"})
@bokeh_app.route('/bokeh/sampleerror')
def sampleerror():
return 1 + "sdf"
def make_test_plot():
import numpy as np
from bokeh.plotting import output_server, line
N = 8000
x = np.linspace(0, 4*np.pi, N)
y = np.sin(x)
output_server("line.py example")
l = line(
x,y, color="#0000FF",
plot_height=300, plot_width=300,
tools="pan,resize")
return l
#show()
@bokeh_app.route("/bokeh/generate_embed/<inject_type>")
def generate_embed(inject_type):
"""the following 8 functions setup embedding pages in a variety of formats
urls with no_js don't have any of our javascript included in
script tags. The embed.js code is supposed to make sure the
proper js files are sourced. Embed.js should only download a new
js file if the existing javascript code isn't in the runtime
environment.
static places a script tag into the html markup.
static_double places two script tags in the dom. This should
still cause the bokeh js to be downloaded only once
the rest of the urls construct a script tag with a source of the
embed.js along with the proper attributes.
with_delay doesn't inject until 5 seconds after pageload
double_delay injects two separate plots, one at 3 seconds in,
the other at 5 seconds in.
onload injects at onload
direct injects as soon as the script block is hit.
Every one of these urls should display the same plot
"""
plot = make_test_plot()
delay, double_delay, onload, direct = [False] * 4
plot_scr = ""
if inject_type == "delay":
delay = True
if inject_type == "double_delay":
double_delay = True
elif inject_type == "onload":
onload = True
elif inject_type == "direct":
direct = True
elif inject_type == "static":
plot_scr = plot.create_html_snippet(server=True)
elif inject_type == "static_double":
plot_scr = "%s %s" % (plot.create_html_snippet(server=True),
plot.create_html_snippet(server=True))
return dom_embed(
plot, delay=delay, onload=onload,
direct=direct, plot_scr=plot_scr, double_delay=double_delay)
@bokeh_app.route("/bokeh/embed.js")
def embed_js():
import jinja2
t_file = os.path.join(
os.path.dirname(
os.path.abspath(__file__)), "..", "..", "templates", "embed_direct.js")
with open(t_file) as f:
template = jinja2.Template(f.read())
rendered = template.render(host=request.host)
return Response(rendered, 200,
{'Content-Type':'application/javascript'})
@bokeh_app.route("/bokeh/autoload.js/<elementid>")
def autoload_js(elementid):
if bokeh_app.url_prefix:
root_url = request.url_root + bokeh_app.url_prefix[1:] # strip off the leading slash
else:
root_url = request.url_root
resources = Resources(root_url=root_url, mode='server')
rendered = AUTOLOAD.render(
js_url = resources.js_files[0],
css_files = resources.css_files,
elementid = elementid,
)
return Response(rendered, 200,
{'Content-Type':'application/javascript'})
@bokeh_app.route('/bokeh/objinfo/<docid>/<objid>', methods=['GET', 'OPTIONS'])
@crossdomain(origin="*", headers=['BOKEH-API-KEY', 'Continuum-Clientid'])
@check_read_authentication_and_create_client
def get_bokeh_info_one_object(docid, objid):
doc = docs.Doc.load(bokeh_app.servermodel_storage, docid)
clientdoc = bokeh_app.backbone_storage.get_document(docid)
prune(clientdoc)
obj = clientdoc._models[objid]
objs = obj.references()
all_models = clientdoc.dump(*objs)
returnval = {'plot_context_ref' : doc.plot_context_ref,
'docid' : docid,
'all_models' : all_models,
'apikey' : doc.apikey,
'type' : obj.__view_model__
}
returnval = protocol.serialize_json(returnval)
result = make_json(returnval,
headers={"Access-Control-Allow-Origin": "*"})
return result
@bokeh_app.route('/bokeh/doc/<docid>/<objid>', methods=['GET'])
def show_obj(docid, objid):
bokehuser = bokeh_app.current_user()
if not bokehuser:
return redirect(url_for(".login_get", next=request.url))
return render_template("oneobj.html",
docid=docid,
objid=objid,
hide_navbar=True,
splitjs=bokeh_app.splitjs,
username=bokehuser.username)
|
|
"""
Channels module for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import asyncio
from concurrent.futures import TimeoutError as Timeout
from enum import Enum
from functools import wraps
import logging
from random import uniform
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from ..helpers import (
bind_configure_reporting, construct_unique_id,
safe_read, get_attr_id_by_name, bind_cluster)
from ..const import (
REPORT_CONFIG_DEFAULT, SIGNAL_ATTR_UPDATED, ATTRIBUTE_CHANNEL,
EVENT_RELAY_CHANNEL, ZDO_CHANNEL
)
from ..registries import CLUSTER_REPORT_CONFIGS
NODE_DESCRIPTOR_REQUEST = 0x0002
MAINS_POWERED = 1
BATTERY_OR_UNKNOWN = 0
ZIGBEE_CHANNEL_REGISTRY = {}
_LOGGER = logging.getLogger(__name__)
def parse_and_log_command(unique_id, cluster, tsn, command_id, args):
"""Parse and log a zigbee cluster command."""
cmd = cluster.server_commands.get(command_id, [command_id])[0]
_LOGGER.debug(
"%s: received '%s' command with %s args on cluster_id '%s' tsn '%s'",
unique_id,
cmd,
args,
cluster.cluster_id,
tsn
)
return cmd
def decorate_command(channel, command):
"""Wrap a cluster command to make it safe."""
@wraps(command)
async def wrapper(*args, **kwds):
from zigpy.zcl.foundation import Status
from zigpy.exceptions import DeliveryError
try:
result = await command(*args, **kwds)
_LOGGER.debug("%s: executed command: %s %s %s %s",
channel.unique_id,
command.__name__,
"{}: {}".format("with args", args),
"{}: {}".format("with kwargs", kwds),
"{}: {}".format("and result", result))
if isinstance(result, bool):
return result
return result[1] is Status.SUCCESS
except (DeliveryError, Timeout) as ex:
_LOGGER.debug(
"%s: command failed: %s exception: %s",
channel.unique_id,
command.__name__,
str(ex)
)
return False
return wrapper
class ChannelStatus(Enum):
"""Status of a channel."""
CREATED = 1
CONFIGURED = 2
INITIALIZED = 3
class ZigbeeChannel:
"""Base channel for a Zigbee cluster."""
CHANNEL_NAME = None
def __init__(self, cluster, device):
"""Initialize ZigbeeChannel."""
self._channel_name = cluster.ep_attribute
if self.CHANNEL_NAME:
self._channel_name = self.CHANNEL_NAME
self._generic_id = 'channel_0x{:04x}'.format(cluster.cluster_id)
self._cluster = cluster
self._zha_device = device
self._unique_id = construct_unique_id(cluster)
self._report_config = CLUSTER_REPORT_CONFIGS.get(
self._cluster.cluster_id,
[{'attr': 0, 'config': REPORT_CONFIG_DEFAULT}]
)
self._status = ChannelStatus.CREATED
self._cluster.add_listener(self)
@property
def generic_id(self):
"""Return the generic id for this channel."""
return self._generic_id
@property
def unique_id(self):
"""Return the unique id for this channel."""
return self._unique_id
@property
def cluster(self):
"""Return the zigpy cluster for this channel."""
return self._cluster
@property
def device(self):
"""Return the device this channel is linked to."""
return self._zha_device
@property
def name(self) -> str:
"""Return friendly name."""
return self._channel_name
@property
def status(self):
"""Return the status of the channel."""
return self._status
def set_report_config(self, report_config):
"""Set the reporting configuration."""
self._report_config = report_config
async def async_configure(self):
"""Set cluster binding and attribute reporting."""
manufacturer = None
manufacturer_code = self._zha_device.manufacturer_code
if self.cluster.cluster_id >= 0xfc00 and manufacturer_code:
manufacturer = manufacturer_code
if self.cluster.bind_only:
await bind_cluster(self._unique_id, self.cluster)
else:
skip_bind = False # bind cluster only for the 1st configured attr
for report_config in self._report_config:
attr = report_config.get('attr')
min_report_interval, max_report_interval, change = \
report_config.get('config')
await bind_configure_reporting(
self._unique_id, self.cluster, attr,
min_report=min_report_interval,
max_report=max_report_interval,
reportable_change=change,
skip_bind=skip_bind,
manufacturer=manufacturer
)
skip_bind = True
await asyncio.sleep(uniform(0.1, 0.5))
_LOGGER.debug(
"%s: finished channel configuration",
self._unique_id
)
self._status = ChannelStatus.CONFIGURED
async def async_initialize(self, from_cache):
"""Initialize channel."""
_LOGGER.debug(
'initializing channel: %s from_cache: %s',
self._channel_name,
from_cache
)
self._status = ChannelStatus.INITIALIZED
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle commands received to this cluster."""
pass
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
pass
@callback
def zdo_command(self, *args, **kwargs):
"""Handle ZDO commands on this cluster."""
pass
@callback
def zha_send_event(self, cluster, command, args):
"""Relay events to hass."""
self._zha_device.hass.bus.async_fire(
'zha_event',
{
'unique_id': self._unique_id,
'device_ieee': str(self._zha_device.ieee),
'command': command,
'args': args
}
)
async def async_update(self):
"""Retrieve latest state from cluster."""
pass
async def get_attribute_value(self, attribute, from_cache=True):
"""Get the value for an attribute."""
manufacturer = None
manufacturer_code = self._zha_device.manufacturer_code
if self.cluster.cluster_id >= 0xfc00 and manufacturer_code:
manufacturer = manufacturer_code
result = await safe_read(
self._cluster,
[attribute],
allow_cache=from_cache,
only_cache=from_cache,
manufacturer=manufacturer
)
return result.get(attribute)
def __getattr__(self, name):
"""Get attribute or a decorated cluster command."""
if hasattr(self._cluster, name) and callable(
getattr(self._cluster, name)):
command = getattr(self._cluster, name)
command.__name__ = name
return decorate_command(
self,
command
)
return self.__getattribute__(name)
class AttributeListeningChannel(ZigbeeChannel):
"""Channel for attribute reports from the cluster."""
CHANNEL_NAME = ATTRIBUTE_CHANNEL
def __init__(self, cluster, device):
"""Initialize AttributeListeningChannel."""
super().__init__(cluster, device)
attr = self._report_config[0].get('attr')
if isinstance(attr, str):
self.value_attribute = get_attr_id_by_name(self.cluster, attr)
else:
self.value_attribute = attr
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
if attrid == self.value_attribute:
async_dispatcher_send(
self._zha_device.hass,
"{}_{}".format(self.unique_id, SIGNAL_ATTR_UPDATED),
value
)
async def async_initialize(self, from_cache):
"""Initialize listener."""
await self.get_attribute_value(
self._report_config[0].get('attr'), from_cache=from_cache)
await super().async_initialize(from_cache)
class ZDOChannel:
"""Channel for ZDO events."""
POWER_SOURCES = {
MAINS_POWERED: 'Mains',
BATTERY_OR_UNKNOWN: 'Battery or Unknown'
}
def __init__(self, cluster, device):
"""Initialize ZDOChannel."""
self.name = ZDO_CHANNEL
self._cluster = cluster
self._zha_device = device
self._status = ChannelStatus.CREATED
self._unique_id = "{}_ZDO".format(device.name)
self._cluster.add_listener(self)
self.power_source = None
self.manufacturer_code = None
@property
def unique_id(self):
"""Return the unique id for this channel."""
return self._unique_id
@property
def cluster(self):
"""Return the aigpy cluster for this channel."""
return self._cluster
@property
def status(self):
"""Return the status of the channel."""
return self._status
@callback
def device_announce(self, zigpy_device):
"""Device announce handler."""
pass
@callback
def permit_duration(self, duration):
"""Permit handler."""
pass
async def async_initialize(self, from_cache):
"""Initialize channel."""
entry = self._zha_device.gateway.zha_storage.async_get_or_create(
self._zha_device)
_LOGGER.debug("entry loaded from storage: %s", entry)
if entry is not None:
self.power_source = entry.power_source
self.manufacturer_code = entry.manufacturer_code
if self.power_source is None:
self.power_source = BATTERY_OR_UNKNOWN
if self.manufacturer_code is None and not from_cache:
# this should always be set. This is from us not doing
# this previously so let's set it up so users don't have
# to reconfigure every device.
await self.async_get_node_descriptor(False)
entry = self._zha_device.gateway.zha_storage.async_update(
self._zha_device)
_LOGGER.debug("entry after getting node desc in init: %s", entry)
self._status = ChannelStatus.INITIALIZED
async def async_get_node_descriptor(self, from_cache):
"""Request the node descriptor from the device."""
from zigpy.zdo.types import Status
if from_cache:
return
node_descriptor = await self._cluster.request(
NODE_DESCRIPTOR_REQUEST,
self._cluster.device.nwk, tries=3, delay=2)
def get_bit(byteval, idx):
return int(((byteval & (1 << idx)) != 0))
if node_descriptor is not None and\
node_descriptor[0] == Status.SUCCESS:
mac_capability_flags = node_descriptor[2].mac_capability_flags
self.power_source = get_bit(mac_capability_flags, 2)
self.manufacturer_code = node_descriptor[2].manufacturer_code
_LOGGER.debug("node descriptor: %s", node_descriptor)
async def async_configure(self):
"""Configure channel."""
await self.async_get_node_descriptor(False)
self._status = ChannelStatus.CONFIGURED
class EventRelayChannel(ZigbeeChannel):
"""Event relay that can be attached to zigbee clusters."""
CHANNEL_NAME = EVENT_RELAY_CHANNEL
@callback
def attribute_updated(self, attrid, value):
"""Handle an attribute updated on this cluster."""
self.zha_send_event(
self._cluster,
SIGNAL_ATTR_UPDATED,
{
'attribute_id': attrid,
'attribute_name': self._cluster.attributes.get(
attrid,
['Unknown'])[0],
'value': value
}
)
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle a cluster command received on this cluster."""
if self._cluster.server_commands is not None and \
self._cluster.server_commands.get(command_id) is not None:
self.zha_send_event(
self._cluster,
self._cluster.server_commands.get(command_id)[0],
args
)
|
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova.virt.disk.mount import api
from nova.virt.disk.mount import block
from nova.virt.disk.mount import loop
from nova.virt.disk.mount import nbd
from nova.virt.image import model as imgmodel
PARTITION = 77
ORIG_DEVICE = "/dev/null"
AUTOMAP_PARTITION = "/dev/nullp77"
MAP_PARTITION = "/dev/mapper/nullp77"
class MountTestCase(test.NoDBTestCase):
def setUp(self):
super(MountTestCase, self).setUp()
def _test_map_dev(self, partition):
mount = api.Mount(mock.sentinel.image, mock.sentinel.mount_dir)
mount.device = ORIG_DEVICE
mount.partition = partition
mount.map_dev()
return mount
@mock.patch('nova.utils.trycmd')
def _test_map_dev_with_trycmd(self, partition, trycmd):
trycmd.return_value = [None, None]
mount = self._test_map_dev(partition)
self.assertEqual(1, trycmd.call_count) # don't care about args
return mount
def _exists_effect(self, data):
def exists_effect(filename):
try:
v = data[filename]
if isinstance(v, list):
if len(v) > 0:
return v.pop(0)
self.fail("Out of items for: %s" % filename)
return v
except KeyError:
self.fail("Unexpected call with: %s" % filename)
return exists_effect
def _check_calls(self, exists, filenames, trailing=0):
self.assertEqual([mock.call(x) for x in filenames],
exists.call_args_list[:len(filenames)])
self.assertEqual([mock.call(MAP_PARTITION)] * trailing,
exists.call_args_list[len(filenames):])
@mock.patch('os.path.exists')
def test_map_dev_partition_search(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True})
mount = self._test_map_dev(-1)
self._check_calls(exists, [ORIG_DEVICE])
self.assertNotEqual("", mount.error)
self.assertFalse(mount.mapped)
@mock.patch('os.path.exists')
def test_map_dev_good(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True,
AUTOMAP_PARTITION: False,
MAP_PARTITION: [False, True]})
mount = self._test_map_dev_with_trycmd(PARTITION)
self._check_calls(exists, [ORIG_DEVICE, AUTOMAP_PARTITION], 2)
self.assertEqual("", mount.error)
self.assertTrue(mount.mapped)
@mock.patch('os.path.exists')
def test_map_dev_error(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True,
AUTOMAP_PARTITION: False,
MAP_PARTITION: False})
mount = self._test_map_dev_with_trycmd(PARTITION)
self._check_calls(exists, [ORIG_DEVICE, AUTOMAP_PARTITION],
api.MAX_FILE_CHECKS + 1)
self.assertNotEqual("", mount.error)
self.assertFalse(mount.mapped)
@mock.patch('os.path.exists')
def test_map_dev_error_then_pass(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True,
AUTOMAP_PARTITION: False,
MAP_PARTITION: [False, False, True]})
mount = self._test_map_dev_with_trycmd(PARTITION)
self._check_calls(exists, [ORIG_DEVICE, AUTOMAP_PARTITION], 3)
self.assertEqual("", mount.error)
self.assertTrue(mount.mapped)
@mock.patch('os.path.exists')
def test_map_dev_automap(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True,
AUTOMAP_PARTITION: True})
mount = self._test_map_dev(PARTITION)
self._check_calls(exists,
[ORIG_DEVICE, AUTOMAP_PARTITION, AUTOMAP_PARTITION])
self.assertEqual(AUTOMAP_PARTITION, mount.mapped_device)
self.assertTrue(mount.automapped)
self.assertTrue(mount.mapped)
@mock.patch('os.path.exists')
def test_map_dev_else(self, exists):
exists.side_effect = self._exists_effect({
ORIG_DEVICE: True,
AUTOMAP_PARTITION: True})
mount = self._test_map_dev(None)
self._check_calls(exists, [ORIG_DEVICE])
self.assertEqual(ORIG_DEVICE, mount.mapped_device)
self.assertFalse(mount.automapped)
self.assertTrue(mount.mapped)
def test_instance_for_format_raw(self):
image = imgmodel.LocalFileImage("/some/file.raw",
imgmodel.FORMAT_RAW)
mount_dir = '/mount/dir'
partition = -1
inst = api.Mount.instance_for_format(image, mount_dir, partition)
self.assertIsInstance(inst, loop.LoopMount)
def test_instance_for_format_qcow2(self):
image = imgmodel.LocalFileImage("/some/file.qcows",
imgmodel.FORMAT_QCOW2)
mount_dir = '/mount/dir'
partition = -1
inst = api.Mount.instance_for_format(image, mount_dir, partition)
self.assertIsInstance(inst, nbd.NbdMount)
def test_instance_for_format_block(self):
image = imgmodel.LocalBlockImage(
"/dev/mapper/instances--instance-0000001_disk",)
mount_dir = '/mount/dir'
partition = -1
inst = api.Mount.instance_for_format(image, mount_dir, partition)
self.assertIsInstance(inst, block.BlockMount)
def test_instance_for_device_loop(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = -1
device = '/dev/loop0'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, loop.LoopMount)
def test_instance_for_device_loop_partition(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = 1
device = '/dev/mapper/loop0p1'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, loop.LoopMount)
def test_instance_for_device_nbd(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = -1
device = '/dev/nbd0'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, nbd.NbdMount)
def test_instance_for_device_nbd_partition(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = 1
device = '/dev/mapper/nbd0p1'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, nbd.NbdMount)
def test_instance_for_device_block(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = -1
device = '/dev/mapper/instances--instance-0000001_disk'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, block.BlockMount)
def test_instance_for_device_block_partition(self):
image = mock.MagicMock()
mount_dir = '/mount/dir'
partition = 1
device = '/dev/mapper/instances--instance-0000001_diskp1'
inst = api.Mount.instance_for_device(image, mount_dir, partition,
device)
self.assertIsInstance(inst, block.BlockMount)
|
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/Fortran/FORTRANPATH.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
import os
import TestSCons
_exe = TestSCons._exe
prog = 'prog' + _exe
subdir_prog = os.path.join('subdir', 'prog' + _exe)
variant_prog = os.path.join('variant', 'prog' + _exe)
args = prog + ' ' + subdir_prog + ' ' + variant_prog
test = TestSCons.TestSCons()
fc = 'f77'
if not test.detect_tool(fc):
test.skip_test('Could not find a f77 tool; skipping test.\n')
test.subdir('include',
'subdir',
['subdir', 'include'],
'foobar',
'inc2')
test.write('SConstruct', """
env = Environment(FORTRAN = '%s',
FORTRANPATH = ['$FOO', '${TARGET.dir}', '${SOURCE.dir}'],
FOO='include')
obj = env.Object(target='foobar/prog', source='subdir/prog.f')
env.Program(target='prog', source=obj)
SConscript('subdir/SConscript', "env")
VariantDir('variant', 'subdir', 0)
include = Dir('include')
env = Environment(FORTRAN = '%s',
FORTRANPATH=[include, '#foobar', '#subdir'])
SConscript('variant/SConscript', "env")
""" % (fc, fc))
test.write(['subdir', 'SConscript'],
"""
Import("env")
env.Program(target='prog', source='prog.f')
""")
test.write(['include', 'foo.f'],
r"""
PRINT *, 'include/foo.f 1'
INCLUDE 'bar.f'
""")
test.write(['include', 'bar.f'],
r"""
PRINT *, 'include/bar.f 1'
""")
test.write(['subdir', 'prog.f'],
r"""
PROGRAM PROG
PRINT *, 'subdir/prog.f'
include 'foo.f'
include 'sss.f'
include 'ttt.f'
STOP
END
""")
test.write(['subdir', 'include', 'foo.f'],
r"""
PRINT *, 'subdir/include/foo.f 1'
INCLUDE 'bar.f'
""")
test.write(['subdir', 'include', 'bar.f'],
r"""
PRINT *, 'subdir/include/bar.f 1'
""")
test.write(['subdir', 'sss.f'],
r"""
PRINT *, 'subdir/sss.f'
""")
test.write(['subdir', 'ttt.f'],
r"""
PRINT *, 'subdir/ttt.f'
""")
import sys
if sys.platform[:5] == 'sunos':
# Sun f77 always put some junk in stderr
test.run(arguments = args, stderr = None)
else:
test.run(arguments = args)
test.run(program = test.workpath(prog),
stdout = """\
subdir/prog.f
include/foo.f 1
include/bar.f 1
subdir/sss.f
subdir/ttt.f
""")
test.run(program = test.workpath(subdir_prog),
stdout = """\
subdir/prog.f
subdir/include/foo.f 1
subdir/include/bar.f 1
subdir/sss.f
subdir/ttt.f
""")
test.run(program = test.workpath(variant_prog),
stdout = """\
subdir/prog.f
include/foo.f 1
include/bar.f 1
subdir/sss.f
subdir/ttt.f
""")
# Make sure we didn't duplicate the source file in the variant subdirectory.
test.must_not_exist(test.workpath('variant', 'prog.f'))
test.up_to_date(arguments = args)
test.write(['include', 'foo.f'],
r"""
PRINT *, 'include/foo.f 2'
INCLUDE 'bar.f'
""")
if sys.platform[:5] == 'sunos':
# Sun f77 always put some junk in stderr
test.run(arguments = args, stderr = None)
else:
test.run(arguments = args)
test.run(program = test.workpath(prog),
stdout = """\
subdir/prog.f
include/foo.f 2
include/bar.f 1
subdir/sss.f
subdir/ttt.f
""")
test.run(program = test.workpath(subdir_prog),
stdout = """\
subdir/prog.f
subdir/include/foo.f 1
subdir/include/bar.f 1
subdir/sss.f
subdir/ttt.f
""")
test.run(program = test.workpath(variant_prog),
stdout = """\
subdir/prog.f
include/foo.f 2
include/bar.f 1
subdir/sss.f
subdir/ttt.f
""")
# Make sure we didn't duplicate the source file in the variant subdirectory.
test.must_not_exist(test.workpath('variant', 'prog.f'))
test.up_to_date(arguments = args)
#
test.write(['include', 'bar.f'],
r"""
PRINT *, 'include/bar.f 2'
""")
if sys.platform[:5] == 'sunos':
# Sun f77 always put some junk in stderr
test.run(arguments = args, stderr = None)
else:
test.run(arguments = args)
test.run(program = test.workpath(prog),
stdout = """\
subdir/prog.f
include/foo.f 2
include/bar.f 2
subdir/sss.f
subdir/ttt.f
""")
test.run(program = test.workpath(subdir_prog),
stdout = """\
subdir/prog.f
subdir/include/foo.f 1
subdir/include/bar.f 1
subdir/sss.f
subdir/ttt.f
""")
test.run(program = test.workpath(variant_prog),
stdout = """\
subdir/prog.f
include/foo.f 2
include/bar.f 2
subdir/sss.f
subdir/ttt.f
""")
# Make sure we didn't duplicate the source file in the variant subdirectory.
test.must_not_exist(test.workpath('variant', 'prog.f'))
test.up_to_date(arguments = args)
# Change FORTRANPATH and make sure we don't rebuild because of it.
test.write('SConstruct', """
env = Environment(FORTRAN = '%s',
FORTRANPATH = Split('inc2 include ${TARGET.dir} ${SOURCE.dir}'))
obj = env.Object(target='foobar/prog', source='subdir/prog.f')
env.Program(target='prog', source=obj)
SConscript('subdir/SConscript', "env")
VariantDir('variant', 'subdir', 0)
include = Dir('include')
env = Environment(FORTRAN = '%s',
FORTRANPATH=['inc2', include, '#foobar', '#subdir'])
SConscript('variant/SConscript', "env")
""" % (fc, fc))
test.up_to_date(arguments = args)
#
test.write(['inc2', 'foo.f'],
r"""
PRINT *, 'inc2/foo.f 1'
INCLUDE 'bar.f'
""")
if sys.platform[:5] == 'sunos':
# Sun f77 always put some junk in stderr
test.run(arguments = args, stderr = None)
else:
test.run(arguments = args)
test.run(program = test.workpath(prog),
stdout = """\
subdir/prog.f
inc2/foo.f 1
include/bar.f 2
subdir/sss.f
subdir/ttt.f
""")
test.run(program = test.workpath(subdir_prog),
stdout = """\
subdir/prog.f
subdir/include/foo.f 1
subdir/include/bar.f 1
subdir/sss.f
subdir/ttt.f
""")
test.run(program = test.workpath(variant_prog),
stdout = """\
subdir/prog.f
include/foo.f 2
include/bar.f 2
subdir/sss.f
subdir/ttt.f
""")
test.up_to_date(arguments = args)
# Check that a null-string FORTRANPATH doesn't blow up.
test.write('SConstruct', """
env = Environment(FORTRANPATH = '')
env.Object('foo', source = 'empty.f')
""")
test.write('empty.f', '')
if sys.platform[:5] == 'sunos':
# Sun f77 always put some junk in stderr
test.run(arguments = '.', stderr = None)
else:
test.run(arguments = '.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
# coding: utf-8
"""
This script identifies the structure of a given track using a modified version
of the C-NMF method described here:
Nieto, O., Jehan, T., Convex Non-negative Matrix Factorization For Automatic
Music Structure Identification. Proc. of the 38th IEEE International
Conference on Acoustics, Speech, and Signal Processing (ICASSP).
Vancouver, Canada, 2013
"""
import logging
import numpy as np
import pylab as plt
from scipy.ndimage import filters
from msaf.algorithms.interface import SegmenterInterface
from msaf import pymf
def median_filter(X, M=8):
"""Median filter along the first axis of the feature matrix X."""
for i in range(X.shape[1]):
X[:, i] = filters.median_filter(X[:, i], size=M)
return X
def cnmf(S, rank, niter=500, hull=False):
"""(Convex) Non-Negative Matrix Factorization.
Parameters
----------
S: np.array(p, N)
Features matrix. p row features and N column observations.
rank: int
Rank of decomposition
niter: int
Number of iterations to be used
hull: bool
If True, use convex-hull NMF (pymf.CHNMF) instead of convex NMF (pymf.CNMF)
Returns
-------
F: np.array
Cluster matrix (decomposed matrix)
G: np.array
Activation matrix (decomposed matrix)
(s.t. S ~= F * G)
"""
if hull:
nmf_mdl = pymf.CHNMF(S, num_bases=rank)
else:
nmf_mdl = pymf.CNMF(S, num_bases=rank)
nmf_mdl.factorize(niter=niter)
F = np.asarray(nmf_mdl.W)
G = np.asarray(nmf_mdl.H)
return F, G
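# --- Illustrative usage sketch (toy data, an assumption for demonstration) ---
# Decompose a small non-negative feature matrix S (p features x N frames):
#
#   S = np.random.rand(12, 200)          # e.g. a chromagram-like matrix
#   F, G = cnmf(S, rank=4, niter=200)
#   # Per the docstring above, F is the cluster matrix and G the activation
#   # matrix, with S approximately equal to F.dot(G).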
def most_frequent(x):
"""Returns the most frequent value in x."""
return np.argmax(np.bincount(x))
def compute_labels(X, rank, R, bound_idxs, niter=300):
"""Computes the labels using the bounds."""
try:
F, G = cnmf(X, rank, niter=niter, hull=False)
except Exception:
return [1]
label_frames = filter_activation_matrix(G.T, R)
label_frames = np.asarray(label_frames, dtype=int)
#labels = [label_frames[0]]
labels = []
bound_inters = zip(bound_idxs[:-1], bound_idxs[1:])
for bound_inter in bound_inters:
if bound_inter[1] - bound_inter[0] <= 0:
labels.append(np.max(label_frames) + 1)
else:
labels.append(most_frequent(
label_frames[bound_inter[0]: bound_inter[1]]))
#print bound_inter, labels[-1]
#labels.append(label_frames[-1])
return labels
def filter_activation_matrix(G, R):
"""Filters the activation matrix G, and returns a flattened copy."""
#import pylab as plt
#plt.imshow(G, interpolation="nearest", aspect="auto")
#plt.show()
idx = np.argmax(G, axis=1)
max_idx = np.arange(G.shape[0])
max_idx = (max_idx, idx.flatten())
G[:, :] = 0
G[max_idx] = idx + 1
# TODO: Order matters?
G = np.sum(G, axis=1)
G = median_filter(G[:, np.newaxis], R)
return G.flatten()
def get_segmentation(X, rank, R, rank_labels, R_labels, niter=300,
bound_idxs=None, in_labels=None):
"""
Gets the segmentation (boundaries and labels) from the factorization
matrices.
Parameters
----------
X: np.array()
Features matrix (e.g. chromagram)
rank: int
Rank of decomposition
R: int
Size of the median filter for activation matrix
niter: int
Number of iterations for the matrix factorization
bound_idxs : list
Use previously found boundaries (None to detect them)
in_labels : np.array()
List of input labels (None to compute them)
Returns
-------
bounds_idx: np.array
Bound indices found
labels: np.array
Indices of the labels representing the similarity between segments.
"""
#import pylab as plt
#plt.imshow(X, interpolation="nearest", aspect="auto")
#plt.show()
# Find non filtered boundaries
compute_bounds = True if bound_idxs is None else False
while True:
if bound_idxs is None:
try:
F, G = cnmf(X, rank, niter=niter, hull=False)
except Exception:
return np.empty(0), [1]
# Filter G
G = filter_activation_matrix(G.T, R)
if bound_idxs is None:
bound_idxs = np.where(np.diff(G) != 0)[0] + 1
# Increase rank if we found too few boundaries
if compute_bounds and len(np.unique(bound_idxs)) <= 2:
rank += 1
bound_idxs = None
else:
break
# Add first and last boundary
bound_idxs = np.concatenate(([0], bound_idxs, [X.shape[1] - 1]))
bound_idxs = np.asarray(bound_idxs, dtype=int)
if in_labels is None:
labels = compute_labels(X, rank_labels, R_labels, bound_idxs,
niter=niter)
else:
labels = np.ones(len(bound_idxs) - 1)
#plt.imshow(G[:, np.newaxis], interpolation="nearest", aspect="auto")
#for b in bound_idxs:
#plt.axvline(b, linewidth=2.0, color="k")
#plt.show()
return bound_idxs, labels
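# --- Illustrative usage sketch (toy input, an assumption for demonstration) ---
#   X = np.random.rand(12, 500)                       # features x frames
#   bound_idxs, labels = get_segmentation(X, rank=3, R=9,
#                                         rank_labels=4, R_labels=6)
#   # bound_idxs always starts at 0 and ends at X.shape[1] - 1, and labels
#   # typically has one entry per segment (len(bound_idxs) - 1).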
class Segmenter(SegmenterInterface):
def processFlat(self):
"""Main process.
Returns
-------
est_idxs : np.array(N)
Estimated indices for the segment boundaries in frames.
est_labels : np.array(N-1)
Estimated labels for the segments.
"""
# C-NMF params
niter = 500 # Iterations for the matrix factorization and clustering
# Preprocess to obtain features, times, and input boundary indices
F = self._preprocess()
if F.shape[0] >= self.config["h"]:
# Median filter
F = median_filter(F, M=self.config["h"])
#plt.imshow(F.T, interpolation="nearest", aspect="auto"); plt.show()
# Find the boundary indices and labels using matrix factorization
est_idxs, est_labels = get_segmentation(
F.T, self.config["rank"], self.config["R"],
self.config["rank_labels"], self.config["R_labels"],
niter=niter, bound_idxs=self.in_bound_idxs, in_labels=None)
est_idxs = np.unique(np.asarray(est_idxs, dtype=int))
else:
# The track is too short. We will only output the first and last
# time stamps
if self.in_bound_idxs is None:
est_idxs = np.array([0, F.shape[0]-1])
est_labels = [1]
else:
est_idxs = self.in_bound_idxs
est_labels = [1] * (len(est_idxs) + 1)
# Make sure that the first and last boundaries are included
assert est_idxs[0] == 0 and est_idxs[-1] == F.shape[0] - 1
# Post process estimations
est_idxs, est_labels = self._postprocess(est_idxs, est_labels)
return est_idxs, est_labels
|
|
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Pattern compiler.
The grammar is taken from PatternGrammar.txt.
The compiler compiles a pattern to a pytree.*Pattern instance.
"""
__author__ = "Guido van Rossum <guido@python.org>"
# Python imports
import os
# Fairly local imports
from .pgen2 import driver, literals, token, tokenize, parse, grammar
# Really local imports
from . import pytree
from . import pygram
# The pattern grammar file
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
"PatternGrammar.txt")
class PatternSyntaxError(Exception):
pass
def tokenize_wrapper(input):
"""Tokenizes a string suppressing significant whitespace."""
skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
tokens = tokenize.generate_tokens(driver.generate_lines(input).__next__)
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if type not in skip:
yield quintuple
class PatternCompiler(object):
def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
"""Initializer.
Takes an optional alternative filename for the pattern grammar.
"""
self.grammar = driver.load_grammar(grammar_file)
self.syms = pygram.Symbols(self.grammar)
self.pygrammar = pygram.python_grammar
self.pysyms = pygram.python_symbols
self.driver = driver.Driver(self.grammar, convert=pattern_convert)
def compile_pattern(self, input, debug=False, with_tree=False):
"""Compiles a pattern string to a nested pytree.*Pattern object."""
tokens = tokenize_wrapper(input)
try:
root = self.driver.parse_tokens(tokens, debug=debug)
except parse.ParseError as e:
raise PatternSyntaxError(str(e))
if with_tree:
return self.compile_node(root), root
else:
return self.compile_node(root)
def compile_node(self, node):
"""Compiles a node, recursively.
This is one big switch on the node type.
"""
# XXX Optimize certain Wildcard-containing-Wildcard patterns
# that can be merged
if node.type == self.syms.Matcher:
node = node.children[0] # Avoid unneeded recursion
if node.type == self.syms.Alternatives:
# Skip the odd children since they are just '|' tokens
alts = [self.compile_node(ch) for ch in node.children[::2]]
if len(alts) == 1:
return alts[0]
p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
return p.optimize()
if node.type == self.syms.Alternative:
units = [self.compile_node(ch) for ch in node.children]
if len(units) == 1:
return units[0]
p = pytree.WildcardPattern([units], min=1, max=1)
return p.optimize()
if node.type == self.syms.NegatedUnit:
pattern = self.compile_basic(node.children[1:])
p = pytree.NegatedPattern(pattern)
return p.optimize()
assert node.type == self.syms.Unit
name = None
nodes = node.children
if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
name = nodes[0].value
nodes = nodes[2:]
repeat = None
if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
repeat = nodes[-1]
nodes = nodes[:-1]
# Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
pattern = self.compile_basic(nodes, repeat)
if repeat is not None:
assert repeat.type == self.syms.Repeater
children = repeat.children
child = children[0]
if child.type == token.STAR:
min = 0
max = pytree.HUGE
elif child.type == token.PLUS:
min = 1
max = pytree.HUGE
elif child.type == token.LBRACE:
assert children[-1].type == token.RBRACE
assert len(children) in (3, 5)
min = max = self.get_int(children[1])
if len(children) == 5:
max = self.get_int(children[3])
else:
assert False
if min != 1 or max != 1:
pattern = pattern.optimize()
pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)
if name is not None:
pattern.name = name
return pattern.optimize()
def compile_basic(self, nodes, repeat=None):
# Compile STRING | NAME [Details] | (...) | [...]
assert len(nodes) >= 1
node = nodes[0]
if node.type == token.STRING:
value = str(literals.evalString(node.value))
return pytree.LeafPattern(_type_of_literal(value), value)
elif node.type == token.NAME:
value = node.value
if value.isupper():
if value not in TOKEN_MAP:
raise PatternSyntaxError("Invalid token: %r" % value)
if nodes[1:]:
raise PatternSyntaxError("Can't have details for token")
return pytree.LeafPattern(TOKEN_MAP[value])
else:
if value == "any":
type = None
elif not value.startswith("_"):
type = getattr(self.pysyms, value, None)
if type is None:
raise PatternSyntaxError("Invalid symbol: %r" % value)
if nodes[1:]: # Details present
content = [self.compile_node(nodes[1].children[1])]
else:
content = None
return pytree.NodePattern(type, content)
elif node.value == "(":
return self.compile_node(nodes[1])
elif node.value == "[":
assert repeat is None
subpattern = self.compile_node(nodes[1])
return pytree.WildcardPattern([[subpattern]], min=0, max=1)
assert False, node
def get_int(self, node):
assert node.type == token.NUMBER
return int(node.value)
# Map named tokens to the type value for a LeafPattern
TOKEN_MAP = {"NAME": token.NAME,
"STRING": token.STRING,
"NUMBER": token.NUMBER,
"TOKEN": None}
def _type_of_literal(value):
if value[0].isalpha():
return token.NAME
elif value in grammar.opmap:
return grammar.opmap[value]
else:
return None
def pattern_convert(grammar, raw_node_info):
"""Converts raw node information to a Node or Leaf instance."""
type, value, context, children = raw_node_info
if children or type in grammar.number2symbol:
return pytree.Node(type, children, context=context)
else:
return pytree.Leaf(type, value, context=context)
def compile_pattern(pattern):
return PatternCompiler().compile_pattern(pattern)
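# --- Illustrative usage sketch ---
# The pattern string below follows lib2to3's fixer pattern syntax (this
# particular pattern appears in lib2to3's fix_input); matching is then done
# against a pytree node, with captured sub-nodes bound into `results`:
#
#   pc = PatternCompiler()
#   pattern = pc.compile_pattern("power< 'eval' trailer< '(' any ')' > >")
#   results = {}
#   if pattern.match(node, results):   # `node` is an assumed pytree node
#       ...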
|
|
"""Base classes for all estimators."""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import copy
import inspect
import warnings
import numpy as np
from scipy import sparse
from .externals import six
class ChangedBehaviorWarning(UserWarning):
pass
##############################################################################
def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator: estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe: boolean, optional
If safe is false, clone will fall back to a deepcopy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"as it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in six.iteritems(new_object_params):
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if isinstance(param1, np.ndarray):
# For most ndarrays, we do not test for complete equality
if not isinstance(param2, type(param1)):
equality_test = False
elif (param1.ndim > 0
and param1.shape[0] > 0
and isinstance(param2, np.ndarray)
and param2.ndim > 0
and param2.shape[0] > 0):
equality_test = (
param1.shape == param2.shape
and param1.dtype == param2.dtype
# We have to use '.flat' for 2D arrays
and param1.flat[0] == param2.flat[0]
and param1.flat[-1] == param2.flat[-1]
)
else:
equality_test = np.all(param1 == param2)
elif sparse.issparse(param1):
# For sparse matrices equality doesn't work
if not sparse.issparse(param2):
equality_test = False
elif param1.size == 0 or param2.size == 0:
equality_test = (
param1.__class__ == param2.__class__
and param1.size == 0
and param2.size == 0
)
else:
equality_test = (
param1.__class__ == param2.__class__
and param1.data[0] == param2.data[0]
and param1.data[-1] == param2.data[-1]
and param1.nnz == param2.nnz
and param1.shape == param2.shape
)
else:
new_obj_val = new_object_params[name]
params_set_val = params_set[name]
# The following construct is required to check equality on special
# singletons such as np.nan that are not equal to themselves:
equality_test = (new_obj_val == params_set_val or
new_obj_val is params_set_val)
if not equality_test:
raise RuntimeError('Cannot clone object %s, as the constructor '
'does not seem to set parameter %s' %
(estimator, name))
return new_object
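# --- Illustrative usage sketch (the concrete estimator is only an example) ---
#   from sklearn.linear_model import LogisticRegression
#   est = LogisticRegression(C=2.0)
#   est_copy = clone(est)
#   est_copy.get_params()['C']   # -> 2.0, but est_copy carries no fitted state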
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
The offset in characters to add at the begin of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
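# --- Illustrative sketch ---
#   _pprint({'alpha': 0.5, 'fit_intercept': True}, offset=10)
#   # -> "alpha=0.5, fit_intercept=True"; entries are sorted by name and the
#   #    output wraps onto a new line once it would exceed ~75 characters.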
###############################################################################
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
args, varargs, kw, default = inspect.getargspec(init)
if varargs is not None:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s doesn't follow this convention."
% (cls, ))
# Remove 'self'
# XXX: This is going to fail if the init is a staticmethod, but
# who would do this?
args.pop(0)
args.sort()
return args
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
        (such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
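# Illustrative sketch (added for exposition, never called): how the
# get_params / set_params contract behaves for a minimal estimator that
# follows the convention described above. `ToyEstimator` is a made-up name,
# not a real scikit-learn class.
def _example_param_handling():
    class ToyEstimator(BaseEstimator):
        def __init__(self, alpha=1.0, fit_intercept=True):
            self.alpha = alpha
            self.fit_intercept = fit_intercept
    est = ToyEstimator(alpha=0.5)
    assert est.get_params() == {'alpha': 0.5, 'fit_intercept': True}
    est.set_params(alpha=2.0)        # returns est itself, so calls can chain
    assert est.get_params()['alpha'] == 2.0
    # Nested estimators (e.g. pipelines) are addressed with a double
    # underscore: outer.set_params(inner__alpha=0.1)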
###############################################################################
class ClassifierMixin(object):
"""Mixin class for all classifiers in scikit-learn."""
_estimator_type = "classifier"
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
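# Illustrative sketch (never called): the "subset accuracy" used above counts
# a multi-label sample as correct only when every label matches; the numbers
# below are worked by hand and match what accuracy_score reports.
def _example_subset_accuracy():
    import numpy as np
    y_true = np.array([[1, 0, 1],
                       [0, 1, 0]])
    y_pred = np.array([[1, 0, 1],
                       [0, 1, 1]])                   # second sample has one wrong label
    exact_match = np.all(y_true == y_pred, axis=1)   # [True, False]
    return exact_match.mean()                        # 0.5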
###############################################################################
class RegressorMixin(object):
"""Mixin class for all regression estimators in scikit-learn."""
_estimator_type = "regressor"
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
        The coefficient R^2 is defined as (1 - u/v), where u is the residual
        sum of squares ((y_true - y_pred) ** 2).sum() and v is the total
        sum of squares ((y_true - y_true.mean()) ** 2).sum().
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
        would get an R^2 score of 0.0.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight,
multioutput='variance_weighted')
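# Illustrative sketch (never called): computing R^2 by hand with the formula
# from the docstring above (1 - u/v). The values are arbitrary.
def _example_r2_by_hand():
    import numpy as np
    y_true = np.array([3.0, -0.5, 2.0, 7.0])
    y_pred = np.array([2.5, 0.0, 2.0, 8.0])
    u = ((y_true - y_pred) ** 2).sum()           # residual sum of squares, 1.5
    v = ((y_true - y_true.mean()) ** 2).sum()    # total sum of squares, 29.1875
    return 1 - u / v                             # ~0.9486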
###############################################################################
class ClusterMixin(object):
"""Mixin class for all cluster estimators in scikit-learn."""
_estimator_type = "clusterer"
def fit_predict(self, X, y=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
class BiclusterMixin(object):
"""Mixin class for all bicluster estimators in scikit-learn"""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the i'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Returns
-------
row_ind : np.array, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : np.array, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the i'th bicluster.
Returns
-------
shape : (int, int)
Number of rows and columns (resp.) in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Returns the submatrix corresponding to bicluster `i`.
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .utils.validation import check_array
data = check_array(data, accept_sparse='csr')
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
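# Illustrative sketch (never called): how the boolean row/column indicator
# vectors used by BiclusterMixin map to indices and submatrices. Shapes and
# values are arbitrary.
def _example_bicluster_indicators():
    import numpy as np
    rows = np.array([True, False, True])             # one rows_ entry
    columns = np.array([False, True, True, False])   # one columns_ entry
    data = np.arange(12).reshape(3, 4)
    row_ind = np.nonzero(rows)[0]                    # [0, 2]
    col_ind = np.nonzero(columns)[0]                 # [1, 2]
    return data[row_ind[:, np.newaxis], col_ind]     # [[1, 2], [9, 10]]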
###############################################################################
class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
        # non-optimized default implementation; override when a better
        # method is possible for a given estimator
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
###############################################################################
class MetaEstimatorMixin(object):
"""Mixin class for all meta estimators in scikit-learn."""
# this is just a tag for the moment
###############################################################################
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier."""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor."""
return getattr(estimator, "_estimator_type", None) == "regressor"
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests a Glance API server which uses a Swift backend by default
This test requires that a real Swift account is available. It looks
in the file named by the GLANCE_TEST_SWIFT_CONF environment variable for the
credentials to use.
Note that this test clears the entire container from the Swift account
for use by the test case, so make sure you supply credentials for
test accounts only.
If a connection cannot be established, all the test cases are
skipped.
"""
import ConfigParser
import hashlib
import httplib
import httplib2
import json
import os
from glance.common import crypt
import glance.store.swift # Needed to register driver for location
from glance.store.location import get_location_from_uri
from glance.tests.functional import test_api
from glance.tests.utils import skip_if_disabled
FIVE_KB = 5 * 1024
FIVE_MB = 5 * 1024 * 1024
class TestSwift(test_api.TestApi):
"""Functional tests for the Swift backend"""
# Test machines can set the GLANCE_TEST_SWIFT_CONF variable
# to override the location of the config file for migration testing
CONFIG_FILE_PATH = os.environ.get('GLANCE_TEST_SWIFT_CONF')
def setUp(self):
"""
        Test a connection to a Swift store using the credentials
found in the environs or /tests/functional/test_swift.conf, if found.
If the connection fails, mark all tests to skip.
"""
self.inited = False
self.disabled = True
if self.inited:
return
if not self.CONFIG_FILE_PATH:
self.disabled_message = "GLANCE_TEST_SWIFT_CONF environ not set."
self.inited = True
return
if os.path.exists(TestSwift.CONFIG_FILE_PATH):
cp = ConfigParser.RawConfigParser()
try:
cp.read(TestSwift.CONFIG_FILE_PATH)
defaults = cp.defaults()
for key, value in defaults.items():
self.__dict__[key] = value
except ConfigParser.ParsingError, e:
self.disabled_message = ("Failed to read test_swift.conf "
"file. Got error: %s" % e)
self.inited = True
return
from swift.common import client as swift_client
try:
swift_host = self.swift_store_auth_address
if not swift_host.startswith('http'):
swift_host = 'https://' + swift_host
user = self.swift_store_user
key = self.swift_store_key
container_name = self.swift_store_container
except AttributeError, e:
self.disabled_message = ("Failed to find required configuration "
"options for Swift store. "
"Got error: %s" % e)
self.inited = True
return
self.swift_conn = swift_conn = swift_client.Connection(
authurl=swift_host, user=user, key=key, snet=False, retries=1)
try:
_resp_headers, containers = swift_conn.get_account()
except Exception, e:
self.disabled_message = ("Failed to get_account from Swift "
"Got error: %s" % e)
self.inited = True
return
try:
for container in containers:
if container == container_name:
swift_conn.delete_container(container)
except swift_client.ClientException, e:
self.disabled_message = ("Failed to delete container from Swift "
"Got error: %s" % e)
self.inited = True
return
self.swift_conn = swift_conn
try:
swift_conn.put_container(container_name)
except swift_client.ClientException, e:
self.disabled_message = ("Failed to create container. "
"Got error: %s" % e)
self.inited = True
return
self.disabled = False
self.inited = True
self.default_store = 'swift'
super(TestSwift, self).setUp()
def tearDown(self):
if not self.disabled:
self.clear_container()
super(TestSwift, self).tearDown()
def clear_container(self):
from swift.common import client as swift_client
try:
self.swift_conn.delete_container(self.swift_store_container)
except swift_client.ClientException, e:
if e.http_status == httplib.CONFLICT:
pass
else:
raise
self.swift_conn.put_container(self.swift_store_container)
@skip_if_disabled
def test_large_objects(self):
"""
We test the large object manifest code path in the Swift driver.
In the case where an image file is bigger than the config variable
swift_store_large_object_size, then we chunk the image into
Swift, and add a manifest put_object at the end.
We test that the delete of the large object cleans up all the
chunks in Swift, in addition to the manifest file (LP Bug# 833285)
"""
self.cleanup()
self.swift_store_large_object_size = 2 # In MB
self.swift_store_large_object_chunk_size = 1 # In MB
self.start_servers(**self.__dict__.copy())
api_port = self.api_port
registry_port = self.registry_port
# GET /images
# Verify no public images
path = "http://%s:%d/v1/images" % ("0.0.0.0", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
self.assertEqual(content, '{"images": []}')
# POST /images with public image named Image1
        # attribute and no custom properties. Verify a 201 Created is returned
image_data = "*" * FIVE_MB
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'Image1',
'X-Image-Meta-Is-Public': 'True'}
path = "http://%s:%d/v1/images" % ("0.0.0.0", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=image_data)
self.assertEqual(response.status, 201, content)
data = json.loads(content)
self.assertEqual(data['image']['checksum'],
hashlib.md5(image_data).hexdigest())
self.assertEqual(data['image']['size'], FIVE_MB)
self.assertEqual(data['image']['name'], "Image1")
self.assertEqual(data['image']['is_public'], True)
image_id = data['image']['id']
# HEAD image
# Verify image found now
path = "http://%s:%d/v1/images/%s" % ("0.0.0.0", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'HEAD')
self.assertEqual(response.status, 200)
self.assertEqual(response['x-image-meta-name'], "Image1")
# GET image
# Verify all information on image we just added is correct
path = "http://%s:%d/v1/images/%s" % ("0.0.0.0", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
expected_image_headers = {
'x-image-meta-id': image_id,
'x-image-meta-name': 'Image1',
'x-image-meta-is_public': 'True',
'x-image-meta-status': 'active',
'x-image-meta-disk_format': '',
'x-image-meta-container_format': '',
'x-image-meta-size': str(FIVE_MB)
}
expected_std_headers = {
'content-length': str(FIVE_MB),
'content-type': 'application/octet-stream'}
for expected_key, expected_value in expected_image_headers.items():
self.assertEqual(response[expected_key], expected_value,
"For key '%s' expected header value '%s'. Got '%s'"
% (expected_key, expected_value,
response[expected_key]))
for expected_key, expected_value in expected_std_headers.items():
self.assertEqual(response[expected_key], expected_value,
"For key '%s' expected header value '%s'. Got '%s'"
% (expected_key,
expected_value,
response[expected_key]))
self.assertEqual(content, "*" * FIVE_MB)
self.assertEqual(hashlib.md5(content).hexdigest(),
hashlib.md5("*" * FIVE_MB).hexdigest())
# We test that the delete of the large object cleans up all the
# chunks in Swift, in addition to the manifest file (LP Bug# 833285)
# Grab the actual Swift location and query the object manifest for
# the chunks/segments. We will check that the segments don't exist
# after we delete the object through Glance...
path = "http://%s:%d/images/%s" % ("0.0.0.0", self.registry_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
image_loc = data['image']['location']
if hasattr(self, 'metadata_encryption_key'):
key = self.metadata_encryption_key
else:
key = self.api_server.metadata_encryption_key
image_loc = crypt.urlsafe_decrypt(key, image_loc)
image_loc = get_location_from_uri(image_loc)
swift_loc = image_loc.store_location
from swift.common import client as swift_client
swift_conn = swift_client.Connection(
authurl=swift_loc.swift_auth_url,
user=swift_loc.user, key=swift_loc.key)
# Verify the object manifest exists
headers = swift_conn.head_object(swift_loc.container, swift_loc.obj)
manifest = headers.get('x-object-manifest')
self.assertTrue(manifest is not None, "Manifest could not be found!")
# Grab the segment identifiers
obj_container, obj_prefix = manifest.split('/', 1)
segments = [segment['name'] for segment in
swift_conn.get_container(obj_container,
prefix=obj_prefix)[1]]
# Verify the segments exist
for segment in segments:
headers = swift_conn.head_object(obj_container, segment)
self.assertTrue(headers.get('content-length') is not None,
headers)
# DELETE image
# Verify image and all chunks are gone...
path = "http://%s:%d/v1/images/%s" % ("0.0.0.0", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
# Verify the segments no longer exist
for segment in segments:
self.assertRaises(swift_client.ClientException,
swift_conn.head_object,
obj_container, segment)
self.stop_servers()
@skip_if_disabled
def test_add_large_object_manifest_uneven_size(self):
"""
Test when large object manifest in scenario where
image size % chunk size != 0
"""
self.cleanup()
self.swift_store_large_object_size = 3 # In MB
self.swift_store_large_object_chunk_size = 2 # In MB
self.start_servers(**self.__dict__.copy())
api_port = self.api_port
registry_port = self.registry_port
# 0. GET /images
# Verify no public images
path = "http://%s:%d/v1/images" % ("0.0.0.0", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
self.assertEqual(content, '{"images": []}')
# 1. POST /images with public image named Image1
        # attribute and no custom properties. Verify a 201 Created is returned
image_data = "*" * FIVE_MB
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'Image1',
'X-Image-Meta-Is-Public': 'True'}
path = "http://%s:%d/v1/images" % ("0.0.0.0", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=image_data)
self.assertEqual(response.status, 201, content)
data = json.loads(content)
self.assertEqual(data['image']['checksum'],
hashlib.md5(image_data).hexdigest())
self.assertEqual(data['image']['size'], FIVE_MB)
self.assertEqual(data['image']['name'], "Image1")
self.assertEqual(data['image']['is_public'], True)
image_id = data['image']['id']
# 4. HEAD image
# Verify image found now
path = "http://%s:%d/v1/images/%s" % ("0.0.0.0", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'HEAD')
self.assertEqual(response.status, 200)
self.assertEqual(response['x-image-meta-name'], "Image1")
# 5. GET image
# Verify all information on image we just added is correct
path = "http://%s:%d/v1/images/%s" % ("0.0.0.0", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
expected_image_headers = {
'x-image-meta-id': image_id,
'x-image-meta-name': 'Image1',
'x-image-meta-is_public': 'True',
'x-image-meta-status': 'active',
'x-image-meta-disk_format': '',
'x-image-meta-container_format': '',
'x-image-meta-size': str(FIVE_MB)
}
expected_std_headers = {
'content-length': str(FIVE_MB),
'content-type': 'application/octet-stream'}
for expected_key, expected_value in expected_image_headers.items():
self.assertEqual(response[expected_key], expected_value,
"For key '%s' expected header value '%s'. Got '%s'"
% (expected_key, expected_value,
response[expected_key]))
for expected_key, expected_value in expected_std_headers.items():
self.assertEqual(response[expected_key], expected_value,
"For key '%s' expected header value '%s'. Got '%s'"
% (expected_key,
expected_value,
response[expected_key]))
self.assertEqual(content, "*" * FIVE_MB)
self.assertEqual(hashlib.md5(content).hexdigest(),
hashlib.md5("*" * FIVE_MB).hexdigest())
# DELETE image
path = "http://%s:%d/v1/images/%s" % ("0.0.0.0", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
self.stop_servers()
@skip_if_disabled
def test_remote_image(self):
"""
Ensure we can retrieve an image that was not stored by glance itself
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
api_port = self.api_port
registry_port = self.registry_port
# POST /images with public image named Image1
image_data = "*" * FIVE_KB
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'Image1',
'X-Image-Meta-Is-Public': 'True'}
path = "http://%s:%d/v1/images" % ("0.0.0.0", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=image_data)
self.assertEqual(response.status, 201, content)
data = json.loads(content)
self.assertEqual(data['image']['checksum'],
hashlib.md5(image_data).hexdigest())
self.assertEqual(data['image']['size'], FIVE_KB)
self.assertEqual(data['image']['name'], "Image1")
self.assertEqual(data['image']['is_public'], True)
image_id = data['image']['id']
# GET image and make sure data was uploaded
path = "http://%s:%d/v1/images/%s" % ("0.0.0.0", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
self.assertEqual(response['content-length'], str(FIVE_KB))
self.assertEqual(content, "*" * FIVE_KB)
self.assertEqual(hashlib.md5(content).hexdigest(),
hashlib.md5("*" * FIVE_KB).hexdigest())
# Find the location that was just added and use it as
# the remote image location for the next image
path = "http://%s:%d/images/%s" % ("0.0.0.0", self.registry_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
data = json.loads(content)
self.assertTrue('location' in data['image'].keys())
loc = data['image']['location']
if hasattr(self, 'metadata_encryption_key'):
key = self.metadata_encryption_key
else:
key = self.api_server.metadata_encryption_key
swift_location = crypt.urlsafe_decrypt(key, loc)
# POST /images with public image named Image1 without uploading data
image_data = "*" * FIVE_KB
headers = {'Content-Type': 'application/octet-stream',
'X-Image-Meta-Name': 'Image1',
'X-Image-Meta-Is-Public': 'True',
'X-Image-Meta-Location': swift_location}
path = "http://%s:%d/v1/images" % ("0.0.0.0", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers)
self.assertEqual(response.status, 201, content)
data = json.loads(content)
self.assertEqual(data['image']['checksum'], None)
self.assertEqual(data['image']['size'], FIVE_KB)
self.assertEqual(data['image']['name'], "Image1")
self.assertEqual(data['image']['is_public'], True)
image_id2 = data['image']['id']
# GET /images/2 ensuring the data already in swift is accessible
path = "http://%s:%d/v1/images/%s" % ("0.0.0.0", self.api_port,
image_id2)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(response.status, 200)
self.assertEqual(response['content-length'], str(FIVE_KB))
self.assertEqual(content, "*" * FIVE_KB)
self.assertEqual(hashlib.md5(content).hexdigest(),
hashlib.md5("*" * FIVE_KB).hexdigest())
        # DELETE both images
# Verify image and all chunks are gone...
path = "http://%s:%d/v1/images/%s" % ("0.0.0.0", self.api_port,
image_id)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
path = "http://%s:%d/v1/images/%s" % ("0.0.0.0", self.api_port,
image_id2)
http = httplib2.Http()
response, content = http.request(path, 'DELETE')
self.assertEqual(response.status, 200)
self.stop_servers()
|
|
import json
import logging
import os
import shutil
import time
from subprocess import Popen, PIPE
from urllib import parse
import requests
from flask import session
from son_editor.app.database import db_session, scan_project_dir, sync_project_descriptor
from son_editor.app.exceptions import NotFound, InvalidArgument, NameConflict
from son_editor.impl import usermanagement
from son_editor.models.project import Project
from son_editor.models.workspace import Workspace
from son_editor.util.constants import PROJECT_REL_PATH, Github, REQUIRED_SON_PROJECT_FILES
logger = logging.getLogger(__name__)
def create_oauth_header() -> dict:
"""
Creates oauth header by providing the access token in the header.
:return: Header as dict
"""
return {'Authorization': 'token {}'.format(session['access_token'])}
def build_github_delete(owner: str, repo_name: str) -> str:
"""
Builds relative github api url to delete a repository
:param owner: Owner of the github repository
:param repo_name: Repository name
:return: the relative GitHub api url
"""
return Github.API_URL + Github.API_DELETE_REPO.format(owner, repo_name)
def is_github(netloc):
"""
Checks if the given url is on github
:param netloc: http url
    :return: True if on github, False otherwise
"""
if netloc.lower() in Github.DOMAINS:
return True
return False
def git_command(git_args: list, cwd: str = None):
"""
Calls the git command with given args and returns out, err and exitcode
:param git_args: Arguments for git
:param cwd: Optional current working directory
:return: out, error, exitcode
"""
args = ['git']
args.extend(git_args)
git_process = Popen(args,
stdout=PIPE, stderr=PIPE, cwd=cwd)
out, err = git_process.communicate()
exitcode = git_process.returncode
return out.decode(), err.decode(), exitcode
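# Illustrative sketch (not called anywhere): typical use of git_command(),
# which returns decoded stdout/stderr plus the process exit code. The path
# '/tmp/some_repo' is a placeholder.
def _example_git_command():
    out, err, exitcode = git_command(['status', '--porcelain'], cwd='/tmp/some_repo')
    if exitcode == 0:
        return out.splitlines()   # one entry per modified or untracked file
    raise RuntimeError(err)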
def create_info_dict(out: str = None, err: str = None, exitcode: int = 0) -> dict:
"""
Creates a dict that holds process information
:param out: Out bytes
:param err: Err bytes
:param exitcode: exitcode
:return: Dict with packed information.
"""
# Empty result_dict
result_dict = {}
# Prioritize err message
if err:
result_dict.update({'message': err})
elif out:
result_dict.update({'message': out})
    if exitcode:
        # Frontend parses '\n' to <br>
        output = (out + "\n" if out else '') + (err if err else '')
        result_dict.update({'message': output})
return result_dict
def get_project(ws_id, pj_id: int, session=db_session()) -> Project:
"""
Returns a project and raises 404, when project not found.
:param ws_id: Workspace id
:param pj_id: Project id
:param session: db session
:return: Project model
"""
project = session.query(Project).join(Workspace) \
.filter(Workspace.id == ws_id) \
.filter(Project.id == pj_id).first()
if not project:
raise NotFound("Could not find project with id {}".format(pj_id))
return project
def check_son_validity(project_path: str):
"""
    Checks if the given project path is a valid son project, otherwise it raises an exception. Valid means it has
    a consistent son file structure; no semantics are tested.
:param project_path: the path of the cloned project
"""
missing_files = []
files = [f for f in os.listdir(project_path)]
logger.warn('Files in {}: '.format(project_path))
for f in files:
logger.warn('{}'.format(f))
for file in REQUIRED_SON_PROJECT_FILES:
if not os.path.isfile(os.path.join(project_path, file)):
missing_files.append(file)
missing_files_count = len(missing_files)
# If project seems to be valid.
    if missing_files_count == 0:
        return
    elif missing_files_count == 1:
        result = "The project has no '{}' file".format(missing_files[0])
    else:
        result = "The project has the following missing files: '{}'".format(",".join(missing_files))
# Delete project, if there are missing files.
shutil.rmtree(project_path)
raise InvalidArgument(result)
def get_workspace(ws_id: int) -> Workspace:
"""
Returns the workspace model of the given workspace
:param ws_id: The workspace ID
:return: The corresponding workspace model
"""
workspace = db_session().query(Workspace).filter(Workspace.id == ws_id).first()
if not workspace:
raise NotFound("Could not find workspace with id {}".format(ws_id))
return workspace
def init(ws_id: int, project_id: int):
"""
Initializes a git repository in the given project
    :param ws_id: The workspace ID
:param project_id: The project ID to initialize
:return: a dictionary containing the result of the operation
"""
project = get_project(ws_id, project_id)
project_full_path = os.path.join(project.workspace.path, PROJECT_REL_PATH, project.rel_path)
out, err, exitcode = git_command(['init'], cwd=project_full_path)
# Additionally set repository user information
    if exitcode == 0:
setup_git_user_email(project_full_path)
return create_info_dict(out, err=err, exitcode=exitcode)
def setup_git_user_email(project_full_path: str):
"""
Setting up the git user in the local git config to be able to make commits and push
:param project_full_path: The absolute project path
"""
user = usermanagement.get_user(session['user_data']['login'])
git_command(['config', 'user.name', user.name], cwd=project_full_path)
git_command(['config', 'user.email', user.email], cwd=project_full_path)
git_command(['config', 'push.default', 'simple'], cwd=project_full_path)
def commit_and_push(ws_id: int, project_id: int, commit_message: str):
"""
Commits and then pushes changes.
:param ws_id: The workspace ID
:param project_id: The project ID
:param commit_message: The commit message
:return: a dictionary containing the result of the operation
"""
project = get_project(ws_id, project_id)
project_full_path = os.path.join(project.workspace.path, PROJECT_REL_PATH, project.rel_path)
logger.warn("Commit and Push files")
files = [f for f in os.listdir(project_full_path)]
logger.warn('Files in {}: '.format(project_full_path))
for f in files:
logger.warn('{}'.format(f))
# Stage all modified, added, removed files
out, err, exitcode = git_command(['add', '-A'], cwd=project_full_path)
    if exitcode != 0:
return create_info_dict(out, err=err, exitcode=exitcode)
else:
logger.warn("Add succeeded: {}".format(out))
# Commit with message
out, err, exitcode = git_command(['commit', "-m '{}'".format(commit_message)], cwd=project_full_path)
    if exitcode != 0:
if 'up-to-date' not in out:
git_command(['reset', 'HEAD~1'], cwd=project_full_path)
return create_info_dict(out, err=err, exitcode=exitcode)
else:
logger.warn("Nothing to commit. Trying to push anyways".format(out))
else:
logger.warn("Commit succeeded: {}".format(out))
# Push all changes to the repo url
sout, serr, sexitcode = git_command(['status', '-u'], cwd=project_full_path)
url_decode = parse.urlparse(project.repo_url)
logger.warn("Executed status".format(out))
git_command(['remote', 'rm', 'origin', _get_repo_url(url_decode)], cwd=project_full_path)
git_command(['remote', 'add', 'origin', _get_repo_url(url_decode)], cwd=project_full_path)
git_command(['push', '--set-upstream', 'origin', 'master'], cwd=project_full_path)
git_command(['push', '-u'], cwd=project_full_path)
# time.sleep(30)
    if exitcode != 0:
git_command(['reset', 'HEAD~1'], cwd=project_full_path)
return create_info_dict(out, err=err, exitcode=exitcode)
else:
logger.warn("Push succeeded: {}".format(out))
logger.warn("Push out: {}\n err: {} \n exitcode: {}\n repo url: {}".format(out, err, exitcode,
_get_repo_url(url_decode)))
logger.warn("Status: out: {}\n err: {} \n exitcode: {}\n".format(sout, serr, sexitcode))
# Success on commit
return create_info_dict(out)
def create_commit_and_push(ws_id: int, project_id: int, remote_repo_name: str):
"""
Creates a remote GitHub repository named remote_repo_name and pushes given git project into it.
:param ws_id: Workspace ID
:param project_id: Project ID to create and push it
:param remote_repo_name: Remote repository name
:return: a dictionary containing the result of the operation
"""
database_session = db_session()
try:
project = get_project(ws_id, project_id, database_session)
# curl -H "Authorization: token [TOKEN]" -X POST https://api.github.com/user/repos --data '{"name":"repo_name"}'
repo_data = {'name': remote_repo_name}
request = requests.post(Github.API_URL + Github.API_CREATE_REPO_REL, json=repo_data,
headers=create_oauth_header())
# Handle exceptions
if request.status_code != 201:
# Repository already exists
if request.status_code == 422:
raise NameConflict("Repository with name {} already exist on GitHub".format(remote_repo_name))
raise Exception("Unhandled status_code: {}\n{}".format(request.status_code, request.text))
# Get git url and commit to db
data = json.loads(request.text)
git_url = data['svn_url']
project.repo_url = git_url
database_session.commit()
except Exception:
database_session.rollback()
raise
# Try to push project
try:
# Give github some time to see created repo
# (dirty hack)
time.sleep(0.5)
return commit_and_push(ws_id, project_id, "Initial commit")
except Exception:
# Delete newly created repository if commit and push failed.
result = requests.delete(build_github_delete(session['user_data']['login'], remote_repo_name),
headers=create_oauth_header())
# Reraise
raise
def delete(ws_id: int, project_id: int, remote_repo_name: str, organization_name: str = None):
"""
Deletes given project on remote repository
:param project_id:
:param ws_id: Workspace of the project
:param remote_repo_name: Remote repository name
:param organization_name: Optional parameter to specify the organization / login
:return: a dictionary containing the result of the operation
"""
if organization_name is None:
owner = session['user_data']['login']
else:
owner = organization_name
sql_session = db_session()
project = get_project(ws_id, project_id, sql_session)
url_decode = parse.urlparse(project.repo_url)
if _repo_name_from_url(url_decode) == remote_repo_name:
result = _do_delete(owner, remote_repo_name)
if result.status_code == 204:
project.repo_url = None
sql_session.commit()
return create_info_dict("Successfully deleted")
else:
sql_session.rollback()
return create_info_dict(result.text, exitcode=1)
raise InvalidArgument("The given repo name does not correspond to the remote repository name")
def _do_delete(owner, remote_repo_name):
"""
Executes the delete api call at the given remote repo
:param owner: The github user name of the repository owner
:param remote_repo_name: The remote repository name
:return: The APIs answer
"""
return requests.delete(build_github_delete(owner, remote_repo_name), headers=create_oauth_header())
def diff(ws_id: int, pj_id: int):
"""
Shows the local changes of the given project.
:param ws_id: Workspace of the project.
:param pj_id: Given project to show from.
:return: a dictionary containing the result of the operation
"""
project = get_project(ws_id, pj_id)
project_full_path = os.path.join(project.workspace.path, PROJECT_REL_PATH, project.rel_path)
out, err, exitcode = git_command(['diff'], project_full_path)
    if exitcode == 0:
return create_info_dict(out)
else:
return create_info_dict(out, err, exitcode)
def status(ws_id: int, pj_id: int):
"""
Shows the git status of the repository
:param ws_id: The workspace ID
:param pj_id: The project ID
:return: a dictionary containing the result of the operation
"""
project = get_project(ws_id, pj_id)
project_full_path = os.path.join(project.workspace.path, PROJECT_REL_PATH, project.rel_path)
# fetch remote changes
out, err, exitcode = git_command(['remote', 'update'], project_full_path)
    if exitcode == 0:
        # get the status
        out, err, exitcode = git_command(['status', '-uno', '-u'], project_full_path)
        if exitcode == 0:
return create_info_dict(out)
return create_info_dict(out, err, exitcode)
def pull(ws_id: int, project_id: int):
"""
Pulls data from the given project_id.
:param ws_id: Workspace of the project
:param project_id: Project to pull.
:return: a dictionary containing the result of the operation
"""
dbsession = db_session()
project = get_project(ws_id, project_id, session=dbsession)
project_full_path = os.path.join(project.workspace.path, PROJECT_REL_PATH, project.rel_path)
# Error handling
if not os.path.isdir(project_full_path):
raise Exception("Could not find project directory {}".format(project_full_path))
if not project.repo_url:
raise InvalidArgument("Project with id {} is missing the repo attribute".format(project_id))
# Pull in project directory
# If url in GitHub domain, access by token
out, err, exitcode = git_command(['pull', project.repo_url], cwd=project_full_path)
# Return error if pull failed.
    if exitcode != 0:
return create_info_dict(err=err, exitcode=exitcode)
# Rescan project
try:
sync_project_descriptor(project)
dbsession.add(project)
scan_project_dir(project_full_path, project)
dbsession.commit()
except:
dbsession.rollback()
raise Exception("Could not scan the project after pull.")
return create_info_dict(out=out)
def list():
"""
    Lists the available remote repositories of the logged-in user.
:return: https://developer.github.com/v3/repos/#response
"""
result = requests.get(Github.API_URL + Github.API_LIST_REPOS.format(session['user_data']['login']),
headers=create_oauth_header())
return json.loads(result.text)
def _repo_name_from_url(url_decode):
    """
    Extracts the repository name from its URL
    :param url_decode: the parsed repository URL (as returned by urlparse)
    :return: the repository name without a '.git' suffix
"""
github_project_name = os.path.split(url_decode.path)[-1]
return github_project_name.replace('.git', '')
def clone(ws_id: int, url: str, name: str = None):
"""
Clones a repository by url into given workspace
    :param name: Optional name of the local repository, otherwise the remote name is taken
    :param ws_id: Destination workspace to clone into
    :param url: URL of the source repository
    :return: a dictionary containing the result of the operation; raises NameConflict if a
        project with the same name already exists in the workspace
"""
workspace = get_workspace(ws_id)
url_decode = parse.urlparse(url)
if is_github(url_decode.netloc):
# Take the suffix of url as first name candidate
github_project_name = name
if github_project_name is None:
github_project_name = _repo_name_from_url(url_decode)
dbsession = db_session()
pj = dbsession.query(Project).join(Workspace)\
.filter(Workspace.id == workspace.id).filter(
Project.name == github_project_name).first()
dbsession.commit()
# Error when the project name in given workspace already exists
if pj is not None:
raise NameConflict('A project with name {} already exists'.format(github_project_name))
project_target_path = os.path.join(workspace.path, PROJECT_REL_PATH, github_project_name)
logger.info('Cloning from github repo...')
# If url in GitHub domain, access by token
url_with_token = _get_repo_url(url_decode)
out, err, exitcode = git_command(['clone', url_with_token, project_target_path])
        if exitcode == 0:
setup_git_user_email(project_target_path)
# Check if the project is a valid son project
check_son_validity(project_target_path)
# Create project and scan it.
dbsession = db_session()
try:
pj = Project(github_project_name, github_project_name, workspace)
pj.repo_url = url
sync_project_descriptor(pj)
dbsession.add(pj)
scan_project_dir(project_target_path, pj)
dbsession.commit()
# Check if the project is valid
result = create_info_dict(out=out)
result["id"] = pj.id
return result
except:
dbsession.rollback()
shutil.rmtree(project_target_path)
raise Exception("Scan project failed")
else:
return create_info_dict(err=err, exitcode=exitcode)
    raise NotImplementedError("Cloning from other hosts is not implemented yet. Only github is supported for now.")
def _get_repo_url(url_decode):
return 'https://{}@github.com{}'.format(session['access_token'], url_decode.path)
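# Illustrative sketch (not called anywhere): how a GitHub clone URL is parsed,
# how the repository name is recovered from it, and what _get_repo_url() would
# produce for it. The URL and token value are placeholders.
def _example_repo_url_handling():
    url = 'https://github.com/some-user/some-project.git'
    parsed = parse.urlparse(url)
    name = _repo_name_from_url(parsed)   # 'some-project'
    # With session['access_token'] == 'TOKEN123', _get_repo_url(parsed) yields
    # 'https://TOKEN123@github.com/some-user/some-project.git'
    return name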
|
|
"""
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
import hashlib
import re
import time
from django.conf import settings
from django.core.cache import get_cache
from django.utils.encoding import iri_to_uri, force_unicode
from django.utils.http import http_date
from django.utils.timezone import get_current_timezone_name
from django.utils.translation import get_language
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
"""
This function patches the Cache-Control header by adding all
keyword arguments to it. The transformation is as follows:
* All keyword parameter names are turned to lowercase, and underscores
are converted to hyphens.
* If the value of a parameter is True (exactly True, not just a
true value), only the parameter name is added to the header.
* All other parameters are added with their value, after applying
str() to it.
"""
def dictitem(s):
t = s.split('=', 1)
if len(t) > 1:
return (t[0].lower(), t[1])
else:
return (t[0].lower(), True)
def dictvalue(t):
if t[1] is True:
result = t[0]
else:
result = '%s=%s' % (t[0], t[1])
return result.encode('utf-8')
if response.has_header('Cache-Control'):
cc = cc_delim_re.split(response['Cache-Control'])
cc = dict([dictitem(el) for el in cc])
else:
cc = {}
# If there's already a max-age header but we're being asked to set a new
# max-age, use the minimum of the two ages. In practice this happens when
# a decorator and a piece of middleware both operate on a given view.
if 'max-age' in cc and 'max_age' in kwargs:
kwargs['max_age'] = min(int(cc['max-age']), kwargs['max_age'])
# Allow overriding private caching and vice versa
if 'private' in cc and 'public' in kwargs:
del cc['private']
elif 'public' in cc and 'private' in kwargs:
del cc['public']
for (k, v) in kwargs.items():
cc[k.replace('_', '-')] = v
cc = b', '.join([dictvalue(el) for el in cc.items()])
response['Cache-Control'] = cc
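# Illustrative sketch (not called anywhere): how the keyword-to-header
# transformation described in patch_cache_control's docstring plays out.
# FakeResponse is a stand-in for a real HttpResponse.
def _example_patch_cache_control():
    class FakeResponse(dict):
        def has_header(self, name):
            return name in self
    response = FakeResponse({'Cache-Control': 'max-age=3600, public'})
    patch_cache_control(response, max_age=600, no_transform=True)
    # max-age is lowered to the smaller of the two values and underscores
    # become hyphens, so the header now contains (order may vary):
    # 'max-age=600', 'public' and 'no-transform'.
    return response['Cache-Control']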
def get_max_age(response):
"""
Returns the max-age from the response Cache-Control header as an integer
    (or ``None`` if it wasn't found or wasn't an integer).
"""
if not response.has_header('Cache-Control'):
return
cc = dict([_to_tuple(el) for el in
cc_delim_re.split(response['Cache-Control'])])
if 'max-age' in cc:
try:
return int(cc['max-age'])
except (ValueError, TypeError):
pass
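# Illustrative sketch (not called anywhere): get_max_age() simply parses the
# integer out of an existing Cache-Control header and returns None when it is
# absent or not an integer.
def _example_get_max_age():
    class FakeResponse(dict):
        def has_header(self, name):
            return name in self
    response = FakeResponse({'Cache-Control': 'public, max-age=300'})
    return get_max_age(response)   # 300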
def _set_response_etag(response):
response['ETag'] = '"%s"' % hashlib.md5(response.content).hexdigest()
return response
def patch_response_headers(response, cache_timeout=None):
"""
Adds some useful headers to the given HttpResponse object:
ETag, Last-Modified, Expires and Cache-Control
Each header is only added if it isn't already set.
cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used
by default.
"""
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
if cache_timeout < 0:
cache_timeout = 0 # Can't have max-age negative
if settings.USE_ETAGS and not response.has_header('ETag'):
if hasattr(response, 'render') and callable(response.render):
response.add_post_render_callback(_set_response_etag)
else:
response = _set_response_etag(response)
if not response.has_header('Last-Modified'):
response['Last-Modified'] = http_date()
if not response.has_header('Expires'):
response['Expires'] = http_date(time.time() + cache_timeout)
patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
"""
Adds headers to a response to indicate that a page should never be cached.
"""
patch_response_headers(response, cache_timeout=-1)
def patch_vary_headers(response, newheaders):
"""
Adds (or updates) the "Vary" header in the given HttpResponse object.
newheaders is a list of header names that should be in "Vary". Existing
headers in "Vary" aren't removed.
"""
# Note that we need to keep the original order intact, because cache
# implementations may rely on the order of the Vary contents in, say,
# computing an MD5 hash.
if response.has_header('Vary'):
vary_headers = cc_delim_re.split(response['Vary'])
else:
vary_headers = []
# Use .lower() here so we treat headers as case-insensitive.
existing_headers = set([header.lower() for header in vary_headers])
additional_headers = [newheader for newheader in newheaders
if newheader.lower() not in existing_headers]
response['Vary'] = ', '.join(vary_headers + additional_headers)
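# Illustrative sketch (not called anywhere): patch_vary_headers() appends only
# the header names not already present (compared case-insensitively) and keeps
# the original order, as the note above explains.
def _example_patch_vary_headers():
    class FakeResponse(dict):
        def has_header(self, name):
            return name in self
    response = FakeResponse({'Vary': 'Cookie'})
    patch_vary_headers(response, ['accept-language', 'cookie'])
    return response['Vary']   # 'Cookie, accept-language'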
def has_vary_header(response, header_query):
"""
Checks to see if the response has a given header name in its Vary header.
"""
if not response.has_header('Vary'):
return False
vary_headers = cc_delim_re.split(response['Vary'])
existing_headers = set([header.lower() for header in vary_headers])
return header_query.lower() in existing_headers
def _i18n_cache_key_suffix(request, cache_key):
"""If necessary, adds the current locale or time zone to the cache key."""
if settings.USE_I18N or settings.USE_L10N:
# first check if LocaleMiddleware or another middleware added
# LANGUAGE_CODE to request, then fall back to the active language
# which in turn can also fall back to settings.LANGUAGE_CODE
cache_key += '.%s' % getattr(request, 'LANGUAGE_CODE', get_language())
if settings.USE_TZ:
# The datetime module doesn't restrict the output of tzname().
        # Windows is known to use non-standard, locale-dependent names.
# User-defined tzinfo classes may return absolutely anything.
# Hence this paranoid conversion to create a valid cache key.
tz_name = force_unicode(get_current_timezone_name(), errors='ignore')
cache_key += '.%s' % tz_name.encode('ascii',
'ignore').replace(b' ',
b'_').decode('ascii')
return cache_key
def _generate_cache_key(request, method, headerlist, key_prefix):
"""Returns a cache key from the headers given in the header list."""
ctx = hashlib.md5()
for header in headerlist:
value = request.META.get(header, None)
if value is not None:
ctx.update(value)
path = hashlib.md5(iri_to_uri(request.get_full_path()).encode('utf-8'))
cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
key_prefix, method, path.hexdigest(), ctx.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
"""Returns a cache key for the header cache."""
path = hashlib.md5(iri_to_uri(request.get_full_path()).encode('utf-8'))
cache_key = 'views.decorators.cache.cache_header.%s.%s' % (
key_prefix, path.hexdigest())
return _i18n_cache_key_suffix(request, cache_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
"""
Returns a cache key based on the request path and query. It can be used
in the request phase because it pulls the list of headers to take into
account from the global path registry and uses those to build a cache key
to check against.
If there is no headerlist stored, the page needs to be rebuilt, so this
function returns None.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
headerlist = cache.get(cache_key, None)
if headerlist is not None:
return _generate_cache_key(request, method, headerlist, key_prefix)
else:
return None
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
"""
Learns what headers to take into account for some request path from the
response object. It stores those headers in a global path registry so that
later access to that path will know what headers to take into account
without building the response object itself. The headers are named in the
Vary header of the response, but we want to prevent response generation.
The list of headers to use for cache key generation is stored in the same
cache as the pages themselves. If the cache ages some data out of the
cache, this just means that we have to build the response once to get at
the Vary header and so at the list of headers to use for the cache key.
"""
if key_prefix is None:
key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
if cache_timeout is None:
cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
cache_key = _generate_cache_header_key(key_prefix, request)
if cache is None:
cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
if response.has_header('Vary'):
headerlist = ['HTTP_'+header.upper().replace('-', '_')
for header in cc_delim_re.split(response['Vary'])]
cache.set(cache_key, headerlist, cache_timeout)
return _generate_cache_key(request, request.method, headerlist, key_prefix)
else:
# if there is no Vary header, we still need a cache key
# for the request.get_full_path()
cache.set(cache_key, [], cache_timeout)
return _generate_cache_key(request, request.method, [], key_prefix)
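# Illustrative sketch (not called anywhere): the intended two-phase use of
# learn_cache_key() and get_cache_key() by caching middleware. `request` and
# `response` stand for the usual Django objects; cache and settings
# configuration is assumed to be in place.
def _example_cache_key_flow(request, response):
    # Response phase: record which headers this path varies on and obtain the
    # key under which the rendered response should be stored.
    store_key = learn_cache_key(request, response, cache_timeout=300)
    # Request phase (on a later hit): rebuild the key from the registry; None
    # means nothing has been learned for this path yet.
    lookup_key = get_cache_key(request)
    return store_key, lookup_key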
def _to_tuple(s):
t = s.split('=',1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
|
|
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
# Prepare a release
#
# 1. Update the Version.java to remove the snapshot bit
# 2. Remove the -SNAPSHOT suffix in all pom.xml files
#
# USAGE:
#
# python3 ./dev-tools/prepare-release.py
#
# Note: Ensure the script is run from the elasticsearch top level directory
#
import fnmatch
import argparse
from prepare_release_update_documentation import update_reference_docs
import subprocess
import tempfile
import re
import os
import shutil
VERSION_FILE = 'core/src/main/java/org/elasticsearch/Version.java'
POM_FILE = 'pom.xml'
MAIL_TEMPLATE = """
Hi all
The new release candidate for %(version)s based on this commit[1] is now available, including the x-plugins, and RPM/deb repos:
- ZIP [2]
- tar.gz [3]
- RPM [4]
- deb [5]
Plugins can be installed as follows,
bin/plugin -Des.plugins.staging=true install cloud-aws
The same goes for the x-plugins:
bin/plugin -Des.plugins.staging=true install license
bin/plugin -Des.plugins.staging=true install shield
bin/plugin -Des.plugins.staging=true install watcher
To install the deb from an APT repo:
APT line sources.list line:
deb http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/repos/elasticsearch/%(major_minor_version)s/debian/ stable main
To install the RPM, create a YUM file like:
/etc/yum.repos.d/elasticsearch.repo
containing:
[elasticsearch-2.0]
name=Elasticsearch repository for packages
baseurl=http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/repos/elasticsearch/%(major_minor_version)s/centos
gpgcheck=1
gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1
To smoke-test the release please run:
python3 -B ./dev-tools/smoke_tests_rc.py --version %(version)s --hash %(hash)s --plugins license,shield,watcher
NOTE: this script requires JAVA_HOME to point to a Java 7 Runtime
[1] https://github.com/elastic/elasticsearch/commit/%(hash)s
[2] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/zip/elasticsearch/%(version)s/elasticsearch-%(version)s.zip
[3] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/tar/elasticsearch/%(version)s/elasticsearch-%(version)s.tar.gz
[4] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/rpm/elasticsearch/%(version)s/elasticsearch-%(version)s.rpm
[5] http://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/elasticsearch/distribution/deb/elasticsearch/%(version)s/elasticsearch-%(version)s.deb
"""
VERBOSE=True
def run(command, env_vars=None, verbose=VERBOSE):
if env_vars:
for key, value in env_vars.items():
os.putenv(key, value)
if not verbose:
command = '%s >> /dev/null 2>&1' % (command)
if os.system(command):
raise RuntimeError(' FAILED: %s' % (command))
def ensure_checkout_is_clean():
# Make sure no local mods:
s = subprocess.check_output('git diff --shortstat', shell=True).decode('utf-8')
if len(s) > 0:
raise RuntimeError('git diff --shortstat is non-empty got:\n%s' % s)
# Make sure no untracked files:
s = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace')
if 'Untracked files:' in s:
if 'dev-tools/__pycache__/' in s:
print('*** NOTE: invoke python with -B to prevent __pycache__ directories ***')
raise RuntimeError('git status shows untracked files got:\n%s' % s)
# Make sure we have all changes from origin:
if 'is behind' in s:
raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin" in this branch got:\n%s' % (s))
# Make sure we no local unpushed changes (this is supposed to be a clean area):
if 'is ahead' in s:
raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout ", "git reset --hard origin/" in this branch got:\n%s' % (s))
# Reads the given file and applies the
# callback to it. If the callback changed
# a line the given file is replaced with
# the modified input.
def process_file(file_path, line_callback):
fh, abs_path = tempfile.mkstemp()
modified = False
with open(abs_path,'w', encoding='utf-8') as new_file:
with open(file_path, encoding='utf-8') as old_file:
for line in old_file:
new_line = line_callback(line)
modified = modified or (new_line != line)
new_file.write(new_line)
os.close(fh)
if modified:
#Remove original file
os.remove(file_path)
#Move new file
shutil.move(abs_path, file_path)
return True
else:
# nothing to do - just remove the tmp file
os.remove(abs_path)
return False
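# Illustrative sketch (not used by the release flow above): process_file()
# applies a per-line callback and reports whether anything changed; here the
# callback strips the -SNAPSHOT suffix from any line of a pom.xml.
def _example_process_file():
    def drop_snapshot(line):
        return line.replace('-SNAPSHOT', '')
    changed = process_file('pom.xml', drop_snapshot)
    return changed   # True only if at least one line was modified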
# Moves the Version.java file from a snapshot to a release
def remove_version_snapshot(version_file, release):
# 1.0.0.Beta1 -> 1_0_0_Beta1
release = release.replace('.', '_')
release = release.replace('-', '_')
pattern = 'new Version(V_%s_ID, true' % (release)
replacement = 'new Version(V_%s_ID, false' % (release)
def callback(line):
return line.replace(pattern, replacement)
processed = process_file(version_file, callback)
if not processed:
raise RuntimeError('failed to remove snapshot version for %s' % (release))
def rename_local_meta_files(path):
for root, _, file_names in os.walk(path):
for file_name in fnmatch.filter(file_names, 'maven-metadata-local.xml*'):
full_path = os.path.join(root, file_name)
os.rename(full_path, os.path.join(root, file_name.replace('-local', '')))
# Checks the pom.xml for the release version.
# This method fails if the pom file has no SNAPSHOT version set ie.
# if the version is already on a release version we fail.
# Returns the next version string ie. 0.90.7
def find_release_version():
with open('pom.xml', encoding='utf-8') as file:
for line in file:
match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
if match:
return match.group(1)
raise RuntimeError('Could not find release version in branch')
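# Illustrative sketch (not called anywhere): the regex used by
# find_release_version() pulls the bare version out of a Maven SNAPSHOT
# version tag.
def _example_version_regex():
    line = '    <version>2.1.0-SNAPSHOT</version>'
    match = re.search(r'<version>(.+)-SNAPSHOT</version>', line)
    return match.group(1)   # '2.1.0'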
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Builds and publishes a Elasticsearch Release')
parser.add_argument('--deploy', '-d', dest='deploy', action='store_true',
                        help='Installs and Deploys the release on a Sonatype staging repository.')
    parser.add_argument('--skipDocCheck', '-c', dest='skip_doc_check', action='store_true',
help='Skips any checks for pending documentation changes')
parser.add_argument('--push-s3', '-p', dest='push', action='store_true',
help='Pushes artifacts to the S3 staging area')
parser.add_argument('--install_only', '-i', dest='install_only', action='store_true',
                        help='Only runs a maven install to skip the remote deployment step')
parser.add_argument('--gpg-key', '-k', dest='gpg_key', default="D88E42B4",
help='Allows you to specify a different gpg_key to be used instead of the default release key')
    parser.add_argument('--verbose', '-b', dest='verbose', action='store_true',
help='Runs the script in verbose mode')
parser.set_defaults(deploy=False)
parser.set_defaults(skip_doc_check=False)
parser.set_defaults(push=False)
parser.set_defaults(install_only=False)
parser.set_defaults(verbose=False)
args = parser.parse_args()
install_and_deploy = args.deploy
skip_doc_check = args.skip_doc_check
push = args.push
gpg_key = args.gpg_key
install_only = args.install_only
VERBOSE = args.verbose
ensure_checkout_is_clean()
release_version = find_release_version()
if not re.match('(\d+\.\d+)\.*',release_version):
raise RuntimeError('illegal release version format: %s' % (release_version))
major_minor_version = re.match('(\d+\.\d+)\.*',release_version).group(1)
print('*** Preparing release version: [%s]' % release_version)
if not skip_doc_check:
print('*** Check for pending documentation changes')
pending_files = update_reference_docs(release_version)
if pending_files:
raise RuntimeError('pending coming[%s] documentation changes found in %s' % (release_version, pending_files))
run('cd dev-tools && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
run('cd rest-api-spec && mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
run('mvn versions:set -DnewVersion=%s -DgenerateBackupPoms=false' % (release_version))
remove_version_snapshot(VERSION_FILE, release_version)
    print('*** Done removing snapshot version. DO NOT COMMIT THIS WHEN CREATING A RELEASE CANDIDATE.')
shortHash = subprocess.check_output('git log --pretty=format:"%h" -n 1', shell=True).decode('utf-8')
localRepo = '/tmp/elasticsearch-%s-%s' % (release_version, shortHash)
localRepoElasticsearch = localRepo + '/org/elasticsearch'
if os.path.exists(localRepoElasticsearch):
print('clean local repository %s' % localRepoElasticsearch)
shutil.rmtree(localRepoElasticsearch)
if install_only:
mvn_target = 'install'
else:
mvn_target = 'deploy'
install_command = 'mvn clean %s -Prelease -Dskip.integ.tests=true -Dgpg.key="%s" -Dpackaging.rpm.rpmbuild=/usr/bin/rpmbuild -Drpm.sign=true -Dmaven.repo.local=%s -Dno.commit.pattern="\\bno(n|)commit\\b" -Dforbidden.test.signatures=""' % (mvn_target, gpg_key, localRepo)
    clean_repo_command = r'find %s -name _remote.repositories -exec rm {} \;' % (localRepoElasticsearch)
rename_metadata_files_command = 'for i in $(find %s -name "maven-metadata-local.xml*") ; do mv "$i" "${i/-local/}" ; done' % (localRepoElasticsearch)
s3_sync_command = 's3cmd sync %s s3://download.elasticsearch.org/elasticsearch/staging/%s-%s/org/' % (localRepoElasticsearch, release_version, shortHash)
s3_bucket_sync_to = 'download.elasticsearch.org/elasticsearch/staging/%s-%s/repos' % (release_version, shortHash)
if install_and_deploy:
for cmd in [install_command, clean_repo_command]:
run(cmd)
rename_local_meta_files(localRepoElasticsearch)
else:
print('')
print('*** To create a release candidate run: ')
print(' %s' % (install_command))
print(' 1. Remove all _remote.repositories: %s' % (clean_repo_command))
print(' 2. Rename all maven metadata files: %s' % (rename_metadata_files_command))
if push:
run(s3_sync_command)
print('Use rpm-s3/deb-s3 to push into repositories at %s' % s3_bucket_sync_to)
else:
print('')
print('*** To push a release candidate to s3 run: ')
print(' 1. Sync %s into S3 bucket' % (localRepoElasticsearch))
print (' %s' % (s3_sync_command))
print(' 2. Create repositories: ')
print(' Use rpm-s3/deb-s3 to push into repositories at %s' % s3_bucket_sync_to)
print('')
    print('NOTE: the above mvn command will prompt you several times for the GPG passphrase of the key you specified; you can alternatively pass it via -Dgpg.passphrase=yourPassPhrase')
    print(' since RPM signing doesn\'t support gpg-agents, the recommended way to set the password is to add a release profile to your settings.xml:')
print("""
<profiles>
<profile>
<id>release</id>
<properties>
<gpg.passphrase>YourPasswordGoesHere</gpg.passphrase>
</properties>
</profile>
</profiles>
""")
    print('NOTE: Running s3cmd might require you to create a config file with your credentials, if s3cmd does not support supplying them via the command line!')
print('*** Once the release is deployed and published send out the following mail to dev@elastic.co:')
string_format_dict = {'version' : release_version, 'hash': shortHash, 'major_minor_version' : major_minor_version}
print(MAIL_TEMPLATE % string_format_dict)
print('To publish the release and the repo on S3 execute the following commands:')
print(' s3cmd cp --recursive s3://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/repos/elasticsearch/%(major_minor_version)s/ s3://packages.elasticsearch.org/elasticsearch/%(major_minor_version)s' % string_format_dict)
print(' s3cmd cp --recursive s3://download.elasticsearch.org/elasticsearch/staging/%(version)s-%(hash)s/org/ s3://download.elasticsearch.org/elasticsearch/release/org' % string_format_dict)
print('Now go ahead and tag the release:')
print(' git tag -a v%(version)s %(hash)s' % string_format_dict)
print(' git push origin v%(version)s' % string_format_dict )
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking"""
import uuid
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import exception
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import test
from nova.virt import driver
LOG = logging.getLogger(__name__)
class FakeContext(object):
def __init__(self, is_admin=False):
self.is_admin = is_admin
def elevated(self):
return FakeContext(is_admin=True)
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver"""
def get_available_resource(self):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self):
self.memory_mb = 5
self.local_gb = 6
self.vcpus = 1
self.memory_mb_used = 0
self.local_gb_used = 0
def get_available_resource(self):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
}
return d
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = FakeContext()
self._instances = []
self.stubs.Set(db, 'instance_get_all_by_filters',
self._fake_instance_get_all_by_filters)
def _create_compute_node(self, values=None):
compute = {
"id": 1,
"service_id": 1,
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"stats": [{"key": "num_instances", "value": "1"}]
}
if values:
compute.update(values)
return compute
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
}
return service
def _fake_instance(self, *args, **kwargs):
instance = {
'uuid': str(uuid.uuid1()),
'vm_state': vm_states.BUILDING,
'task_state': None,
'memory_mb': 2,
'root_gb': 3,
'ephemeral_gb': 1,
'os_type': 'Linux',
'project_id': '123456',
'vcpus': 1,
'host': None,
}
instance.update(kwargs)
self._instances.append(instance)
return instance
def _fake_instance_get_all_by_filters(self, ctx, filters, **kwargs):
return self._instances
def _tracker(self, unsupported=False):
host = "fakehost"
if unsupported:
driver = UnsupportedVirtDriver()
else:
driver = FakeVirtDriver()
tracker = resource_tracker.ResourceTracker(host, driver)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker(unsupported=True)
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def testDisabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertEqual(None, self.tracker.compute_node)
def testDisabledClaim(self):
# basic claim:
claim = self.tracker.begin_resource_claim(self.context, 1, 1)
self.assertEqual(None, claim)
def testDisabledInstanceClaim(self):
# instance variation:
instance = self._fake_instance()
claim = self.tracker.begin_resource_claim(self.context, instance)
self.assertEqual(None, claim)
def testDisabledInstanceContextClaim(self):
# instance context manager variation:
instance = self._fake_instance()
with self.tracker.resource_claim(self.context, instance):
pass
self.assertEqual(0, len(self.tracker.claims))
def testDisabledFinishClaim(self):
self.assertEqual(None, self.tracker.finish_resource_claim(None))
def testDisabledAbortClaim(self):
self.assertEqual(None, self.tracker.abort_resource_claim(self.context,
None))
def testDisabledUpdateUsage(self):
instance = self._fake_instance(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
super(MissingServiceTestCase, self).setUp()
self.context = FakeContext(is_admin=True)
self.tracker = self._tracker()
def testMissingService(self):
"""No service record in DB."""
self.tracker.update_available_resource(self.context)
self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_all_compute_by_host',
self._fake_service_get_all_compute_by_host)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node()
def _fake_service_get_all_compute_by_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return [service]
def testCreatedComputeNode(self):
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def testEnabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class ResourceTestCase(BaseTestCase):
def setUp(self):
super(ResourceTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_all_compute_by_host',
self._fake_service_get_all_compute_by_host)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.tracker.update_available_resource(self.context)
def _fake_service_get_all_compute_by_host(self, ctx, host):
self.compute = self._create_compute_node()
self.service = self._create_service(host, compute=self.compute)
return [self.service]
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.updated = True
values['stats'] = [{"key": "num_instances", "value": "1"}]
self.compute.update(values)
return self.compute
def testUpdateUseOnlyForTracked(self):
"""Only update usage is a previous claim has added instance to
list of tracked instances.
"""
instance = self._fake_instance(memory_mb=3, root_gb=1, ephemeral_gb=1,
task_state=None)
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
self.assertEqual(0, self.tracker.compute_node['current_workload'])
claim = self.tracker.begin_resource_claim(self.context, instance)
self.assertNotEqual(None, claim)
self.assertEqual(3, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
# now update should actually take effect
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(3, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
self.assertEqual(1, self.tracker.compute_node['current_workload'])
def testFreeRamResourceValue(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
def testFreeDiskResourceValue(self):
driver = FakeVirtDriver()
        disk_free = driver.local_gb - driver.local_gb_used
        self.assertEqual(disk_free, self.tracker.compute_node['free_disk_gb'])
def testUpdateComputeNode(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def testCpuUnlimited(self):
"""Test default of unlimited CPU"""
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1,
vcpus=100000)
claim = self.tracker.begin_resource_claim(self.context, instance)
self.assertNotEqual(None, claim)
self.assertEqual(100000, self.tracker.compute_node['vcpus_used'])
def testCpuOversubscription(self):
"""Test client-supplied oversubscription of CPU"""
self.assertEqual(1, self.tracker.compute_node['vcpus'])
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1,
vcpus=3)
limits = {'vcpu': 5}
claim = self.tracker.begin_resource_claim(self.context, instance,
limits)
self.assertNotEqual(None, claim)
self.assertEqual(3, self.tracker.compute_node['vcpus_used'])
def testMemoryOversubscription(self):
"""Test client-supplied oversubscription of memory"""
instance = self._fake_instance(memory_mb=8, root_gb=1, ephemeral_gb=1)
limits = {'memory_mb': 8}
claim = self.tracker.begin_resource_claim(self.context, instance,
limits)
self.assertNotEqual(None, claim)
self.assertEqual(8, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
def testDiskOversubscription(self):
"""Test client-supplied oversubscription of disk space"""
instance = self._fake_instance(memory_mb=1, root_gb=10, ephemeral_gb=1)
limits = {'disk_gb': 12}
claim = self.tracker.begin_resource_claim(self.context, instance,
limits)
self.assertNotEqual(None, claim)
self.assertEqual(1, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(11, self.tracker.compute_node['local_gb_used'])
def testUnlimitedMemoryClaim(self):
"""Test default of unlimited memory"""
instance = self._fake_instance(memory_mb=200000000000, root_gb=1,
ephemeral_gb=1)
claim = self.tracker.begin_resource_claim(self.context, instance)
self.assertNotEqual(None, claim)
self.assertEqual(200000000000,
self.tracker.compute_node['memory_mb_used'])
def testInsufficientMemoryClaimWithOversubscription(self):
"""Exceed oversubscribed memory limit of 10MB"""
instance = self._fake_instance(memory_mb=10, root_gb=0,
ephemeral_gb=0)
limits = {'memory_mb': 10}
claim = self.tracker.begin_resource_claim(self.context, instance,
limits)
self.assertNotEqual(None, claim)
instance = self._fake_instance(memory_mb=1, root_gb=0,
ephemeral_gb=0)
limits = {'memory_mb': 10}
claim = self.tracker.begin_resource_claim(self.context, instance,
limits)
self.assertEqual(None, claim)
def testUnlimitDiskClaim(self):
"""Test default of unlimited disk space"""
instance = self._fake_instance(memory_mb=0, root_gb=200000000,
ephemeral_gb=0)
claim = self.tracker.begin_resource_claim(self.context, instance)
self.assertNotEqual(None, claim)
self.assertEqual(200000000, self.tracker.compute_node['local_gb_used'])
def testInsufficientDiskClaimWithOversubscription(self):
"""Exceed oversubscribed disk limit of 10GB"""
instance = self._fake_instance(memory_mb=1, root_gb=4,
ephemeral_gb=5) # 9 GB
limits = {'disk_gb': 10}
claim = self.tracker.begin_resource_claim(self.context, instance,
limits)
self.assertNotEqual(None, claim)
instance = self._fake_instance(memory_mb=1, root_gb=1,
ephemeral_gb=1) # 2 GB
limits = {'disk_gb': 10}
claim = self.tracker.begin_resource_claim(self.context, instance,
limits)
self.assertEqual(None, claim)
def testInsufficientCpuClaim(self):
instance = self._fake_instance(memory_mb=0, root_gb=0,
ephemeral_gb=0, vcpus=1)
claim = self.tracker.begin_resource_claim(self.context, instance)
self.assertNotEqual(None, claim)
self.assertEqual(1, self.tracker.compute_node['vcpus_used'])
instance = self._fake_instance(memory_mb=0, root_gb=0,
ephemeral_gb=0, vcpus=1)
limits = {'vcpu': 1}
claim = self.tracker.begin_resource_claim(self.context, instance,
limits)
self.assertEqual(None, claim)
def testClaimAndFinish(self):
self.assertEqual(5, self.tracker.compute_node['memory_mb'])
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(6, self.tracker.compute_node['local_gb'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
claim_mem = 3
claim_disk = 2
instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk,
ephemeral_gb=0)
claim = self.tracker.begin_resource_claim(self.context, instance)
self.assertEqual(5, self.compute["memory_mb"])
self.assertEqual(claim_mem, self.compute["memory_mb_used"])
self.assertEqual(5 - claim_mem, self.compute["free_ram_mb"])
self.assertEqual(6, self.compute["local_gb"])
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(6 - claim_disk, self.compute["free_disk_gb"])
# 1st pretend that the compute operation finished and claimed the
# desired resources from the virt layer
driver = self.tracker.driver
driver.memory_mb_used = claim_mem
driver.local_gb_used = claim_disk
self.tracker.update_available_resource(self.context)
# confirm that resource usage is derived from instance usages,
# not virt layer:
self.assertEqual(claim_mem, self.compute['memory_mb_used'])
self.assertEqual(5 - claim_mem, self.compute['free_ram_mb'])
self.assertEqual(claim_disk, self.compute['local_gb_used'])
self.assertEqual(6 - claim_disk, self.compute['free_disk_gb'])
        # Finally, finish the claim and update from the virt layer again.
# Resource usage will be consistent again:
self.tracker.finish_resource_claim(claim)
self.tracker.update_available_resource(self.context)
self.assertEqual(claim_mem, self.compute['memory_mb_used'])
self.assertEqual(5 - claim_mem, self.compute['free_ram_mb'])
self.assertEqual(claim_disk, self.compute['local_gb_used'])
self.assertEqual(6 - claim_disk, self.compute['free_disk_gb'])
def testClaimAndAbort(self):
self.assertEqual(5, self.tracker.compute_node['memory_mb'])
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(6, self.tracker.compute_node['local_gb'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
claim_mem = 3
claim_disk = 2
instance = self._fake_instance(memory_mb=claim_mem,
root_gb=claim_disk, ephemeral_gb=0)
claim = self.tracker.begin_resource_claim(self.context, instance)
self.assertNotEqual(None, claim)
self.assertEqual(5, self.compute["memory_mb"])
self.assertEqual(claim_mem, self.compute["memory_mb_used"])
self.assertEqual(5 - claim_mem, self.compute["free_ram_mb"])
self.assertEqual(6, self.compute["local_gb"])
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(6 - claim_disk, self.compute["free_disk_gb"])
self.tracker.abort_resource_claim(self.context, claim)
self.assertEqual(5, self.compute["memory_mb"])
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(5, self.compute["free_ram_mb"])
self.assertEqual(6, self.compute["local_gb"])
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(6, self.compute["free_disk_gb"])
def testExpiredClaims(self):
"""Test that old claims get cleaned up automatically if not finished
or aborted explicitly.
"""
instance = self._fake_instance(memory_mb=2, root_gb=2, ephemeral_gb=0)
claim = self.tracker.begin_resource_claim(self.context, instance)
claim.expire_ts = timeutils.utcnow_ts() - 1
self.assertTrue(claim.is_expired())
# and an unexpired claim
instance2 = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=0)
claim2 = self.tracker.begin_resource_claim(self.context, instance2)
self.assertEqual(2, len(self.tracker.claims))
self.assertEqual(2 + 1, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2 + 1, self.tracker.compute_node['local_gb_used'])
# expired claims get expunged when audit runs:
self.tracker.update_available_resource(self.context)
self.assertEqual(1, len(self.tracker.claims))
self.assertEqual(2, len(self.tracker.tracked_instances))
# the expired claim's instance is assumed to still exist, so the
# resources should be counted:
self.assertEqual(2 + 1, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2 + 1, self.tracker.compute_node['local_gb_used'])
# this abort should do nothing because the claim was purged due to
# expiration:
self.tracker.abort_resource_claim(self.context, claim)
# call finish on claim2:
self.tracker.finish_resource_claim(claim2)
# should have usage from both instances:
self.assertEqual(1 + 2, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(1 + 2, self.tracker.compute_node['local_gb_used'])
def testInstanceClaim(self):
instance = self._fake_instance(memory_mb=1, root_gb=0, ephemeral_gb=2)
self.tracker.begin_resource_claim(self.context, instance)
self.assertEqual(1, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
def testContextClaimWithException(self):
try:
with self.tracker.resource_claim(self.context, memory_mb=1,
disk_gb=1):
# <insert exciting things that utilize resources>
raise Exception("THE SKY IS FALLING")
except Exception:
pass
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
def testInstanceContextClaim(self):
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
with self.tracker.resource_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(1, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
self.assertEqual(1, self.compute['memory_mb_used'])
self.assertEqual(2, self.compute['local_gb_used'])
# after exiting claim context, build is marked as finished. usage
# totals should be same:
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2, self.tracker.compute_node['local_gb_used'])
self.assertEqual(1, self.compute['memory_mb_used'])
self.assertEqual(2, self.compute['local_gb_used'])
def testUpdateLoadStatsForInstance(self):
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
instance = self._fake_instance(task_state=task_states.SCHEDULING)
with self.tracker.resource_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node['current_workload'])
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
def testCpuStats(self):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
instance = self._fake_instance(vcpus=1)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
with self.tracker.resource_claim(self.context, instance, limits):
pass
self.assertEqual(1, self.tracker.compute_node['vcpus_used'])
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(1, self.tracker.compute_node['vcpus_used'])
instance = self._fake_instance(vcpus=10)
with self.tracker.resource_claim(self.context, instance, limits):
pass
self.assertEqual(11, self.tracker.compute_node['vcpus_used'])
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
self.assertEqual(1, self.tracker.compute_node['vcpus_used'])
|
|
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from keystoneclient.common import cms
import six
from keystone.common import controller
from keystone.common import dependency
from keystone.common import wsgi
from keystone import config
from keystone.contrib import federation
from keystone import exception
from keystone.i18n import _, _LI
from keystone.openstack.common import importutils
from keystone.openstack.common import jsonutils
from keystone.openstack.common import log
from keystone.openstack.common import timeutils
LOG = log.getLogger(__name__)
CONF = config.CONF
# registry of authentication methods
AUTH_METHODS = {}
AUTH_PLUGINS_LOADED = False
def load_auth_methods():
global AUTH_PLUGINS_LOADED
if AUTH_PLUGINS_LOADED:
# Only try and load methods a single time.
return
# config.setup_authentication should be idempotent, call it to ensure we
# have setup all the appropriate configuration options we may need.
config.setup_authentication()
for plugin in CONF.auth.methods:
if '.' in plugin:
# NOTE(morganfainberg): if '.' is in the plugin name, it should be
# imported rather than used as a plugin identifier.
plugin_class = plugin
driver = importutils.import_object(plugin)
if not hasattr(driver, 'method'):
                raise ValueError(_('Cannot load an auth-plugin by class-name '
                                   'without a "method" attribute defined: %s')
                                 % plugin_class)
else:
plugin_class = CONF.auth.get(plugin)
driver = importutils.import_object(plugin_class)
if hasattr(driver, 'method'):
if driver.method != plugin:
raise ValueError(_('Driver requested method %(req)s does '
'not match plugin name %(plugin)s.') %
{'req': driver.method,
'plugin': plugin})
else:
LOG.warning(_('Auth Plugin %s does not have a "method" '
'attribute.'), plugin)
setattr(driver, 'method', plugin)
if driver.method in AUTH_METHODS:
raise ValueError(_('Auth plugin %(plugin)s is requesting '
'previously registered method %(method)s') %
{'plugin': plugin_class, 'method': driver.method})
AUTH_METHODS[driver.method] = driver
AUTH_PLUGINS_LOADED = True
def get_auth_method(method_name):
global AUTH_METHODS
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
return AUTH_METHODS[method_name]
class AuthContext(dict):
"""Retrofitting auth_context to reconcile identity attributes.
The identity attributes must not have conflicting values among the
auth plug-ins. The only exception is `expires_at`, which is set to its
earliest value.
"""
# identity attributes need to be reconciled among the auth plugins
IDENTITY_ATTRIBUTES = frozenset(['user_id', 'project_id',
'access_token_id', 'domain_id',
'expires_at'])
def __setitem__(self, key, val):
if key in self.IDENTITY_ATTRIBUTES and key in self:
existing_val = self[key]
if key == 'expires_at':
# special treatment for 'expires_at', we are going to take
# the earliest expiration instead.
if existing_val != val:
LOG.info(_LI('"expires_at" has conflicting values '
'%(existing)s and %(new)s. Will use the '
'earliest value.'),
{'existing': existing_val, 'new': val})
if existing_val is None or val is None:
val = existing_val or val
else:
val = min(existing_val, val)
elif existing_val != val:
msg = _('Unable to reconcile identity attribute %(attribute)s '
'as it has conflicting values %(new)s and %(old)s') % (
{'attribute': key,
'new': val,
'old': existing_val})
raise exception.Unauthorized(msg)
return super(AuthContext, self).__setitem__(key, val)
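# Illustrative sketch (for documentation only, never called): merging two
# plugin results into an AuthContext. The timestamps are hypothetical
# ISO 8601 strings; for 'expires_at' the earlier value wins.
def _example_auth_context_merge():
    ctx = AuthContext()
    ctx['expires_at'] = '2014-01-01T00:10:00Z'
    ctx['expires_at'] = '2014-01-01T00:05:00Z'
    return ctx['expires_at']  # -> '2014-01-01T00:05:00Z'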
# TODO(blk-u): this class doesn't use identity_api directly, but makes it
# available for consumers. Consumers should probably not be getting
# identity_api from this since it's available in the global registry;
# identity_api should then be removed from this list.
@dependency.requires('assignment_api', 'identity_api', 'trust_api')
class AuthInfo(object):
"""Encapsulation of "auth" request."""
@staticmethod
def create(context, auth=None):
auth_info = AuthInfo(context, auth=auth)
auth_info._validate_and_normalize_auth_data()
return auth_info
def __init__(self, context, auth=None):
self.context = context
self.auth = auth
self._scope_data = (None, None, None)
# self._scope_data is (domain_id, project_id, trust_ref)
# project scope: (None, project_id, None)
# domain scope: (domain_id, None, None)
# trust scope: (None, None, trust_ref)
# unscoped: (None, None, None)
def _assert_project_is_enabled(self, project_ref):
# ensure the project is enabled
try:
self.assignment_api.assert_project_enabled(
project_id=project_ref['id'],
project=project_ref)
except AssertionError as e:
LOG.warning(e)
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
def _assert_domain_is_enabled(self, domain_ref):
try:
self.assignment_api.assert_domain_enabled(
domain_id=domain_ref['id'],
domain=domain_ref)
except AssertionError as e:
LOG.warning(e)
six.reraise(exception.Unauthorized, exception.Unauthorized(e),
sys.exc_info()[2])
def _lookup_domain(self, domain_info):
domain_id = domain_info.get('id')
domain_name = domain_info.get('name')
domain_ref = None
if not domain_id and not domain_name:
raise exception.ValidationError(attribute='id or name',
target='domain')
try:
if domain_name:
domain_ref = self.assignment_api.get_domain_by_name(
domain_name)
else:
domain_ref = self.assignment_api.get_domain(domain_id)
except exception.DomainNotFound as e:
LOG.exception(e)
raise exception.Unauthorized(e)
self._assert_domain_is_enabled(domain_ref)
return domain_ref
def _lookup_project(self, project_info):
project_id = project_info.get('id')
project_name = project_info.get('name')
project_ref = None
if not project_id and not project_name:
raise exception.ValidationError(attribute='id or name',
target='project')
try:
if project_name:
if 'domain' not in project_info:
raise exception.ValidationError(attribute='domain',
target='project')
domain_ref = self._lookup_domain(project_info['domain'])
project_ref = self.assignment_api.get_project_by_name(
project_name, domain_ref['id'])
else:
project_ref = self.assignment_api.get_project(project_id)
# NOTE(morganfainberg): The _lookup_domain method will raise
# exception.Unauthorized if the domain isn't found or is
# disabled.
self._lookup_domain({'id': project_ref['domain_id']})
except exception.ProjectNotFound as e:
LOG.exception(e)
raise exception.Unauthorized(e)
self._assert_project_is_enabled(project_ref)
return project_ref
def _lookup_trust(self, trust_info):
trust_id = trust_info.get('id')
if not trust_id:
raise exception.ValidationError(attribute='trust_id',
target='trust')
trust = self.trust_api.get_trust(trust_id)
if not trust:
raise exception.TrustNotFound(trust_id=trust_id)
return trust
def _validate_and_normalize_scope_data(self):
"""Validate and normalize scope data."""
if 'scope' not in self.auth:
return
if sum(['project' in self.auth['scope'],
'domain' in self.auth['scope'],
'OS-TRUST:trust' in self.auth['scope']]) != 1:
raise exception.ValidationError(
attribute='project, domain, or OS-TRUST:trust',
target='scope')
if 'project' in self.auth['scope']:
project_ref = self._lookup_project(self.auth['scope']['project'])
self._scope_data = (None, project_ref['id'], None)
elif 'domain' in self.auth['scope']:
domain_ref = self._lookup_domain(self.auth['scope']['domain'])
self._scope_data = (domain_ref['id'], None, None)
elif 'OS-TRUST:trust' in self.auth['scope']:
if not CONF.trust.enabled:
raise exception.Forbidden('Trusts are disabled.')
trust_ref = self._lookup_trust(
self.auth['scope']['OS-TRUST:trust'])
# TODO(ayoung): when trusts support domains, fill in domain data
if trust_ref.get('project_id') is not None:
project_ref = self._lookup_project(
{'id': trust_ref['project_id']})
self._scope_data = (None, project_ref['id'], trust_ref)
else:
self._scope_data = (None, None, trust_ref)
def _validate_auth_methods(self):
if 'identity' not in self.auth:
raise exception.ValidationError(attribute='identity',
target='auth')
# make sure auth methods are provided
if 'methods' not in self.auth['identity']:
raise exception.ValidationError(attribute='methods',
target='identity')
# make sure all the method data/payload are provided
for method_name in self.get_method_names():
if method_name not in self.auth['identity']:
raise exception.ValidationError(attribute=method_name,
target='identity')
# make sure auth method is supported
for method_name in self.get_method_names():
if method_name not in AUTH_METHODS:
raise exception.AuthMethodNotSupported()
def _validate_and_normalize_auth_data(self):
"""Make sure "auth" is valid."""
# make sure "auth" exist
if not self.auth:
raise exception.ValidationError(attribute='auth',
target='request body')
self._validate_auth_methods()
self._validate_and_normalize_scope_data()
def get_method_names(self):
"""Returns the identity method names.
:returns: list of auth method names
"""
# Sanitizes methods received in request's body
# Filters out duplicates, while keeping elements' order.
method_names = []
for method in self.auth['identity']['methods']:
if method not in method_names:
method_names.append(method)
return method_names
def get_method_data(self, method):
"""Get the auth method payload.
:returns: auth method payload
"""
if method not in self.auth['identity']['methods']:
raise exception.ValidationError(attribute=method,
target='identity')
return self.auth['identity'][method]
def get_scope(self):
"""Get scope information.
Verify and return the scoping information.
:returns: (domain_id, project_id, trust_ref).
                   If scoped to a project, (None, project_id, None)
will be returned.
If scoped to a domain, (domain_id, None, None)
will be returned.
                   If scoped to a trust, (None, project_id, trust_ref)
                   will be returned, where the project_id comes from the
                   trust definition.
If unscoped, (None, None, None) will be returned.
"""
return self._scope_data
def set_scope(self, domain_id=None, project_id=None, trust=None):
"""Set scope information."""
if domain_id and project_id:
msg = _('Scoping to both domain and project is not allowed')
raise ValueError(msg)
if domain_id and trust:
msg = _('Scoping to both domain and trust is not allowed')
raise ValueError(msg)
if project_id and trust:
msg = _('Scoping to both project and trust is not allowed')
raise ValueError(msg)
self._scope_data = (domain_id, project_id, trust)
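# Illustrative sketch only (never called): project scoping stores the tuple
# (None, project_id, None), matching the get_scope() docstring above. The
# project id is hypothetical.
def _example_scope_tuple(auth_info):
    auth_info.set_scope(project_id='hypothetical-project-id')
    return auth_info.get_scope()  # -> (None, 'hypothetical-project-id', None)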
@dependency.requires('assignment_api', 'identity_api', 'token_api',
'token_provider_api', 'trust_api')
class Auth(controller.V3Controller):
# Note(atiwari): From V3 auth controller code we are
# calling protection() wrappers, so we need to setup
# the member_name and collection_name attributes of
# auth controller code.
# In the absence of these attributes, default 'entity'
# string will be used to represent the target which is
# generic. Policy can be defined using 'entity' but it
# would not reflect the exact entity that is in context.
# We are defining collection_name = 'tokens' and
# member_name = 'token' to facilitate policy decisions.
collection_name = 'tokens'
member_name = 'token'
def __init__(self, *args, **kw):
super(Auth, self).__init__(*args, **kw)
config.setup_authentication()
def authenticate_for_token(self, context, auth=None):
"""Authenticate user and issue a token."""
include_catalog = 'nocatalog' not in context['query_string']
try:
auth_info = AuthInfo.create(context, auth=auth)
auth_context = AuthContext(extras={},
method_names=[],
bind={})
self.authenticate(context, auth_info, auth_context)
if auth_context.get('access_token_id'):
auth_info.set_scope(None, auth_context['project_id'], None)
self._check_and_set_default_scoping(auth_info, auth_context)
(domain_id, project_id, trust) = auth_info.get_scope()
if trust:
self.trust_api.consume_use(trust['id'])
method_names = auth_info.get_method_names()
method_names += auth_context.get('method_names', [])
# make sure the list is unique
method_names = list(set(method_names))
expires_at = auth_context.get('expires_at')
# NOTE(morganfainberg): define this here so it is clear what the
# argument is during the issue_v3_token provider call.
metadata_ref = None
(token_id, token_data) = self.token_provider_api.issue_v3_token(
auth_context['user_id'], method_names, expires_at, project_id,
domain_id, auth_context, trust, metadata_ref, include_catalog)
return render_token_data_response(token_id, token_data,
created=True)
except exception.TrustNotFound as e:
raise exception.Unauthorized(e)
def _check_and_set_default_scoping(self, auth_info, auth_context):
(domain_id, project_id, trust) = auth_info.get_scope()
if trust:
project_id = trust['project_id']
if domain_id or project_id or trust:
# scope is specified
return
# Skip scoping when unscoped federated token is being issued
if federation.IDENTITY_PROVIDER in auth_context:
return
# fill in default_project_id if it is available
try:
user_ref = self.identity_api.get_user(auth_context['user_id'])
except exception.UserNotFound as e:
LOG.exception(e)
raise exception.Unauthorized(e)
default_project_id = user_ref.get('default_project_id')
if not default_project_id:
            # The user has no default project, so an unscoped token is issued.
return
# make sure user's default project is legit before scoping to it
try:
default_project_ref = self.assignment_api.get_project(
default_project_id)
default_project_domain_ref = self.assignment_api.get_domain(
default_project_ref['domain_id'])
if (default_project_ref.get('enabled', True) and
default_project_domain_ref.get('enabled', True)):
if self.assignment_api.get_roles_for_user_and_project(
user_ref['id'], default_project_id):
auth_info.set_scope(project_id=default_project_id)
else:
msg = _("User %(user_id)s doesn't have access to"
" default project %(project_id)s. The token will"
" be unscoped rather than scoped to the project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
else:
msg = _("User %(user_id)s's default project %(project_id)s is"
" disabled. The token will be unscoped rather than"
" scoped to the project.")
LOG.warning(msg,
{'user_id': user_ref['id'],
'project_id': default_project_id})
except (exception.ProjectNotFound, exception.DomainNotFound):
# default project or default project domain doesn't exist,
# will issue unscoped token instead
msg = _("User %(user_id)s's default project %(project_id)s not"
" found. The token will be unscoped rather than"
" scoped to the project.")
LOG.warning(msg, {'user_id': user_ref['id'],
'project_id': default_project_id})
def authenticate(self, context, auth_info, auth_context):
"""Authenticate user."""
# The 'external' method allows any 'REMOTE_USER' based authentication
if 'REMOTE_USER' in context['environment']:
try:
external = get_auth_method('external')
external.authenticate(context, auth_info, auth_context)
except exception.AuthMethodNotSupported:
                # This will happen if there is no 'external' plugin registered
                # and the container is performing the authentication.
# The 'kerberos' and 'saml' methods will be used this way.
# In those cases, it is correct to not register an
# 'external' plugin; if there is both an 'external' and a
# 'kerberos' plugin, it would run the check on identity twice.
pass
# need to aggregate the results in case two or more methods
# are specified
auth_response = {'methods': []}
for method_name in auth_info.get_method_names():
method = get_auth_method(method_name)
resp = method.authenticate(context,
auth_info.get_method_data(method_name),
auth_context)
if resp:
auth_response['methods'].append(method_name)
auth_response[method_name] = resp
if auth_response["methods"]:
# authentication continuation required
raise exception.AdditionalAuthRequired(auth_response)
if 'user_id' not in auth_context:
msg = _('User not found')
raise exception.Unauthorized(msg)
@controller.protected()
def check_token(self, context):
token_id = context.get('subject_token_id')
token_data = self.token_provider_api.validate_v3_token(
token_id)
# NOTE(morganfainberg): The code in
# ``keystone.common.wsgi.render_response`` will remove the content
# body.
return render_token_data_response(token_id, token_data)
@controller.protected()
def revoke_token(self, context):
token_id = context.get('subject_token_id')
return self.token_provider_api.revoke_token(token_id)
@controller.protected()
def validate_token(self, context):
token_id = context.get('subject_token_id')
include_catalog = 'nocatalog' not in context['query_string']
token_data = self.token_provider_api.validate_v3_token(
token_id)
if not include_catalog and 'catalog' in token_data['token']:
del token_data['token']['catalog']
return render_token_data_response(token_id, token_data)
@controller.protected()
def revocation_list(self, context, auth=None):
if not CONF.token.revoke_by_id:
raise exception.Gone()
tokens = self.token_api.list_revoked_tokens()
for t in tokens:
expires = t['expires']
if not (expires and isinstance(expires, six.text_type)):
t['expires'] = timeutils.isotime(expires)
data = {'revoked': tokens}
json_data = jsonutils.dumps(data)
signed_text = cms.cms_sign_text(json_data,
CONF.signing.certfile,
CONF.signing.keyfile)
return {'signed': signed_text}
# FIXME(gyee): not sure if it belongs here or keystone.common. Park it here
# for now.
def render_token_data_response(token_id, token_data, created=False):
"""Render token data HTTP response.
Stash token ID into the X-Subject-Token header.
"""
headers = [('X-Subject-Token', token_id)]
if created:
status = (201, 'Created')
else:
status = (200, 'OK')
return wsgi.render_response(body=token_data,
status=status, headers=headers)
|
|
#! /usr/bin/env python3
from sys import argv, stderr, stdout
from collections import defaultdict
import operator
def fix_casing(label):
if label == 'OTHER':
return label
label = label.lower()
return label[0].upper() + label[1:]
def print_err_mat(err_mat, sorted_errs, total_errors):
print('\\begin{table}[!h]')
print('\\begin{center}')
print('\\begin{tabular}{l|%s}' % ('c' * len(sorted_errs)))
stdout.write(' & ')
for i, l_et_c in enumerate(sorted_errs):
gold_label, _count = l_et_c
if i + 1 == len(sorted_errs):
stdout.write(fix_casing(gold_label) + '\\\\\n')
else:
stdout.write(fix_casing(gold_label) + ' & ')
print('\\hline')
    print('\\noalign{\\smallskip}')
row_count = 0
for gold_label, _gold_count in sorted_errs:
stdout.write('%s & ' % (fix_casing(gold_label)))
for i, l_et_c in enumerate(sorted_errs):
sys_label, _sys_count = l_et_c
cell = '%.1f' % (err_mat[gold_label][sys_label]
* 100.0 / total_errors)
if i + 1 == len(sorted_errs):
stdout.write(cell + '\\\\\n')
else:
stdout.write(cell + ' & ')
row_count += 1
print('\\end{tabular}')
print('\\caption{}')
print('\\end{center}')
print('\\end{table}')
def print_tabular_errs(err_list):
total_errors = sum([x[1] for x in err_list]) * 1.0
print('\\begin{table}[!h]')
print('\\begin{center}')
print('\\begin{tabular}{lcc}')
    print('\\noalign{\\smallskip}')
print(' & absolute & relative\\\\')
print('main POS & frequency & frequency (\\%)\\\\')
print('\\hline')
    print('\\noalign{\\smallskip}')
for i, l_et_c in enumerate(err_list):
label, count = l_et_c
line = "%s & %u & %.1f" % (fix_casing(label), count, 100 * count / total_errors)
if i + 1 == len(err_list):
print(line)
else:
print(line + '\\\\')
print('\\end{tabular}')
print('\\caption{}')
print('\\end{center}')
print('\\end{table}')
def get_main_pos(label, is_tdt):
if is_tdt:
if label.find('|') == -1:
return label
label = label[:label.find('|')]
else:
if label.find('[POS=') == -1:
return None
label = label[label.find('[POS=') + 5:]
label = label[:label.find(']')]
return label
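# Small sketch (never executed) of how get_main_pos reduces labels in the two
# formats handled above; the label strings are hypothetical examples.
def _example_get_main_pos():
    tdt = get_main_pos('N|Num|Sg|Nom', True)         # -> 'N'
    ftb = get_main_pos('[POS=NOUN][NUM=SG]', False)  # -> 'NOUN'
    return tdt, ftb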
def get_labels(f):
labels = []
for line in f:
line = line.strip()
if line == '':
continue
_wf, _feats, _lemma, label, _ann = line.split('\t')
labels.append(label)
return labels
if __name__=='__main__':
if len(argv) != 5:
stderr.write("%s ftb_sys_file ftb_gold_file tdt_sys_file tdt_gold_file\n" % argv[0])
exit(1)
ftb_sys_labels = get_labels(open(argv[1]))
ftb_gold_labels = get_labels(open(argv[2]))
tdt_sys_labels = get_labels(open(argv[3]))
tdt_gold_labels = get_labels(open(argv[4]))
assert(len(ftb_sys_labels) == len(ftb_gold_labels))
assert(len(tdt_sys_labels) == len(tdt_gold_labels))
ftb_errs = defaultdict(lambda : 0.0)
tdt_errs = defaultdict(lambda : 0.0)
tot = len(ftb_sys_labels)
corr = 0.0
for i, sys_label in enumerate(ftb_sys_labels):
gold_label = ftb_gold_labels[i]
if sys_label != gold_label:
mpos_gold = get_main_pos(gold_label, 0)
ftb_errs[mpos_gold] += 1
else:
corr += 1
print("FTB ACC: %.2f%%" % (100 * corr / tot))
tot = len(tdt_sys_labels)
corr = 0
for i, sys_label in enumerate(tdt_sys_labels):
gold_label = tdt_gold_labels[i]
if sys_label != gold_label:
mpos_gold = get_main_pos(gold_label, 1)
tdt_errs[mpos_gold] += 1
else:
corr += 1
print("TDT ACC: %.2f%%" % (100 * corr / tot))
sorted_ftb_errs = sorted(ftb_errs.items(),
key=operator.itemgetter(1))
sorted_ftb_errs.reverse()
sorted_tdt_errs = sorted(tdt_errs.items(),
key=operator.itemgetter(1))
sorted_tdt_errs.reverse()
ftb_err_mat = defaultdict(lambda : defaultdict(lambda : 0.0))
tdt_err_mat = defaultdict(lambda : defaultdict(lambda : 0.0))
top_ftb_labels = [x[0] for x in sorted_ftb_errs[:5]]
top_tdt_labels = [x[0] for x in sorted_tdt_errs[:5]]
for i, sys_label in enumerate(ftb_sys_labels):
gold_label = ftb_gold_labels[i]
if sys_label != gold_label:
mpos_gold = get_main_pos(gold_label, 0)
mpos_sys = get_main_pos(sys_label, 0)
            if mpos_sys not in top_ftb_labels:
                mpos_sys = 'OTHER'
            if mpos_gold not in top_ftb_labels:
                mpos_gold = 'OTHER'
ftb_err_mat[mpos_gold][mpos_sys] += 1
for i, sys_label in enumerate(tdt_sys_labels):
gold_label = tdt_gold_labels[i]
if sys_label != gold_label:
mpos_gold = get_main_pos(gold_label, 1)
mpos_sys = get_main_pos(sys_label, 1)
            if mpos_sys not in top_tdt_labels:
                mpos_sys = 'OTHER'
            if mpos_gold not in top_tdt_labels:
                mpos_gold = 'OTHER'
tdt_err_mat[mpos_gold][mpos_sys] += 1
print_tabular_errs(sorted_tdt_errs)
print()
print_tabular_errs(sorted_ftb_errs)
print()
ftb_total_errs = sum([x[1] for x in sorted_ftb_errs])
tdt_total_errs = sum([x[1] for x in sorted_tdt_errs])
sorted_ftb_errs = sorted_ftb_errs[:5] + [('OTHER', 0)]
sorted_tdt_errs = sorted_tdt_errs[:5] + [('OTHER', 0)]
print_err_mat(tdt_err_mat, sorted_tdt_errs, tdt_total_errs)
print()
print_err_mat(ftb_err_mat, sorted_ftb_errs, ftb_total_errs)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow import flags
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
FLAGS = flags.FLAGS
# Number of steps to train model.
TRAIN_STEPS = 1
CONFIG = tf.ConfigProto(device_count={"GPU": 0})
class UnidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
def __init__(self, *args, **kwargs):
super(UnidirectionalSequenceRnnTest, self).__init__(*args, **kwargs)
# Define constants
# Unrolled through 28 time steps
self.time_steps = 28
# Rows of 28 pixels
self.n_input = 28
# Learning rate for Adam optimizer
self.learning_rate = 0.001
    # MNIST is classified into 10 classes (0-9).
self.n_classes = 10
# Batch size
self.batch_size = 16
# Rnn Units.
self.num_units = 16
def setUp(self):
super(UnidirectionalSequenceRnnTest, self).setUp()
# Import MNIST dataset
data_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
self.mnist = input_data.read_data_sets(data_dir, one_hot=True)
def buildRnnLayer(self):
return tf.keras.layers.StackedRNNCells([
tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn1"),
tf.lite.experimental.nn.TfLiteRNNCell(self.num_units, name="rnn2")
])
def buildModel(self, rnn_layer, is_dynamic_rnn):
"""Build Mnist recognition model.
Args:
rnn_layer: The rnn layer either a single rnn cell or a multi rnn cell.
is_dynamic_rnn: Use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the model.
- Prediction tensor of the model.
- Output class tensor of the model.
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
tf.random_normal([self.num_units, self.n_classes]))
out_bias = tf.Variable(tf.random_normal([self.n_classes]))
# input image placeholder
x = tf.placeholder(
"float", [None, self.time_steps, self.n_input], name="INPUT_IMAGE")
# x is shaped [batch_size,time_steps,num_inputs]
if is_dynamic_rnn:
rnn_input = tf.transpose(x, perm=[1, 0, 2])
outputs, _ = tf.lite.experimental.nn.dynamic_rnn(
rnn_layer, rnn_input, dtype="float32")
outputs = tf.unstack(outputs, axis=0)
else:
rnn_input = tf.unstack(x, self.time_steps, 1)
outputs, _ = tf.nn.static_rnn(rnn_layer, rnn_input, dtype="float32")
# Compute logits by multiplying outputs[-1] of shape [batch_size,num_units]
# by the softmax layer's out_weight of shape [num_units,n_classes]
# plus out_bias
prediction = tf.matmul(outputs[-1], out_weights) + out_bias
output_class = tf.nn.softmax(prediction, name="OUTPUT_CLASS")
return x, prediction, output_class
def trainModel(self, x, prediction, output_class, sess):
"""Train the model.
Args:
x: The input tensor.
prediction: The prediction class tensor.
output_class: The output tensor.
sess: The graph session.
"""
# input label placeholder
y = tf.placeholder("float", [None, self.n_classes])
# Loss function
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
# Optimization
opt = tf.train.AdamOptimizer(
learning_rate=self.learning_rate).minimize(loss)
# Initialize variables
sess.run(tf.global_variables_initializer())
for _ in range(TRAIN_STEPS):
batch_x, batch_y = self.mnist.train.next_batch(
batch_size=self.batch_size, shuffle=False)
batch_x = batch_x.reshape((self.batch_size, self.time_steps,
self.n_input))
sess.run(opt, feed_dict={x: batch_x, y: batch_y})
def saveAndRestoreModel(self, rnn_layer, sess, saver, is_dynamic_rnn):
"""Saves and restores the model to mimic the most common use case.
Args:
rnn_layer: The rnn layer either a single rnn cell or a multi rnn cell.
sess: Old session.
saver: saver created by tf.compat.v1.train.Saver()
is_dynamic_rnn: use dynamic_rnn or not.
Returns:
A tuple containing:
- Input tensor of the restored model.
- Prediction tensor of the restored model.
      - Output tensor, which is the softmax result of the prediction tensor.
- new session of the restored model.
"""
model_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
saver.save(sess, model_dir)
# Reset the graph.
tf.reset_default_graph()
x, prediction, output_class = self.buildModel(rnn_layer, is_dynamic_rnn)
new_sess = tf.Session(config=CONFIG)
saver = tf.train.Saver()
saver.restore(new_sess, model_dir)
return x, prediction, output_class, new_sess
def getInferenceResult(self, x, output_class, sess):
"""Get inference result given input tensor and output tensor.
Args:
x: The input tensor.
output_class: The output tensor.
sess: Current session.
Returns:
A tuple containing:
- Input of the next batch, batch size is 1.
- Expected output.
"""
b1, _ = self.mnist.train.next_batch(batch_size=1)
sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
expected_output = sess.run(output_class, feed_dict={x: sample_input})
return sample_input, expected_output
def tfliteInvoke(self, sess, test_inputs, input_tensor, output_tensor):
"""Get tflite inference result.
This method will convert tensorflow from session to tflite model then based
on the inputs, run tflite inference and return the results.
Args:
sess: Current tensorflow session.
test_inputs: The test inputs for tflite.
input_tensor: The input tensor of tensorflow graph.
output_tensor: The output tensor of tensorflow graph.
Returns:
The tflite inference result.
"""
converter = tf.lite.TFLiteConverter.from_session(sess, [input_tensor],
[output_tensor])
tflite = converter.convert()
interpreter = tf.lite.Interpreter(model_content=tflite)
interpreter.allocate_tensors()
input_index = interpreter.get_input_details()[0]["index"]
interpreter.set_tensor(input_index, test_inputs)
interpreter.invoke()
output_index = interpreter.get_output_details()[0]["index"]
result = interpreter.get_tensor(output_index)
# Reset all variables so it will not pollute other inferences.
interpreter.reset_all_variables()
return result
def testStaticRnnMultiRnnCell(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), is_dynamic_rnn=False)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(), sess, saver, is_dynamic_rnn=False)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
@test_util.enable_control_flow_v2
def testDynamicRnnMultiRnnCell(self):
sess = tf.Session(config=CONFIG)
x, prediction, output_class = self.buildModel(
self.buildRnnLayer(), is_dynamic_rnn=True)
self.trainModel(x, prediction, output_class, sess)
saver = tf.train.Saver()
x, prediction, output_class, new_sess = self.saveAndRestoreModel(
self.buildRnnLayer(), sess, saver, is_dynamic_rnn=True)
test_inputs, expected_output = self.getInferenceResult(
x, output_class, new_sess)
result = self.tfliteInvoke(new_sess, test_inputs, x, output_class)
self.assertTrue(np.allclose(expected_output, result, rtol=1e-6, atol=1e-2))
if __name__ == "__main__":
test.main()
|
|
"""
Test the parallel module.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2010-2011 Gael Varoquaux
# License: BSD Style, 3 clauses.
import time
import sys
import io
import os
try:
import cPickle as pickle
PickleError = TypeError
except:
import pickle
PickleError = pickle.PicklingError
if sys.version_info[0] == 3:
PickleError = pickle.PicklingError
try:
# Python 2/Python 3 compat
unicode('str')
except NameError:
unicode = lambda s: s
from ..parallel import Parallel, delayed, SafeFunction, WorkerInterrupt, \
multiprocessing, cpu_count
from ..my_exceptions import JoblibException
import nose
###############################################################################
def division(x, y):
return x / y
def square(x):
return x ** 2
def exception_raiser(x):
if x == 7:
raise ValueError
return x
def interrupt_raiser(x):
time.sleep(.05)
raise KeyboardInterrupt
def f(x, y=0, z=0):
""" A module-level function so that it can be spawn with
multiprocessing.
"""
return x ** 2 + y + z
###############################################################################
def test_cpu_count():
assert cpu_count() > 0
###############################################################################
# Test parallel
def test_simple_parallel():
X = range(5)
for n_jobs in (1, 2, -1, -2):
yield (nose.tools.assert_equal, [square(x) for x in X],
               Parallel(n_jobs=n_jobs)(
delayed(square)(x) for x in X))
try:
# To smoke-test verbosity, we capture stdout
orig_stdout = sys.stdout
        orig_stderr = sys.stderr
        if sys.version_info[0] == 3:
            sys.stdout = io.StringIO()
            sys.stderr = io.StringIO()
else:
sys.stdout = io.BytesIO()
sys.stderr = io.BytesIO()
for verbose in (2, 11, 100):
Parallel(n_jobs=-1, verbose=verbose)(
delayed(square)(x) for x in X)
Parallel(n_jobs=1, verbose=verbose)(
delayed(square)(x) for x in X)
Parallel(n_jobs=2, verbose=verbose, pre_dispatch=2)(
delayed(square)(x) for x in X)
except Exception as e:
my_stdout = sys.stdout
my_stderr = sys.stderr
sys.stdout = orig_stdout
sys.stderr = orig_stderr
print(unicode(my_stdout.getvalue()))
print(unicode(my_stderr.getvalue()))
raise e
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
def nested_loop():
Parallel(n_jobs=2)(delayed(square)(.01) for _ in range(2))
def test_nested_loop():
Parallel(n_jobs=2)(delayed(nested_loop)() for _ in range(2))
def test_parallel_kwargs():
""" Check the keyword argument processing of pmap.
"""
lst = range(10)
for n_jobs in (1, 4):
yield (nose.tools.assert_equal,
[f(x, y=1) for x in lst],
Parallel(n_jobs=n_jobs)(delayed(f)(x, y=1) for x in lst)
)
def test_parallel_pickling():
""" Check that pmap captures the errors when it is passed an object
that cannot be pickled.
"""
def g(x):
return x ** 2
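# g is defined inside the test function, so the pickler cannot serialize
# it by qualified name; dispatching it must therefore raise PickleError.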
nose.tools.assert_raises(PickleError,
Parallel(),
(delayed(g)(x) for x in range(10))
)
def test_error_capture():
# Check that error are captured, and that correct exceptions
# are raised.
if multiprocessing is not None:
# A JoblibException will be raised only if there is indeed
# multiprocessing
nose.tools.assert_raises(JoblibException,
Parallel(n_jobs=2),
[delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))],
)
nose.tools.assert_raises(WorkerInterrupt,
Parallel(n_jobs=2),
[delayed(interrupt_raiser)(x) for x in (1, 0)],
)
else:
nose.tools.assert_raises(KeyboardInterrupt,
Parallel(n_jobs=2),
[delayed(interrupt_raiser)(x) for x in (1, 0)],
)
nose.tools.assert_raises(ZeroDivisionError,
Parallel(n_jobs=2),
[delayed(division)(x, y) for x, y in zip((0, 1), (1, 0))],
)
try:
ex = JoblibException()
Parallel(n_jobs=1)(
delayed(division)(x, y) for x, y in zip((0, 1), (1, 0)))
except Exception:
# Cannot use 'except as' to maintain Python 2.5 compatibility
ex = sys.exc_info()[1]
nose.tools.assert_false(isinstance(ex, JoblibException))
class Counter(object):
def __init__(self, list1, list2):
self.list1 = list1
self.list2 = list2
def __call__(self, i):
self.list1.append(i)
nose.tools.assert_equal(len(self.list1), len(self.list2))
def consumer(queue, item):
queue.append('Consumed %s' % item)
def test_dispatch_one_job():
""" Test that with only one job, Parallel does act as a iterator.
"""
queue = list()
def producer():
for i in range(6):
queue.append('Produced %i' % i)
yield i
Parallel(n_jobs=1)(delayed(consumer)(queue, x) for x in producer())
nose.tools.assert_equal(queue,
['Produced 0', 'Consumed 0',
'Produced 1', 'Consumed 1',
'Produced 2', 'Consumed 2',
'Produced 3', 'Consumed 3',
'Produced 4', 'Consumed 4',
'Produced 5', 'Consumed 5']
)
nose.tools.assert_equal(len(queue), 12)
def test_dispatch_multiprocessing():
""" Check that using pre_dispatch Parallel does indeed dispatch items
lazily.
"""
if multiprocessing is None:
raise nose.SkipTest()
manager = multiprocessing.Manager()
queue = manager.list()
def producer():
for i in range(6):
queue.append('Produced %i' % i)
yield i
Parallel(n_jobs=2, pre_dispatch=3)(delayed(consumer)(queue, i)
for i in producer())
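# With pre_dispatch=3, three items are pulled from the generator up front
# and further items are dispatched only as results are consumed, so the
# queue must begin with three 'Produced' entries before the first
# 'Consumed' entry.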
nose.tools.assert_equal(list(queue)[:4],
['Produced 0', 'Produced 1', 'Produced 2',
'Consumed 0', ])
nose.tools.assert_equal(len(queue), 12)
def test_exception_dispatch():
"Make sure that exception raised during dispatch are indeed captured"
nose.tools.assert_raises(
ValueError,
Parallel(n_jobs=6, pre_dispatch=16, verbose=0),
(delayed(exception_raiser)(i) for i in range(30)),
)
def _reload_joblib():
# Retrieve the path of the parallel module in a robust way
joblib_path = Parallel.__module__.split(os.sep)
joblib_path = joblib_path[:1]
joblib_path.append('parallel.py')
joblib_path = '/'.join(joblib_path)
module = __import__(joblib_path)
# Reload the module. This should trigger a fail
reload(module)
def test_multiple_spawning():
# Test that attempting to launch a new Python after spawned
# subprocesses will raise an error, to avoid infinite loops on
# systems that do not support fork
if not int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)):
raise nose.SkipTest()
nose.tools.assert_raises(ImportError, Parallel(n_jobs=2),
[delayed(_reload_joblib)() for i in range(10)])
###############################################################################
# Test helpers
def test_joblib_exception():
# Smoke-test the custom exception
e = JoblibException('foobar')
# Test the repr
repr(e)
# Test the pickle
pickle.dumps(e)
def test_safe_function():
safe_division = SafeFunction(division)
nose.tools.assert_raises(JoblibException, safe_division, 1, 0)
|
|
from direct.actor import Actor
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import FSM
from direct.fsm import State
from direct.interval.IntervalGlobal import *
from direct.showbase.PythonUtil import Functor
from direct.task.Task import Task
from pandac.PandaModules import *
import string
import types
import Suit
import SuitDNA
from otp.avatar import Avatar
from toontown.battle import BattleParticles
from toontown.battle import BattleProps
from toontown.nametag import NametagGlobals
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
GenericModel = 'phase_9/models/char/bossCog'
ModelDict = {'s': 'phase_9/models/char/sellbotBoss',
'm': 'phase_10/models/char/cashbotBoss',
'l': 'phase_11/models/char/lawbotBoss',
'c': 'phase_12/models/char/bossbotBoss'}
AnimList = ('Ff_speech', 'ltTurn2Wave', 'wave', 'Ff_lookRt', 'turn2Fb', 'Ff_neutral', 'Bb_neutral', 'Ff2Bb_spin', 'Bb2Ff_spin', 'Fb_neutral', 'Bf_neutral', 'Fb_firstHit', 'Fb_downNeutral', 'Fb_downHit', 'Fb_fall', 'Fb_down2Up', 'Fb_downLtSwing', 'Fb_downRtSwing', 'Fb_DownThrow', 'Fb_UpThrow', 'Fb_jump', 'golf_swing')
class BossCog(Avatar.Avatar):
notify = DirectNotifyGlobal.directNotify.newCategory('BossCog')
healthColors = Suit.Suit.healthColors
healthGlowColors = Suit.Suit.healthGlowColors
def __init__(self):
Avatar.Avatar.__init__(self)
self.setFont(ToontownGlobals.getSuitFont())
self.setPlayerType(NametagGlobals.CCSuit)
self.setPickable(0)
self.doorA = None
self.doorB = None
self.bubbleL = None
self.bubbleR = None
self.raised = 1
self.forward = 1
self.happy = 1
self.dizzy = 0
self.nowRaised = 1
self.nowForward = 1
self.nowHappy = 1
self.currentAnimIval = None
self.queuedAnimIvals = []
self.treadsLeftPos = 0
self.treadsRightPos = 0
self.healthBar = None
self.healthCondition = 0
self.animDoneEvent = 'BossCogAnimDone'
self.animIvalName = 'BossCogAnimIval'
return
def delete(self):
Avatar.Avatar.delete(self)
self.removeHealthBar()
self.setDizzy(0)
self.stopAnimate()
if self.doorA:
self.doorA.request('Off')
self.doorB.request('Off')
self.doorA = None
self.doorB = None
return
def setDNAString(self, dnaString):
self.dna = SuitDNA.SuitDNA()
self.dna.makeFromNetString(dnaString)
self.setDNA(self.dna)
def setDNA(self, dna):
if not self.style:
self.style = dna
self.generateBossCog()
self.initializeDropShadow()
if base.wantNametags:
self.initializeNametag3d()
def generateBossCog(self):
self.throwSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_frisbee_gears.ogg')
self.swingSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_swipe.ogg')
self.spinSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_spin.ogg')
self.rainGearsSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_raining_gears.ogg')
self.swishSfx = loader.loadSfx('phase_5/audio/sfx/General_throw_miss.ogg')
self.boomSfx = loader.loadSfx('phase_3.5/audio/sfx/ENC_cogfall_apart.ogg')
self.deathSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_big_death.ogg')
self.upSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_raise_up.ogg')
self.downSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_collapse.ogg')
self.reelSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_reeling_backwards.ogg')
self.birdsSfx = loader.loadSfx('phase_4/audio/sfx/SZ_TC_bird1.ogg')
self.dizzyAlert = loader.loadSfx('phase_5/audio/sfx/AA_sound_aoogah.ogg')
self.grunt = loader.loadSfx('phase_9/audio/sfx/Boss_COG_VO_grunt.ogg')
self.murmur = loader.loadSfx('phase_9/audio/sfx/Boss_COG_VO_murmur.ogg')
self.statement = loader.loadSfx('phase_9/audio/sfx/Boss_COG_VO_statement.ogg')
self.question = loader.loadSfx('phase_9/audio/sfx/Boss_COG_VO_question.ogg')
self.dialogArray = [self.grunt,
self.murmur,
self.statement,
self.question,
self.statement,
self.statement]
dna = self.style
filePrefix = ModelDict[dna.dept]
self.loadModel(GenericModel + '-legs-zero', 'legs')
self.loadModel(filePrefix + '-torso-zero', 'torso')
self.loadModel(filePrefix + '-head-zero', 'head')
self.twoFaced = dna.dept == 's'
self.attach('head', 'torso', 'joint34')
self.attach('torso', 'legs', 'joint_pelvis')
self.rotateNode = self.attachNewNode('rotate')
geomNode = self.getGeomNode()
geomNode.reparentTo(self.rotateNode)
self.frontAttack = self.rotateNode.attachNewNode('frontAttack')
self.frontAttack.setPos(0, -10, 10)
self.frontAttack.setScale(2)
self.setHeight(26)
self.nametag3d.setScale(2)
for partName in ('legs', 'torso', 'head'):
animDict = {}
for anim in AnimList:
animDict[anim] = '%s-%s-%s' % (GenericModel, partName, anim)
self.loadAnims(animDict, partName)
self.stars = BattleProps.globalPropPool.getProp('stun')
self.stars.setPosHprScale(7, 0, 0, 0, 0, -90, 3, 3, 3)
self.stars.loop('stun')
self.pelvis = self.getPart('torso')
self.pelvisForwardHpr = VBase3(0, 0, 0)
self.pelvisReversedHpr = VBase3(-180, 0, 0)
self.neck = self.getPart('head')
self.neckForwardHpr = VBase3(0, 0, 0)
self.neckReversedHpr = VBase3(0, -540, 0)
self.axle = self.find('**/joint_axle')
self.doorA = self.__setupDoor('**/joint_doorFront', 'doorA', self.doorACallback, VBase3(0, 0, 0), VBase3(0, 0, -80), CollisionPolygon(Point3(5, -4, 0.32), Point3(0, -4, 0), Point3(0, 4, 0), Point3(5, 4, 0.32)))
self.doorB = self.__setupDoor('**/joint_doorRear', 'doorB', self.doorBCallback, VBase3(0, 0, 0), VBase3(0, 0, 80), CollisionPolygon(Point3(-5, 4, 0.84), Point3(0, 4, 0), Point3(0, -4, 0), Point3(-5, -4, 0.84)))
treadsModel = loader.loadModel('%s-treads' % GenericModel)
treadsModel.reparentTo(self.axle)
self.treadsLeft = treadsModel.find('**/right_tread')
self.treadsRight = treadsModel.find('**/left_tread')
self.doorA.request('Closed')
self.doorB.request('Closed')
def initializeBodyCollisions(self, collIdStr):
Avatar.Avatar.initializeBodyCollisions(self, collIdStr)
if not self.ghostMode:
self.collNode.setCollideMask(self.collNode.getIntoCollideMask() | ToontownGlobals.PieBitmask)
def generateHealthBar(self):
self.removeHealthBar()
chestNull = self.find('**/joint_lifeMeter')
if chestNull.isEmpty():
return
model = loader.loadModel('phase_3.5/models/gui/matching_game_gui')
button = model.find('**/minnieCircle')
button.setScale(6.0)
button.setP(-20)
button.setColor(self.healthColors[0])
button.reparentTo(chestNull)
self.healthBar = button
glow = BattleProps.globalPropPool.getProp('glow')
glow.reparentTo(self.healthBar)
glow.setScale(0.28)
glow.setPos(-0.005, 0.01, 0.015)
glow.setColor(self.healthGlowColors[0])
button.flattenLight()
self.healthBarGlow = glow
self.healthCondition = 0
def updateHealthBar(self):
if self.healthBar is None:
return
health = 1.0 - float(self.bossDamage) / float(self.bossMaxDamage)
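# Map the remaining health fraction to a discrete condition level; the
# last two levels additionally start the blinking tasks below.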
if health > 0.95:
condition = 0
elif health > 0.7:
condition = 1
elif health > 0.3:
condition = 2
elif health > 0.05:
condition = 3
elif health > 0.0:
condition = 4
else:
condition = 5
if self.healthCondition != condition:
if condition == 4:
blinkTask = Task.loop(Task(self.__blinkRed), Task.pause(0.75), Task(self.__blinkGray), Task.pause(0.1))
taskMgr.add(blinkTask, self.uniqueName('blink-task'))
elif condition == 5:
if self.healthCondition == 4:
taskMgr.remove(self.uniqueName('blink-task'))
blinkTask = Task.loop(Task(self.__blinkRed), Task.pause(0.25), Task(self.__blinkGray), Task.pause(0.1))
taskMgr.add(blinkTask, self.uniqueName('blink-task'))
else:
self.healthBar.setColor(self.healthColors[condition], 1)
self.healthBarGlow.setColor(self.healthGlowColors[condition], 1)
self.healthCondition = condition
return
def __blinkRed(self, task):
self.healthBar.setColor(self.healthColors[3], 1)
self.healthBarGlow.setColor(self.healthGlowColors[3], 1)
if self.healthCondition == 5:
self.healthBar.setScale(1.17)
return Task.done
def __blinkGray(self, task):
self.healthBar.setColor(self.healthColors[4], 1)
self.healthBarGlow.setColor(self.healthGlowColors[4], 1)
if self.healthCondition == 5:
self.healthBar.setScale(1.0)
return Task.done
def removeHealthBar(self):
if self.healthBar:
self.healthBar.removeNode()
self.healthBar = None
if self.healthCondition == 4 or self.healthCondition == 5:
taskMgr.remove(self.uniqueName('blink-task'))
self.healthCondition = 0
return
def reverseHead(self):
self.neck.setHpr(self.neckReversedHpr)
def forwardHead(self):
self.neck.setHpr(self.neckForwardHpr)
def reverseBody(self):
self.pelvis.setHpr(self.pelvisReversedHpr)
def forwardBody(self):
self.pelvis.setHpr(self.pelvisForwardHpr)
def getShadowJoint(self):
return self.getGeomNode()
def getNametagJoints(self):
return []
def getDialogueArray(self):
return self.dialogArray
def doorACallback(self, isOpen):
pass
def doorBCallback(self, isOpen):
pass
def __rollTreadsInterval(self, object, start = 0, duration = 0, rate = 1):
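# The treads are "rolled" by scrolling the tread texture's offset over
# time rather than by animating geometry; `rate` is the texture offset
# advanced per second.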
def rollTexMatrix(t, object = object):
object.setTexOffset(TextureStage.getDefault(), t, 0)
return LerpFunctionInterval(rollTexMatrix, fromData=start, toData=start + rate * duration, duration=duration)
def rollLeftTreads(self, duration, rate):
start = self.treadsLeftPos
self.treadsLeftPos += duration * rate
return self.__rollTreadsInterval(self.treadsLeft, start=start, duration=duration, rate=rate)
def rollRightTreads(self, duration, rate):
start = self.treadsRightPos
self.treadsRightPos += duration * rate
return self.__rollTreadsInterval(self.treadsRight, start=start, duration=duration, rate=rate)
class DoorFSM(FSM.FSM):
def __init__(self, name, animate, callback, openedHpr, closedHpr, uniqueName):
FSM.FSM.__init__(self, name)
self.animate = animate
self.callback = callback
self.openedHpr = openedHpr
self.closedHpr = closedHpr
self.uniqueName = uniqueName
self.ival = 0
self.openSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_door_open.ogg')
self.closeSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_door_close.ogg')
self.request('Closed')
def filterOpening(self, request, args):
if request == 'close':
return 'Closing'
return self.defaultFilter(request, args)
def enterOpening(self):
intervalName = self.uniqueName('open-%s' % self.animate.getName())
self.callback(0)
ival = Parallel(SoundInterval(self.openSfx, node=self.animate, volume=0.2), self.animate.hprInterval(1, self.openedHpr, blendType='easeInOut'), Sequence(Wait(0.2), Func(self.callback, 1)), name=intervalName)
ival.start()
self.ival = ival
def exitOpening(self):
self.ival.pause()
self.ival = None
return
def filterOpened(self, request, args):
if request == 'close':
return 'Closing'
return self.defaultFilter(request, args)
def enterOpened(self):
self.animate.setHpr(self.openedHpr)
self.callback(1)
def filterClosing(self, request, args):
if request == 'open':
return 'Opening'
return self.defaultFilter(request, args)
def enterClosing(self):
intervalName = self.uniqueName('close-%s' % self.animate.getName())
self.callback(1)
ival = Parallel(SoundInterval(self.closeSfx, node=self.animate, volume=0.2), self.animate.hprInterval(1, self.closedHpr, blendType='easeInOut'), Sequence(Wait(0.8), Func(self.callback, 0)), name=intervalName)
ival.start()
self.ival = ival
def exitClosing(self):
self.ival.pause()
self.ival = None
return
def filterClosed(self, request, args):
if request == 'open':
return 'Opening'
return self.defaultFilter(request, args)
def enterClosed(self):
self.animate.setHpr(self.closedHpr)
self.callback(0)
def __setupDoor(self, jointName, name, callback, openedHpr, closedHpr, cPoly):
joint = self.find(jointName)
children = joint.getChildren()
animate = joint.attachNewNode(name)
children.reparentTo(animate)
cnode = CollisionNode('BossZap')
cnode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask | ToontownGlobals.CameraBitmask)
cnode.addSolid(cPoly)
animate.attachNewNode(cnode)
fsm = self.DoorFSM(name, animate, callback, openedHpr, closedHpr, self.uniqueName)
return fsm
def doAnimate(self, anim = None, now = 0, queueNeutral = 1, raised = None, forward = None, happy = None):
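# Queue the transition intervals needed to reach the requested
# raised/forward/happy pose, then play anim; with now=1 any current
# animation is interrupted first.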
if now:
self.stopAnimate()
if not self.twoFaced:
happy = 1
if raised is None:
raised = self.raised
if forward is None:
forward = self.forward
if happy is None:
happy = self.happy
if now:
self.raised = raised
self.forward = forward
self.happy = happy
if self.currentAnimIval is None:
self.accept(self.animDoneEvent, self.__getNextAnim)
else:
queueNeutral = 0
ival, changed = self.__getAnimIval(anim, raised, forward, happy)
if changed or queueNeutral:
self.queuedAnimIvals.append((ival,
self.raised,
self.forward,
self.happy))
if self.currentAnimIval is None:
self.__getNextAnim()
return
def stopAnimate(self):
self.ignore(self.animDoneEvent)
self.queuedAnimIvals = []
if self.currentAnimIval:
self.currentAnimIval.setDoneEvent('')
self.currentAnimIval.finish()
self.currentAnimIval = None
self.raised = self.nowRaised
self.forward = self.nowForward
self.happy = self.nowHappy
return
def __getNextAnim(self):
if self.queuedAnimIvals:
ival, raised, forward, happy = self.queuedAnimIvals[0]
del self.queuedAnimIvals[0]
else:
ival, changed = self.__getAnimIval(None, self.raised, self.forward, self.happy)
raised = self.raised
forward = self.forward
happy = self.happy
if self.currentAnimIval:
self.currentAnimIval.setDoneEvent('')
self.currentAnimIval.finish()
self.currentAnimIval = ival
self.currentAnimIval.start()
self.nowRaised = raised
self.nowForward = forward
self.nowHappy = happy
return
def __getAnimIval(self, anim, raised, forward, happy):
ival, changed = self.__doGetAnimIval(anim, raised, forward, happy)
seq = Sequence(ival, name=self.animIvalName)
seq.setDoneEvent(self.animDoneEvent)
return (seq, changed)
def __doGetAnimIval(self, anim, raised, forward, happy):
if raised == self.raised and forward == self.forward and happy == self.happy:
return (self.getAnim(anim), anim is not None)
startsHappy = self.happy
endsHappy = self.happy
ival = Sequence()
if raised and not self.raised:
upIval = self.getAngryActorInterval('Fb_down2Up')
if self.forward:
ival = upIval
else:
ival = Sequence(Func(self.reverseBody), upIval, Func(self.forwardBody))
ival = Parallel(SoundInterval(self.upSfx, node=self), ival)
if forward != self.forward:
if forward:
animName = 'Bb2Ff_spin'
else:
animName = 'Ff2Bb_spin'
ival = Sequence(ival, ActorInterval(self, animName))
startsHappy = 1
endsHappy = 1
startNeckHpr = self.neckForwardHpr
endNeckHpr = self.neckForwardHpr
if self.happy != startsHappy:
startNeckHpr = self.neckReversedHpr
if happy != endsHappy:
endNeckHpr = self.neckReversedHpr
if startNeckHpr != endNeckHpr:
ival = Sequence(Func(self.neck.setHpr, startNeckHpr), ParallelEndTogether(ival, Sequence(self.neck.hprInterval(0.5, endNeckHpr, startHpr=startNeckHpr, blendType='easeInOut'), Func(self.neck.setHpr, self.neckForwardHpr))))
elif endNeckHpr != self.neckForwardHpr:
ival = Sequence(Func(self.neck.setHpr, startNeckHpr), ival, Func(self.neck.setHpr, self.neckForwardHpr))
if not raised and self.raised:
downIval = self.getAngryActorInterval('Fb_down2Up', playRate=-1)
if forward:
ival = Sequence(ival, downIval)
else:
ival = Sequence(ival, Func(self.reverseBody), downIval, Func(self.forwardBody))
ival = Parallel(SoundInterval(self.downSfx, node=self), ival)
self.raised = raised
self.forward = forward
self.happy = happy
if anim is not None:
ival = Sequence(ival, self.getAnim(anim))
return (ival, 1)
def setDizzy(self, dizzy):
if dizzy and not self.dizzy:
base.playSfx(self.dizzyAlert)
self.dizzy = dizzy
if dizzy:
self.stars.reparentTo(self.neck)
base.playSfx(self.birdsSfx, looping=1)
else:
self.stars.detachNode()
self.birdsSfx.stop()
def getAngryActorInterval(self, animName, **kw):
if self.happy:
ival = Sequence(Func(self.reverseHead), ActorInterval(self, animName, **kw), Func(self.forwardHead))
else:
ival = ActorInterval(self, animName, **kw)
return ival
def getAnim(self, anim):
ival = None
if anim is None:
partName = None
if self.happy:
animName = 'Ff_neutral'
else:
animName = 'Fb_neutral'
if self.raised:
ival = ActorInterval(self, animName)
else:
ival = Parallel(ActorInterval(self, animName, partName=['torso', 'head']), ActorInterval(self, 'Fb_downNeutral', partName='legs'))
if not self.forward:
ival = Sequence(Func(self.reverseBody), ival, Func(self.forwardBody))
elif anim == 'down2Up':
ival = Parallel(SoundInterval(self.upSfx, node=self), self.getAngryActorInterval('Fb_down2Up'))
self.raised = 1
elif anim == 'up2Down':
ival = Parallel(SoundInterval(self.downSfx, node=self), self.getAngryActorInterval('Fb_down2Up', playRate=-1))
self.raised = 0
elif anim == 'throw':
self.doAnimate(None, raised=1, happy=0, queueNeutral=0)
ival = Parallel(Sequence(SoundInterval(self.throwSfx, node=self), duration=0), self.getAngryActorInterval('Fb_UpThrow'))
elif anim == 'hit':
if self.raised:
self.raised = 0
ival = self.getAngryActorInterval('Fb_firstHit')
else:
ival = self.getAngryActorInterval('Fb_downHit')
ival = Parallel(SoundInterval(self.reelSfx, node=self), ival)
elif anim == 'ltSwing' or anim == 'rtSwing':
self.doAnimate(None, raised=0, happy=0, queueNeutral=0)
if anim == 'ltSwing':
ival = Sequence(Track((0, self.getAngryActorInterval('Fb_downLtSwing')), (0.9, SoundInterval(self.swingSfx, node=self)), (1, Func(self.bubbleL.unstash))), Func(self.bubbleL.stash))
else:
ival = Sequence(Track((0, self.getAngryActorInterval('Fb_downRtSwing')), (0.9, SoundInterval(self.swingSfx, node=self)), (1, Func(self.bubbleR.unstash))), Func(self.bubbleR.stash))
elif anim == 'frontAttack':
self.doAnimate(None, raised=1, happy=0, queueNeutral=0)
pe = BattleParticles.loadParticleFile('bossCogFrontAttack.ptf')
ival = Sequence(Func(self.reverseHead), ActorInterval(self, 'Bb2Ff_spin'), Func(self.forwardHead))
if self.forward:
ival = Sequence(Func(self.reverseBody), ParallelEndTogether(ival, self.pelvis.hprInterval(0.5, self.pelvisForwardHpr, blendType='easeInOut')))
ival = Sequence(Track((0, ival), (0, SoundInterval(self.spinSfx, node=self)), (0.9, Parallel(SoundInterval(self.rainGearsSfx, node=self), ParticleInterval(pe, self.frontAttack, worldRelative=0, duration=1.5, cleanup=True), duration=0)), (1.9, Func(self.bubbleF.unstash))), Func(self.bubbleF.stash))
self.forward = 1
self.happy = 0
self.raised = 1
elif anim == 'areaAttack':
if self.twoFaced:
self.doAnimate(None, raised=1, happy=0, queueNeutral=0)
else:
self.doAnimate(None, raised=1, happy=1, queueNeutral=1)
ival = Parallel(ActorInterval(self, 'Fb_jump'), Sequence(SoundInterval(self.swishSfx, duration=1.1, node=self), SoundInterval(self.boomSfx, duration=1.9)), Sequence(Wait(1.21), Func(self.announceAreaAttack)))
if self.twoFaced:
self.happy = 0
else:
self.happy = 1
self.raised = 1
elif anim == 'Fb_fall':
ival = Parallel(ActorInterval(self, 'Fb_fall'), Sequence(SoundInterval(self.reelSfx, node=self), SoundInterval(self.deathSfx)))
elif isinstance(anim, types.StringType):
ival = ActorInterval(self, anim)
else:
ival = anim
return ival
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Train and eval DrQ (SAC) and PSEs + DrQ (SAC)."""
import os
from absl import logging
import gin
import tensorflow.compat.v2 as tf
from tf_agents.policies import py_tf_eager_policy
from tf_agents.policies import random_py_policy
from tf_agents.train import actor
from tf_agents.train import learner
from tf_agents.train import triggers
from tf_agents.train.utils import replay_buffer_utils
from tf_agents.train.utils import spec_utils
from tf_agents.train.utils import train_utils
from tf_agents.utils import common
from pse.dm_control.agents import pse_drq_agent
from pse.dm_control.utils import dataset_utils
from pse.dm_control.utils import env_utils
from pse.dm_control.utils import model_utils
from pse.dm_control.utils import networks
def img_summary(experience, summary_writer, train_step):
"""Generates image summaries for the augmented images."""
obs = experience['experience'].observation['pixels']
if experience['augmented_obs']:
aug_obs = experience['augmented_obs'][0]['pixels']
aug_next_obs = experience['augmented_next_obs'][0]['pixels']
images = tf.stack([
obs[0, :, :, 0:3],
aug_obs[:, :, 0:3],
aug_next_obs[:, :, 0:3],
], axis=0)
else:
images = tf.expand_dims(obs[0, Ellipsis, 0:3], axis=0)
with summary_writer.as_default(), \
common.soft_device_placement(), \
tf.compat.v2.summary.record_if(True):
tf.summary.image('Sample crops', images, max_outputs=10, step=train_step)
def contrastive_img_summary(episode_tuple, agent, summary_writer, train_step):
"""Generates image summaries for the augmented images."""
_, sim_matrix = agent.contrastive_metric_loss(
episode_tuple, return_representation=True)
sim_matrix = tf.expand_dims(tf.expand_dims(sim_matrix, axis=0), axis=-1)
with summary_writer.as_default(), \
common.soft_device_placement(), \
tf.compat.v2.summary.record_if(True):
tf.summary.image('Sim matrix', sim_matrix, step=train_step)
@gin.configurable(module='drq_agent')
def train_eval(
root_dir,
# Dataset params
env_name,
data_dir=None,
load_pretrained=False,
pretrained_model_dir=None,
img_pad=4,
frame_shape=(84, 84, 3),
frame_stack=3,
num_augmentations=2, # K and M in DrQ
# Training params
contrastive_loss_weight=1.0,
contrastive_loss_temperature=0.5,
image_encoder_representation=True,
initial_collect_steps=1000,
num_train_steps=3000000,
actor_fc_layers=(1024, 1024),
critic_joint_fc_layers=(1024, 1024),
# Agent params
batch_size=256,
actor_learning_rate=1e-3,
critic_learning_rate=1e-3,
alpha_learning_rate=1e-3,
encoder_learning_rate=1e-3,
actor_update_freq=2,
gamma=0.99,
target_update_tau=0.01,
target_update_period=2,
reward_scale_factor=1.0,
# Replay params
reverb_port=None,
replay_capacity=100000,
# Others
checkpoint_interval=10000,
policy_save_interval=5000,
eval_interval=10000,
summary_interval=250,
debug_summaries=False,
eval_episodes_per_run=10,
summarize_grads_and_vars=False):
"""Trains and evaluates SAC."""
collect_env = env_utils.load_dm_env_for_training(
env_name,
frame_shape,
frame_stack=frame_stack)
eval_env = env_utils.load_dm_env_for_eval(
env_name,
frame_shape,
frame_stack=frame_stack)
logging.info('Data directory: %s', data_dir)
logging.info('Num train steps: %d', num_train_steps)
logging.info('Contrastive loss coeff: %.2f', contrastive_loss_weight)
logging.info(
'Contrastive loss temperature: %.4f', contrastive_loss_temperature)
logging.info('load_pretrained: %s', 'yes' if load_pretrained else 'no')
logging.info('encoder representation: %s',
'yes' if image_encoder_representation else 'no')
load_episode_data = (contrastive_loss_weight > 0)
observation_tensor_spec, action_tensor_spec, time_step_tensor_spec = (
spec_utils.get_tensor_specs(collect_env))
train_step = train_utils.create_train_step()
image_encoder = networks.ImageEncoder(observation_tensor_spec)
actor_net = model_utils.Actor(
observation_tensor_spec,
action_tensor_spec,
image_encoder=image_encoder,
fc_layers=actor_fc_layers,
image_encoder_representation=image_encoder_representation)
critic_net = networks.Critic((observation_tensor_spec, action_tensor_spec),
image_encoder=image_encoder,
joint_fc_layers=critic_joint_fc_layers)
critic_net_2 = networks.Critic((observation_tensor_spec, action_tensor_spec),
image_encoder=image_encoder,
joint_fc_layers=critic_joint_fc_layers)
target_image_encoder = networks.ImageEncoder(observation_tensor_spec)
target_critic_net_1 = networks.Critic(
(observation_tensor_spec, action_tensor_spec),
image_encoder=target_image_encoder)
target_critic_net_2 = networks.Critic(
(observation_tensor_spec, action_tensor_spec),
image_encoder=target_image_encoder)
agent = pse_drq_agent.DrQSacModifiedAgent(
time_step_tensor_spec,
action_tensor_spec,
actor_network=actor_net,
critic_network=critic_net,
critic_network_2=critic_net_2,
target_critic_network=target_critic_net_1,
target_critic_network_2=target_critic_net_2,
actor_update_frequency=actor_update_freq,
actor_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=actor_learning_rate),
critic_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=critic_learning_rate),
alpha_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=alpha_learning_rate),
contrastive_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=encoder_learning_rate),
contrastive_loss_weight=contrastive_loss_weight,
contrastive_loss_temperature=contrastive_loss_temperature,
target_update_tau=target_update_tau,
target_update_period=target_update_period,
td_errors_loss_fn=tf.math.squared_difference,
gamma=gamma,
reward_scale_factor=reward_scale_factor,
use_log_alpha_in_alpha_loss=False,
gradient_clipping=None,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=train_step,
num_augmentations=num_augmentations)
agent.initialize()
# Setup the replay buffer.
reverb_replay, rb_observer = (
replay_buffer_utils.get_reverb_buffer_and_observer(
agent.collect_data_spec,
sequence_length=2,
replay_capacity=replay_capacity,
port=reverb_port))
# pylint: disable=g-long-lambda
if num_augmentations == 0:
image_aug = lambda traj, meta: (dict(
experience=traj, augmented_obs=[], augmented_next_obs=[]), meta)
else:
image_aug = lambda traj, meta: pse_drq_agent.image_aug(
traj, meta, img_pad, num_augmentations)
augmented_dataset = reverb_replay.as_dataset(
sample_batch_size=batch_size, num_steps=2).unbatch().map(
image_aug, num_parallel_calls=3)
augmented_iterator = iter(augmented_dataset)
trajs = augmented_dataset.batch(batch_size).prefetch(50)
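# `trajs` yields batches of (augmented experience dict, sample info)
# pairs; when episode data is needed for the contrastive loss they are
# zipped with full episodes below.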
if load_episode_data:
# Load full episodes and zip them
episodes = dataset_utils.load_episodes(
os.path.join(data_dir, 'episodes2'), img_pad)
episode_iterator = iter(episodes)
dataset = tf.data.Dataset.zip((trajs, episodes)).prefetch(10)
else:
dataset = trajs
experience_dataset_fn = lambda: dataset
saved_model_dir = os.path.join(root_dir, learner.POLICY_SAVED_MODEL_DIR)
learning_triggers = [
triggers.PolicySavedModelTrigger(
saved_model_dir, agent, train_step, interval=policy_save_interval),
triggers.StepPerSecondLogTrigger(train_step, interval=summary_interval),
]
agent_learner = model_utils.Learner(
root_dir,
train_step,
agent,
experience_dataset_fn=experience_dataset_fn,
triggers=learning_triggers,
checkpoint_interval=checkpoint_interval,
summary_interval=summary_interval,
load_episode_data=load_episode_data,
use_kwargs_in_agent_train=True,
# Turn off the initialization of the optimizer variables since the agent
# expects different batching for the `training_data_spec` and
# `train_argspec`, which can't be handled in general by the initialization
# logic in the learner.
run_optimizer_variable_init=False)
# If we haven't trained yet make sure we collect some random samples first to
# fill up the Replay Buffer with some experience.
train_dir = os.path.join(root_dir, learner.TRAIN_DIR)
# Code for loading pretrained policy.
if load_pretrained:
# Note that num_train_steps is the same as the max_train_step at which we
# want to load the pretrained policy for our experiments.
pretrained_policy = model_utils.load_pretrained_policy(
pretrained_model_dir, num_train_steps)
initial_collect_policy = pretrained_policy
agent.policy.update_partial(pretrained_policy)
agent.collect_policy.update_partial(pretrained_policy)
logging.info('Restored pretrained policy.')
else:
initial_collect_policy = random_py_policy.RandomPyPolicy(
collect_env.time_step_spec(), collect_env.action_spec())
initial_collect_actor = actor.Actor(
collect_env,
initial_collect_policy,
train_step,
steps_per_run=initial_collect_steps,
observers=[rb_observer])
logging.info('Doing initial collect.')
initial_collect_actor.run()
tf_collect_policy = agent.collect_policy
collect_policy = py_tf_eager_policy.PyTFEagerPolicy(
tf_collect_policy, use_tf_function=True)
collect_actor = actor.Actor(
collect_env,
collect_policy,
train_step,
steps_per_run=1,
observers=[rb_observer],
metrics=actor.collect_metrics(buffer_size=10),
summary_dir=train_dir,
summary_interval=summary_interval,
name='CollectActor')
# If restarting with train_step > 0, the replay buffer will be empty
# except for random experience. Populate the buffer with some on-policy
# experience.
if load_pretrained or (agent_learner.train_step_numpy > 0):
for _ in range(batch_size * 50):
collect_actor.run()
tf_greedy_policy = agent.policy
greedy_policy = py_tf_eager_policy.PyTFEagerPolicy(
tf_greedy_policy, use_tf_function=True)
eval_actor = actor.Actor(
eval_env,
greedy_policy,
train_step,
episodes_per_run=eval_episodes_per_run,
metrics=actor.eval_metrics(buffer_size=10),
summary_dir=os.path.join(root_dir, 'eval'),
summary_interval=-1,
name='EvalTrainActor')
if eval_interval:
logging.info('Evaluating.')
img_summary(
next(augmented_iterator)[0], eval_actor.summary_writer, train_step)
if load_episode_data:
contrastive_img_summary(
next(episode_iterator), agent, eval_actor.summary_writer, train_step)
eval_actor.run_and_log()
logging.info('Saving operative gin config file.')
gin_path = os.path.join(train_dir, 'train_operative_gin_config.txt')
with tf.io.gfile.GFile(gin_path, mode='w') as f:
f.write(gin.operative_config_str())
logging.info('Training starting at: %r', train_step.numpy())
while train_step < num_train_steps:
collect_actor.run()
agent_learner.run(iterations=1)
if (not eval_interval) and (train_step % 10000 == 0):
img_summary(
next(augmented_iterator)[0], agent_learner.train_summary_writer,
train_step)
if eval_interval and agent_learner.train_step_numpy % eval_interval == 0:
logging.info('Evaluating.')
img_summary(
next(augmented_iterator)[0], eval_actor.summary_writer, train_step)
if load_episode_data:
contrastive_img_summary(next(episode_iterator), agent,
eval_actor.summary_writer, train_step)
eval_actor.run_and_log()
|
|
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Groups scattered swig generated classes under a single class.
"""
import os
import re
from absl import app
from absl import flags
from absl import logging
FLAGS = flags.FLAGS
flags.DEFINE_string("csharp_dir", None,
"Path with generated csharp proxy classes to be processed.")
NAMESPACE_RE = re.compile(r"^\s*namespace\s+(.*)\s+{\s*$", re.MULTILINE)
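# Illustrative example: NAMESPACE_RE matches a line such as
# "namespace Firebase {" and captures "Firebase" as group 1.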
class NamespaceException(Exception):
pass
class TargetMismatchException(Exception):
pass
class TargetMissingException(Exception):
pass
def reparent_files(target_class_filename, extra_files_list):
"""Moves the implemenation of C# classes, structs, and enums.
SWIG is hard-coded to extract all classes in the c++ headers to separate
files, and instead we want those classes to be nested under the created
target class which contains otherwise global functions and members.
This provides better isolated scope that's more natural for C# APIs in many
cases.
Here we'll parse all of the C# ".cs" files produced from the swig generation,
and strip off the surrounding namespace:
namespace Firebase {
// Capture from here...
public class Goodies {
}
// To here.
}
If there is no outer namespace, we'll assume the whole file contains the
class.
Then, we'll put all of these classes at the end of the module class which swig
generates to contain any global scope functions and variables (or the target
class if the module is intended to be empty).
Depending on whether or not we expect a namespace, we can simply count
trailing }'s to find out where to insert everything, because C# files should
always just contain one top level class.
Args:
target_class_filename: This is the name of the target file to be modified.
extra_files_list: This is a list of filenames that should be absorbed
into the target_class_filename.
Raises:
NamespaceException: This is raised if there are namespaces in any of the
files that are inconsistent with each other.
"""
target_extras = ""
common_namespace = ""
for cs_filename in extra_files_list:
with open(cs_filename, "r") as cs_file:
file_buffer = cs_file.read()
# cut off the surrounding brackets from the namespace, if there is one.
ns_start = file_buffer.find("{")
match = NAMESPACE_RE.search(file_buffer[:ns_start+1])
if match:
namespace_name = match.groups()[0]
common_namespace = common_namespace or namespace_name
if common_namespace != namespace_name:
raise NamespaceException(
"Inconsistent namespace in file %s vs %s. Expected %s found %s" %
(cs_filename, str(extra_files_list), common_namespace,
namespace_name))
file_buffer = file_buffer[ns_start+1:file_buffer.rfind("}")]
# Split into lines to indent everything one level.
file_buffer = "\n".join([" " + line for line in file_buffer.splitlines()])
target_extras += file_buffer + "\n"
with open(target_class_filename, "r") as target_class_file:
target_class = target_class_file.read()
match = NAMESPACE_RE.search(target_class[:target_class.find("{")+1])
if match:
namespace_name = match.groups()[0]
common_namespace = common_namespace or namespace_name
if common_namespace != namespace_name:
raise NamespaceException(
"Inconsistent namespace in file %s vs %s. Expected %s found %s" %
(target_class_filename, str(extra_files_list), common_namespace,
namespace_name))
end_namespace_pos = target_class.rfind("}")
else:
end_namespace_pos = len(target_class)
end_class_pos = target_class.rfind("}", 0, end_namespace_pos)
target_class = (target_class[:end_class_pos] +
target_extras +
target_class[end_class_pos:])
with open(target_class_filename, "w") as target_class_file:
target_class_file.write(target_class)
# We'll cleanup here, in a second pass, so that there's less risk of losing
# content if things break.
for cs_filename in extra_files_list:
os.remove(cs_filename)
def main(unused_argv):
"""Moves swig generated classes to a common container class.
The .csproj filename is used to find the .cs file (with the same name) which
will contain all of the other SWIG generated classes.
Args:
unused_argv: Extra arguments not consumed by the config flags.
Returns:
The exit code status; 1 for error, 0 for success.
Raises:
TargetMismatchException: This is raised if the .csproj name does not match
the basename of a .cs file in the folder. For example, App.csproj must have
an App.cs file present.
TargetMissingException: This is raised if files are missing that are used
for determining the target container class.
"""
cs_dir = FLAGS.csharp_dir
# Get all of the files in the proxy dir.
files = [f for f in os.listdir(cs_dir)
if not os.path.isdir(os.path.join(cs_dir, f))]
# Find the name of the target file by finding the .csproj file.
# Find the name of the module file by looking for the PINVOKE file.
module_name = ""
target_name = ""
for f in files:
filename, extension = os.path.splitext(f)
if extension == ".csproj":
target_name = filename
if filename.endswith("PINVOKE"):
module_name = filename[:-7]
if not target_name:
raise TargetMissingException(
"No \".csproj\" file found in the csharp_dir.")
if not module_name:
raise TargetMissingException(
"No \"*PINVOKE.cs\" file found in the csharp_dir.")
# Now remove the target-name-related files; what's left should all be
# classes, enums, and structs stripped out of the C++ API that we want to fix.
if (target_name + ".cs") not in files:
raise TargetMismatchException(
("%s.cs does not exist.\n"
"Make sure that the -n argument of build_plugin.sh (currently:%s) "
"matches either the %%module property in your SWIG file or a class "
"name you're exporting and want to be the primary interface." %
(target_name, target_name)))
files.remove(target_name + ".csproj")
files.remove(target_name + ".cs")
files.remove(module_name + "PINVOKE.cs")
files.remove("AssemblyInfo.cs")
logging.info(("The contents of the following files %s are being moved to "
"%s.cs."), str(files), target_name)
# Make the list into full paths.
paths = [os.path.join(FLAGS.csharp_dir, f) for f in files]
if paths:
reparent_files(os.path.join(FLAGS.csharp_dir, target_name + ".cs"),
paths)
with open(os.path.join(FLAGS.csharp_dir, target_name + ".csproj"),
"r+") as csproj:
csproj_lines = csproj.readlines()
csproj.seek(0)
for line in csproj_lines:
if not any(i in line for i in files):
csproj.write(line)
csproj.truncate()
# For the files moved, update the PInvoke file so that references to the
# classes as parameters include the target class with the path.
classes = []
for f in files:
base, ext = os.path.splitext(f)
if ext == ".cs":
classes.append(base)
class_re = re.compile(r"((%s)( |\.\S* )[a-zA-Z_][a-zA-Z_0-9]*)" %
"|".join(classes))
replacement = target_name + r".\1"
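# Illustrative example: if `classes` were ["Goodies"] and target_name were
# "App", a declaration like "Goodies arg0" in the PINVOKE file would be
# rewritten to "App.Goodies arg0".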
with open(os.path.join(FLAGS.csharp_dir, module_name +"PINVOKE.cs"),
"r+") as pinvoke:
pinvoke_lines = pinvoke.readlines()
pinvoke.seek(0)
for line in pinvoke_lines:
line = class_re.sub(replacement, line)
pinvoke.write(line)
pinvoke.truncate()
return 0
if __name__ == "__main__":
flags.mark_flag_as_required("csharp_dir")
app.run(main)
|
|
"""Module for generating wepy systems"""
from copy import copy, deepcopy
import numpy as np
import simtk.openmm.app as omma
import simtk.openmm as omm
import simtk.unit as unit
import mdtraj as mdj
from wepy.util.mdtraj import mdtraj_to_json_topology
from wepy.util.json_top import (json_top_residue_fields,
json_top_residue_df,
json_top_atom_df,
json_top_subset)
# OpenMM helpers
from wepy.runners.openmm import OpenMMRunner, OpenMMState
from wepy.walker import Walker
from wepy.runners.openmm import gen_walker_state, UNIT_NAMES, GET_STATE_KWARG_DEFAULTS
from wepy.orchestration.snapshot import WepySimApparatus, SimSnapshot
from wepy.orchestration.configuration import Configuration
from wepy.orchestration.orchestrator import Orchestrator
# resamplers
from wepy.resampling.resamplers.resampler import NoResampler
from wepy.resampling.resamplers.wexplore import WExploreResampler
from wepy.resampling.resamplers.revo import REVOResampler
# boundary conditions
from wepy.boundary_conditions.boundary import NoBC, RandomBC
# integrators
from simtk.openmm import LangevinIntegrator
# mappers
from wepy.work_mapper.mapper import Mapper
from wepy.work_mapper.worker import WorkerMapper, Worker
from wepy.work_mapper.task_mapper import TaskMapper, WalkerTaskProcess
from wepy.runners.openmm import (
OpenMMCPUWorker, OpenMMGPUWorker,
OpenMMCPUWalkerTaskProcess, OpenMMGPUWalkerTaskProcess,
)
# reporters
from wepy.reporter.hdf5 import WepyHDF5Reporter
from wepy.reporter.dashboard import (
DashboardReporter,
ResamplerDashboardSection,
RunnerDashboardSection,
BCDashboardSection)
from wepy.reporter.wexplore.dashboard import WExploreDashboardSection
from wepy.reporter.revo.dashboard import REVODashboardSection
from wepy.reporter.openmm import OpenMMRunnerDashboardSection
from wepy.reporter.restree import ResTreeReporter
from wepy.reporter.walker import WalkerReporter
class OpenMMSimMaker():
### Default settings
## Resamplers
RESAMPLERS = [NoResampler, WExploreResampler, REVOResampler,]
WEXPLORE_DEFAULTS = {
'pmax' : 0.5,
'pmin' : 1e-12,
'max_n_regions' : (10, 10, 10, 10),
# ALERT: shouldn't be automatically setting these distances
# 'max_region_sizes' : (1, 0.5, 0.35, 0.25),
}
REVO_DEFAULTS = {
'pmax' : 0.5,
'pmin' : 1e-12,
'char_dist' : 1,
# ALERT: shouldn't be automatically setting these distances
# 'merge_dist' : 2.5,
'dist_exponent' : 4,
'weights' : True,
}
DEFAULT_RESAMPLER_PARAMS = {
'NoResampler' : {},
'WExploreResampler' : WEXPLORE_DEFAULTS,
'REVOResampler' : REVO_DEFAULTS,
}
## BCs
BCS = [NoBC, RandomBC,]
DEFAULT_BC_PARAMS = {
'NoBC' : {},
'RandomBC' : {},
}
## Integrators
INTEGRATORS = [LangevinIntegrator,]
#(TEMPERATURE, FRICTION_COEFFICIENT, STEP_SIZE)
LANGEVIN_DEFAULTS = (
300.0*unit.kelvin,
1/unit.picosecond,
0.002*unit.picoseconds,
)
DEFAULT_INTEGRATOR_PARAMS = {
'LangevinIntegrator' : LANGEVIN_DEFAULTS,
}
# these are used just to generate states
INTEGRATOR_FIXTURE = omm.LangevinIntegrator
INTEGRATOR_FIXTURE_PARAMS = DEFAULT_INTEGRATOR_PARAMS['LangevinIntegrator']
## OpenMM platforms
DEFAULT_PLATFORM_PARAMS = {
'Reference' : {},
'CPU' : {},
'OpenCL' : {},
'CUDA' : {},
}
## other runner options
GET_STATE_KWARGS = {}
## Work Mappers
MAPPERS = [Mapper, WorkerMapper, TaskMapper]
DEFAULT_MAPPER_PARAMS = {
'Mapper' : {},
'WorkerMapper' : {},
'TaskMapper' : {},
}
## Reporters
REPORTERS = [
WepyHDF5Reporter,
DashboardReporter,
ResTreeReporter,
WalkerReporter,
]
WEPY_HDF5_REPORTER_DEFAULTS = {
'main_rep_idxs' : None,
'save_fields' : None,
'units' : dict(UNIT_NAMES),
'sparse_fields' : {'velocities' : 10},
'all_atoms_rep_freqs' : 10,
'alt_reps' : None,
'swmr_mode' : True,
}
RESTREE_REPORTER_DEFAULTS = {
'node_radius' : 3.0,
'row_spacing' : 5.0,
'step_spacing' : 20.0,
'colormap_name' : 'plasma',
}
DEFAULT_REPORTER_PARAMS = {
'WepyHDF5Reporter' : WEPY_HDF5_REPORTER_DEFAULTS,
'DashboardReporter' : {},
'ResTreeReporter' : RESTREE_REPORTER_DEFAULTS,
'WalkerReporter' : {},
}
def __init__(self,
distance=None,
init_state=None,
system=None,
topology=None,
):
self.distance = distance
self.init_state = init_state
self.system = system
self.topology = topology
self.getState_kwargs = dict(GET_STATE_KWARG_DEFAULTS)
if self.GET_STATE_KWARGS is not None:
self.getState_kwargs.update(self.GET_STATE_KWARGS)
def make_state(self, system, positions):
# a temporary integrator just for this
integrator = self.INTEGRATOR_FIXTURE(*self.INTEGRATOR_FIXTURE_PARAMS)
init_state = gen_walker_state(positions, system, integrator,
getState_kwargs=self.getState_kwargs)
return init_state
@classmethod
def make_initial_walkers(cls, state, n_walkers):
init_weight = 1.0 / n_walkers
init_walkers = [Walker(deepcopy(state), init_weight) for i in range(n_walkers)]
return init_walkers
def make_apparatus(self,
platform='Reference',
platform_params=None,
runner_params=None,
integrator='LangevinIntegrator',
integrator_params=None,
resampler='WExploreResampler',
resampler_params=None,
bc=None,
bc_params=None,
):
## RUNNER
# choose which integrator to use
integrator_class = [i for i in self.INTEGRATORS
if i.__name__ == integrator][0]
integrator_name = integrator_class.__name__
# use either the default params or the user params
if integrator_params is None:
integrator_params = self.DEFAULT_INTEGRATOR_PARAMS[integrator_name]
integrator = integrator_class(*integrator_params)
# TODO: not handling the params here
if platform_params is None:
platform_params = self.DEFAULT_PLATFORM_PARAMS[platform]
# handle additional runner options
if runner_params is None:
runner_params = {}
# make the runner for the test system
runner = OpenMMRunner(self.system,
self.topology,
integrator,
platform=platform,
**runner_params)
# RESAMPLER
# choose which resampler to use
resampler_class = [res for res in self.RESAMPLERS
if res.__name__ == resampler][0]
resampler_name = resampler_class.__name__
# use either the default params or the user params
if resampler_params is None:
resampler_params = self.DEFAULT_RESAMPLER_PARAMS[resampler_name]
resampler = resampler_class(distance=self.distance,
init_state=self.init_state,
**resampler_params)
# BOUNDARY_CONDITIONS
# you don't have to specify a boundary condition
bc_name = bc
if bc_name is not None:
# choose which bc to use
bc_class = [res for res in self.BCS
if res.__name__ == bc_name][0]
bc_name = bc_class.__name__
# use either the default params or the user params
if bc_params is None:
bc_params = self.DEFAULT_BC_PARAMS[bc_name]
bc = self.make_bc(bc_class, bc_params)
# APPARATUS
# build the apparatus
sim_apparatus = WepySimApparatus(runner, resampler=resampler,
boundary_conditions=bc)
return sim_apparatus
def make_bc(self, bc_class, bc_params):
bc = bc_class(**bc_params)
return bc
@classmethod
def choose_work_mapper_platform_params(cls, platform, mapper_name):
work_mapper_params = {}
if mapper_name == 'WorkerMapper':
if platform == 'Reference':
worker_type = Worker
elif platform == 'CPU':
worker_type = OpenMMCPUWorker
elif platform in ('CUDA', 'OpenCL',):
worker_type = OpenMMGPUWorker
else:
worker_type = Worker
work_mapper_params['worker_type'] = worker_type
elif mapper_name == 'TaskMapper':
if platform == 'Reference':
worker_type = Worker
elif platform == 'CPU':
worker_type = OpenMMCPUWalkerTaskProcess
elif platform in ('CUDA', 'OpenCL',):
worker_type = OpenMMGPUWalkerTaskProcess
else:
worker_type = Worker
work_mapper_params['walker_task_type'] = worker_type
return work_mapper_params
def choose_dashboard_sections(self, apparatus):
# defaults for the dashboard sections
if apparatus.boundary_conditions is not None:
dashboard_sections = {'resampler' : ResamplerDashboardSection(apparatus.resampler),
'runner' : RunnerDashboardSection(apparatus.runner),
'bc' : BCDashboardSection(apparatus.boundary_conditions),
}
else:
dashboard_sections = {'resampler' : ResamplerDashboardSection(apparatus.resampler),
'runner' : RunnerDashboardSection(apparatus.runner),
'bc' : None,
}
# the choices here should be fairly portable between different
# systems as they are not specific to any system details
## resampler
# WExplore
if type(apparatus.resampler).__name__ == 'WExploreResampler':
dashboard_sections['resampler'] = WExploreDashboardSection(apparatus.resampler)
# REVO
elif type(apparatus.resampler).__name__ == 'REVOResampler':
dashboard_sections['resampler'] = REVODashboardSection(apparatus.resampler)
## BC
# NoBC
if type(apparatus.boundary_conditions).__name__ == 'NoBC':
dashboard_sections['bc'] = BCDashboardSection(apparatus.boundary_conditions)
# Random
elif type(apparatus.boundary_conditions).__name__ == 'RandomBC':
dashboard_sections['bc'] = BCDashboardSection(apparatus.boundary_conditions)
## Runner
# OpenMM
if type(apparatus.runner).__name__ == 'OpenMMRunner':
dashboard_sections['runner'] = OpenMMRunnerDashboardSection(
apparatus.runner)
return dashboard_sections
def resolve_reporter_params(self, apparatus, reporter_specs, reporters_kwargs=None):
if reporters_kwargs is not None:
raise NotImplementedError("Only the defaults are supported currently")
# ellipsis means use all of the defaults
if reporter_specs is Ellipsis:
# defaults to use
reporter_specs = [
'WepyHDF5Reporter',
'DashboardReporter',
# DEBUG: this isn't compatible right now, needs refactoring
#'ResTreeReporter',
'WalkerReporter',
]
# if it is None, we use no reporters
elif reporter_specs is None:
return [], []
# augment the dashboard with the sections relevant to our
# components
if 'DashboardReporter' in reporter_specs:
# customize the dashboard based on what components are in
# the apparatus or what a subclass has customized this for
dashboard_sections = self.choose_dashboard_sections(apparatus)
# get the actual classes
reporter_classes = []
for reporter_spec in reporter_specs:
match = False
for reporter_class in self.REPORTERS:
if reporter_class.__name__ == reporter_spec:
match = reporter_class
break
if not match:
raise ValueError("Unkown reporter for spec {}".format(reporter_spec))
reporter_classes.append(match)
# then get the default params for them and merge them with the given
# kwargs
reporters_params = []
for reporter_spec in reporter_specs:
reporter_params = {}
if reporter_spec == 'WepyHDF5Reporter':
# always set these ones automatically
auto_params = {
'topology' : self.json_top(),
'resampler' : apparatus.resampler,
'boundary_conditions' : apparatus.boundary_conditions,
}
reporter_params.update(auto_params)
reporter_params.update(deepcopy(self.DEFAULT_REPORTER_PARAMS[reporter_spec]))
elif reporter_spec == 'DashboardReporter':
# always set these ones automatically
auto_params = {
# the 'getStepSize' method is an abstract one for
# all integrators so we can rely on it being here.
'step_time' : apparatus.runner.integrator.getStepSize(),
# the dashboard sections
'resampler_dash' : dashboard_sections['resampler'],
'runner_dash' : dashboard_sections['runner'],
'bc_dash' : dashboard_sections['bc'],
}
reporter_params.update(auto_params)
reporter_params.update(deepcopy(self.DEFAULT_REPORTER_PARAMS[reporter_spec]))
elif reporter_spec == 'WalkerReporter':
# always set these ones automatically
auto_params = {
'json_topology' : self.json_top(),
'init_state' : self.init_state,
}
reporter_params.update(auto_params)
reporter_params.update(deepcopy(self.DEFAULT_REPORTER_PARAMS[reporter_spec]))
elif reporter_spec == 'ResTreeReporter':
# always set these ones automatically
auto_params = {
'resampler' : apparatus.resampler,
'boundary_condition' : apparatus.boundary_conditions,
}
reporter_params.update(auto_params)
reporter_params.update(deepcopy(self.DEFAULT_REPORTER_PARAMS[reporter_spec]))
else:
reporter_params.update(deepcopy(self.DEFAULT_REPORTER_PARAMS[reporter_spec]))
# add them to the list for this reporter
reporters_params.append(reporter_params)
return reporter_classes, reporters_params
def make_configuration(self,
apparatus,
work_mapper_class=None,
work_mapper_spec='TaskMapper',
work_mapper_params=None,
platform='Reference',
# defaults to using all of the defaults
reporters=Ellipsis,
reporter_kwargs=None,
work_dir=None,
monitor_class=None,
monitor_params=None,
):
# MAPPER
# choose which mapper to use
# use the class if given
if work_mapper_class is not None:
pass
# use the spec string given
elif work_mapper_spec is not None:
work_mapper_class = [mapper for mapper in self.MAPPERS
if mapper.__name__ == work_mapper_spec][0]
else:
raise ValueError("neither work_mapper_class or work_mapper_spec were not given")
mapper_name = work_mapper_class.__name__
# use either the default params or the user params
if work_mapper_params is None:
work_mapper_params = self.DEFAULT_MAPPER_PARAMS[mapper_name]
# depending on the platform and work mapper choose the worker
# type and update the params in place
work_mapper_params.update(
self.choose_work_mapper_platform_params(platform, mapper_name))
# REPORTERS
reporter_classes, reporter_params = \
self.resolve_reporter_params(apparatus, reporters, reporter_kwargs)
## Monitor
config = Configuration(
work_mapper_class=work_mapper_class,
work_mapper_partial_kwargs=work_mapper_params,
reporter_classes=reporter_classes,
reporter_partial_kwargs=reporter_params,
work_dir=work_dir,
monitor_class=monitor_class,
monitor_partial_kwargs=monitor_params,
)
return config
def make_sim_manager(self, n_walkers, apparatus, config):
walkers = self.make_initial_walkers(self.init_state, n_walkers)
snapshot = SimSnapshot(walkers, apparatus)
sim_manager = Orchestrator.gen_sim_manager(snapshot, config)
return sim_manager
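# A minimal usage sketch (illustrative only; `my_distance`, `my_state`,
# `my_system`, and `my_topology` are assumed to be built elsewhere):
#
#   sim_maker = OpenMMSimMaker(distance=my_distance, init_state=my_state,
#                              system=my_system, topology=my_topology)
#   apparatus = sim_maker.make_apparatus(resampler='REVOResampler')
#   config = sim_maker.make_configuration(apparatus, platform='CPU')
#   sim_manager = sim_maker.make_sim_manager(48, apparatus, config)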
class OpenMMToolsTestSysSimMaker(OpenMMSimMaker):
TEST_SYS = None
@classmethod
def num_atoms(cls):
json_top = cls.json_top()
# get the atom dataframe and select them from the ligand residue
return len(json_top_atom_df(json_top))
@classmethod
def box_vectors(cls):
# just munge the thing they give you to be a nice array Quantity
bvs = cls.TEST_SYS().system.getDefaultPeriodicBoxVectors()
return np.array([bv.value_in_unit(unit.nanometer) for bv in bvs]) * unit.nanometer
@classmethod
def json_top(cls):
test_sys = cls.TEST_SYS()
# convert to a JSON top
json_top = mdtraj_to_json_topology(mdj.Topology.from_openmm(test_sys.topology))
return json_top
|
|
import collections
import json
import os
import platform
import random
import string
import sys
import tkMessageBox
from Tkinter import *
from ttk import Progressbar
sys.path.append(os.getcwd() + '/bin')
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.webdriver.support import expected_conditions as EC
import httplib2
import re
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/gmail-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/gmail.readonly'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'LibCalBooker'
class GUI:
def __init__(self, master):
################-GLOBAL-VARS-############################
self.version = 1.1
self.loadingMsg = "Loading......................."
self.driver = ""
self.tupleDates = []
self.roomTimeList = []
self.roomIndex = 0
self.outputTimeArray = []
self.userInfo = []
self.master = master
self.outputEmail = ""
###############-WINDOW-SETUP-############################
self.master.title("LibCal Booker v" + str(self.version))
self.master.resizable(width=False, height=False)
self.master.protocol("WM_DELETE_WINDOW", self.window_close)
self.master.createcommand('exit', self.window_close)
self.master.bind('<Return>', self.submit_click)
###############-LOAD-FILE-###############################
# TODO: Check fields of json, if do not match, transfer over files
try:
with open("userInfo.json") as data_file:
self.userInfo = json.load(data_file)
except:
print "No existing user"
with open("userInfo.json", "w+") as data_file:
self.userInfo = dict(version=self.version, first="Mitchell", last="Pynn", email="mpynn@lakeheadu.ca", override=0, confirm=1,
browser=0, firstLoad=True, authEmail="")
json.dump(self.userInfo, data_file)
###################-DATE SELECTION-######################
# set up availDates
self.availDates = [self.loadingMsg]
# set up chosen date and give default value
self.chosenDate = StringVar(self.master)
self.chosenDate.set(self.loadingMsg)
# set up dateOptionMenu
self.dateOptionMenu = OptionMenu(self.master, self.chosenDate, *self.availDates, command=self.date_click)
self.dateOptionMenu.grid(row=0, column=0, columnspan=1, sticky=(N, S, E, W), pady=(8, 0), padx=(5, 0))
##################-ROOM SELECTION-######################
# set up availRooms
self.availRooms = ['LI 1004', 'LI 1006', 'LI 1007', 'LI 1008', 'LI 1009', 'LI 1010', 'LI 4001', 'LI 4002',
'LI 4003', 'LI 4004', 'LI 4005', 'LI 4006', 'LI 4007', 'LI 4008', 'LI 4009', 'LI 4010']
self.availTimes = ['8:00am - 8:30am', '8:30am - 9:00am', '9:00am - 9:30am', '9:30am - 10:00am',
'10:00am - 10:30am',
'10:30am - 11:00am', '11:00am - 11:30am', '11:30am - 12:00pm', '12:00pm - 12:30pm',
'12:30pm - 1:00pm',
'1:00pm - 1:30pm', '1:30pm - 2:00pm', '2:00pm - 2:30pm', '2:30pm - 3:00pm',
'3:00pm - 3:30pm',
'3:30pm - 4:00pm', '4:00pm - 4:30pm', '4:30pm - 5:00pm', '5:00pm - 5:30pm',
'5:30pm - 6:00pm', '6:00pm - 6:30pm',
'6:30pm - 7:00pm', '7:00pm - 7:30pm', '7:30pm - 8:00pm', '8:00pm - 8:30pm',
'8:30pm - 9:00pm', '9:00pm - 9:30pm',
'9:30pm - 10:00pm', '10:00pm - 10:30pm', '10:30pm - 11:00pm', '11:00pm - 11:30pm',
'11:30pm - 11:59pm']
# set up chosen room and give default value
self.chosenRoom = StringVar(self.master)
self.chosenRoom.set(self.availRooms[0])
# set up roomOptionMenu
self.roomOptionMenu = OptionMenu(self.master, self.chosenRoom, *self.availRooms, command=self.room_click)
self.roomOptionMenu.grid(row=1, column=0, columnspan=1, sticky=(N, S, E, W), padx=(5, 0), pady=(5, 0))
##################-TIME SELECTION-####################
self.timeOptionList = Listbox(self.master, selectmode=EXTENDED, height=20, exportselection=0, takefocus=0)
self.timeOptionList.grid(row=2, column=0, rowspan=200, columnspan=1, sticky=(N, S, E, W), padx=(5, 0), pady=5)
self.timeOptionList.insert(0, self.loadingMsg)
self.timeOptionList.config(state=DISABLED)
#################-BUTTONS-##########################
# user info
self.infoLabel = Label(self.master, text="[ U S E R I N F O ]", font=("Helvetica", 16, "bold"))
self.infoLabel.grid(row=0, column=1, columnspan=2, sticky=E + W)
# first Name label and input
self.fnameLabel = Label(self.master, text="First: ", font=("Helvetica", 12, "bold"))
self.fnameLabel.grid(row=1, column=1, sticky=W)
self.fnameEntry = Entry(self.master)
self.fnameEntry.grid(row=1, column=2, stick=E + W, padx=(0, 5))
self.fnameEntry.insert(0, self.userInfo["first"])
# last name label and input
self.lnameLabel = Label(self.master, text="Last: ", font=("Helvetica", 12, "bold"))
self.lnameLabel.grid(row=2, column=1, sticky=W)
self.lnameEntry = Entry(self.master)
self.lnameEntry.grid(row=2, column=2, stick=E + W, padx=(0, 5))
self.lnameEntry.insert(0, self.userInfo["last"])
# email label and entry
self.emailLabel = Label(self.master, text="Email: ", font=("Helvetica", 12, "bold"))
self.emailLabel.grid(row=3, column=1, sticky=W)
self.emailEntry = Entry(self.master)
self.emailEntry.grid(row=3, column=2, stick=E + W, padx=(0, 5))
self.emailEntry.insert(0, self.userInfo["email"])
# showBrowser checkbox
self.browserVal = IntVar(self.master)
self.browserVal.set(self.userInfo["browser"])
self.browser = Checkbutton(self.master, text="Show web browser", variable=self.browserVal,
command=self.browser_show, onvalue=1, offvalue=0, font=("Helvetica", 12),
takefocus=0)
self.browser.grid(row=4, column=2, sticky=W)
# override checkbox
self.overrideVal = IntVar(self.master)
self.overrideVal.set(self.userInfo["override"])
self.override = Checkbutton(self.master, text="Override 2hr max", variable=self.overrideVal,
onvalue=1, offvalue=0, font=("Helvetica", 12), takefocus=0)
self.override.grid(row=5, column=2, sticky=W)
# confirm checkbox
self.confirmVal = IntVar(self.master)
self.confirmVal.set(self.userInfo["confirm"])
self.confirm = Checkbutton(self.master, text="Enable confirm dialog", variable=self.confirmVal,
onvalue=1, offvalue=0, font=("Helvetica", 12), takefocus=0)
self.confirm.grid(row=6, column=2, sticky=W)
# submit button
self.submit = Button(self.master, text="Submit", command=self.submit_click, takefocus=0)
self.submit.grid(row=8, column=2, sticky=(N, S, E, W), padx=(0, 5), pady=(0, 5))
self.submit["state"] = "disabled"
# loading bar
self.loadingBar = Progressbar(self.master, orient=HORIZONTAL, length=100, mode='determinate')
self.loadingBar.grid(row=9, column=2, sticky=(N, S, E, W), padx=(0, 5), pady=(0, 5))
self.loadingBar["value"] = 10
# email
self.emm = Button(self.master, text="email tester", command=self.test_email_click, takefocus=0)
self.emm.grid(row=10, column=2, sticky=(N, S, E, W), padx=(0, 5), pady=(0, 5))
# update skeleton GUI, then load data
self.master.update()
self.load_data()
# make sure window on top
self.master.lift()
# show welcome message if first load
if self.userInfo["firstLoad"]:
tkMessageBox.showinfo("Welcome",
"Currently, booking multiple rooms is not permitted. You are limited to only booking one room per session. However, booking multiple time slots per room is permitted.\n\nMake sure to update the [ USER INFO ] section with your own name and email. \n\nCreated by Mitchell Pynn ")
def test_email_click(self):
self.outputEmail = "mpynn@lakeheadu.ca"
def email_click(self, randVals):
"""
Shows basic usage of the Gmail API.
Creates a Gmail API service object and outputs a list of label names
of the user's Gmail account.
"""
credentials = self.get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('gmail', 'v1', http=http)
userProfile = service.users().getProfile(userId='me').execute()
# set user JSON auth email
self.userInfo["authEmail"] = userProfile['emailAddress']
for idItem in randVals:
print idItem
try:
# Gmail search query for the plus-addressed id (could also be restricted with newer_than:1d)
email1 = service.users().messages().list(userId='me',
q='to:' + idItem).execute()
output = email1['messages']
text = service.users().messages().get(userId='me', id=output[0]['id']).execute()
ampFix = text['snippet'].replace('&amp;', '&')  # un-escape HTML ampersands in the snippet
confirmUrl = \
re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+|]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', ampFix)[
0] + '&m=confirm'
print(confirmUrl)
self.driver.execute_script(
"window.open('" + confirmUrl + "', 'new_window')")
self.driver.switch_to.window(self.driver.window_handles[1])
try:
# wait until page is loaded and success element found
element = WebDriverWait(self.driver, 5).until(
EC.presence_of_element_located((By.CLASS_NAME, "final_msg"))
)
# close tab and return to main page
self.driver.close()
self.driver.switch_to.window(self.driver.window_handles[0])
except:
print "Could not book room " + idItem
except:
print('An error occurred confirming the room: ????')
def get_credentials(self):
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
self.outputEmail + '.json')
store = Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def browser_show(self):
if self.browserVal.get() == 1:
# hide window/throw in corner
self.driver.set_window_position(0, 0)
self.driver.set_window_size(100, 600)
else:
# hide window/throw in corner
self.driver.set_window_position(3000, 3000)
self.driver.set_window_size(100, 300)
def save_data(self):
# update userFile when submitted
with open("userInfo.json", 'r+') as data_file:
self.userInfo["first"] = self.fnameEntry.get()
self.userInfo["last"] = self.lnameEntry.get()
self.userInfo["email"] = self.emailEntry.get()
self.userInfo["confirm"] = self.confirmVal.get()
self.userInfo["override"] = self.overrideVal.get()
self.userInfo["browser"] = self.browserVal.get()
self.userInfo["firstLoad"] = False
data_file.seek(0)
json.dump(self.userInfo, data_file)
data_file.truncate()
def window_close(self):
# save data
self.save_data()
# destroy GUI and quit driver
self.master.destroy()
self.driver.quit()
# clean log files if exist (maybe good to keep?)
dir = os.listdir(os.getcwd())
for item in dir:
if item.endswith(".log"):
os.remove(os.path.join(os.getcwd(), item))
# from -> http://stackoverflow.com/a/12578715
def is_windows_64bit(self):
if 'PROCESSOR_ARCHITEW6432' in os.environ:
return True
return os.environ['PROCESSOR_ARCHITECTURE'].endswith('64')
def load_data(self):
self.loadingBar["value"] = 25
self.master.update_idletasks()
# connect to webdriver - Try Google Chrome, then Firefox
chrome = False
try:
chromeOptions = webdriver.ChromeOptions()
prefs = {"profile.managed_default_content_settings.images": 2,
"profile.managed_default_content_settings.stylesheet": 2}
chromeOptions.add_experimental_option("prefs", prefs)
if platform.system() == 'Darwin':
self.driver = webdriver.Chrome(executable_path=os.getcwd() + '/bin/chrome/chromedriverDarwin',
chrome_options=chromeOptions)
elif platform.system() == 'Windows':
self.driver = webdriver.Chrome(executable_path=os.getcwd() + '/bin/chrome/chromedriverWindows.exe',
chrome_options=chromeOptions)
elif platform.system() == 'Linux':
self.driver = webdriver.Chrome(executable_path=os.getcwd() + '/bin/chrome/chromedriverLinux',
chrome_options=chromeOptions)
else:
print "Fatal error - incompatible operating system"
exit()
chrome = True
except:
print "Could not load Chrome, trying Firefox."
if not chrome:
try:
# setup firefox profile, no images, no css for speed
firefox_profile = webdriver.FirefoxProfile()
firefox_profile.add_extension(os.getcwd() + "/bin/ext/quickjava-2.1.2-fx.xpi")
firefox_profile.set_preference("thatoneguydotnet.QuickJava.curVersion",
"2.1.2.1") ## Prevents loading the 'thank you for installing screen'
firefox_profile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Images",
2) ## Turns images off
firefox_profile.set_preference("thatoneguydotnet.QuickJava.startupStatus.AnimatedImage",
2) ## Turns animated images off
firefox_profile.set_preference("thatoneguydotnet.QuickJava.startupStatus.CSS", 2) ## CSS
firefox_profile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Flash", 2) ## Flash
firefox_profile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Java", 2) ## Java
firefox_profile.set_preference("thatoneguydotnet.QuickJava.startupStatus.Silverlight",
2) ## Silverlight
# load driver according to operating system,
if platform.system() == 'Darwin':
self.driver = webdriver.Firefox(executable_path=os.getcwd() + '/bin/gecko/geckodriverDarwin',
firefox_profile=firefox_profile)
elif platform.system() == 'Windows':
if self.is_windows_64bit():
self.driver = webdriver.Firefox(
executable_path=os.getcwd() + '/bin/gecko/geckodriverWindows64.exe',
firefox_profile=firefox_profile)
else:
self.driver = webdriver.Firefox(
executable_path=os.getcwd() + '/bin/gecko/geckodriverWindows.exe',
firefox_profile=firefox_profile)
elif platform.system() == 'Linux':
self.driver = webdriver.Firefox(executable_path=os.getcwd() + '/bin/gecko/geckodriverLinux',
firefox_profile=firefox_profile)
else:
print "Fatal error - incompatible operating system"
exit()
except:
# get no firefox exception
print "You must have Firefox or Google Chrome installed"
exit(6)
# hide window/throw in corner or show
self.browser_show()
self.master.lift()
self.loadingBar["value"] = 50
self.master.update_idletasks()
# load initial site
self.driver.get("http://libcal.lakeheadu.ca/rooms_acc.php?gid=13445")
assert "The Chancellor Paterson Library" in self.driver.title
# scrape dates, only have to do once
self.tupleDates = collections.OrderedDict()
dateWheel = Select(self.driver.find_element_by_id("datei"))
for date in dateWheel.options:
self.tupleDates[date.text] = date.get_attribute("value")
self.loadingBar["value"] = 75
self.master.update_idletasks()
# remove loading option, pull dates from tuple to list
# set chosen date to top entry in date list
# format: {'Monday June 1st': '06-01-17'}
self.availDates.remove(self.loadingMsg)
for key, value in self.tupleDates.iteritems():
self.availDates.append(key)
self.chosenDate.set(self.availDates[0])
# get menu, clear and add dates
menu = self.dateOptionMenu["menu"]
menu.delete(0, "end")
for val in self.availDates:
menu.add_command(label=val, command=lambda pvalue=val: self.date_click(pvalue))
# update GUI, will show current date in selection
self.master.update()
# initial load data
self.date_click()
# make sure on top
self.master.lift()
def date_click(self, value=""):
# do nothing if same day clicked
if value:
if self.chosenDate.get() == value:
return
else:
self.chosenDate.set(value)
self.loadingBar["value"] = 20
self.submit["state"] = "disabled"
self.master.update_idletasks()
# clear timeOptionList contents, showing loading message
self.timeOptionList.delete(0, END)
self.timeOptionList.insert(0, self.loadingMsg)
self.timeOptionList.config(state=DISABLED)
if value:
self.loadingBar["value"] = 40
self.master.update()
# load selected date webpage
self.driver.get(
"http://libcal.lakeheadu.ca/rooms_acc.php?gid=13445&d=" + self.tupleDates[self.chosenDate.get()] + "&cap=0")
# does this wait until page is loaded?
assert "The Chancellor Paterson Library" in self.driver.title
# make sure loaded?
# create 2D array of [0]'s
self.roomTimeList = [[0] for _ in range(16)]
if value:
self.loadingBar["value"] = 60
self.master.update_idletasks()
self.room_click("room")
def room_click(self, value=""):
if value == "room":
self.loadingBar["value"] = 95
else:
self.loadingBar["value"] = 30
self.submit["state"] = "disabled"
self.master.update_idletasks()
# set roomIndex to new room
self.roomIndex = self.availRooms.index(self.chosenRoom.get())
# clear timeOptionList contents, showing loading message
self.timeOptionList.delete(0, END)
self.timeOptionList.insert(0, self.loadingMsg)
self.timeOptionList.config(state=DISABLED)
if value != "room":
self.loadingBar["value"] = 40
self.master.update()
form = self.driver.find_element_by_id("roombookingform")
if value != "room":
self.loadingBar["value"] = 60
self.master.update_idletasks()
rooms = form.find_elements_by_tag_name("fieldset")
if value != "room":
self.loadingBar["value"] = 80
self.master.update_idletasks()
# don't scrape again if already loaded
if self.roomTimeList[self.roomIndex] == [0]:
for room in rooms:
# find selected room
if room.find_element_by_tag_name("h2").text[:7] != self.chosenRoom.get():
continue
# go through each room, pulling times to array
times_out = []
check_boxes = room.find_elements_by_class_name("checkbox")
for box in check_boxes:
# output checkbox text
times_out.append(self.availTimes.index(box.find_element_by_tag_name("label").text))
self.roomTimeList[self.roomIndex] = times_out  # assign (not insert) so later room indices don't shift
break
# for the selected room, set time slots
self.timeOptionList.config(state=NORMAL)
self.timeOptionList.delete(0, END)
if len(self.roomTimeList[self.roomIndex]) > 0:
for timeSlot in self.roomTimeList[self.roomIndex]:
self.timeOptionList.insert(END, self.availTimes[timeSlot])
else:
self.timeOptionList.insert(END, "No times available")
self.timeOptionList.config(state=DISABLED)
# update height, update the whole list if it isn't the first run
self.timeOptionList.configure(height=len(self.roomTimeList[self.roomIndex]))
# colorize alternating lines of the listbox
for i in range(0, len(self.roomTimeList[self.roomIndex]), 2):
self.timeOptionList.itemconfigure(i, background='#f0f0ff')
self.loadingBar["value"] = 100
self.master.update_idletasks()
# update GUI size and remove loading bar
self.submit["state"] = "normal"
self.master.update()
def submit_click(self, value=""):
if self.emailEntry.get().strip()[-13:] != "@lakeheadu.ca":
tkMessageBox.showerror("Email format error", "Please make sure to use a valid @lakeheadu.ca email address")
return
self.save_data()
selection = self.timeOptionList.curselection()
if selection == ():
return
if len(selection) > 4 and self.overrideVal.get() != 1:
tkMessageBox.showerror("2hr Limit", "Sorry, you can only book 2hrs per day")
return
if self.confirmVal.get() == 1:
outputTimes = ""
lineBreak = '-' * (len(self.chosenDate.get()) + 2)
for index in selection:
pIndex = self.roomTimeList[self.roomIndex][int(index)]
outputTimes += self.availTimes[pIndex] + "\n"
if not tkMessageBox.askokcancel("Please confirm the following times",
self.chosenDate.get() + "\n" + lineBreak + "\n" +
self.availRooms[self.roomIndex] + "\n" + outputTimes):
return
# if output, get times
self.outputTimeArray = []
for index in selection:
pIndex = self.roomTimeList[self.roomIndex][int(index)]
self.outputTimeArray.append(self.availTimes[pIndex])
# book rooms
self.book_times()
# check if any rooms were unavailable
if len(self.outputTimeArray) > 0:
outputText = "The following times were unavailable to book\n----------------------------------------\n"
for item in self.outputTimeArray:
outputText += item + "\n"
tkMessageBox.showerror("Unavailable", outputText)
# clear booked room time slots,
self.roomTimeList[self.roomIndex] = [0]
self.room_click()
def book_times(self):
# refresh page
try:
self.driver.refresh()
except:
print "Could not load the browser"
assert "The Chancellor Paterson Library" in self.driver.title
outputText = "Successfully booked the following rooms \n" + self.chosenDate.get() + "\n" + '-' * (
len(self.chosenDate.get()) + 2) + "\n" + self.chosenRoom.get()
outputLength = 0
outputRandomId = []
self.outputEmail = self.emailEntry.get().strip()[:-13] + "@lakeheadu.ca"
while True:
# get rooms
form = self.driver.find_element_by_id("roombookingform")
rooms = form.find_elements_by_tag_name("fieldset")
for room in rooms:
# find selected room
if room.find_element_by_tag_name("h2").text[:7] != self.availRooms[self.roomIndex]:
continue
consect = 0
selectedTimes = []
checkBoxes = room.find_elements_by_class_name("checkbox")
for box in checkBoxes:
# loops through times from start every time, how can I keep place of where left off?
timeSlot = box.find_element_by_tag_name("label")
if timeSlot.text in self.outputTimeArray:
consect += 1
# adding another time will not overflow 2hrs, add it
if consect <= 4:
# check for consecutive, if array is blank, add time regardless
# try the simplify
if selectedTimes != []:
for selectedIndex in selectedTimes:
# check if time is in consecutive order
# can I put or in between two conditions????
if (self.availTimes.index(timeSlot.text) - 1) == selectedIndex or (
self.availTimes.index(timeSlot.text) + 1) == selectedIndex:
outputText += "\n" + timeSlot.text
# add to selectedTimes
selectedTimes.append(self.availTimes.index(timeSlot.text))
# remove timeSlot from array
self.outputTimeArray.remove(timeSlot.text)
# click timeSlot
timeSlot.click()
break
else:
outputText += "\n" + timeSlot.text
# add to selectedTimes
selectedTimes.append(self.availTimes.index(timeSlot.text))
# remove timeSlot from array
self.outputTimeArray.remove(timeSlot.text)
# click timeSlot
timeSlot.click()
# if 4 boxes are selected, break and book the selected rooms
else:
break
# if at least one box is selected, book it, must be consecutive
elif consect > 0:
break
break
if consect > 0:
# fill info of form
self.driver.find_element_by_id("fname").send_keys(self.fnameEntry.get())
self.driver.find_element_by_id("lname").send_keys(self.lnameEntry.get())
# always uses random_id, override 2hrs just prevents user from booking more
# then 2hrs at a time
randId = self.id_generator()
outputRandomId.append(randId)
self.driver.find_element_by_id("email").send_keys(self.emailEntry.get().strip()[:-13] + "+" + randId + "@lakeheadu.ca")
# submit
self.driver.find_element_by_id("s-lc-rm-ac-but").click()
# assert it is success
try:
# search for success element / fail
# handle failure
element = WebDriverWait(self.driver, 10).until(
EC.visibility_of_element_located((By.ID, "s-lc-rm-ac-success"))
)
except TimeoutException:
tkMessageBox.showerror("ERROR", "Error booking the room. Exiting...")
self.driver.quit()
# refresh page
self.driver.refresh()
# if out of times to book or cannot book times, break out
if len(self.outputTimeArray) <= 0 or len(self.outputTimeArray) == outputLength:
break
outputLength = len(self.outputTimeArray)
# print success message
if len(self.outputTimeArray) <= 0:
print outputRandomId
tkMessageBox.showinfo("Success", outputText)
# ask to confirm
# if email used to book rooms does not match email authEmail
## show message that redirection is happening
if tkMessageBox.askyesno("Confirm times", "Would you like to confirm your booked times?"):
# check if
if self.userInfo['authEmail'] != self.outputEmail:
tkMessageBox.showinfo("Redirecting...", "Press OK and login with " + self.outputEmail + " in the browser that will open." )
self.email_click(outputRandomId)
# random ID generator -> http://stackoverflow.com/a/2257449
def id_generator(self, size=8, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
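# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): book_times() submits
# the form with a plus-addressed email (local part + "+" + random id), and
# email_click() later finds the confirmation message by searching Gmail for
# "to:<id>". The helper below only shows how those two strings are built;
# the address and id are example values.
def _example_plus_address(base_email="someone@lakeheadu.ca", rand_id="ABC123XY"):
    local_part = base_email[:-len("@lakeheadu.ca")]
    booking_address = local_part + "+" + rand_id + "@lakeheadu.ca"
    gmail_query = "to:" + rand_id
    return booking_address, gmail_query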
root = Tk()
libGui = GUI(root)
root.mainloop()
|
|
import os
import json
from datetime import date
import calendar
import operator
class Player:
MAIN_CMP_FACTOR = "wlr"
def __init__(self, name):
self.name = name
self.gp = 0
self.w = 0
self.l = 0
self.otw = 0
self.otl = 0
self.gf = 0
self.ga = 0
self.win_counts = {}
self.loss_counts = {}
def __cmp__(self, other):
for prop in [Player.MAIN_CMP_FACTOR, "diff", "gp", "otw"]:  # "otw": Player defines otw/otl, not "ot"
result = cmp(getattr(self, prop), getattr(other, prop))
if result != 0:
return result
return 0
@property
def wlr(self):
wins = (self.w - self.otw) + self.otw * 0.5
return round(wins/float(self.l) if self.l > 0 else float('inf'), 2)
@property
def ppg(self):
totalgames = self.w + self.l
return round((self.w * 2 + self.otl) / float(totalgames), 2)
@property
def diff(self):
return self.gf - self.ga
@property
def match_counts(self):
return dict(self.win_counts.items() + self.loss_counts.items() + [(k, self.win_counts[k] + self.loss_counts[k]) for k in set(self.loss_counts) & set(self.win_counts)])
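# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how the Player
# properties combine the raw counters. An overtime win counts as half a win
# in wlr, and ppg awards 2 points per win plus 1 point per overtime loss.
# All numbers below are made up for the example.
def _example_player_stats():
    p = Player("example")
    p.gp, p.w, p.l = 10, 6, 4
    p.otw, p.otl = 2, 1
    p.gf, p.ga = 30, 25
    # wlr  = ((6 - 2) + 2 * 0.5) / 4.0  -> 1.25
    # ppg  = (6 * 2 + 1) / 10.0         -> 1.3
    # diff = 30 - 25                    -> 5
    return p.wlr, p.ppg, p.diff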
class Model:
def __init__(self):
today = date.today()
self.games_file = "games.json"
self.current_season = "%d_%s" % (today.year, calendar.month_abbr[today.month])
if os.path.exists(self.games_file):
with open(self.games_file) as data_file:
self.data = json.load(data_file)
else:
self.data = {}  # seasons are keyed by name, so this must be a dict
if not self.current_season in self.data:
self.data[self.current_season] = []
self._load_players()
def _load_players(self):
# "GP" : Games played
# "WLR" : Win/lose ratio
# "W" : Wins
# "L" : Losses
# "OT" : OT/Shootout wins
# "GF" : Goals for
# "GA" : Goals against
# "DIFF" : Goal difference
self.player_dicts = {}
self.player_wlr_lists = {}
self.player_ppg_lists = {}
for season, data in self.data.iteritems():
self.player_dicts[season] = {}
self.player_wlr_lists[season] = []
self.player_ppg_lists[season] = []
for entry in self.data[season]:
home = entry["home"]
away = entry["away"]
home_score = entry[home]
away_score = entry[away]
OT = entry["OT"]
if not home in self.player_dicts[season]:
self.player_dicts[season][home] = Player(home)
if not away in self.player_dicts[season]:
self.player_dicts[season][away] = Player(away)
home_player = self.player_dicts[season][home]
away_player = self.player_dicts[season][away]
home_player.gp += 1
home_player.gf += home_score
home_player.ga += away_score
away_player.gp += 1
away_player.gf += away_score
away_player.ga += home_score
if home_score > away_score:
away_player.l += 1
home_player.w += 1
if OT:
home_player.otw += 1
away_player.otl += 1
if not away in home_player.win_counts:
home_player.win_counts[away] = 1
else:
home_player.win_counts[away] += 1
if not home in away_player.loss_counts:
away_player.loss_counts[home] = 1
else:
away_player.loss_counts[home] += 1
elif home_score < away_score:
home_player.l += 1
away_player.w += 1
if OT:
away_player.otw += 1
home_player.otl += 1
if not away in home_player.loss_counts:
home_player.loss_counts[away] = 1
else:
home_player.loss_counts[away] += 1
if not home in away_player.win_counts:
away_player.win_counts[home] = 1
else:
away_player.win_counts[home] += 1
for name1, player in self.player_dicts[season].iteritems():
for name2 in self.player_dicts[season]:
if not (name1 == name2 or name2 in player.win_counts):
player.win_counts[name2] = 0
if not (name1 == name2 or name2 in player.loss_counts):
player.loss_counts[name2] = 0
Player.MAIN_CMP_FACTOR = "wlr"
self.player_wlr_lists[season] = list(self.player_dicts[season].values())
self.player_wlr_lists[season].sort(reverse=True)
Player.MAIN_CMP_FACTOR = "ppg"
self.player_ppg_lists[season] = list(self.player_dicts[season].values())
self.player_ppg_lists[season].sort(reverse=True)
def add(self, home, away, home_score, away_score, overtime):
if not self.current_season in self.data:
self.data[self.current_season] = []
self.data[self.current_season].append({ "home" : home
, "away" : away
, home : home_score
, away : away_score
, "OT" : overtime
})
def get_player(self, player_name, season=None):
if not season:
season = self.current_season
return self.player_dicts[season][player_name]
def get_least_played_player(self, player_name, excluded_players=[], season=None):
if not season:
season = self.current_season
player = self.player_dicts[season][player_name]
filtered_match_counts = { pn: mc for (pn, mc) in player.match_counts.iteritems() if pn not in excluded_players }
least_played_player_name = min(filtered_match_counts.iteritems(), key=operator.itemgetter(1))[0]
return self.player_dicts[season][least_played_player_name]
def get_player_least_played_games(self, excluded_players, season=None):
if not season:
season = self.current_season
filtered_players = { pn: p for (pn, p) in self.player_dicts[season].iteritems() if pn not in excluded_players }
player_least_played_games_name = min(filtered_players.values(), key=operator.attrgetter('gp')).name
return self.player_dicts[season][player_least_played_games_name]
def get_wlr_leader(self, excluded_players=[], season=None):
Player.MAIN_CMP_FACTOR = "wlr"
return self._get_leader(excluded_players, season)
def get_ppg_leader(self, excluded_players=[], season=None):
Player.MAIN_CMP_FACTOR = "ppg"
return self._get_leader(excluded_players, season)
def _get_leader(self, excluded_players, season):
if not season:
season = self.current_season
filtered_players = { pn: p for (pn, p) in self.player_dicts[season].iteritems() if pn not in excluded_players }
leader_name = max(filtered_players.values()).name
return self.player_dicts[season][leader_name]
def save(self):
with open(self.games_file, 'w') as data_file:
json.dump(self.data, data_file, indent=4, separators=(',', ': '))
@property
def player_dict(self):
return self.player_dicts[self.current_season]
@property
def player_wlr_list(self):
return self.player_wlr_lists[self.current_season]
@property
def player_ppg_list(self):
return self.player_ppg_lists[self.current_season]
@property
def seasons(self):
return [ season for season, data in self.data.iteritems() ]
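# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): minimal use of the
# Model class. Names and scores are made up. add() only appends to the raw
# game data; standings are rebuilt by _load_players(), so it is called again
# here before the leader queries.
def _example_model_usage():
    model = Model()
    model.add(home="alice", away="bob", home_score=3, away_score=2, overtime=True)
    model.save()
    model._load_players()
    return model.get_wlr_leader().name, model.get_ppg_leader().name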
|
|
"""
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
for i, foo_i in enumerate([1, 2, 3]):
assert_true(grid_search.grid_scores_[i][0]
== {'foo_param': foo_i})
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
score_accuracy = assert_warns(ChangedBehaviorWarning,
search_accuracy.score, X, y)
score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
X, y)
score_auc = assert_warns(ChangedBehaviorWarning,
search_auc.score, X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
assert_true(hasattr(random_search, "grid_scores_"))
def test_no_refit():
# Test that grid search can be used for model selection only
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "best_params_"))
def test_grid_search_error():
# Test that grid search will capture errors on data with different
# length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
# test the iid parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
# split dataset into two folds that are not iid
# first one contains data of all 4 blobs, second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
svm = SVC(kernel='linear')
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average
assert_almost_equal(first.mean_validation_score,
1 * 1. / 4. + 1. / 3. * 3. / 4.)
# once with iid=False
grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
iid=False)
grid_search.fit(X, y)
first = grid_search.grid_scores_[0]
assert_equal(first.parameters['C'], 1)
# scores are the same as above
assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
# averaged score is just mean of scores
assert_almost_equal(first.mean_validation_score,
np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
def test_grid_search_precomputed_kernel_error_kernel_function():
# Test that grid search returns an error when using a kernel_function
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
kernel_function = lambda x1, x2: np.dot(x1, x2.T)
clf = SVC(kernel=kernel_function)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n=len(X), n_folds=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
# Make a dataset with a lot of noise to get various kind of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
# XXX: as of today (scipy 0.12) it's not possible to set the random seed
# of scipy.stats distributions: the assertions in this test should thus
# not depend on the randomization
params = dict(C=expon(scale=10),
gamma=expon(scale=0.1))
n_cv_iter = 3
n_search_iter = 30
search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
param_distributions=params, iid=False)
search.fit(X, y)
assert_equal(len(search.grid_scores_), n_search_iter)
# Check consistency of the structure of each cv_score item
for cv_score in search.grid_scores_:
assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
# Because we set iid to False, the mean_validation score is the
# mean of the fold mean scores instead of the aggregate sample-wise
# mean score
assert_almost_equal(np.mean(cv_score.cv_validation_scores),
cv_score.mean_validation_score)
assert_equal(list(sorted(cv_score.parameters.keys())),
list(sorted(params.keys())))
# Check the consistency with the best_score_ and best_params_ attributes
sorted_grid_scores = list(sorted(search.grid_scores_,
key=lambda x: x.mean_validation_score))
best_score = sorted_grid_scores[-1].mean_validation_score
assert_equal(search.best_score_, best_score)
tied_best_params = [s.parameters for s in sorted_grid_scores
if s.mean_validation_score == best_score]
assert_true(search.best_params_ in tied_best_params,
"best_params_={0} is not part of the"
" tied best models: {1}".format(
search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
# test that correct scores are used
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
grid_search.fit(X, y)
cv = StratifiedKFold(n_folds=3, y=y)
for C, scores in zip(Cs, grid_search.grid_scores_):
clf.set_params(C=C)
scores = scores[2] # get the separate runs from grid scores
i = 0
for train, test in cv:
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, scores[i])
i += 1
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
pickle.dumps(grid_search) # smoke test
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
pickle.dumps(random_search) # smoke test
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(y.shape[0], random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
for parameters, _, cv_validation_scores in grid_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
for parameters, _, cv_validation_scores in random_search.grid_scores_:
est.set_params(**parameters)
for i, (train, test) in enumerate(cv):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(correct_score,
cv_validation_scores[i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
assert all(np.all(this_point.cv_validation_scores == 0.0)
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
assert all(np.all(np.isnan(this_point.cv_validation_scores))
for this_point in gs.grid_scores_
if this_point.parameters['parameter'] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
# degenerates to GridSearchCV if n_iter the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
|
|
"""Serialization utilities."""
import codecs
import os
import pickle
import sys
from collections import namedtuple
from contextlib import contextmanager
from io import BytesIO
from .exceptions import (ContentDisallowed, DecodeError, EncodeError,
SerializerNotInstalled, reraise)
from .utils.compat import entrypoints
from .utils.encoding import bytes_to_str, str_to_bytes
__all__ = ('pickle', 'loads', 'dumps', 'register', 'unregister')
SKIP_DECODE = frozenset(['binary', 'ascii-8bit'])
TRUSTED_CONTENT = frozenset(['application/data', 'application/text'])
if sys.platform.startswith('java'): # pragma: no cover
def _decode(t, coding):
return codecs.getdecoder(coding)(t)[0]
else:
_decode = codecs.decode
pickle_load = pickle.load
#: We have to use protocol 4 until we drop support for Python 3.6 and 3.7.
pickle_protocol = int(os.environ.get('PICKLE_PROTOCOL', 4))
codec = namedtuple('codec', ('content_type', 'content_encoding', 'encoder'))
@contextmanager
def _reraise_errors(wrapper,
include=(Exception,), exclude=(SerializerNotInstalled,)):
try:
yield
except exclude:
raise
except include as exc:
reraise(wrapper, wrapper(exc), sys.exc_info()[2])
def pickle_loads(s, load=pickle_load):
# used to support buffer objects
return load(BytesIO(s))
def parenthesize_alias(first, second):
return f'{first} ({second})' if first else second
class SerializerRegistry:
"""The registry keeps track of serialization methods."""
def __init__(self):
self._encoders = {}
self._decoders = {}
self._default_encode = None
self._default_content_type = None
self._default_content_encoding = None
self._disabled_content_types = set()
self.type_to_name = {}
self.name_to_type = {}
def register(self, name, encoder, decoder, content_type,
content_encoding='utf-8'):
"""Register a new encoder/decoder.
Arguments:
name (str): A convenience name for the serialization method.
encoder (callable): A method that will be passed a python data
structure and should return a string representing the
serialized data. If :const:`None`, then only a decoder
will be registered. Encoding will not be possible.
decoder (Callable): A method that will be passed a string
representing serialized data and should return a python
data structure. If :const:`None`, then only an encoder
will be registered. Decoding will not be possible.
content_type (str): The mime-type describing the serialized
structure.
content_encoding (str): The content encoding (character set) that
the `decoder` method will be returning. Will usually be
`utf-8`, `us-ascii`, or `binary`.
"""
if encoder:
self._encoders[name] = codec(
content_type, content_encoding, encoder,
)
if decoder:
self._decoders[content_type] = decoder
self.type_to_name[content_type] = name
self.name_to_type[name] = content_type
def enable(self, name):
if '/' not in name:
name = self.name_to_type[name]
self._disabled_content_types.discard(name)
def disable(self, name):
if '/' not in name:
name = self.name_to_type[name]
self._disabled_content_types.add(name)
def unregister(self, name):
"""Unregister registered encoder/decoder.
Arguments:
name (str): Registered serialization method name.
Raises:
SerializerNotInstalled: If a serializer by that name
cannot be found.
"""
try:
content_type = self.name_to_type[name]
self._decoders.pop(content_type, None)
self._encoders.pop(name, None)
self.type_to_name.pop(content_type, None)
self.name_to_type.pop(name, None)
except KeyError:
raise SerializerNotInstalled(
f'No encoder/decoder installed for {name}')
def _set_default_serializer(self, name):
"""Set the default serialization method used by this library.
Arguments:
name (str): The name of the registered serialization method.
For example, `json` (default), `pickle`, `yaml`, `msgpack`,
or any custom methods registered using :meth:`register`.
Raises:
SerializerNotInstalled: If the serialization method
requested is not available.
"""
try:
(self._default_content_type, self._default_content_encoding,
self._default_encode) = self._encoders[name]
except KeyError:
raise SerializerNotInstalled(
f'No encoder installed for {name}')
def dumps(self, data, serializer=None):
"""Encode data.
Serialize a data structure into a string suitable for sending
as an AMQP message body.
Arguments:
data (List, Dict, str): The message data to send.
serializer (str): An optional string representing
the serialization method you want the data marshalled
into. (For example, `json`, `raw`, or `pickle`).
                If :const:`None` (default), then json will be used, unless
                `data` is a :class:`str` object. In this latter case, no
                serialization occurs as it would be unnecessary.
                Note that if `serializer` is specified, then that
                serialization method will be used even if a :class:`str`
                object is passed in.
        Returns:
            Tuple[str, str, str]: A three-item tuple containing the
                content type (e.g., `application/json`), content encoding
                (e.g., `utf-8`), and a string containing the serialized data.
Raises:
SerializerNotInstalled: If the serialization method
requested is not available.
"""
if serializer == 'raw':
return raw_encode(data)
if serializer and not self._encoders.get(serializer):
raise SerializerNotInstalled(
f'No encoder installed for {serializer}')
        # If a raw string was sent, assume binary encoding
        # (it's likely either ASCII or a raw binary file, and a character
        # set of 'binary' will encompass both, even if not ideal).
if not serializer and isinstance(data, bytes):
            # `data` is already bytes; allow binary data to be
            # sent as a message without getting encoder errors
return 'application/data', 'binary', data
# For Unicode objects, force it into a string
if not serializer and isinstance(data, str):
with _reraise_errors(EncodeError, exclude=()):
payload = data.encode('utf-8')
return 'text/plain', 'utf-8', payload
if serializer:
content_type, content_encoding, encoder = \
self._encoders[serializer]
else:
encoder = self._default_encode
content_type = self._default_content_type
content_encoding = self._default_content_encoding
with _reraise_errors(EncodeError):
payload = encoder(data)
return content_type, content_encoding, payload
def loads(self, data, content_type, content_encoding,
accept=None, force=False, _trusted_content=TRUSTED_CONTENT):
"""Decode serialized data.
Deserialize a data stream as serialized using `dumps`
based on `content_type`.
Arguments:
data (bytes, buffer, str): The message data to deserialize.
content_type (str): The content-type of the data.
(e.g., `application/json`).
content_encoding (str): The content-encoding of the data.
(e.g., `utf-8`, `binary`, or `us-ascii`).
accept (Set): List of content-types to accept.
Raises:
ContentDisallowed: If the content-type is not accepted.
Returns:
Any: The unserialized data.
"""
content_type = (bytes_to_str(content_type) if content_type
else 'application/data')
if accept is not None:
if content_type not in _trusted_content \
and content_type not in accept:
raise self._for_untrusted_content(content_type, 'untrusted')
else:
if content_type in self._disabled_content_types and not force:
raise self._for_untrusted_content(content_type, 'disabled')
content_encoding = (content_encoding or 'utf-8').lower()
if data:
decode = self._decoders.get(content_type)
if decode:
with _reraise_errors(DecodeError):
return decode(data)
if content_encoding not in SKIP_DECODE and \
not isinstance(data, str):
with _reraise_errors(DecodeError):
return _decode(data, content_encoding)
return data
def _for_untrusted_content(self, ctype, why):
return ContentDisallowed(
'Refusing to deserialize {} content of type {}'.format(
why,
parenthesize_alias(self.type_to_name.get(ctype, ctype), ctype),
),
)
#: Global registry of serializers/deserializers.
registry = SerializerRegistry()
dumps = registry.dumps
loads = registry.loads
register = registry.register
unregister = registry.unregister
def raw_encode(data):
"""Special case serializer."""
content_type = 'application/data'
payload = data
if isinstance(payload, str):
content_encoding = 'utf-8'
with _reraise_errors(EncodeError, exclude=()):
payload = payload.encode(content_encoding)
else:
content_encoding = 'binary'
return content_type, content_encoding, payload
def register_json():
"""Register a encoder/decoder for JSON serialization."""
from kombu.utils import json as _json
registry.register('json', _json.dumps, _json.loads,
content_type='application/json',
content_encoding='utf-8')
def register_yaml():
"""Register a encoder/decoder for YAML serialization.
It is slower than JSON, but allows for more data types
to be serialized. Useful if you need to send data such as dates
"""
try:
import yaml
registry.register('yaml', yaml.safe_dump, yaml.safe_load,
content_type='application/x-yaml',
content_encoding='utf-8')
except ImportError:
def not_available(*args, **kwargs):
"""Raise SerializerNotInstalled.
Used in case a client receives a yaml message, but yaml
isn't installed.
"""
raise SerializerNotInstalled(
'No decoder installed for YAML. Install the PyYAML library')
registry.register('yaml', None, not_available, 'application/x-yaml')
def unpickle(s):
return pickle_loads(str_to_bytes(s))
def register_pickle():
"""Register pickle serializer.
The fastest serialization method, but restricts
you to python clients.
"""
def pickle_dumps(obj, dumper=pickle.dumps):
return dumper(obj, protocol=pickle_protocol)
registry.register('pickle', pickle_dumps, unpickle,
content_type='application/x-python-serialize',
content_encoding='binary')
def register_msgpack():
"""Register msgpack serializer.
See Also:
https://msgpack.org/.
"""
pack = unpack = None
try:
import msgpack
if msgpack.version >= (0, 4):
from msgpack import packb, unpackb
def pack(s):
return packb(s, use_bin_type=True)
def unpack(s):
return unpackb(s, raw=False)
else:
def version_mismatch(*args, **kwargs):
raise SerializerNotInstalled(
'msgpack requires msgpack-python >= 0.4.0')
pack = unpack = version_mismatch
except (ImportError, ValueError):
def not_available(*args, **kwargs):
raise SerializerNotInstalled(
'No decoder installed for msgpack. '
'Please install the msgpack-python library')
pack = unpack = not_available
registry.register(
'msgpack', pack, unpack,
content_type='application/x-msgpack',
content_encoding='binary',
)
# Register the base serialization methods.
register_json()
register_pickle()
register_yaml()
register_msgpack()
# Default serializer is 'json'
registry._set_default_serializer('json')
_setupfuns = {
'json': register_json,
'pickle': register_pickle,
'yaml': register_yaml,
'msgpack': register_msgpack,
'application/json': register_json,
'application/x-yaml': register_yaml,
'application/x-python-serialize': register_pickle,
'application/x-msgpack': register_msgpack,
}
NOTSET = object()
def enable_insecure_serializers(choices=NOTSET):
"""Enable serializers that are considered to be unsafe.
Note:
Will enable ``pickle``, ``yaml`` and ``msgpack`` by default, but you
can also specify a list of serializers (by name or content type)
to enable.
"""
choices = ['pickle', 'yaml', 'msgpack'] if choices is NOTSET else choices
if choices is not None:
for choice in choices:
try:
registry.enable(choice)
except KeyError:
pass
def disable_insecure_serializers(allowed=NOTSET):
"""Disable untrusted serializers.
Will disable all serializers except ``json``
or you can specify a list of deserializers to allow.
Note:
Producers will still be able to serialize data
in these formats, but consumers will not accept
incoming data using the untrusted content types.
"""
allowed = ['json'] if allowed is NOTSET else allowed
for name in registry._decoders:
registry.disable(name)
if allowed is not None:
for name in allowed:
registry.enable(name)
# Insecure serializers are disabled by default since v3.0
disable_insecure_serializers()
# Load entrypoints from installed extensions
for ep, args in entrypoints('kombu.serializers'): # pragma: no cover
register(ep.name, *args)
def prepare_accept_content(content_types, name_to_type=None):
"""Replace aliases of content_types with full names from registry.
Raises:
SerializerNotInstalled: If the serialization method
requested is not available.
"""
name_to_type = registry.name_to_type if not name_to_type else name_to_type
if content_types is not None:
try:
return {n if '/' in n else name_to_type[n] for n in content_types}
except KeyError as e:
raise SerializerNotInstalled(
f'No encoder/decoder installed for {e.args[0]}')
return content_types
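# A minimal usage sketch (not part of the library API above): round-trip a
# payload through the module-level ``dumps``/``loads`` helpers with the
# default 'json' serializer. The helper below is illustrative only and is
# never called at import time.
def _example_roundtrip(obj=None):
    """Serialize and deserialize a Python object, restricting decoding to
    JSON content only."""
    obj = obj if obj is not None else {'hello': 'world', 'n': 1}
    content_type, content_encoding, payload = dumps(obj, serializer='json')
    # Aliases such as 'json' are expanded to full MIME types by the registry.
    accept = prepare_accept_content(['json'])
    return loads(payload, content_type, content_encoding, accept=accept)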
|
|
"""Some utils for SSD."""
import numpy as np
import tensorflow as tf
class BBoxUtility(object):
"""Utility class to do some stuff with bounding boxes and priors.
# Arguments
num_classes: Number of classes including background.
priors: Priors and variances, numpy tensor of shape (num_priors, 8),
priors[i] = [xmin, ymin, xmax, ymax, varxc, varyc, varw, varh].
overlap_threshold: Threshold to assign box to a prior.
        nms_thresh: NMS threshold.
top_k: Number of total bboxes to be kept per image after nms step.
# References
https://arxiv.org/abs/1512.02325
"""
    # Note: the nms_thresh and top_k setters below rebuild the NMS op.
def __init__(self, num_classes, priors=None, overlap_threshold=0.5,
nms_thresh=0.45, top_k=400):
self.num_classes = num_classes
self.priors = priors
self.num_priors = 0 if priors is None else len(priors)
self.overlap_threshold = overlap_threshold
self._nms_thresh = nms_thresh
self._top_k = top_k
self.boxes = tf.placeholder(dtype='float32', shape=(None, 4))
self.scores = tf.placeholder(dtype='float32', shape=(None,))
self.nms = tf.image.non_max_suppression(self.boxes, self.scores,
self._top_k,
iou_threshold=self._nms_thresh)
self.sess = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))
@property
def nms_thresh(self):
return self._nms_thresh
@nms_thresh.setter
def nms_thresh(self, value):
self._nms_thresh = value
self.nms = tf.image.non_max_suppression(self.boxes, self.scores,
self._top_k,
iou_threshold=self._nms_thresh)
@property
def top_k(self):
return self._top_k
@top_k.setter
def top_k(self, value):
self._top_k = value
self.nms = tf.image.non_max_suppression(self.boxes, self.scores,
self._top_k,
iou_threshold=self._nms_thresh)
def iou(self, box):
"""Compute intersection over union for the box with all priors.
# Arguments
box: Box, numpy tensor of shape (4,).
# Return
iou: Intersection over union,
numpy tensor of shape (num_priors).
"""
# compute intersection
inter_upleft = np.maximum(self.priors[:, :2], box[:2])
inter_botright = np.minimum(self.priors[:, 2:4], box[2:])
inter_wh = inter_botright - inter_upleft
inter_wh = np.maximum(inter_wh, 0)
inter = inter_wh[:, 0] * inter_wh[:, 1]
# compute union
area_pred = (box[2] - box[0]) * (box[3] - box[1])
area_gt = (self.priors[:, 2] - self.priors[:, 0])
area_gt *= (self.priors[:, 3] - self.priors[:, 1])
union = area_pred + area_gt - inter
# compute iou
iou = inter / union
return iou
def encode_box(self, box, return_iou=True):
"""Encode box for training, do it only for assigned priors.
# Arguments
box: Box, numpy tensor of shape (4,).
return_iou: Whether to concat iou to encoded values.
# Return
encoded_box: Tensor with encoded box
numpy tensor of shape (num_priors, 4 + int(return_iou)).
"""
iou = self.iou(box)
encoded_box = np.zeros((self.num_priors, 4 + return_iou))
assign_mask = iou > self.overlap_threshold
if not assign_mask.any():
assign_mask[iou.argmax()] = True
if return_iou:
encoded_box[:, -1][assign_mask] = iou[assign_mask]
assigned_priors = self.priors[assign_mask]
box_center = 0.5 * (box[:2] + box[2:])
box_wh = box[2:] - box[:2]
assigned_priors_center = 0.5 * (assigned_priors[:, :2] +
assigned_priors[:, 2:4])
assigned_priors_wh = (assigned_priors[:, 2:4] -
assigned_priors[:, :2])
        # encode offsets relative to the assigned priors, scaled by the
        # prior variances
encoded_box[:, :2][assign_mask] = box_center - assigned_priors_center
encoded_box[:, :2][assign_mask] /= assigned_priors_wh
encoded_box[:, :2][assign_mask] /= assigned_priors[:, -4:-2]
encoded_box[:, 2:4][assign_mask] = np.log(box_wh /
assigned_priors_wh)
encoded_box[:, 2:4][assign_mask] /= assigned_priors[:, -2:]
return encoded_box.ravel()
def assign_boxes(self, boxes):
"""Assign boxes to priors for training.
# Arguments
boxes: Box, numpy tensor of shape (num_boxes, 4 + num_classes),
num_classes without background.
# Return
assignment: Tensor with assigned boxes,
numpy tensor of shape (num_boxes, 4 + num_classes + 8),
priors in ground truth are fictitious,
assignment[:, -8] has 1 if prior should be penalized
or in other words is assigned to some ground truth box,
assignment[:, -7:] are all 0. See loss for more details.
"""
assignment = np.zeros((self.num_priors, 4 + self.num_classes + 8))
assignment[:, 4] = 1.0
if len(boxes) == 0:
return assignment
encoded_boxes = np.apply_along_axis(self.encode_box, 1, boxes[:, :4])
encoded_boxes = encoded_boxes.reshape(-1, self.num_priors, 5)
best_iou = encoded_boxes[:, :, -1].max(axis=0)
best_iou_idx = encoded_boxes[:, :, -1].argmax(axis=0)
best_iou_mask = best_iou > 0
best_iou_idx = best_iou_idx[best_iou_mask]
assign_num = len(best_iou_idx)
encoded_boxes = encoded_boxes[:, best_iou_mask, :]
assignment[:, :4][best_iou_mask] = encoded_boxes[best_iou_idx,
np.arange(assign_num),
:4]
assignment[:, 4][best_iou_mask] = 0
assignment[:, 5:-8][best_iou_mask] = boxes[best_iou_idx, 4:]
assignment[:, -8][best_iou_mask] = 1
return assignment
def decode_boxes(self, mbox_loc, mbox_priorbox, variances):
"""Convert bboxes from local predictions to shifted priors.
# Arguments
mbox_loc: Numpy array of predicted locations.
mbox_priorbox: Numpy array of prior boxes.
variances: Numpy array of variances.
# Return
decode_bbox: Shifted priors.
"""
prior_width = mbox_priorbox[:, 2] - mbox_priorbox[:, 0]
prior_height = mbox_priorbox[:, 3] - mbox_priorbox[:, 1]
prior_center_x = 0.5 * (mbox_priorbox[:, 2] + mbox_priorbox[:, 0])
prior_center_y = 0.5 * (mbox_priorbox[:, 3] + mbox_priorbox[:, 1])
decode_bbox_center_x = mbox_loc[:, 0] * prior_width * variances[:, 0]
decode_bbox_center_x += prior_center_x
decode_bbox_center_y = mbox_loc[:, 1] * prior_width * variances[:, 1]
decode_bbox_center_y += prior_center_y
decode_bbox_width = np.exp(mbox_loc[:, 2] * variances[:, 2])
decode_bbox_width *= prior_width
decode_bbox_height = np.exp(mbox_loc[:, 3] * variances[:, 3])
decode_bbox_height *= prior_height
decode_bbox_xmin = decode_bbox_center_x - 0.5 * decode_bbox_width
decode_bbox_ymin = decode_bbox_center_y - 0.5 * decode_bbox_height
decode_bbox_xmax = decode_bbox_center_x + 0.5 * decode_bbox_width
decode_bbox_ymax = decode_bbox_center_y + 0.5 * decode_bbox_height
decode_bbox = np.concatenate((decode_bbox_xmin[:, None],
decode_bbox_ymin[:, None],
decode_bbox_xmax[:, None],
decode_bbox_ymax[:, None]), axis=-1)
decode_bbox = np.minimum(np.maximum(decode_bbox, 0.0), 1.0)
return decode_bbox
def detection_out(self, predictions, background_label_id=0, keep_top_k=200,
confidence_threshold=0.01):
"""Do non maximum suppression (nms) on prediction results.
        # Arguments
            predictions: Numpy array of predicted values.
            background_label_id: Label of background class.
            keep_top_k: Number of total bboxes to be kept per image
                after nms step.
            confidence_threshold: Only consider detections
                whose confidences are larger than a threshold.
# Return
results: List of predictions for every picture. Each prediction is:
[label, confidence, xmin, ymin, xmax, ymax]
"""
mbox_loc = predictions[:, :, :4]
variances = predictions[:, :, -4:]
mbox_priorbox = predictions[:, :, -8:-4]
mbox_conf = predictions[:, :, 4:-8]
results = []
for i in range(len(mbox_loc)):
results.append([])
decode_bbox = self.decode_boxes(mbox_loc[i],
mbox_priorbox[i], variances[i])
for c in range(self.num_classes):
if c == background_label_id:
continue
c_confs = mbox_conf[i, :, c]
c_confs_m = c_confs > confidence_threshold
if len(c_confs[c_confs_m]) > 0:
boxes_to_process = decode_bbox[c_confs_m]
confs_to_process = c_confs[c_confs_m]
feed_dict = {self.boxes: boxes_to_process,
self.scores: confs_to_process}
idx = self.sess.run(self.nms, feed_dict=feed_dict)
good_boxes = boxes_to_process[idx]
confs = confs_to_process[idx][:, None]
labels = c * np.ones((len(idx), 1))
c_pred = np.concatenate((labels, confs, good_boxes),
axis=1)
results[-1].extend(c_pred)
if len(results[-1]) > 0:
results[-1] = np.array(results[-1])
argsort = np.argsort(results[-1][:, 1])[::-1]
results[-1] = results[-1][argsort]
results[-1] = results[-1][:keep_top_k]
return results
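# A brief usage sketch (assumes TensorFlow 1.x, since BBoxUtility builds a
# tf.placeholder/tf.Session pair): construct the utility for a toy 3-class
# problem (background included) and assign one ground-truth box to two
# hand-made priors. All numbers below are illustrative, not from a real model.
def _example_assign_boxes():
    # two priors: [xmin, ymin, xmax, ymax, varxc, varyc, varw, varh]
    priors = np.array([[0.0, 0.0, 0.5, 0.5, 0.1, 0.1, 0.2, 0.2],
                       [0.5, 0.5, 1.0, 1.0, 0.1, 0.1, 0.2, 0.2]])
    util = BBoxUtility(num_classes=3, priors=priors)
    # one ground-truth box: 4 coordinates plus a one-hot class vector
    # (background excluded, so num_classes - 1 = 2 entries)
    boxes = np.array([[0.1, 0.1, 0.4, 0.4, 1.0, 0.0]])
    # returns an array of shape (num_priors, 4 + num_classes + 8)
    return util.assign_boxes(boxes)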
|
|
"""Example of Bayesian confirmatory factor analysis in "long form" and the factors are
estimated directly.
"""
from itertools import product
import numpy as np
import pandas as pd
import pymc3 as pm
import theano.tensor as tt
import matplotlib.pyplot as plt
from os.path import exists
from matplotlib import rcParams
from pymc3.math import matrix_dot
from tabulate import tabulate
from theano.tensor.nlinalg import matrix_inverse
def bcfam(items, factors, paths, nu_sd=2.5, alpha_sd=2.5, d_beta=2.5):
r"""Constructs a Bayesian CFA model in "multivariate form".
Args:
items (np.array): Data.
factors (np.array): Factor design matrix.
paths (np.array): Paths design matrix.
nu_sd (:obj:`float`, optional): Standard deviation of normal prior on item
intercepts.
alpha_sd (:obj:`float`, optional): Standard deviation of normal prior on factor
intercepts.
d_beta (:obj:`float`, optional): Scale parameter of half-Cauchy prior on factor
standard deviation.
Returns:
None: Places model in context.
"""
# get numbers of cases, items, and factors
n, p = items.shape
p_, m = factors.shape
assert p == p_, "Mismatch between data and factor-loading matrices"
# priors on item intercepts
nu = pm.Normal(name=r"$\nu$", mu=0, sd=nu_sd, shape=p, testval=items.mean(axis=0))
# priors on factor intercepts
alpha = pm.Normal(name=r"$\alpha$", mu=0, sd=alpha_sd, shape=m, testval=np.zeros(m))
# priors on factor loadings
l = np.asarray(factors).sum()
lam = pm.Normal(name=r"$\lambda$", mu=0, sd=1, shape=l, testval=np.ones(l))
# loading matrix
Lambda = tt.zeros(factors.shape)
k = 0
for i, j in product(range(p), range(m)):
if factors[i, j] == 1:
Lambda = tt.inc_subtensor(Lambda[i, j], lam[k])
k += 1
pm.Deterministic(name=r"$\Lambda$", var=Lambda)
# item means
mu = nu + matrix_dot(Lambda, alpha)
# item residual covariance matrix
d = pm.HalfCauchy(
name=r"$\sqrt{\theta}$", beta=d_beta, shape=p, testval=items.std(axis=0)
)
Theta = tt.diag(d) ** 2
# factor covariance matrix
Psi = I = np.eye(m)
# priors on paths
g = np.asarray(paths).sum()
gam = pm.Normal(name=r"$\gamma$", mu=0, sd=1, shape=g, testval=np.ones(g))
# path matrix
Gamma = tt.zeros(paths.shape)
k = 0
for i, j in product(range(m), range(m)):
if paths[i, j] == 1:
Gamma = tt.inc_subtensor(Gamma[i, j], gam[k])
k += 1
pm.Deterministic(name=r"$\Gamma$", var=Gamma)
# item covariance matrix
Sigma = (
matrix_dot(
Lambda,
matrix_inverse(I - Gamma),
Psi,
matrix_inverse(I - Gamma.T),
Lambda.T,
)
+ Theta
)
# observations
pm.MvNormal(name="$Y$", mu=mu, cov=Sigma, observed=items, shape=items.shape)
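# A hypothetical usage sketch: bcfam() only adds variables to whichever
# pm.Model is on the context stack, so it must be called inside a
# ``with pm.Model():`` block. The tiny design matrices below (4 items,
# 2 factors, one directed path between the factors) are made up purely
# for illustration; ``items`` is assumed to be an (n, 4) array.
def _example_bcfam(items):
    factors = np.array([[1, 0],
                        [1, 0],
                        [0, 1],
                        [0, 1]])
    paths = np.array([[0, 1],
                      [0, 0]])
    with pm.Model() as model:
        bcfam(items, factors, paths)
        trace = pm.sample(500, tune=500, chains=1)
    return model, trace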
def bcfau(items, factors, paths, nu_sd=2.5, alpha_sd=2.5, d_beta=2.5):
r"""Constructs a Bayesian CFA model in "univariate form" by directly estimating the
factors.
Args:
items (np.array): Data.
factors (np.array): Factor design matrix.
paths (np.array): Paths design matrix.
nu_sd (:obj:`float`, optional): Standard deviation of normal prior on item
intercepts.
alpha_sd (:obj:`float`, optional): Standard deviation of normal prior on factor
intercepts.
d_beta (:obj:`float`, optional): Scale parameter of half-Cauchy prior on factor
standard deviation.
Returns:
None: Places model in context.
"""
# get numbers of cases, items, and factors
n, p = items.shape
p_, m = factors.shape
assert p == p_, "Mismatch between data and factor-loading matrices"
# priors on item intercepts
nu = pm.Normal(name=r"$\nu$", mu=0, sd=nu_sd, shape=p, testval=items.mean(axis=0))
# priors on factor intercepts
alpha = pm.Normal(name=r"$\alpha$", mu=0, sd=alpha_sd, shape=m, testval=np.zeros(m))
# priors on factor loadings
l = np.asarray(factors).sum()
lam = pm.Normal(name=r"$\lambda$", mu=0, sd=1, shape=l, testval=np.ones(l))
# loading matrix
Lambda = tt.zeros(factors.shape)
k = 0
for i, j in product(range(p), range(m)):
if factors[i, j] == 1:
Lambda = tt.inc_subtensor(Lambda[i, j], lam[k])
k += 1
pm.Deterministic(name=r"$\Lambda$", var=Lambda)
# priors on paths
g = np.asarray(paths).sum()
gam = pm.Normal(name=r"$\gamma$", mu=0, sd=1, shape=g, testval=np.ones(g))
# path matrix
Gamma = tt.zeros(paths.shape)
k = 0
for i, j in product(range(m), range(m)):
if paths[i, j] == 1:
Gamma = tt.inc_subtensor(Gamma[i, j], gam[k])
k += 1
pm.Deterministic(name=r"$\Gamma$", var=Gamma)
# priors on factor residuals
zeta = pm.Normal(name=r"$\zeta$", mu=0, sigma=1, shape=(n, m), testval=0)
# latent variables
I = np.eye(m)
M = nu + matrix_dot(
matrix_dot((alpha + zeta), matrix_inverse(I - Gamma.T)), Lambda.T
)
# item residual standard deviations
S = pm.HalfCauchy(
name=r"$\sqrt{\theta}$", beta=d_beta, shape=p, testval=items.std(axis=0)
)
# observations
pm.Normal(name="$Y$", mu=M, sigma=S, observed=items, shape=items.shape)
def bcfab(items, factors, paths, nu_sd=2.5, alpha_sd=2.5):
r"""Constructs a Bayesian CFA model in "binomial form".
Args:
items (np.array): Data.
factors (np.array): Factor design matrix.
paths (np.array): Paths design matrix.
nu_sd (:obj:`float`, optional): Standard deviation of normal prior on item
intercepts.
alpha_sd (:obj:`float`, optional): Standard deviation of normal prior on factor
intercepts.
Returns:
None: Places model in context.
"""
# get numbers of cases, items, and factors
n, p = items.shape
p_, m = factors.shape
assert p == p_, "Mismatch between data and factor-loading matrices"
# priors on item intercepts
nu = pm.Normal(name=r"$\nu$", mu=0, sd=nu_sd, shape=p, testval=np.zeros(p))
# priors on factor intercepts
alpha = pm.Normal(name=r"$\alpha$", mu=0, sd=alpha_sd, shape=m, testval=np.zeros(m))
# priors on factor loadings
l = np.asarray(factors).sum()
lam = pm.Normal(name=r"$\lambda$", mu=0, sd=1, shape=l, testval=np.zeros(l))
# loading matrix
Lambda = tt.zeros(factors.shape)
k = 0
for i, j in product(range(p), range(m)):
if factors[i, j] == 1:
Lambda = tt.inc_subtensor(Lambda[i, j], lam[k])
k += 1
pm.Deterministic(name=r"$\Lambda$", var=Lambda)
# priors on paths
g = np.asarray(paths).sum()
gam = pm.Normal(name=r"$\gamma$", mu=0, sd=1, shape=g, testval=np.zeros(g))
# path matrix
Gamma = tt.zeros(paths.shape)
k = 0
for i, j in product(range(m), range(m)):
if paths[i, j] == 1:
Gamma = tt.inc_subtensor(Gamma[i, j], gam[k])
k += 1
pm.Deterministic(name=r"$\Gamma$", var=Gamma)
# priors on factor residuals
zeta = pm.Normal(name=r"$\zeta$", mu=0, sigma=1, shape=(n, m), testval=0)
# latent variables
I = np.eye(m)
Pi = pm.math.sigmoid(
nu
+ matrix_dot(matrix_dot((alpha + zeta), matrix_inverse(I - Gamma.T)), Lambda.T)
)
# observations
pm.Binomial(
name="$Y$", p=Pi, n=items.max(axis=0), observed=items, shape=items.shape
)
def main():
# load the data
df = pd.read_csv("../../assets/data/HS.csv", index_col=0)
# define items to keep
item_names = [
"visual",
"cubes",
"paper",
"flags",
"general",
"paragrap",
"sentence",
"wordc",
"wordm",
"addition",
"code",
"counting",
"straight",
"wordr",
"numberr",
"figurer",
"object",
"numberf",
"figurew",
]
# define the factor structure
factors = np.array(
[
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 1, 0],
]
)
# define paths
paths = np.array(
[
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0],
]
)
# iterate over the two schools
for school, sdf in df.groupby("school"):
# define the path to save results
f = f"../data/BSEM (long) examples/{school} (bin)"
# select the 19 commonly used variables
items = sdf[item_names]
# for numerical convenience, standardize the data
# items = (items - items.mean()) / items.std()
with pm.Model():
# construct the model
bcfab(items, factors, paths)
if not exists(f):
# sample and save
trace = pm.sample(5000, tune=1000, chains=1)
pm.save_trace(trace, f)
pm.traceplot(trace, compact=True)
rcParams["font.size"] = 14
plt.savefig(f"{f}/traceplot.png")
else:
trace = pm.load_trace(f)
# create a nice summary table
loadings = pd.DataFrame(
trace[r"$\Lambda$"].mean(axis=0).round(2)[:, :-1],
index=[v.title() for v in item_names],
columns=["Spatial", "Verbal", "Speed", "Memory"],
)
loadings.to_csv(f"{f}/loadings.csv")
print(tabulate(loadings, tablefmt="pipe", headers="keys"))
_paths = pd.DataFrame(
trace[r"$\Gamma$"].mean(axis=0).round(2)[:-1, -1],
index=["Spatial", "Verbal", "Speed", "Memory"],
columns=["g"],
)
_paths.to_csv(f"{f}/factor_paths.csv")
print(tabulate(_paths, tablefmt="pipe", headers="keys"))
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, build_opener,
HTTPRedirectHandler as BaseRedirectHandler, text_type,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata, MetadataInvalidError
from .util import (cached_property, ensure_slash, split_filename, get_project_data,
parse_requirement, parse_name_and_version, ServerProxy,
normalize_name)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.org/pypi'
def get_all_distribution_names(url=None):
"""
Return all distribution names known by an index.
:param url: The URL of the index.
:return: A list of all known distribution names.
"""
if url is None:
url = DEFAULT_INDEX
client = ServerProxy(url, timeout=3.0)
try:
return client.list_packages()
finally:
client('close')()
class RedirectHandler(BaseRedirectHandler):
"""
A class to work around a bug in some Python 3.2.x releases.
"""
# There's a bug in the base version for some 3.2.x
# (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
# returns e.g. /abc, it bails because it says the scheme ''
# is bogus, when actually it should use the request's
# URL for the scheme. See Python issue #13696.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
newurl = None
for key in ('location', 'uri'):
if key in headers:
newurl = headers[key]
break
if newurl is None: # pragma: no cover
return
urlparts = urlparse(newurl)
if urlparts.scheme == '':
newurl = urljoin(req.get_full_url(), newurl)
if hasattr(headers, 'replace_header'):
headers.replace_header(key, newurl)
else:
headers[key] = newurl
return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
headers)
http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
"""
A base class for locators - things that locate distributions.
"""
source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
binary_extensions = ('.egg', '.exe', '.whl')
excluded_extensions = ('.pdf',)
# A list of tags indicating which wheels you want to match. The default
# value of None matches against the tags compatible with the running
# Python. If you want to match other values, set wheel_tags on a locator
# instance to a list of tuples (pyver, abi, arch) which you want to match.
wheel_tags = None
downloadable_extensions = source_extensions + ('.whl',)
def __init__(self, scheme='default'):
"""
Initialise an instance.
:param scheme: Because locators look for most recent versions, they
need to know the version scheme to use. This specifies
the current PEP-recommended scheme - use ``'legacy'``
if you need to support existing distributions on PyPI.
"""
self._cache = {}
self.scheme = scheme
# Because of bugs in some of the handlers on some of the platforms,
# we use our own opener rather than just using urlopen.
self.opener = build_opener(RedirectHandler())
# If get_project() is called from locate(), the matcher instance
# is set from the requirement passed to locate(). See issue #18 for
# why this can be useful to know.
self.matcher = None
self.errors = queue.Queue()
def get_errors(self):
"""
Return any errors which have occurred.
"""
result = []
while not self.errors.empty(): # pragma: no cover
try:
e = self.errors.get(False)
result.append(e)
            except queue.Empty:  # Empty lives on the queue module, not the instance
continue
self.errors.task_done()
return result
def clear_errors(self):
"""
Clear any errors which may have been logged.
"""
# Just get the errors and throw them away
self.get_errors()
def clear_cache(self):
self._cache.clear()
def _get_scheme(self):
return self._scheme
def _set_scheme(self, value):
self._scheme = value
scheme = property(_get_scheme, _set_scheme)
def _get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This should be implemented in subclasses.
If called from a locate() request, self.matcher will be set to a
matcher for the requirement to satisfy, otherwise it will be None.
"""
raise NotImplementedError('Please implement in the subclass')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Please implement in the subclass')
def get_project(self, name):
"""
For a given project, get a dictionary mapping available versions to Distribution
instances.
This calls _get_project to do all the work, and just implements a caching layer on top.
"""
if self._cache is None: # pragma: no cover
result = self._get_project(name)
elif name in self._cache:
result = self._cache[name]
else:
self.clear_errors()
result = self._get_project(name)
self._cache[name] = result
return result
def score_url(self, url):
"""
        Give a URL a score which can be used to choose preferred URLs
for a given project release.
"""
t = urlparse(url)
basename = posixpath.basename(t.path)
compatible = True
is_wheel = basename.endswith('.whl')
is_downloadable = basename.endswith(self.downloadable_extensions)
if is_wheel:
compatible = is_compatible(Wheel(basename), self.wheel_tags)
return (t.scheme == 'https', 'pypi.org' in t.netloc,
is_downloadable, is_wheel, compatible, basename)
def prefer_url(self, url1, url2):
"""
Choose one of two URLs where both are candidates for distribution
archives for the same version of a distribution (for example,
.tar.gz vs. zip).
The current implementation favours https:// URLs over http://, archives
from PyPI over those from other locations, wheel compatibility (if a
wheel) and then the archive name.
"""
result = url2
if url1:
s1 = self.score_url(url1)
s2 = self.score_url(url2)
if s1 > s2:
result = url1
if result != url2:
logger.debug('Not replacing %r with %r', url1, url2)
else:
logger.debug('Replacing %r with %r', url1, url2)
return result
def split_filename(self, filename, project_name):
"""
        Attempt to split a filename into project name, version and Python version.
"""
return split_filename(filename, project_name)
def convert_url_to_download_info(self, url, project_name):
"""
See if a URL is a candidate for a download URL for a project (the URL
has typically been scraped from an HTML page).
If it is, a dictionary is returned with keys "name", "version",
"filename" and "url"; otherwise, None is returned.
"""
def same_project(name1, name2):
return normalize_name(name1) == normalize_name(name2)
result = None
scheme, netloc, path, params, query, frag = urlparse(url)
if frag.lower().startswith('egg='): # pragma: no cover
logger.debug('%s: version hint in fragment: %r',
project_name, frag)
m = HASHER_HASH.match(frag)
if m:
algo, digest = m.groups()
else:
algo, digest = None, None
origpath = path
if path and path[-1] == '/': # pragma: no cover
path = path[:-1]
if path.endswith('.whl'):
try:
wheel = Wheel(path)
if not is_compatible(wheel, self.wheel_tags):
logger.debug('Wheel not compatible: %s', path)
else:
if project_name is None:
include = True
else:
include = same_project(wheel.name, project_name)
if include:
result = {
'name': wheel.name,
'version': wheel.version,
'filename': wheel.filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
'python-version': ', '.join(
['.'.join(list(v[2:])) for v in wheel.pyver]),
}
except Exception as e: # pragma: no cover
logger.warning('invalid path for wheel: %s', path)
elif not path.endswith(self.downloadable_extensions): # pragma: no cover
logger.debug('Not downloadable: %s', path)
else: # downloadable extension
path = filename = posixpath.basename(path)
for ext in self.downloadable_extensions:
if path.endswith(ext):
path = path[:-len(ext)]
t = self.split_filename(path, project_name)
if not t: # pragma: no cover
logger.debug('No match for project/version: %s', path)
else:
name, version, pyver = t
if not project_name or same_project(project_name, name):
result = {
'name': name,
'version': version,
'filename': filename,
'url': urlunparse((scheme, netloc, origpath,
params, query, '')),
#'packagetype': 'sdist',
}
if pyver: # pragma: no cover
result['python-version'] = pyver
break
if result and algo:
result['%s_digest' % algo] = digest
return result
def _get_digest(self, info):
"""
Get a digest from a dictionary by looking at a "digests" dictionary
or keys of the form 'algo_digest'.
Returns a 2-tuple (algo, digest) if found, else None. Currently
looks only for SHA256, then MD5.
"""
result = None
if 'digests' in info:
digests = info['digests']
for algo in ('sha256', 'md5'):
if algo in digests:
result = (algo, digests[algo])
break
if not result:
for algo in ('sha256', 'md5'):
key = '%s_digest' % algo
if key in info:
result = (algo, info[key])
break
return result
def _update_version_data(self, result, info):
"""
Update a result dictionary (the final result from _get_project) with a
dictionary for a specific version, which typically holds information
gleaned from a filename or URL for an archive for the distribution.
"""
name = info.pop('name')
version = info.pop('version')
if version in result:
dist = result[version]
md = dist.metadata
else:
dist = make_dist(name, version, scheme=self.scheme)
md = dist.metadata
dist.digest = digest = self._get_digest(info)
url = info['url']
result['digests'][url] = digest
if md.source_url != info['url']:
md.source_url = self.prefer_url(md.source_url, url)
result['urls'].setdefault(version, set()).add(url)
dist.locator = self
result[version] = dist
def locate(self, requirement, prereleases=False):
"""
Find the most recent distribution which matches the given
requirement.
:param requirement: A requirement of the form 'foo (1.0)' or perhaps
'foo (>= 1.0, < 2.0, != 1.3)'
:param prereleases: If ``True``, allow pre-release versions
to be located. Otherwise, pre-release versions
are not returned.
:return: A :class:`Distribution` instance, or ``None`` if no such
distribution could be located.
"""
result = None
r = parse_requirement(requirement)
if r is None: # pragma: no cover
raise DistlibException('Not a valid requirement: %r' % requirement)
scheme = get_scheme(self.scheme)
self.matcher = matcher = scheme.matcher(r.requirement)
logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
versions = self.get_project(r.name)
if len(versions) > 2: # urls and digests keys are present
# sometimes, versions are invalid
slist = []
vcls = matcher.version_class
for k in versions:
if k in ('urls', 'digests'):
continue
try:
if not matcher.match(k):
pass # logger.debug('%s did not match %r', matcher, k)
else:
if prereleases or not vcls(k).is_prerelease:
slist.append(k)
# else:
# logger.debug('skipping pre-release '
# 'version %s of %s', k, matcher.name)
except Exception: # pragma: no cover
logger.warning('error matching %s with %r', matcher, k)
pass # slist.append(k)
if len(slist) > 1:
slist = sorted(slist, key=scheme.key)
if slist:
logger.debug('sorted list: %s', slist)
version = slist[-1]
result = versions[version]
if result:
if r.extras:
result.extras = r.extras
result.download_urls = versions.get('urls', {}).get(version, set())
d = {}
sd = versions.get('digests', {})
for url in result.download_urls:
if url in sd: # pragma: no cover
d[url] = sd[url]
result.digests = d
self.matcher = None
return result
class PyPIRPCLocator(Locator):
"""
This locator uses XML-RPC to locate distributions. It therefore
cannot be used with simple mirrors (that only mirror file content).
"""
def __init__(self, url, **kwargs):
"""
Initialise an instance.
:param url: The URL to use for XML-RPC.
:param kwargs: Passed to the superclass constructor.
"""
super(PyPIRPCLocator, self).__init__(**kwargs)
self.base_url = url
self.client = ServerProxy(url, timeout=3.0)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
return set(self.client.list_packages())
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
versions = self.client.package_releases(name, True)
for v in versions:
urls = self.client.release_urls(name, v)
data = self.client.release_data(name, v)
metadata = Metadata(scheme=self.scheme)
metadata.name = data['name']
metadata.version = data['version']
metadata.license = data.get('license')
metadata.keywords = data.get('keywords', [])
metadata.summary = data.get('summary')
dist = Distribution(metadata)
if urls:
info = urls[0]
metadata.source_url = info['url']
dist.digest = self._get_digest(info)
dist.locator = self
result[v] = dist
for info in urls:
url = info['url']
digest = self._get_digest(info)
result['urls'].setdefault(v, set()).add(url)
result['digests'][url] = digest
return result
class PyPIJSONLocator(Locator):
"""
This locator uses PyPI's JSON interface. It's very limited in functionality
and probably not worth using.
"""
def __init__(self, url, **kwargs):
super(PyPIJSONLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
url = urljoin(self.base_url, '%s/json' % quote(name))
try:
resp = self.opener.open(url)
data = resp.read().decode() # for now
d = json.loads(data)
md = Metadata(scheme=self.scheme)
data = d['info']
md.name = data['name']
md.version = data['version']
md.license = data.get('license')
md.keywords = data.get('keywords', [])
md.summary = data.get('summary')
dist = Distribution(md)
dist.locator = self
urls = d['urls']
result[md.version] = dist
for info in d['urls']:
url = info['url']
dist.download_urls.add(url)
dist.digests[url] = self._get_digest(info)
result['urls'].setdefault(md.version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# Now get other releases
for version, infos in d['releases'].items():
if version == md.version:
continue # already done
omd = Metadata(scheme=self.scheme)
omd.name = md.name
omd.version = version
odist = Distribution(omd)
odist.locator = self
result[version] = odist
for info in infos:
url = info['url']
odist.download_urls.add(url)
odist.digests[url] = self._get_digest(info)
result['urls'].setdefault(version, set()).add(url)
result['digests'][url] = self._get_digest(info)
# for info in urls:
# md.source_url = info['url']
# dist.digest = self._get_digest(info)
# dist.locator = self
# for info in urls:
# url = info['url']
# result['urls'].setdefault(md.version, set()).add(url)
# result['digests'][url] = self._get_digest(info)
except Exception as e:
self.errors.put(text_type(e))
logger.exception('JSON fetch failed: %s', e)
return result
class Page(object):
"""
This class represents a scraped HTML page.
"""
# The following slightly hairy-looking regex just looks for the contents of
# an anchor link, which has an attribute "href" either immediately preceded
# or immediately followed by a "rel" attribute. The attribute values can be
# declared with double quotes, single quotes or no quotes - which leads to
# the length of the expression.
_href = re.compile("""
(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
""", re.I | re.S | re.X)
_base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
def __init__(self, data, url):
"""
Initialise an instance with the Unicode page contents and the URL they
came from.
"""
self.data = data
self.base_url = self.url = url
m = self._base.search(self.data)
if m:
self.base_url = m.group(1)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
@cached_property
def links(self):
"""
Return the URLs of all the links on a page together with information
about their "rel" attribute, for determining which ones to treat as
downloads and which ones to queue for further scraping.
"""
def clean(url):
"Tidy up an URL."
scheme, netloc, path, params, query, frag = urlparse(url)
return urlunparse((scheme, netloc, quote(path),
params, query, frag))
result = set()
for match in self._href.finditer(self.data):
d = match.groupdict('')
rel = (d['rel1'] or d['rel2'] or d['rel3'] or
d['rel4'] or d['rel5'] or d['rel6'])
url = d['url1'] or d['url2'] or d['url3']
url = urljoin(self.base_url, url)
url = unescape(url)
url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
result.add((url, rel))
# We sort the result, hoping to bring the most recent versions
# to the front
result = sorted(result, key=lambda t: t[0], reverse=True)
return result
class SimpleScrapingLocator(Locator):
"""
A locator which scrapes HTML pages to locate downloads for a distribution.
This runs multiple threads to do the I/O; performance is at least as good
as pip's PackageFinder, which works in an analogous fashion.
"""
# These are used to deal with various Content-Encoding schemes.
decoders = {
'deflate': zlib.decompress,
'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
'none': lambda b: b,
}
def __init__(self, url, timeout=None, num_workers=10, **kwargs):
"""
Initialise an instance.
:param url: The root URL to use for scraping.
:param timeout: The timeout, in seconds, to be applied to requests.
This defaults to ``None`` (no timeout specified).
        :param num_workers: The number of worker threads you want to do I/O.
                            This defaults to 10.
:param kwargs: Passed to the superclass.
"""
super(SimpleScrapingLocator, self).__init__(**kwargs)
self.base_url = ensure_slash(url)
self.timeout = timeout
self._page_cache = {}
self._seen = set()
self._to_fetch = queue.Queue()
self._bad_hosts = set()
self.skip_externals = False
self.num_workers = num_workers
self._lock = threading.RLock()
# See issue #45: we need to be resilient when the locator is used
# in a thread, e.g. with concurrent.futures. We can't use self._lock
# as it is for coordinating our internal threads - the ones created
# in _prepare_threads.
self._gplock = threading.RLock()
self.platform_check = False # See issue #112
def _prepare_threads(self):
"""
Threads are created only when get_project is called, and terminate
before it returns. They are there primarily to parallelise I/O (i.e.
fetching web pages).
"""
self._threads = []
for i in range(self.num_workers):
t = threading.Thread(target=self._fetch)
t.daemon = True
t.start()
self._threads.append(t)
def _wait_threads(self):
"""
Tell all the threads to terminate (by sending a sentinel value) and
wait for them to do so.
"""
# Note that you need two loops, since you can't say which
# thread will get each sentinel
for t in self._threads:
self._to_fetch.put(None) # sentinel
for t in self._threads:
t.join()
self._threads = []
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
with self._gplock:
self.result = result
self.project_name = name
url = urljoin(self.base_url, '%s/' % quote(name))
self._seen.clear()
self._page_cache.clear()
self._prepare_threads()
try:
logger.debug('Queueing %s', url)
self._to_fetch.put(url)
self._to_fetch.join()
finally:
self._wait_threads()
del self.result
return result
platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
r'win(32|_amd64)|macosx_?\d+)\b', re.I)
def _is_platform_dependent(self, url):
"""
        Does a URL refer to a platform-specific download?
"""
return self.platform_dependent.search(url)
def _process_download(self, url):
"""
        See if a URL is a suitable download for a project.
If it is, register information in the result dictionary (for
_get_project) about the specific version it's for.
Note that the return value isn't actually used other than as a boolean
value.
"""
if self.platform_check and self._is_platform_dependent(url):
info = None
else:
info = self.convert_url_to_download_info(url, self.project_name)
logger.debug('process_download: %s -> %s', url, info)
if info:
with self._lock: # needed because self.result is shared
self._update_version_data(self.result, info)
return info
def _should_queue(self, link, referrer, rel):
"""
Determine whether a link URL from a referring page and with a
particular "rel" attribute should be queued for scraping.
"""
scheme, netloc, path, _, _, _ = urlparse(link)
if path.endswith(self.source_extensions + self.binary_extensions +
self.excluded_extensions):
result = False
elif self.skip_externals and not link.startswith(self.base_url):
result = False
elif not referrer.startswith(self.base_url):
result = False
elif rel not in ('homepage', 'download'):
result = False
elif scheme not in ('http', 'https', 'ftp'):
result = False
elif self._is_platform_dependent(link):
result = False
else:
host = netloc.split(':', 1)[0]
if host.lower() == 'localhost':
result = False
else:
result = True
logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
referrer, result)
return result
def _fetch(self):
"""
Get a URL to fetch from the work queue, get the HTML page, examine its
links for download candidates and candidates for further scraping.
This is a handy method to run in a thread.
"""
while True:
url = self._to_fetch.get()
try:
if url:
page = self.get_page(url)
if page is None: # e.g. after an error
continue
for link, rel in page.links:
if link not in self._seen:
try:
self._seen.add(link)
if (not self._process_download(link) and
self._should_queue(link, url, rel)):
logger.debug('Queueing %s from %s', link, url)
self._to_fetch.put(link)
except MetadataInvalidError: # e.g. invalid versions
pass
except Exception as e: # pragma: no cover
self.errors.put(text_type(e))
finally:
# always do this, to avoid hangs :-)
self._to_fetch.task_done()
if not url:
#logger.debug('Sentinel seen, quitting.')
break
def get_page(self, url):
"""
        Get the HTML for a URL, possibly from an in-memory cache.
XXX TODO Note: this cache is never actually cleared. It's assumed that
the data won't get stale over the lifetime of a locator instance (not
necessarily true for the default_locator).
"""
# http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
scheme, netloc, path, _, _, _ = urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
url = urljoin(ensure_slash(url), 'index.html')
if url in self._page_cache:
result = self._page_cache[url]
logger.debug('Returning %s from cache: %s', url, result)
else:
host = netloc.split(':', 1)[0]
result = None
if host in self._bad_hosts:
logger.debug('Skipping %s due to bad host %s', url, host)
else:
req = Request(url, headers={'Accept-encoding': 'identity'})
try:
logger.debug('Fetching %s', url)
resp = self.opener.open(req, timeout=self.timeout)
logger.debug('Fetched %s', url)
headers = resp.info()
content_type = headers.get('Content-Type', '')
if HTML_CONTENT_TYPE.match(content_type):
final_url = resp.geturl()
data = resp.read()
encoding = headers.get('Content-Encoding')
if encoding:
decoder = self.decoders[encoding] # fail if not found
data = decoder(data)
encoding = 'utf-8'
m = CHARSET.search(content_type)
if m:
encoding = m.group(1)
try:
data = data.decode(encoding)
except UnicodeError: # pragma: no cover
data = data.decode('latin-1') # fallback
result = Page(data, final_url)
self._page_cache[final_url] = result
except HTTPError as e:
if e.code != 404:
logger.exception('Fetch failed: %s: %s', url, e)
except URLError as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
with self._lock:
self._bad_hosts.add(host)
except Exception as e: # pragma: no cover
logger.exception('Fetch failed: %s: %s', url, e)
finally:
self._page_cache[url] = result # even if None (failure)
return result
_distname_re = re.compile('<a href=[^>]*>([^<]+)<')
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
page = self.get_page(self.base_url)
if not page:
raise DistlibException('Unable to get %s' % self.base_url)
for match in self._distname_re.finditer(page.data):
result.add(match.group(1))
return result
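# Usage sketch (network access assumed; not part of the distlib API surface):
# scrape a "simple"-style index for a single project. The project name used
# here is just an example.
def _example_scraping_locator():
    locator = SimpleScrapingLocator('https://pypi.org/simple/', timeout=3.0)
    # Maps version strings to Distribution instances, plus 'urls'/'digests' keys.
    return locator.get_project('requests')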
class DirectoryLocator(Locator):
"""
This class locates distributions in a directory tree.
"""
def __init__(self, path, **kwargs):
"""
Initialise an instance.
:param path: The root of the directory tree to search.
:param kwargs: Passed to the superclass constructor,
except for:
* recursive - if True (the default), subdirectories are
recursed into. If False, only the top-level directory
                         is searched.
"""
self.recursive = kwargs.pop('recursive', True)
super(DirectoryLocator, self).__init__(**kwargs)
path = os.path.abspath(path)
if not os.path.isdir(path): # pragma: no cover
raise DistlibException('Not a directory: %r' % path)
self.base_dir = path
def should_include(self, filename, parent):
"""
Should a filename be considered as a candidate for a distribution
archive? As well as the filename, the directory which contains it
is provided, though not used by the current implementation.
"""
return filename.endswith(self.downloadable_extensions)
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, name)
if info:
self._update_version_data(result, info)
if not self.recursive:
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for root, dirs, files in os.walk(self.base_dir):
for fn in files:
if self.should_include(fn, root):
fn = os.path.join(root, fn)
url = urlunparse(('file', '',
pathname2url(os.path.abspath(fn)),
'', '', ''))
info = self.convert_url_to_download_info(url, None)
if info:
result.add(info['name'])
if not self.recursive:
break
return result
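# Usage sketch: point a DirectoryLocator at a local directory containing
# sdists and/or wheels and list the distribution names it finds. The path
# below is hypothetical.
def _example_directory_locator(path='/tmp/wheelhouse'):
    locator = DirectoryLocator(path, recursive=False)
    return locator.get_distribution_names()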
class JSONLocator(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
data = get_project_data(name)
if data:
for info in data.get('files', []):
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
dist = make_dist(data['name'], info['version'],
summary=data.get('summary',
'Placeholder for summary'),
scheme=self.scheme)
md = dist.metadata
md.source_url = info['url']
# TODO SHA256 digest
if 'digest' in info and info['digest']:
dist.digest = ('md5', info['digest'])
md.dependencies = info.get('requirements', {})
dist.exports = info.get('exports', {})
result[dist.version] = dist
result['urls'].setdefault(dist.version, set()).add(info['url'])
return result
class DistPathLocator(Locator):
"""
This locator finds installed distributions in a path. It can be useful for
adding to an :class:`AggregatingLocator`.
"""
def __init__(self, distpath, **kwargs):
"""
Initialise an instance.
:param distpath: A :class:`DistributionPath` instance to search.
"""
super(DistPathLocator, self).__init__(**kwargs)
assert isinstance(distpath, DistributionPath)
self.distpath = distpath
def _get_project(self, name):
dist = self.distpath.get_distribution(name)
if dist is None:
result = {'urls': {}, 'digests': {}}
else:
result = {
dist.version: dist,
'urls': {dist.version: set([dist.source_url])},
'digests': {dist.version: set([None])}
}
return result
class AggregatingLocator(Locator):
"""
This class allows you to chain and/or merge a list of locators.
"""
def __init__(self, *locators, **kwargs):
"""
Initialise an instance.
:param locators: The list of locators to search.
:param kwargs: Passed to the superclass constructor,
except for:
* merge - if False (the default), the first successful
search from any of the locators is returned. If True,
the results from all locators are merged (this can be
slow).
"""
self.merge = kwargs.pop('merge', False)
self.locators = locators
super(AggregatingLocator, self).__init__(**kwargs)
def clear_cache(self):
super(AggregatingLocator, self).clear_cache()
for locator in self.locators:
locator.clear_cache()
def _set_scheme(self, value):
self._scheme = value
for locator in self.locators:
locator.scheme = value
scheme = property(Locator.scheme.fget, _set_scheme)
def _get_project(self, name):
result = {}
for locator in self.locators:
d = locator.get_project(name)
if d:
if self.merge:
files = result.get('urls', {})
digests = result.get('digests', {})
# next line could overwrite result['urls'], result['digests']
result.update(d)
df = result.get('urls')
if files and df:
for k, v in files.items():
if k in df:
df[k] |= v
else:
df[k] = v
dd = result.get('digests')
if digests and dd:
dd.update(digests)
else:
# See issue #18. If any dists are found and we're looking
# for specific constraints, we only return something if
# a match is found. For example, if a DirectoryLocator
# returns just foo (1.0) while we're looking for
# foo (>= 2.0), we'll pretend there was nothing there so
# that subsequent locators can be queried. Otherwise we
# would just return foo (1.0) which would then lead to a
# failure to find foo (>= 2.0), because other locators
# weren't searched. Note that this only matters when
# merge=False.
if self.matcher is None:
found = True
else:
found = False
for k in d:
if self.matcher.match(k):
found = True
break
if found:
result = d
break
return result
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
result = set()
for locator in self.locators:
try:
result |= locator.get_distribution_names()
except NotImplementedError:
pass
return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
JSONLocator(),
SimpleScrapingLocator('https://pypi.org/simple/',
timeout=3.0),
scheme='legacy')
locate = default_locator.locate
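# Usage sketch: the module-level ``locate`` helper resolves a requirement
# string to the newest matching Distribution via ``default_locator``
# (network access assumed; the requirement below is only an example).
def _example_locate():
    dist = locate('requests (>= 2.0)')
    if dist is not None:
        return dist.name, dist.version, dist.download_urls
    return None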
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
"""
Initialise an instance, using the specified locator
to locate distributions.
"""
self.locator = locator or default_locator
self.scheme = get_scheme(self.locator.scheme)
def add_distribution(self, dist):
"""
Add a distribution to the finder. This will update internal information
about who provides what.
:param dist: The distribution to add.
"""
logger.debug('adding distribution %s', dist)
name = dist.key
self.dists_by_name[name] = dist
self.dists[(name, dist.version)] = dist
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Add to provided: %s, %s, %s', name, version, dist)
self.provided.setdefault(name, set()).add((version, dist))
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError: # pragma: no cover
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
:return: A set of distributions which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other,
frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
"""
Find a distribution and all distributions it depends on.
:param requirement: The requirement specifying the distribution to
find, or a Distribution instance.
:param meta_extras: A list of meta extras such as :test:, :build: and
so on.
:param prereleases: If ``True``, allow pre-release versions to be
returned - otherwise, don't return prereleases
unless they're all that's available.
Return a set of :class:`Distribution` instances and a set of
problems.
The distributions returned should be such that they have the
:attr:`required` attribute set to ``True`` if they were
from the ``requirement`` passed to ``find()``, and they have the
:attr:`build_time_dependency` attribute set to ``True`` unless they
are post-installation dependencies of the ``requirement``.
The problems should be a tuple consisting of the string
``'unsatisfied'`` and the requirement which couldn't be satisfied
by any distribution known to the locator.
"""
self.provided = {}
self.dists = {}
self.dists_by_name = {}
self.reqts = {}
meta_extras = set(meta_extras or [])
if ':*:' in meta_extras:
meta_extras.remove(':*:')
# :meta: and :run: are implicitly included
meta_extras |= set([':test:', ':build:', ':dev:'])
if isinstance(requirement, Distribution):
dist = odist = requirement
logger.debug('passed %s as requirement', odist)
else:
dist = odist = self.locator.locate(requirement,
prereleases=prereleases)
if dist is None:
raise DistlibException('Unable to locate %r' % requirement)
logger.debug('located %s', odist)
dist.requested = True
problems = set()
todo = set([dist])
install_dists = set([odist])
while todo:
dist = todo.pop()
name = dist.key # case-insensitive
if name not in self.dists_by_name:
self.add_distribution(dist)
else:
#import pdb; pdb.set_trace()
other = self.dists_by_name[name]
if other != dist:
self.try_to_replace(dist, other, problems)
ireqts = dist.run_requires | dist.meta_requires
sreqts = dist.build_requires
ereqts = set()
if meta_extras and dist in install_dists:
for key in ('test', 'build', 'dev'):
e = ':%s:' % key
if e in meta_extras:
ereqts |= getattr(dist, '%s_requires' % key)
all_reqts = ireqts | sreqts | ereqts
for r in all_reqts:
providers = self.find_providers(r)
if not providers:
logger.debug('No providers found for %r', r)
provider = self.locator.locate(r, prereleases=prereleases)
# If no provider is found and we didn't consider
# prereleases, consider them now.
if provider is None and not prereleases:
provider = self.locator.locate(r, prereleases=True)
if provider is None:
logger.debug('Cannot satisfy %r', r)
problems.add(('unsatisfied', r))
else:
n, v = provider.key, provider.version
if (n, v) not in self.dists:
todo.add(provider)
providers.add(provider)
if r in ireqts and dist in install_dists:
install_dists.add(provider)
logger.debug('Adding %s to install_dists',
provider.name_and_version)
for p in providers:
name = p.key
if name not in self.dists_by_name:
self.reqts.setdefault(p, set()).add(r)
else:
other = self.dists_by_name[name]
if other != p:
# see if other can be replaced by p
self.try_to_replace(p, other, problems)
dists = set(self.dists.values())
for dist in dists:
dist.build_time_dependency = dist not in install_dists
if dist.build_time_dependency:
logger.debug('%s is a build-time dependency only.',
dist.name_and_version)
logger.debug('find done for %s', odist)
return dists, problems
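# Illustrative sketch (not part of distlib itself): drive the finder
# end-to-end for a single requirement. The requirement string is an example;
# locating it requires network access to the index used by default_locator.
def _example_find_dependencies(requirement='requests (>= 2.0)'):
    finder = DependencyFinder()
    dists, problems = finder.find(requirement)
    for dist in dists:
        logger.debug('%s (build-time only: %s)', dist.name_and_version,
                     dist.build_time_dependency)
    for problem in problems:
        logger.debug('problem: %s', problem)
    return dists, problems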
|
|
# Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, unicode_literals
# TODO: this is simply copied from PyMongo, for now - remove it once we've
# standardized on something for all driver tests
"""Tools for testing high availability in PyMongo."""
import os
import pprint
import random
import shutil
import signal
import socket
import subprocess
import sys
import time
import warnings
from stat import S_IRUSR
import pymongo
import pymongo.errors
from pymongo.read_preferences import ReadPreference
home = os.environ.get('HOME')
default_dbpath = os.path.join(home, 'data', 'motor_ha')
dbpath = os.environ.get('DBPATH', default_dbpath)
default_logpath = os.path.join(home, 'log', 'motor_ha')
logpath = os.environ.get('LOGPATH', default_logpath)
hostname = os.environ.get('HOSTNAME', 'localhost')
port = int(os.environ.get('DBPORT', 27017))
mongod = os.environ.get('MONGOD', 'mongod')
mongos = os.environ.get('MONGOS', 'mongos')
set_name = os.environ.get('SETNAME', 'repl0')
use_greenlets = bool(os.environ.get('GREENLETS'))
ha_tools_debug = bool(os.environ.get('HA_TOOLS_DEBUG'))
tornado_warnings = bool(os.environ.get('TORNADO_WARNINGS'))
config_dbs = {}
nodes = {}
routers = {}
cur_port = port
key_file = None
try:
from subprocess import DEVNULL # Python 3.
except ImportError:
DEVNULL = open(os.devnull, 'wb')
def kill_members(members, sig, hosts=nodes):
for member in sorted(members):
try:
if ha_tools_debug:
print('killing %s' % member)
proc = hosts[member]['proc']
# Not sure if cygwin makes sense here...
if sys.platform in ('win32', 'cygwin'):
os.kill(proc.pid, signal.CTRL_C_EVENT)
else:
os.kill(proc.pid, sig)
except OSError:
if ha_tools_debug:
print('%s already dead?' % member)
def kill_all_members():
kill_members(nodes.keys(), 2, nodes)
kill_members(routers.keys(), 2, routers)
kill_members(config_dbs.keys(), 2, config_dbs)
def wait_for(proc, port_num):
trys = 0
while proc.poll() is None and trys < 160:
trys += 1
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
try:
s.connect((hostname, port_num))
return True
except (IOError, socket.error):
time.sleep(0.25)
finally:
s.close()
kill_all_members()
return False
def start_subprocess(cmd):
"""Run cmd (a list of strings) and return a Popen instance."""
return subprocess.Popen(cmd, stdout=DEVNULL, stderr=DEVNULL)
def start_replica_set(members, auth=False, fresh=True):
global cur_port, key_file
if fresh:
if os.path.exists(dbpath):
try:
shutil.rmtree(dbpath)
except OSError:
pass
try:
os.makedirs(dbpath)
except OSError as e:
print(e)
print('\tWhile creating %s' % dbpath)
if auth:
key_file = os.path.join(dbpath, 'key.txt')
if not os.path.exists(key_file):
f = open(key_file, 'wb')
try:
f.write(b'my super secret system password')
finally:
f.close()
os.chmod(key_file, S_IRUSR)
for i in range(len(members)):
host = '%s:%d' % (hostname, cur_port)
members[i].update({'_id': i, 'host': host})
path = os.path.join(dbpath, 'db' + str(i))
if not os.path.exists(path):
os.makedirs(path)
member_logpath = os.path.join(logpath, 'db' + str(i) + '.log')
if not os.path.exists(os.path.dirname(member_logpath)):
os.makedirs(os.path.dirname(member_logpath))
cmd = [mongod,
'--dbpath', path,
'--port', str(cur_port),
'--replSet', set_name,
'--nojournal', '--oplogSize', '64',
'--logappend', '--logpath', member_logpath]
if auth:
cmd += ['--keyFile', key_file]
if ha_tools_debug:
print('starting %s' % ' '.join(cmd))
proc = start_subprocess(cmd)
nodes[host] = {'proc': proc, 'cmd': cmd}
assert wait_for(proc, cur_port)
cur_port += 1
config = {'_id': set_name, 'members': members}
primary = members[0]['host']
c = pymongo.MongoClient(primary, use_greenlets=use_greenlets)
try:
if ha_tools_debug:
pprint.pprint({'replSetInitiate': config})
c.admin.command('replSetInitiate', config)
except pymongo.errors.OperationFailure as e:
# Already initialized from a previous run?
if ha_tools_debug:
print(e)
expected_arbiters = 0
for member in members:
if member.get('arbiterOnly'):
expected_arbiters += 1
expected_secondaries = len(members) - expected_arbiters - 1
# Wait for 4 minutes for replica set to come up.
patience = 4
for i in range(int(patience * 60 / 1)):
time.sleep(1)
try:
if (get_primary() and
len(get_secondaries()) == expected_secondaries and
len(get_arbiters()) == expected_arbiters):
break
except pymongo.errors.ConnectionFailure:
# Keep waiting
pass
if ha_tools_debug:
print('waiting for RS %s' % i)
else:
kill_all_members()
raise Exception(
"Replica set still not initalized after %s minutes" % patience)
return primary, set_name
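# Usage sketch (assumes a local ``mongod`` binary on the PATH and writable
# dbpath/logpath directories): bring up a three-member set with one arbiter,
# then tear everything down again.
def _example_replica_set():
    members = [{}, {}, {'arbiterOnly': True}]
    primary, name = start_replica_set(members)
    print('primary %s of replica set %s' % (primary, name))
    kill_all_members()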
def create_sharded_cluster(num_routers=3):
global cur_port
# Start three config servers
configdb_hosts = []
for i in range(3):
configdb_host = '%s:%d' % (hostname, cur_port)
path = os.path.join(dbpath, 'configdb' + str(i))
if not os.path.exists(path):
os.makedirs(path)
configdb_logpath = os.path.join(logpath, 'configdb' + str(i) + '.log')
if not os.path.exists(os.path.dirname(configdb_logpath)):
os.makedirs(os.path.dirname(configdb_logpath))
cmd = [mongod,
'--dbpath', path,
'--port', str(cur_port),
'--nojournal', '--logappend',
'--logpath', configdb_logpath]
if ha_tools_debug:
print('starting %s' % ' '.join(cmd))
proc = start_subprocess(cmd)
config_dbs[configdb_host] = {'proc': proc, 'cmd': cmd}
assert wait_for(proc, cur_port)
configdb_hosts.append(configdb_host)
cur_port += 1
# ...and a shard server
shard_host = '%s:%d' % (hostname, cur_port)
path = os.path.join(dbpath, 'shard1')
if not os.path.exists(path):
os.makedirs(path)
db_logpath = os.path.join(logpath, 'shard1.log')
cmd = [mongod,
'--dbpath', path,
'--port', str(cur_port),
'--nojournal', '--logappend',
'--logpath', db_logpath]
if ha_tools_debug:
print('starting %s' % ' '.join(cmd))
proc = start_subprocess(cmd)
nodes[shard_host] = {'proc': proc, 'cmd': cmd}
assert wait_for(proc, cur_port)
# ...and a few mongos instances
cur_port += 1
for i in range(num_routers):
cur_port += 1
host = '%s:%d' % (hostname, cur_port)
mongos_logpath = os.path.join(logpath, 'mongos' + str(i) + '.log')
cmd = [mongos,
'--port', str(cur_port),
'--logappend',
'--logpath', mongos_logpath,
'--configdb', ','.join(configdb_hosts)]
if ha_tools_debug:
print('starting %s' % ' '.join(cmd))
proc = start_subprocess(cmd)
routers[host] = {'proc': proc, 'cmd': cmd}
assert wait_for(proc, cur_port)
# Add the shard
client = pymongo.MongoClient('%s:%d' % (hostname, cur_port))
try:
client.admin.command({'addshard': shard_host})
except pymongo.errors.OperationFailure:
# Already configured.
pass
return get_mongos_seed_list()
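# Usage sketch: the seed list returned above is a comma-separated 'host:port'
# string that can be passed straight to a MongoClient (assumes the cluster
# started by create_sharded_cluster is still running).
#
#     seeds = create_sharded_cluster()
#     client = pymongo.MongoClient(seeds)
#     print(client.admin.command('listShards'))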
# Connect to a random member
def get_client():
return pymongo.MongoClient(
nodes.keys(),
read_preference=ReadPreference.PRIMARY_PREFERRED,
use_greenlets=use_greenlets)
def get_mongos_seed_list():
members = routers.keys()
return ','.join(members)
def kill_mongos(host):
kill_members([host], 2, hosts=routers)
return host
def restart_mongos(host):
restart_members([host], True)
def get_members_in_state(state):
with warnings.catch_warnings():
# Suppress "replsetgetstatus does not support PRIMARY_PREFERRED".
warnings.simplefilter("ignore", UserWarning)
status = get_client().admin.command('replSetGetStatus')
members = status['members']
return [k['name'] for k in members if k['state'] == state]
def get_primary():
try:
primaries = get_members_in_state(1)
assert len(primaries) <= 1
if primaries:
return primaries[0]
except pymongo.errors.ConnectionFailure:
pass
return None
def get_random_secondary():
secondaries = get_members_in_state(2)
if len(secondaries):
return random.choice(secondaries)
return None
def get_secondaries():
return get_members_in_state(2)
def get_arbiters():
return get_members_in_state(7)
def get_recovering():
return get_members_in_state(3)
def get_passives():
return get_client().admin.command('ismaster').get('passives', [])
def get_hosts():
return get_client().admin.command('ismaster').get('hosts', [])
def get_hidden_members():
# Both 'hidden' and 'slaveDelay'
secondaries = get_secondaries()
readers = get_hosts() + get_passives()
for member in readers:
try:
secondaries.remove(member)
except ValueError:
# Not in the secondaries list (e.g. the primary); skip it
pass
return secondaries
def get_tags(member):
config = get_client().local.system.replset.find_one()
for m in config['members']:
if m['host'] == member:
return m.get('tags', {})
raise Exception('member %s not in config' % repr(member))
def kill_primary(sig=2):
primary = get_primary()
kill_members([primary], sig)
return primary
def kill_secondary(sig=2):
secondary = get_random_secondary()
kill_members([secondary], sig)
return secondary
def kill_all_secondaries(sig=2):
secondaries = get_secondaries()
kill_members(secondaries, sig)
return secondaries
# TODO: refactor w/ start_replica_set
def add_member(auth=False):
global cur_port
host = '%s:%d' % (hostname, cur_port)
primary = get_primary()
c = pymongo.MongoClient(primary, use_greenlets=use_greenlets)
config = c.local.system.replset.find_one()
_id = max([member['_id'] for member in config['members']]) + 1
member = {'_id': _id, 'host': host}
path = os.path.join(dbpath, 'db' + str(_id))
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
member_logpath = os.path.join(logpath, 'db' + str(_id) + '.log')
if not os.path.exists(os.path.dirname(member_logpath)):
os.makedirs(os.path.dirname(member_logpath))
cmd = [mongod,
'--dbpath', path,
'--port', str(cur_port),
'--replSet', set_name,
'--nojournal', '--oplogSize', '64',
'--logappend', '--logpath', member_logpath]
if auth:
cmd += ['--keyFile', key_file]
if ha_tools_debug:
print('starting %s' % ' '.join(cmd))
proc = start_subprocess(cmd)
nodes[host] = {'proc': proc, 'cmd': cmd}
assert wait_for(proc, cur_port)
cur_port += 1
config['members'].append(member)
config['version'] += 1
if ha_tools_debug:
print({'replSetReconfig': config})
response = c.admin.command({'replSetReconfig': config})
if ha_tools_debug:
print(response)
return host
def stepdown_primary():
primary = get_primary()
if primary:
if ha_tools_debug:
print('stepping down primary: %s' % primary)
c = pymongo.MongoClient(primary, use_greenlets=use_greenlets)
for _ in range(20):
try:
c.admin.command('replSetStepDown', 20)
except pymongo.errors.ConnectionFailure:
# Expected: mongod closes all connections when stepping down.
if ha_tools_debug:
print('\tcalled replSetStepDown')
return
except Exception as e:
if ha_tools_debug:
# Likely "No electable secondaries caught up", keep trying.
print('Exception from replSetStepDown: %s' % e)
time.sleep(2)
elif ha_tools_debug:
print('stepdown_primary() found no primary')
def set_maintenance(member, value):
"""Put a member into RECOVERING state if value is True, else normal state.
"""
c = pymongo.MongoClient(member, use_greenlets=use_greenlets)
c.admin.command('replSetMaintenance', value)
start = time.time()
while value != (member in get_recovering()):
assert (time.time() - start) <= 10, (
"Member %s never switched state" % member)
time.sleep(0.25)
def restart_members(members, router=False, configdb=False):
restarted = []
if router:
servers = routers
elif configdb:
servers = config_dbs
else:
servers = nodes
for member in members:
cmd = servers[member]['cmd']
proc = start_subprocess(cmd)
servers[member]['proc'] = proc
assert wait_for(proc, int(member.split(':')[1]))
return restarted
|
|
from __future__ import unicode_literals
from future.builtins import super
from datetime import timedelta
from django.contrib.auth.models import User
from django.contrib.messages import info, error
from django.shortcuts import get_object_or_404, redirect
from django.utils.timezone import now
from django.views.generic import ListView, CreateView, DetailView, TemplateView
from mezzanine.accounts import get_profile_model
from mezzanine.conf import settings
from mezzanine.generic.models import ThreadedComment, Keyword
from mezzanine.utils.views import paginate
from drum.links.forms import LinkForm
from drum.links.models import Link
from drum.links.utils import order_by_score
# Returns the name to be used for reverse profile lookups from the user
# object. That's "profile" for the ``drum.links.Profile``, but otherwise
# depends on the model specified in ``AUTH_PROFILE_MODULE``.
USER_PROFILE_RELATED_NAME = get_profile_model().user.field.related_query_name()
class UserFilterView(ListView):
"""
List view that puts a ``profile_user`` variable into the context,
which is optionally retrieved by a ``username`` urlpattern var.
If a user is loaded, ``object_list`` is filtered by the loaded
user. Used for showing lists of links and comments.
"""
def get_context_data(self, **kwargs):
context = super(UserFilterView, self).get_context_data(**kwargs)
try:
username = self.kwargs["username"]
except KeyError:
profile_user = None
else:
users = User.objects.select_related(USER_PROFILE_RELATED_NAME)
lookup = {"username__iexact": username, "is_active": True}
profile_user = get_object_or_404(users, **lookup)
qs = context["object_list"].filter(user=profile_user)
context["object_list"] = qs
# Update context_object_name variable
context_object_name = self.get_context_object_name(context["object_list"])
context[context_object_name] = context["object_list"]
context["profile_user"] = profile_user
context["no_data"] = ("Whoa, there's like, literally no data here, "
"like seriously, I totally got nothin.")
return context
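# Illustrative sketch (regex and pattern name are assumptions, not part of
# drum): a urlpattern that supplies the optional ``username`` kwarg consumed
# by subclasses of this view.
#
#     url("^users/(?P<username>[-\w]+)/$",
#         LinkList.as_view(), name="link_list_user"),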
class ScoreOrderingView(UserFilterView):
"""
List view that optionally orders ``object_list`` by calculated
score. Subclasses must define a ``date_field`` attribute for the
related model, which is used to determine time-scaled scoring.
Ordering by score is the default behaviour, but can be
overridden by passing ``False`` to the ``by_score`` arg in
urlpatterns, in which case ``object_list`` is sorted by most
recent, using the ``date_field`` attribute. Used for showing lists
of links and comments.
"""
def get_context_data(self, **kwargs):
context = super(ScoreOrderingView, self).get_context_data(**kwargs)
qs = context["object_list"]
context["by_score"] = self.kwargs.get("by_score", True)
if context["by_score"]:
qs = order_by_score(qs, self.score_fields, self.date_field)
else:
qs = qs.order_by("-" + self.date_field)
context["object_list"] = paginate(qs, self.request.GET.get("page", 1),
settings.ITEMS_PER_PAGE, settings.MAX_PAGING_LINKS)
# Update context_object_name variable
context_object_name = self.get_context_object_name(context["object_list"])
context[context_object_name] = context["object_list"]
context["title"] = self.get_title(context)
return context
class LinkView(object):
"""
List and detail view mixin for links - just defines the correct
queryset.
"""
def get_queryset(self):
return Link.objects.published().select_related(
"user",
"user__%s" % USER_PROFILE_RELATED_NAME
)
class LinkList(LinkView, ScoreOrderingView):
"""
List view for links, which can be for all users (homepage) or
a single user (links from user's profile page). Links can be
ordered by score (homepage, profile links) or by most recently
created ("newest" main nav item).
"""
date_field = "publish_date"
score_fields = ["rating_sum", "comments_count"]
def get_queryset(self):
queryset = super(LinkList, self).get_queryset()
tag = self.kwargs.get("tag")
if tag:
queryset = queryset.filter(keywords__keyword__slug=tag)
return queryset.prefetch_related("keywords__keyword")
def get_title(self, context):
tag = self.kwargs.get("tag")
if tag:
return get_object_or_404(Keyword, slug=tag).title
if context["by_score"]:
return "" # Homepage
if context["profile_user"]:
return "Links by %s" % getattr(
context["profile_user"],
USER_PROFILE_RELATED_NAME
)
else:
return "Newest"
class LinkCreate(CreateView):
"""
Link creation view - assigns the user to the new link, as well
as setting Mezzanine's ``gen_description`` attribute to ``False``,
so that we can provide our own descriptions.
"""
form_class = LinkForm
model = Link
def form_valid(self, form):
hours = getattr(settings, "ALLOWED_DUPLICATE_LINK_HOURS", None)
if hours and form.instance.link:
lookup = {
"link": form.instance.link,
"publish_date__gt": now() - timedelta(hours=hours),
}
try:
link = Link.objects.get(**lookup)
except Link.DoesNotExist:
pass
else:
error(self.request, "Link exists")
return redirect(link)
form.instance.user = self.request.user
form.instance.gen_description = False
info(self.request, "Link created")
return super(LinkCreate, self).form_valid(form)
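# The duplicate-link check above is opt-in via a settings value; a sketch of
# what a project might add to its settings module (the value 24 is only an
# example):
#
#     ALLOWED_DUPLICATE_LINK_HOURS = 24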
class LinkDetail(LinkView, DetailView):
"""
Link detail view - threaded comments and rating are implemented
in its template.
"""
pass
class CommentList(ScoreOrderingView):
"""
List view for comments, which can be for all users ("comments" and
"best" main nav items) or a single user (comments from user's
profile page). Comments can be ordered by score ("best" main nav item)
or by most recently created ("comments" main nav item, profile
comments).
"""
date_field = "submit_date"
score_fields = ["rating_sum"]
def get_queryset(self):
qs = ThreadedComment.objects.filter(is_removed=False, is_public=True)
select = ["user", "user__%s" % (USER_PROFILE_RELATED_NAME)]
prefetch = ["content_object"]
return qs.select_related(*select).prefetch_related(*prefetch)
def get_title(self, context):
if context["profile_user"]:
return "Comments by %s" % getattr(
context["profile_user"],
USER_PROFILE_RELATED_NAME
)
elif context["by_score"]:
return "Best comments"
else:
return "Latest comments"
class TagList(TemplateView):
template_name = "links/tag_list.html"
|
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import os
import sys
import json
import uuid
import tempfile
import logging as logbase
from st2common import log as logging
from st2common.logging.formatters import ConsoleLogFormatter
from st2common.logging.formatters import GelfLogFormatter
import st2tests.config as tests_config
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
RESOURCES_DIR = os.path.abspath(os.path.join(CURRENT_DIR, '../resources'))
CONFIG_FILE_PATH = os.path.join(RESOURCES_DIR, 'logging.conf')
class MockRecord(object):
levelno = 40
msg = None
exc_info = None
exc_text = None
def getMessage(self):
return self.msg
class LoggerTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
tests_config.parse_args()
def setUp(self):
super(LoggerTestCase, self).setUp()
self.config_text = open(CONFIG_FILE_PATH).read()
self.cfg_fd, self.cfg_path = tempfile.mkstemp()
self.info_log_fd, self.info_log_path = tempfile.mkstemp()
self.audit_log_fd, self.audit_log_path = tempfile.mkstemp()
with open(self.cfg_path, 'a') as f:
f.write(self.config_text.format(self.info_log_path,
self.audit_log_path))
def tearDown(self):
self._remove_tempfile(self.cfg_fd, self.cfg_path)
self._remove_tempfile(self.info_log_fd, self.info_log_path)
self._remove_tempfile(self.audit_log_fd, self.audit_log_path)
super(LoggerTestCase, self).tearDown()
def _remove_tempfile(self, fd, path):
os.close(fd)
os.unlink(path)
def test_logger_setup_failure(self):
config_file = '/tmp/abc123'
self.assertFalse(os.path.exists(config_file))
self.assertRaises(Exception, logging.setup, config_file)
def test_logger_set_level(self):
logging.setup(self.cfg_path)
log = logging.getLogger(__name__)
self.assertEqual(log.getEffectiveLevel(), logbase.DEBUG)
log.setLevel(logbase.INFO)
self.assertEqual(log.getEffectiveLevel(), logbase.INFO)
log.setLevel(logbase.WARN)
self.assertEqual(log.getEffectiveLevel(), logbase.WARN)
log.setLevel(logbase.ERROR)
self.assertEqual(log.getEffectiveLevel(), logbase.ERROR)
log.setLevel(logbase.CRITICAL)
self.assertEqual(log.getEffectiveLevel(), logbase.CRITICAL)
log.setLevel(logbase.AUDIT)
self.assertEqual(log.getEffectiveLevel(), logbase.AUDIT)
def test_log_info(self):
"""Test that INFO log entry does not go to the audit log."""
logging.setup(self.cfg_path)
log = logging.getLogger(__name__)
msg = uuid.uuid4().hex
log.info(msg)
info_log_entries = open(self.info_log_path).read()
self.assertIn(msg, info_log_entries)
audit_log_entries = open(self.audit_log_path).read()
self.assertNotIn(msg, audit_log_entries)
def test_log_critical(self):
"""Test that CRITICAL log entry does not go to the audit log."""
logging.setup(self.cfg_path)
log = logging.getLogger(__name__)
msg = uuid.uuid4().hex
log.critical(msg)
info_log_entries = open(self.info_log_path).read()
self.assertIn(msg, info_log_entries)
audit_log_entries = open(self.audit_log_path).read()
self.assertNotIn(msg, audit_log_entries)
def test_log_audit(self):
"""Test that AUDIT log entry goes to the audit log."""
logging.setup(self.cfg_path)
log = logging.getLogger(__name__)
msg = uuid.uuid4().hex
log.audit(msg)
info_log_entries = open(self.info_log_path).read()
self.assertIn(msg, info_log_entries)
audit_log_entries = open(self.audit_log_path).read()
self.assertIn(msg, audit_log_entries)
class ConsoleLogFormatterTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
tests_config.parse_args()
def test_format(self):
formatter = ConsoleLogFormatter()
# No extra attributes
mock_message = 'test message 1'
record = MockRecord()
record.msg = mock_message
message = formatter.format(record=record)
self.assertEqual(message, mock_message)
# Some extra attributes
mock_message = 'test message 2'
record = MockRecord()
record.msg = mock_message
# Add "extra" attributes
record._user_id = 1
record._value = 'bar'
record.ignored = 'foo' # this one is ignored since it doesn't have a prefix
message = formatter.format(record=record)
expected = 'test message 2 (value=\'bar\',user_id=1)'
self.assertEqual(message, expected)
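# For reference, underscore-prefixed attributes like the ones set on the mock
# record above appear on a real LogRecord when callers pass them via the
# standard ``extra`` argument; a hedged sketch of that call:
#
#     log.info('test message 2', extra={'_user_id': 1, '_value': 'bar'})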
class GelfLogFormatterTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
tests_config.parse_args()
def test_format(self):
formatter = GelfLogFormatter()
expected_keys = ['version', 'host', 'short_message', 'full_message',
'timestamp', 'level']
# No extra attributes
mock_message = 'test message 1'
record = MockRecord()
record.msg = mock_message
message = formatter.format(record=record)
parsed = json.loads(message)
for key in expected_keys:
self.assertTrue(key in parsed)
self.assertEqual(parsed['short_message'], mock_message)
self.assertEqual(parsed['full_message'], mock_message)
# Some extra attributes
mock_message = 'test message 2'
record = MockRecord()
record.msg = mock_message
# Add "extra" attributes
record._user_id = 1
record._value = 'bar'
record.ignored = 'foo' # this one is ignored since it doesn't have a prefix
message = formatter.format(record=record)
parsed = json.loads(message)
for key in expected_keys:
self.assertTrue(key in parsed)
self.assertEqual(parsed['short_message'], mock_message)
self.assertEqual(parsed['full_message'], mock_message)
self.assertEqual(parsed['_user_id'], 1)
self.assertEqual(parsed['_value'], 'bar')
self.assertTrue('ignored' not in parsed)
# Record with an exception
mock_exception = Exception('mock exception bar')
try:
raise mock_exception
except Exception:
mock_exc_info = sys.exc_info()
# Some extra attributes
mock_message = 'test message 3'
record = MockRecord()
record.msg = mock_message
record.exc_info = mock_exc_info
message = formatter.format(record=record)
parsed = json.loads(message)
for key in expected_keys:
self.assertTrue(key in parsed)
self.assertEqual(parsed['short_message'], mock_message)
self.assertTrue(mock_message in parsed['full_message'])
self.assertTrue('Traceback' in parsed['full_message'])
self.assertTrue('_exception' in parsed)
self.assertTrue('_traceback' in parsed)
def test_extra_object_serialization(self):
class MyClass1(object):
def __repr__(self):
return 'repr'
class MyClass2(object):
def to_dict(self):
return 'to_dict'
class MyClass3(object):
def to_serializable_dict(self):
return 'to_serializable_dict'
formatter = GelfLogFormatter()
record = MockRecord()
record.msg = 'message'
record._obj1 = MyClass1()
record._obj2 = MyClass2()
record._obj3 = MyClass3()
message = formatter.format(record=record)
parsed = json.loads(message)
self.assertEqual(parsed['_obj1'], 'repr')
self.assertEqual(parsed['_obj2'], 'to_dict')
self.assertEqual(parsed['_obj3'], 'to_serializable_dict')
|
|
from sqlalchemy import (
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
Float,
String,
func,
or_,
)
from sqlalchemy.orm import relationship, backref
import logging
log = logging.getLogger(__name__)
from ..app import db
from .with_offsets import WithOffsets
class Place(db.Model):
"""
A geographical place in South Africa.
The table is pre-populated using the subplace information
from the 2011 Census.
There is a place hierarchy:
Nation -> Province -> District -> Municipality -> Mainplace -> Subplace
We don't store District information because it's rarely used.
Each place has a lat/long pair associated with it. This is the centroid
of the place, all places are actually geographical areas.
The table is not fully normalised because we don't really need normalised
data at this point. For example, a municipality will have province
and muni information, but not mainplace or subplace, which will be null.
"""
__tablename__ = "places"
id = Column(Integer, primary_key=True)
# eg: province, municipality, mainplace, or subplace
level = Column(String(15), index=True)
# province name and code
province_name = Column(String(20), index=True)
province_code = Column(String(5), index=True)
# municipality
municipality_name = Column(String(50), index=True)
municipality_code = Column(String(10), index=True)
# mainplace
mainplace_name = Column(String(50), index=True)
mainplace_code = Column(String(10), index=True)
# subplace
subplace_name = Column(String(50), index=True)
subplace_code = Column(String(10), index=True, unique=True)
lat = Column(String(10))
lng = Column(String(10))
@property
def full_name(self):
parents = [self.subplace_name, self.mainplace_name, self.municipality_name, self.province_name]
return ', '.join(x for x in parents if x)
@property
def name(self):
return getattr(self, '%s_name' % self.level)
@property
def code(self):
return getattr(self, '%s_code' % self.level)
@property
def geo_id(self):
return '%s-%s' % (self.level, self.code)
@property
def geo_type(self):
""" What type of geo is this, a point or an id? """
if self.lat and self.lng:
return "point"
else:
return "region"
@property
def geo_data(self):
""" Data for this place. If it's a point, a lat,lng string.
Otherwise a level-id string."""
if self.lat and self.lng:
return '%s, %s' % (self.lat, self.lng)
else:
return '%s-%s' % (self.level, self.code)
def as_dict(self):
d = {
'type': self.geo_type,
'id': self.geo_id,
'level': self.level,
'code': self.code,
'full_name': self.full_name,
'name': self.name,
}
if d['type'] == 'point':
d['coordinates'] = [self.lat, self.lng]
return d
def __repr__(self):
return "<Place level=%s, province=%s, muni=%s, mainplace='%s', subplace='%s'>" % (
self.level, self.province_code, self.municipality_code,
self.mainplace_name, self.subplace_name)
@classmethod
def find(cls, term):
"""
See if we have a place that matches this name.
"""
if term in PLACE_STOPWORDS:
return
p = Place.query\
.filter(Place.level == 'province')\
.filter(Place.province_name == term).first()
if p:
return p
p = Place.query\
.filter(Place.level == 'municipality')\
.filter(or_(
Place.municipality_name == term,
Place.municipality_name == 'City of %s' % term)).first()
if p:
return p
p = Place.query\
.filter(Place.level == 'mainplace')\
.filter(or_(
Place.mainplace_name == term,
Place.mainplace_name == '%s MP' % term)).first()
if p:
return p
# subplaces are almost always wrong, since they have names like 'Paris' and 'Zuma'
#p = Place.query\
# .filter(Place.level == 'subplace')\
# .filter(or_(
# Place.subplace_name == term,
# Place.subplace_name == '%s SP' % term)).first()
#if p:
# return p
return None
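# Illustrative usage sketch (the place name is an assumption about what the
# census-derived table contains):
#
#     place = Place.find('Johannesburg')
#     if place:
#         print('%s %s' % (place.geo_id, place.full_name))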
class DocumentPlace(db.Model, WithOffsets):
"""
Place in an article.
"""
__tablename__ = "document_places"
id = Column(Integer, primary_key=True)
doc_id = Column(Integer, ForeignKey('documents.id', ondelete='CASCADE'), index=True, nullable=False)
place_id = Column(Integer, ForeignKey('places.id'), index=True, nullable=False)
relevance = Column(Float, index=True, nullable=True)
relevant = Column(Boolean, index=True, default=False)
# offsets in the document, a space-separated list of offset:length pairs.
offset_list = Column(String(1024))
created_at = Column(DateTime(timezone=True), index=True, unique=False, nullable=False, server_default=func.now())
updated_at = Column(DateTime(timezone=True), server_default=func.now(), onupdate=func.current_timestamp())
# Associations
place = relationship('Place', lazy=False)
def as_dict(self):
p = self.place.as_dict()
p['relevance'] = self.relevance
return p
def __repr__(self):
return "<DocumentPlace id=%s, place=%s, relevance=%s, doc=%s>" % (self.id, self.place, self.relevance, self.document)
@classmethod
def summary_for_docs(cls, docs):
"""
Generate a summary description for places in these docs for plotting on maps.
"""
mentions = {}
origins = {}
count = 0
for d in docs:
# TODO: origin
places = d.get_places()
if places:
count += 1
for dp in places:
geo_id = dp.place.geo_id
if geo_id not in mentions:
mentions[geo_id] = dp.place.as_dict()
mentions[geo_id]['documents'] = []
mentions[geo_id]['documents'].append(d.id)
return {
'document_count': count,
'mentions': mentions.values(),
'origins': origins,
}
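# The summary returned above has roughly this shape (values invented for
# illustration):
#
#     {'document_count': 2,
#      'mentions': [{'id': 'province-GT', 'level': 'province', ...,
#                    'documents': [10, 11]}],
#      'origins': {}}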
# Places we know aren't in SA, but sometimes match something in our DB
PLACE_STOPWORDS = set(x.strip() for x in """
London
New York
Afghanistan
Akrotiri
Albania
Algeria
American Samoa
Andorra
Angola
Anguilla
Antarctica
Antigua and Barbuda
Argentina
Armenia
Aruba
Ashmore and Cartier Islands
Australia
Austria
Azerbaijan
Bahamas
Bahrain
Bangladesh
Barbados
Bassas da India
Belarus
Belgium
Belize
Benin
Bermuda
Bhutan
Bolivia
Bosnia and Herzegovina
Botswana
Bouvet Island
Brazil
British Indian Ocean Territory
British Virgin Islands
Brunei
Bulgaria
Burkina Faso
Burma
Burundi
Cambodia
Cameroon
Canada
Cape Verde
Cayman Islands
Central African Republic
Chad
Chile
China
Christmas Island
Clipperton Island
Cocos (Keeling) Islands
Colombia
Comoros
Congo
Congo
Cook Islands
Coral Sea Islands
Costa Rica
Cote d'Ivoire
Croatia
Cuba
Cyprus
Czech Republic
Denmark
Dhekelia
Djibouti
Dominica
Dominican Republic
Ecuador
Egypt
El Salvador
Equatorial Guinea
Eritrea
Estonia
Ethiopia
Europa Island
Falkland Islands (Islas Malvinas)
Faroe Islands
Fiji
Finland
France
French Guiana
French Polynesia
French Southern and Antarctic Lands
Gabon
Gambia
Gaza Strip
Georgia
Germany
Ghana
Gibraltar
Glorioso Islands
Greece
Greenland
Grenada
Guadeloupe
Guam
Guatemala
Guernsey
Guinea
Guinea-Bissau
Guyana
Haiti
Heard Island and McDonald Islands
Holy See (Vatican City)
Honduras
Hong Kong
Hungary
Iceland
India
Indonesia
Iran
Iraq
Ireland
Isle of Man
Israel
Italy
Jamaica
Jan Mayen
Japan
Jersey
Jordan
Juan de Nova Island
Kazakhstan
Kenya
Kiribati
North Korea
South Korea
Kuwait
Kyrgyzstan
Laos
Latvia
Lebanon
Lesotho
Liberia
Libya
Liechtenstein
Lithuania
Luxembourg
Macau
Macedonia
Madagascar
Malawi
Malaysia
Maldives
Mali
Malta
Marshall Islands
Martinique
Mauritania
Mauritius
Mayotte
Mexico
Micronesia
Moldova
Monaco
Mongolia
Montserrat
Morocco
Mozambique
Namibia
Nauru
Navassa Island
Nepal
Netherlands
Netherlands Antilles
New Caledonia
New Zealand
Nicaragua
Niger
Nigeria
Niue
Norfolk Island
Northern Mariana Islands
Norway
Oman
Pakistan
Palau
Panama
Papua New Guinea
Paracel Islands
Paraguay
Peru
Philippines
Pitcairn Islands
Poland
Portugal
Puerto Rico
Qatar
Reunion
Romania
Russia
Rwanda
Saint Helena
Saint Kitts and Nevis
Saint Lucia
Saint Pierre and Miquelon
Saint Vincent and the Grenadines
Samoa
San Marino
Sao Tome and Principe
Saudi Arabia
Senegal
Serbia and Montenegro
Seychelles
Sierra Leone
Singapore
Slovakia
Slovenia
Solomon Islands
Somalia
South Africa
South Georgia and the South Sandwich Islands
Spain
Spratly Islands
Sri Lanka
Sudan
Suriname
Svalbard
Swaziland
Sweden
Switzerland
Syria
Taiwan
Tajikistan
Tanzania
Thailand
Timor-Leste
Togo
Tokelau
Tonga
Trinidad and Tobago
Tromelin Island
Tunisia
Turkey
Turkmenistan
Turks and Caicos Islands
Tuvalu
Uganda
Ukraine
United Arab Emirates
United Kingdom
United States
Uruguay
Uzbekistan
Vanuatu
Venezuela
Vietnam
Virgin Islands
Wake Island
Wallis and Futuna
West Bank
Western Sahara
Yemen
Zambia
Zimbabwe
""".strip().split("\n"))
|
|
"""
Display stats about running Storm topologies.
"""
from __future__ import absolute_import, print_function
import sys
from itertools import chain
from pkg_resources import parse_version
from prettytable import PrettyTable
from six import iteritems
from six.moves import map, zip
from .common import add_environment, add_name
from ..util import (get_env_config, get_ui_json, get_ui_jsons,
storm_lib_version)
def display_stats(env_name, topology_name=None, component_name=None,
all_components=None):
if topology_name and all_components:
_print_all_components(env_name, topology_name)
elif topology_name and component_name:
_print_component_status(env_name, topology_name, component_name)
elif topology_name:
_print_topology_status(env_name, topology_name)
else:
_print_cluster_status(env_name)
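# Programmatic usage sketch (environment, topology and component names are
# assumptions about the caller's project):
#
#     display_stats('prod')                              # whole cluster
#     display_stats('prod', topology_name='wordcount')   # one topology
#     display_stats('prod', topology_name='wordcount',
#                   component_name='count-bolt')         # one component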
def _print_cluster_status(env_name):
jsons = get_ui_jsons(env_name, ["/api/v1/cluster/summary",
"/api/v1/topology/summary",
"/api/v1/supervisor/summary"])
# Print Cluster Summary
ui_cluster_summary = jsons["/api/v1/cluster/summary"]
columns = ['stormVersion', 'nimbusUptime', 'supervisors', 'slotsTotal',
'slotsUsed', 'slotsFree', 'executorsTotal', 'tasksTotal']
_print_stats_dict("Cluster summary", ui_cluster_summary, columns, 'r')
# Print Topologies Summary
ui_topologies_summary = jsons["/api/v1/topology/summary"]
columns = ['name', 'id', 'status', 'uptime', 'workersTotal',
'executorsTotal', 'tasksTotal']
_print_stats_dict("Topology summary", ui_topologies_summary['topologies'],
columns, 'r')
# Print Supervisor Summary
ui_supervisor_summary = jsons["/api/v1/supervisor/summary"]
columns = ['id', 'host', 'uptime', 'slotsTotal', 'slotsUsed']
_print_stats_dict("Supervisor summary",
ui_supervisor_summary['supervisors'], columns, 'r',
{'host': 'l', 'uptime': 'l'})
def _get_topology_ui_detail(env_name, topology_name):
env_name = get_env_config(env_name)[0]
topology_id = _get_topology_id(env_name, topology_name)
detail_url = '/api/v1/topology/%s' % topology_id
detail = get_ui_json(env_name, detail_url)
return detail
def _print_topology_status(env_name, topology_name):
ui_detail = _get_topology_ui_detail(env_name, topology_name)
# Print topology summary
columns = ['name', 'id', 'status', 'uptime', 'workersTotal',
'executorsTotal', 'tasksTotal']
_print_stats_dict("Topology summary", ui_detail, columns, 'r')
# Print topology stats
columns = ['windowPretty', 'emitted', 'transferred', 'completeLatency',
'acked', 'failed']
_print_stats_dict("Topology stats", ui_detail['topologyStats'], columns,
'r')
# Print spouts
if ui_detail.get('spouts'):
columns = ['spoutId', 'emitted', 'transferred', 'completeLatency',
'acked', 'failed']
_print_stats_dict("Spouts (All time)", ui_detail['spouts'], columns,
'r', {'spoutId': 'l'})
columns = ['boltId', 'executors', 'tasks', 'emitted', 'transferred',
'capacity', 'executeLatency', 'executed', 'processLatency',
'acked', 'failed', 'lastError']
_print_stats_dict("Bolt (All time)", ui_detail['bolts'], columns, 'r',
{'boltId': 'l'})
def _get_component_ui_detail(env_name, topology_name, component_names):
if isinstance(component_names, basestring):
component_names = [component_names]
env_name = get_env_config(env_name)[0]
topology_id = _get_topology_id(env_name, topology_name)
base_url = '/api/v1/topology/%s/component/%s'
detail_urls = [base_url % (topology_id, name) for name in component_names]
detail = get_ui_jsons(env_name, detail_urls)
if len(detail) == 1:
return detail.values()[0]
else:
return detail
def _print_all_components(env_name, topology_name):
topology_ui_detail = _get_topology_ui_detail(env_name, topology_name)
spouts = map(lambda spout: spout['spoutId'],
topology_ui_detail.get('spouts', {}))
bolts = map(lambda bolt: bolt['boltId'],
topology_ui_detail.get('bolts', {}))
ui_details = _get_component_ui_detail(env_name, topology_name, chain(spouts,
bolts))
names_and_keys = zip(map(lambda ui_detail: ui_detail['name'],
ui_details.values()),
ui_details.keys())
for component_name, key in names_and_keys:
_print_component_status(env_name, topology_name,
component_name, ui_details[key])
def _print_component_status(env_name, topology_name, component_name,
ui_detail=None):
if not ui_detail:
ui_detail = _get_component_ui_detail(env_name, topology_name,
component_name)
_print_component_summary(ui_detail)
if ui_detail.get("componentType") == "spout":
_print_spout_stats(ui_detail)
_print_spout_output_stats(ui_detail)
_print_spout_executors(ui_detail)
elif ui_detail.get("componentType") == "bolt":
_print_bolt_stats(ui_detail)
_print_input_stats(ui_detail)
_print_bolt_output_stats(ui_detail)
def _print_component_summary(ui_detail):
columns = ['id', 'name', 'executors', 'tasks']
_print_stats_dict("Component summary", ui_detail, columns, 'r')
def _print_bolt_stats(ui_detail):
columns = ['windowPretty', 'emitted', 'transferred', 'executeLatency',
'executed', 'processLatency', 'acked', 'failed']
_print_stats_dict("Bolt stats", ui_detail['boltStats'], columns, 'r',
{'windowPretty': 'l'})
def _print_input_stats(ui_detail):
columns = ['component', 'stream', 'executeLatency', 'processLatency',
'executed', 'acked', 'failed']
if ui_detail['inputStats']:
_print_stats_dict("Input stats (All time)", ui_detail['inputStats'],
columns, 'r', {'component': 'l'})
def _print_bolt_output_stats(ui_detail):
if ui_detail['outputStats']:
columns = ['stream', 'emitted', 'transferred']
_print_stats_dict("Output stats (All time)", ui_detail['outputStats'],
columns, 'r', {'stream': 'l'})
def _print_spout_stats(ui_detail):
columns = ['windowPretty', 'emitted', 'transferred', 'completeLatency',
'acked', 'failed']
data = ui_detail['spoutSummary'][-1].copy()
_print_stats_dict("Spout stats", data, columns, 'r', {'windowPretty': 'l'})
def _print_spout_output_stats(ui_detail):
columns = ['stream', 'emitted', 'transferred', 'completeLatency',
'acked', 'failed']
_print_stats_dict("Output stats (All time)", ui_detail['outputStats'],
columns, 'r', {'stream': 'l'})
def _print_spout_executors(ui_detail):
columns = ['id', 'uptime', 'host', 'port', 'emitted',
'transferred', 'completeLatency', 'acked', 'failed']
_print_stats_dict("Executors (All time)", ui_detail['executorStats'],
columns, 'r', {'host': 'l'})
def _print_stats_dict(header, data, columns, default_alignment,
custom_alignment=None):
print("# %s" % header)
table = PrettyTable(columns)
table.align = default_alignment
if isinstance(data, list):
for row in data:
table.add_row([row.get(key, "MISSING") for key in columns])
else:
table.add_row([data.get(key, "MISSING") for key in columns])
if custom_alignment:
for column, alignment in iteritems(custom_alignment):
table.align[column] = alignment
print(table)
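# Minimal sketch of the table helper above (data values are invented):
#
#     _print_stats_dict("Cluster summary",
#                       {'stormVersion': '0.9.4', 'slotsTotal': 4},
#                       ['stormVersion', 'slotsTotal'], 'r',
#                       custom_alignment={'stormVersion': 'l'})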
def _get_topology_id(env_name, topology_name):
"""Get toplogy ID from summary json provided by UI api
"""
summary_url = '/api/v1/topology/summary'
topology_summary = get_ui_json(env_name, summary_url)
for topology in topology_summary["topologies"]:
if topology_name == topology["name"]:
return topology["id"]
def subparser_hook(subparsers):
""" Hook to add subparser for this command. """
subparser = subparsers.add_parser('stats',
description=__doc__,
help=main.__doc__)
subparser.set_defaults(func=main)
subparser.add_argument('--all',
action='store_true',
help='All available stats.')
subparser.add_argument('-c', '--component',
help='Topology component (bolt/spout) name as '
'specified in Clojure topology specification')
add_environment(subparser)
add_name(subparser)
def main(args):
""" Display stats about running Storm topologies. """
storm_version = storm_lib_version()
if storm_version >= parse_version('0.9.2-incubating'):
display_stats(args.environment, topology_name=args.name,
component_name=args.component, all_components=args.all)
else:
print("ERROR: Storm {0} does not support this command."
.format(storm_version))
sys.stdout.flush()
|
|
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to find the closest match of a string in a list"""
from __future__ import unicode_literals
import re
import difflib
import six
#import ctypes
#import ldistance
#levenshtein_distance = ctypes.cdll.levenshtein.levenshtein_distance
#levenshtein_distance = ldistance.distance
find_best_control_match_cutoff = .6
#====================================================================
class MatchError(IndexError):
"""A suitable match could not be found"""
def __init__(self, items = None, tofind = ''):
"""Init the parent with the message"""
self.tofind = tofind
self.items = items
if self.items is None:
self.items = []
IndexError.__init__(self,
"Could not find '{0}' in '{1}'".format(tofind, self.items))
_cache = {}
# given a list of texts return the match score for each
# and the best score and text with best score
#====================================================================
def _get_match_ratios(texts, match_against):
"""Get the match ratio of how each item in texts compared to match_against"""
# now time to figure out the matching
ratio_calc = difflib.SequenceMatcher()
ratio_calc.set_seq1(match_against)
ratios = {}
best_ratio = 0
best_text = ''
for text in texts:
if (text, match_against) in _cache:
ratios[text] = _cache[(text, match_against)]
elif(match_against, text) in _cache:
ratios[text] = _cache[(match_against, text)]
else:
# set up the SequenceMatcher with other text
ratio_calc.set_seq2(text)
# try using the levenshtein distance instead
#lev_dist = levenshtein_distance(six.text_type(match_against), six.text_type(text))
#ratio = 1 - lev_dist / 10.0
#ratios[text] = ratio
# calculate ratio and store it
ratios[text] = ratio_calc.ratio()
_cache[(match_against, text)] = ratios[text]
# if this is the best so far then update best stats
if ratios[text] > best_ratio:
best_ratio = ratios[text]
best_text = text
return ratios, best_ratio, best_text
#====================================================================
def find_best_match(search_text, item_texts, items, limit_ratio = .5):
"""Return the item that best matches the search_text
* **search_text** The text to search for
* **item_texts** The list of texts to search through
* **items** The list of items corresponding (1 to 1)
to the list of texts to search through.
* **limit_ratio** How well the text has to match the best match.
If the best match scores lower than this, it is not
considered a match and a MatchError is raised (default = .5)
"""
search_text = _cut_at_eol(_cut_at_tab(search_text))
text_item_map = UniqueDict()
# Clean each item, make it unique and map to
# to the item index
for text, item in zip(item_texts, items):
text_item_map[_cut_at_eol(_cut_at_tab(text))] = item
ratios, best_ratio, best_text = \
_get_match_ratios(text_item_map.keys(), search_text)
if best_ratio < limit_ratio:
raise MatchError(items = text_item_map.keys(), tofind = search_text)
return text_item_map[best_text]
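# Usage sketch (strings and items are invented): texts are cut at the first
# tab/EOL before matching, so 'Save\tCtrl+S' is compared as just 'Save'.
#
#     texts = ['&Open...', 'Save\tCtrl+S', 'Close']
#     items = [1, 2, 3]
#     find_best_match('Save', texts, items)   # -> 2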
#====================================================================
_after_tab = re.compile(r"\t.*", re.UNICODE)
_after_eol = re.compile(r"\n.*", re.UNICODE)
_non_word_chars = re.compile(r"\W", re.UNICODE)
def _cut_at_tab(text):
"""Clean out non characters from the string and return it"""
# remove anything after the first tab
return _after_tab.sub("", text)
def _cut_at_eol(text):
"""Clean out non characters from the string and return it"""
# remove anything after the first EOL
return _after_eol.sub("", text)
def _clean_non_chars(text):
"""Remove non word characters"""
# should this also remove everything after the first tab?
# remove non alphanumeric characters
return _non_word_chars.sub("", text)
def is_above_or_to_left(ref_control, other_ctrl):
"""Return true if the other_ctrl is above or to the left of ref_control"""
text_r = other_ctrl.rectangle()
ctrl_r = ref_control.rectangle()
# skip controls where text win is to the right of ctrl
if text_r.left >= ctrl_r.right:
return False
# skip controls where text win is below ctrl
if text_r.top >= ctrl_r.bottom:
return False
# text control top left corner is below control
# top left corner - so not to the above or left :)
if text_r.top >= ctrl_r.top and text_r.left >= ctrl_r.left:
return False
return True
#====================================================================
distance_cuttoff = 999
def get_non_text_control_name(ctrl, controls, text_ctrls):
"""
return the name for this control by finding the closest
text control above and to its left
"""
names = []
# simply look for an instance of the control in the list,
# we don't use list.index() method as it invokes __eq__
ctrl_index = 0
for i, c in enumerate(controls):
if c is ctrl:
ctrl_index = i
break
ctrl_friendly_class_name = ctrl.friendly_class_name()
if ctrl_index != 0:
prev_ctrl = controls[ctrl_index-1]
prev_ctrl_text = prev_ctrl.window_text()
if prev_ctrl.friendly_class_name() == "Static" and \
prev_ctrl.is_visible() and prev_ctrl_text and \
is_above_or_to_left(ctrl, prev_ctrl):
names.append(
prev_ctrl_text +
ctrl_friendly_class_name)
best_name = ''
closest = distance_cuttoff
# now for each of the visible text controls
for text_ctrl in text_ctrls:
# get aliases to the control rectangles
text_r = text_ctrl.rectangle()
ctrl_r = ctrl.rectangle()
# skip controls where text win is to the right of ctrl
if text_r.left >= ctrl_r.right:
continue
# skip controls where text win is below ctrl
if text_r.top >= ctrl_r.bottom:
continue
# calculate the distance between the controls
# at first I just calculated the distance from the top left
# corner of one control to the top left corner of the other control
# but this was not best, so as a text control should either be above
# or to the left of the control I get the distance between
# the top left of the non text control against the
# Top-Right of the text control (text control to the left)
# Bottom-Left of the text control (text control above)
# then I get the min of these two
# We do not actually need to calculate the difference here as we
# only need a comparative number. As long as we find the closest one
# the actual distance is not all that important to us.
# this reduced the unit test run on my machine by about 1 second
# (from 61 ->60 s)
# (x^2 + y^2)^.5
#distance = (
# (text_r.left - ctrl_r.left) ** 2 + # (x^2 + y^2)
# (text_r.bottom - ctrl_r.top) ** 2) \
# ** .5 # ^.5
#distance2 = (
# (text_r.right - ctrl_r.left) ** 2 + # (x^2 + y^2)
# (text_r.top - ctrl_r.top) ** 2) \
# ** .5 # ^.5
distance = abs(text_r.left - ctrl_r.left) + abs(text_r.bottom - ctrl_r.top)
distance2 = abs(text_r.right - ctrl_r.left) + abs(text_r.top - ctrl_r.top)
distance = min(distance, distance2)
# UpDown control should use Static text only because edit box text is often useless
if ctrl_friendly_class_name == "UpDown" and \
text_ctrl.friendly_class_name() == "Static" and distance < closest:
# TODO: use search in all text controls for all non-text ones
# (like Dijkstra algorithm vs Floyd one)
closest = distance
ctrl_text = text_ctrl.window_text()
if ctrl_text is None:
# the control probably doesn't exist so skip it
continue
best_name = ctrl_text + ctrl_friendly_class_name
# if this distance was closer than the last one
elif distance < closest:
closest = distance
#if text_ctrl.window_text() == '':
# best_name = ctrl_friendly_class_name + ' '.join(text_ctrl.texts()[1:2])
#else:
ctrl_text = text_ctrl.window_text()
if ctrl_text is None:
# the control probably doesn't exist so skip it
continue
best_name = ctrl_text + ctrl_friendly_class_name
names.append(best_name)
return names
#====================================================================
def get_control_names(control, allcontrols, textcontrols):
"""Returns a list of names for this control"""
names = []
# if it has a reference control - then use that
#if hasattr(control, 'ref') and control.ref:
# control = control.ref
# Add the control based on its friendly class name
friendly_class_name = control.friendly_class_name()
names.append(friendly_class_name)
# if it has some character text then add it based on that
# and based on that with friendly class name appended
cleaned = control.window_text()
# Todo - I don't like the hardcoded classnames here!
if cleaned and control.has_title:
names.append(cleaned)
names.append(cleaned + friendly_class_name)
elif control.has_title and friendly_class_name != 'TreeView':
try:
for text in control.texts()[1:]:
names.append(friendly_class_name + text)
except Exception:
#import traceback
#from .actionlogger import ActionLogger
pass #ActionLogger().log('Warning! Cannot get control.texts()') #\nTraceback:\n' + traceback.format_exc())
# so find the text of the nearest text visible control
non_text_names = get_non_text_control_name(control, allcontrols, textcontrols)
# and if one was found - add it
if non_text_names:
names.extend(non_text_names)
# it didn't have visible text
else:
# so find the text of the nearest text visible control
non_text_names = get_non_text_control_name(control, allcontrols, textcontrols)
# and if one was found - add it
if non_text_names:
names.extend(non_text_names)
# return the names - and make sure there are no duplicates or empty values
cleaned_names = set(names) - set([None, ""])
return cleaned_names
#====================================================================
class UniqueDict(dict):
"""A dictionary subclass that handles making its keys unique"""
def __setitem__(self, text, item):
"""Set an item of the dictionary"""
# this text is already in the map
# so we need to make it unique
if text in self:
# find the next unique text (text2, text3, ...)
unique_text = text
counter = 2
while unique_text in self:
unique_text = text + str(counter)
counter += 1
# now we also need to make sure the original item
# is under text0 and text1 also!
if text + '0' not in self:
dict.__setitem__(self, text+'0', self[text])
dict.__setitem__(self, text+'1', self[text])
# now that we don't need the original 'text' anymore
# replace it with the unique text
text = unique_text
# add our current item
dict.__setitem__(self, text, item)
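# Illustrative behaviour (hypothetical controls ctrl_a/ctrl_b):
#   d = UniqueDict()
#   d['OK'] = ctrl_a   # stored under 'OK'
#   d['OK'] = ctrl_b   # 'OK' now clashes: ctrl_a is also exposed as
#                      # 'OK0' and 'OK1', and ctrl_b is stored as 'OK2'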
def find_best_matches(
self,
search_text,
clean = False,
ignore_case = False):
"""Return the best matches for search_text in the items
* **search_text** the text to look for
* **clean** whether to clean non text characters out of the strings
* **ignore_case** compare strings case insensitively
"""
# now time to figure out the matching
ratio_calc = difflib.SequenceMatcher()
if ignore_case:
search_text = search_text.lower()
ratio_calc.set_seq1(search_text)
ratios = {}
best_ratio = 0
best_texts = []
ratio_offset = 1
if clean:
ratio_offset *= .9
if ignore_case:
ratio_offset *= .9
for text_ in self:
# make a copy of the text as we need the original later
text = text_
if clean:
text = _clean_non_chars(text)
if ignore_case:
text = text.lower()
# check if this item is in the cache - if yes, then retrieve it
if (text, search_text) in _cache:
ratios[text_] = _cache[(text, search_text)]
elif (search_text, text) in _cache:
ratios[text_] = _cache[(search_text, text)]
# not in the cache - calculate it and add it to the cache
else:
# set up the SequenceMatcher with other text
ratio_calc.set_seq2(text)
# cheap upper-bound checks first - only compute the more
# expensive ratios when the quick estimates clear the cutoff
ratio = ratio_calc.real_quick_ratio() * ratio_offset
if ratio >= find_best_control_match_cutoff:
ratio = ratio_calc.quick_ratio() * ratio_offset
if ratio >= find_best_control_match_cutoff:
ratio = ratio_calc.ratio() * ratio_offset
# save the match we got and store it in the cache
ratios[text_] = ratio
_cache[(text, search_text)] = ratio
# try using the levenshtein distance instead
#lev_dist = levenshtein_distance(six.text_type(search_text), six.text_type(text))
#ratio = 1 - lev_dist / 10.0
#ratios[text_] = ratio
#print "%5s" %("%0.2f"% ratio), search_text, `text`
# if this is the best so far then update best stats
if ratios[text_] > best_ratio and \
ratios[text_] >= find_best_control_match_cutoff:
best_ratio = ratios[text_]
best_texts = [text_]
elif ratios[text_] == best_ratio:
best_texts.append(text_)
#best_ratio *= ratio_offset
return best_ratio, best_texts
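# Illustrative call (the keys would be the generated control names):
#   ratio, texts = unique_dict.find_best_matches(
#       'OK Button', clean=True, ignore_case=True)
# The .9 offsets deliberately penalise cleaned/case-insensitive passes
# so that an exact-text match wins when raw similarities are equal.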
#====================================================================
def build_unique_dict(controls):
"""Build the disambiguated list of controls
Separated out to a different function so that we can get
the control identifiers for printing.
"""
name_control_map = UniqueDict()
# get the visible text controls so that we can get
# the closest text if the control has no text
text_ctrls = [ctrl_ for ctrl_ in controls
if ctrl_.can_be_label and ctrl_.is_visible() and ctrl_.window_text()]
# collect all the possible names for all controls
# and build a list of them
for ctrl in controls:
ctrl_names = get_control_names(ctrl, controls, text_ctrls)
# for each of the names
for name in ctrl_names:
name_control_map[name] = ctrl
return name_control_map
#====================================================================
def find_best_control_matches(search_text, controls):
"""Returns the control that is the the best match to search_text
This is slightly differnt from find_best_match in that it builds
up the list of text items to search through using information
from each control. So for example for there is an OK, Button
then the following are all added to the search list:
"OK", "Button", "OKButton"
But if there is a ListView (which do not have visible 'text')
then it will just add "ListView".
"""
name_control_map = build_unique_dict(controls)
# # collect all the possible names for all controls
# # and build a list of them
# for ctrl in controls:
# ctrl_names = get_control_names(ctrl, controls)
#
# # for each of the names
# for name in ctrl_names:
# name_control_map[name] = ctrl
search_text = six.text_type(search_text)
best_ratio, best_texts = name_control_map.find_best_matches(search_text)
best_ratio_ci, best_texts_ci = \
name_control_map.find_best_matches(search_text, ignore_case = True)
best_ratio_clean, best_texts_clean = \
name_control_map.find_best_matches(search_text, clean = True)
best_ratio_clean_ci, best_texts_clean_ci = \
name_control_map.find_best_matches(
search_text, clean = True, ignore_case = True)
if best_ratio_ci > best_ratio:
best_ratio = best_ratio_ci
best_texts = best_texts_ci
if best_ratio_clean > best_ratio:
best_ratio = best_ratio_clean
best_texts = best_texts_clean
if best_ratio_clean_ci > best_ratio:
best_ratio = best_ratio_clean_ci
best_texts = best_texts_clean_ci
if best_ratio < find_best_control_match_cutoff:
raise MatchError(items = name_control_map.keys(), tofind = search_text)
return [name_control_map[best_text] for best_text in best_texts]
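# Typical call (illustrative; 'dialog_controls' is a hypothetical
# list of wrapped controls):
#   matches = find_best_control_matches('OKButton', dialog_controls)
# Returns every control whose generated name ties for the best ratio,
# or raises MatchError if nothing reaches the cutoff.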
#
#def GetControlMatchRatio(text, ctrl):
# # get the texts for the control
# ctrl_names = get_control_names(ctrl)
#
# #get the best match for these
# matcher = UniqueDict()
# for name in ctrl_names:
# matcher[name] = ctrl
#
# best_ratio, unused = matcher.find_best_matches(text)
#
# return best_ratio
#
#
#
#def get_controls_ratios(search_text, controls):
# name_control_map = UniqueDict()
#
# # collect all the possible names for all controls
# # and build a list of them
# for ctrl in controls:
# ctrl_names = get_control_names(ctrl)
#
# # for each of the names
# for name in ctrl_names:
# name_control_map[name] = ctrl
#
# match_ratios, best_ratio, best_text = \
# _get_match_ratios(name_control_map.keys(), search_text)
#
# return match_ratios, best_ratio, best_text,
|
|
#!/usr/bin/env python
VERSION='1.02'
import os,sys
import re
import coral
from RecoLuminosity.LumiDB import argparse
class constants(object):
def __init__(self):
self.debug=False
self.runinfodb=''
self.runinfoschema='CMS_RUNINFO'
self.runsessionparameterTable='RUNSESSION_PARAMETER'
self.hltconfname='CMS.LVL0:HLT_KEY_DESCRIPTION'
self.tsckeyname='CMS.TRG:TSC_KEY'
self.fillnumname='CMS.SCAL:FILLN'
def fillnumForRun(dbsession,c,runnum):
'''select string_value from cms_runinfo.runsession_parameter where runnumber=129265 and name='CMS.SCAL:FILLN' and rownum<=1;
'''
result=''
try:
dbsession.transaction().start(True)
schema=dbsession.schema(c.runinfoschema)
if not schema:
raise Exception, 'cannot connect to schema '+c.runinfoschema
if not schema.existsTable(c.runsessionparameterTable):
raise Exception, 'non-existing table '+c.runsessionparameterTable
fillOutput=coral.AttributeList()
fillOutput.extend("fillnum","string")
bindVarList=coral.AttributeList()
bindVarList.extend("name","string")
bindVarList.extend("runnumber","unsigned int")
bindVarList["name"].setData(c.fillnumname)
bindVarList["runnumber"].setData(int(runnum))
query=schema.newQuery()
query.addToTableList(c.runsessionparameterTable)
query.addToOutputList('STRING_VALUE','value')
query.setCondition('NAME=:name AND RUNNUMBER=:runnumber',bindVarList)
query.limitReturnedRows(1)
query.defineOutput(fillOutput)
cursor=query.execute()
while cursor.next():
result=cursor.currentRow()['fillnum'].data()
del query
dbsession.transaction().commit()
#print result
return result
except Exception,e:
print str(e)
dbsession.transaction().rollback()
del dbsession
def hltkeyForRun(dbsession,c,runnum):
'''
select runnumber,string_value from cms_runinfo.runsession_parameter where name=:runsessionparametername and runnumber=:runnum
'''
result={}
try:
dbsession.transaction().start(True)
schema=dbsession.schema(c.runinfoschema)
if not schema:
raise Exception, 'cannot connect to schema '+c.runinfoschema
if not schema.existsTable(c.runsessionparameterTable):
raise Exception, 'non-existing table '+c.runsessionparameterTable
hltkeyOutput=coral.AttributeList()
hltkeyOutput.extend("runnum","unsigned int")
hltkeyOutput.extend("hltkey","string")
bindVarList=coral.AttributeList()
bindVarList.extend("name","string")
bindVarList.extend("runnumber","unsigned int")
bindVarList["name"].setData(c.hltconfname)
bindVarList["runnumber"].setData(int(runnum))
query=schema.newQuery()
query.addToTableList(c.runsessionparameterTable)
query.addToOutputList('RUNNUMBER','runnumber')
query.addToOutputList('STRING_VALUE','value')
query.setCondition('NAME=:name AND RUNNUMBER=:runnumber',bindVarList)
query.defineOutput(hltkeyOutput)
cursor=query.execute()
while cursor.next():
runnum=cursor.currentRow()['runnum'].data()
hltkey=cursor.currentRow()['hltkey'].data()
result[runnum]=hltkey
del query
dbsession.transaction().commit()
#print result
return result
except Exception,e:
print str(e)
dbsession.transaction().rollback()
del dbsession
def l1keyForRun(dbsession,c,runnum):
'''
select runnumber,string_value from cms_runinfo.runsession_parameter where name=:runsessionparametername and runnumber=:runnum
'''
result={}
try:
dbsession.transaction().start(True)
schema=dbsession.schema(c.runinfoschema)
if not schema:
raise Exception, 'cannot connect to schema '+c.runinfoschema
if not schema.existsTable(c.runsessionparameterTable):
raise Exception, 'non-existing table '+c.runsessionparameterTable
l1keyOutput=coral.AttributeList()
l1keyOutput.extend("runnum","unsigned int")
l1keyOutput.extend("l1key","string")
bindVarList=coral.AttributeList()
bindVarList.extend("name","string")
bindVarList.extend("runnumber","unsigned int")
bindVarList["name"].setData(c.tsckeyname)
bindVarList["runnumber"].setData(int(runnum))
query=schema.newQuery()
query.addToTableList(c.runsessionparameterTable)
query.addToOutputList('RUNNUMBER','runnumber')
query.addToOutputList('STRING_VALUE','value')
query.setCondition('NAME=:name AND RUNNUMBER=:runnumber',bindVarList)
query.defineOutput(l1keyOutput)
cursor=query.execute()
while cursor.next():
runnum=cursor.currentRow()['runnum'].data()
l1key=cursor.currentRow()['l1key'].data()
result[runnum]=l1key
del query
dbsession.transaction().commit()
#print result
return result
except Exception,e:
print str(e)
dbsession.transaction().rollback()
del dbsession
def main():
c=constants()
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),description="Dump Run info")
parser.add_argument('-c',dest='connect',action='store',required=True,help='connect string to trigger DB(required)')
parser.add_argument('-P',dest='authpath',action='store',required=True,help='path to authentication file')
parser.add_argument('-r',dest='runnumber',action='store',required=True,help='run number')
parser.add_argument('action',choices=['hltkey','l1key','fill'],help='information to show')
parser.add_argument('--debug',dest='debug',action='store_true',help='debug')
parser.add_argument('--collision-only',dest='collisiononly',action='store_true',help='return only collision runs')
args=parser.parse_args()
runnumber=args.runnumber
c.runinfodb=args.connect
if args.authpath and len(args.authpath)!=0:
os.environ['CORAL_AUTH_PATH']=args.authpath
svc=coral.ConnectionService()
session=svc.connect(c.runinfodb,accessMode=coral.access_ReadOnly)
session.typeConverter().setCppTypeForSqlType("unsigned int","NUMBER(10)")
session.typeConverter().setCppTypeForSqlType("unsigned long long","NUMBER(20)")
if args.debug:
msg=coral.MessageStream('')
msg.setMsgVerbosity(coral.message_Level_Debug)
if args.action == 'hltkey':
p=re.compile(r'^/cdaq/physics/.+')
result=hltkeyForRun(session,c,runnumber)
print 'runnumber hltkey'
for runnum,hltkey in result.items():
if not args.collisiononly:
print runnum,hltkey
if args.collisiononly and re.match(p,hltkey):
fillnum=fillnumForRun(session,c,runnumber)
if len(fillnum)!=0:
print runnum,hltkey
if args.action == 'l1key':
p=re.compile(r'^TSC_.+_collisions_.+')
result=l1keyForRun(session,c,runnumber)
print 'runnumber tsc_key'
for runnum,l1key in result.items():
if not args.collisiononly:
print runnum,l1key
if args.collisiononly and re.match(p,l1key):
fillnum=fillnumForRun(session,c,runnumber)
if len(fillnum)!=0:
print runnum,l1key
if args.action == 'fill':
result=fillnumForRun(session,c,runnumber)
print 'runnumber fill'
if not args.collisiononly:
print runnumber,result
else:
if len(result)!=0:
print runnumber,result
del session
del svc
if __name__=='__main__':
main()
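# Example invocation (illustrative; the script name, connect string,
# auth path and run number are all placeholders):
#   python dumpRunInfo.py -c oracle://cms_orcoff_prod/CMS_RUNINFO \
#       -P /path/to/authentication -r 132440 hltkey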
|
|
# Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Tatiana Mazur
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
class AddVPNServiceAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
router_id = forms.ChoiceField(label=_("Router"))
subnet_id = forms.ChoiceField(label=_("Subnet"))
admin_state_up = forms.BooleanField(label=_("Admin State"),
initial=True, required=False)
def __init__(self, request, *args, **kwargs):
super(AddVPNServiceAction, self).__init__(request, *args, **kwargs)
def populate_subnet_id_choices(self, request, context):
subnet_id_choices = [('', _("Select a Subnet"))]
try:
tenant_id = request.user.tenant_id
networks = api.neutron.network_list_for_tenant(request, tenant_id)
except Exception:
exceptions.handle(request,
_('Unable to retrieve networks list.'))
networks = []
for n in networks:
for s in n['subnets']:
subnet_id_choices.append((s.id, s.cidr))
self.fields['subnet_id'].choices = subnet_id_choices
return subnet_id_choices
def populate_router_id_choices(self, request, context):
router_id_choices = [('', _("Select a Router"))]
try:
tenant_id = request.user.tenant_id
routers = api.neutron.router_list(request, tenant_id=tenant_id)
except Exception:
exceptions.handle(request,
_('Unable to retrieve routers list.'))
routers = []
for r in routers:
router_id_choices.append((r.id, r.name))
self.fields['router_id'].choices = router_id_choices
return router_id_choices
class Meta:
name = _("Add New VPN Service")
permissions = ('openstack.services.network',)
help_text = _("Create VPN Service for current project.\n\n"
"Assign a name and description for the VPN Service. "
"Select a router and a subnet. "
"Admin State is Up (checked) by default."
)
class AddVPNServiceStep(workflows.Step):
action_class = AddVPNServiceAction
contributes = ("name", "description", "subnet_id",
"router_id", "admin_state_up")
def contribute(self, data, context):
context = super(AddVPNServiceStep, self).contribute(data, context)
if data:
return context
class AddVPNService(workflows.Workflow):
slug = "addvpnservice"
name = _("Add VPN Service")
finalize_button_name = _("Add")
success_message = _('Added VPN Service "%s".')
failure_message = _('Unable to add VPN Service "%s".')
success_url = "horizon:project:vpn:index"
default_steps = (AddVPNServiceStep,)
def format_status_message(self, message):
return message % self.context.get('name')
def handle(self, request, context):
try:
api.vpn.vpnservice_create(request, **context)
return True
except Exception:
return False
class AddIKEPolicyAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
auth_algorithm = forms.ChoiceField(label=_("Authentication algorithm"))
encryption_algorithm = forms.ChoiceField(label=_("Encryption algorithm"))
ike_version = forms.ChoiceField(label=_("IKE version"))
lifetime_units = forms.ChoiceField(label=_("Lifetime units for IKE keys"))
lifetime_value = forms.IntegerField(
min_value=60, label=_("Lifetime value for IKE keys"),
initial=3600,
help_text=_("Equal to or more than 60"))
pfs = forms.ChoiceField(label=_("Perfect Forward Secrecy"))
phase1_negotiation_mode = forms.ChoiceField(
label=_("IKE Phase1 negotiation mode"))
def __init__(self, request, *args, **kwargs):
super(AddIKEPolicyAction, self).__init__(request, *args, **kwargs)
auth_algorithm_choices = [("sha1", "sha1")]
self.fields['auth_algorithm'].choices = auth_algorithm_choices
# Currently this field has only one choice, so mark it as readonly.
self.fields['auth_algorithm'].widget.attrs['readonly'] = True
encryption_algorithm_choices = [("3des", "3des"),
("aes-128", "aes-128"),
("aes-192", "aes-192"),
("aes-256", "aes-256")]
self.fields[
'encryption_algorithm'].choices = encryption_algorithm_choices
self.fields['encryption_algorithm'].initial = "aes-128"
ike_version_choices = [("v1", "v1"),
("v2", "v2")]
self.fields['ike_version'].choices = ike_version_choices
lifetime_units_choices = [("seconds", "seconds")]
self.fields['lifetime_units'].choices = lifetime_units_choices
# Currently this field has only one choice, so mark it as readonly.
self.fields['lifetime_units'].widget.attrs['readonly'] = True
pfs_choices = [("group2", "group2"),
("group5", "group5"),
("group14", "group14")]
self.fields['pfs'].choices = pfs_choices
self.fields['pfs'].initial = "group5"
phase1_neg_mode_choices = [("main", "main")]
self.fields[
'phase1_negotiation_mode'].choices = phase1_neg_mode_choices
# Currently this field has only one choice, so mark it as readonly.
self.fields['phase1_negotiation_mode'].widget.attrs['readonly'] = True
class Meta:
name = _("Add New IKE Policy")
permissions = ('openstack.services.network',)
help_text = _("Create IKE Policy for current project.\n\n"
"Assign a name and description for the IKE Policy. "
)
class AddIKEPolicyStep(workflows.Step):
action_class = AddIKEPolicyAction
contributes = ("name", "description", "auth_algorithm",
"encryption_algorithm", "ike_version",
"lifetime_units", "lifetime_value",
"pfs", "phase1_negotiation_mode")
def contribute(self, data, context):
context = super(AddIKEPolicyStep, self).contribute(data, context)
context['lifetime'] = {'units': data['lifetime_units'],
'value': data['lifetime_value']}
context.pop('lifetime_units')
context.pop('lifetime_value')
if data:
return context
class AddIKEPolicy(workflows.Workflow):
slug = "addikepolicy"
name = _("Add IKE Policy")
finalize_button_name = _("Add")
success_message = _('Added IKE Policy "%s".')
failure_message = _('Unable to add IKE Policy "%s".')
success_url = "horizon:project:vpn:index"
default_steps = (AddIKEPolicyStep,)
def format_status_message(self, message):
return message % self.context.get('name')
def handle(self, request, context):
try:
api.vpn.ikepolicy_create(request, **context)
return True
except Exception:
return False
class AddIPSecPolicyAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
auth_algorithm = forms.ChoiceField(label=_("Authentication algorithm"))
encapsulation_mode = forms.ChoiceField(label=_("Encapsulation mode"))
encryption_algorithm = forms.ChoiceField(label=_("Encryption algorithm"))
lifetime_units = forms.ChoiceField(label=_("Lifetime units"))
lifetime_value = forms.IntegerField(
min_value=60, label=_("Lifetime value"),
initial=3600,
help_text=_("Equal to or more than 60"))
pfs = forms.ChoiceField(label=_("Perfect Forward Secrecy"))
transform_protocol = forms.ChoiceField(label=_("Transform Protocol"))
def __init__(self, request, *args, **kwargs):
super(AddIPSecPolicyAction, self).__init__(request, *args, **kwargs)
auth_algorithm_choices = [("sha1", "sha1")]
self.fields['auth_algorithm'].choices = auth_algorithm_choices
# Currently this field has only one choice, so mark it as readonly.
self.fields['auth_algorithm'].widget.attrs['readonly'] = True
encapsulation_mode_choices = [("tunnel", "tunnel"),
("transport", "transport")]
self.fields['encapsulation_mode'].choices = encapsulation_mode_choices
encryption_algorithm_choices = [("3des", "3des"),
("aes-128", "aes-128"),
("aes-192", "aes-192"),
("aes-256", "aes-256")]
self.fields[
'encryption_algorithm'].choices = encryption_algorithm_choices
self.fields['encryption_algorithm'].initial = "aes-128"
lifetime_units_choices = [("seconds", "seconds")]
self.fields['lifetime_units'].choices = lifetime_units_choices
# Currently this field has only one choice, so mark it as readonly.
self.fields['lifetime_units'].widget.attrs['readonly'] = True
pfs_choices = [("group2", "group2"),
("group5", "group5"),
("group14", "group14")]
self.fields['pfs'].choices = pfs_choices
self.fields['pfs'].initial = "group5"
transform_protocol_choices = [("esp", "esp"),
("ah", "ah"),
("ah-esp", "ah-esp")]
self.fields['transform_protocol'].choices = transform_protocol_choices
class Meta:
name = _("Add New IPSec Policy")
permissions = ('openstack.services.network',)
help_text = _("Create IPSec Policy for current project.\n\n"
"Assign a name and description for the IPSec Policy. "
)
class AddIPSecPolicyStep(workflows.Step):
action_class = AddIPSecPolicyAction
contributes = ("name", "description", "auth_algorithm",
"encapsulation_mode", "encryption_algorithm",
"lifetime_units", "lifetime_value",
"pfs", "transform_protocol")
def contribute(self, data, context):
context = super(AddIPSecPolicyStep, self).contribute(data, context)
context['lifetime'] = {'units': data['lifetime_units'],
'value': data['lifetime_value']}
context.pop('lifetime_units')
context.pop('lifetime_value')
if data:
return context
class AddIPSecPolicy(workflows.Workflow):
slug = "addipsecpolicy"
name = _("Add IPSec Policy")
finalize_button_name = _("Add")
success_message = _('Added IPSec Policy "%s".')
failure_message = _('Unable to add IPSec Policy "%s".')
success_url = "horizon:project:vpn:index"
default_steps = (AddIPSecPolicyStep,)
def format_status_message(self, message):
return message % self.context.get('name')
def handle(self, request, context):
try:
api.vpn.ipsecpolicy_create(request, **context)
return True
except Exception:
return False
class AddIPSecSiteConnectionAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"))
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
vpnservice_id = forms.ChoiceField(
label=_("VPN Service associated with this connection"))
ikepolicy_id = forms.ChoiceField(
label=_("IKE Policy associated with this connection"))
ipsecpolicy_id = forms.ChoiceField(
label=_("IPSec Policy associated with this connection"))
peer_address = forms.IPField(
label=_("Peer gateway public IPv4/IPv6 Address or FQDN"),
help_text=_("Peer gateway public IPv4/IPv6 address or FQDN for "
"the VPN Connection"),
version=forms.IPv4 | forms.IPv6,
mask=False)
peer_id = forms.IPField(
label=_("Peer router identity for authentication (Peer ID)"),
help_text=_("Peer router identity for authentication. "
"Can be IPv4/IPv6 address, e-mail, key ID, or FQDN"),
version=forms.IPv4 | forms.IPv6,
mask=False)
peer_cidrs = forms.MultiIPField(
label=_("Remote peer subnet(s)"),
help_text=_("Remote peer subnet(s) address(es) "
"with mask(s) in CIDR format "
"separated with commas if needed "
"(e.g. 20.1.0.0/24, 21.1.0.0/24)"),
version=forms.IPv4 | forms.IPv6,
mask=True)
psk = forms.CharField(max_length=80,
label=_("Pre-Shared Key (PSK) string"))
def populate_ikepolicy_id_choices(self, request, context):
ikepolicy_id_choices = [('', _("Select IKE Policy"))]
try:
tenant_id = self.request.user.tenant_id
ikepolicies = api.vpn.ikepolicy_list(request, tenant_id=tenant_id)
except Exception:
exceptions.handle(request,
_('Unable to retrieve IKE Policies list.'))
ikepolicies = []
for p in ikepolicies:
ikepolicy_id_choices.append((p.id, p.name))
self.fields['ikepolicy_id'].choices = ikepolicy_id_choices
return ikepolicy_id_choices
def populate_ipsecpolicy_id_choices(self, request, context):
ipsecpolicy_id_choices = [('', _("Select IPSec Policy"))]
try:
tenant_id = self.request.user.tenant_id
ipsecpolicies = api.vpn.ipsecpolicy_list(request,
tenant_id=tenant_id)
except Exception:
exceptions.handle(request,
_('Unable to retrieve IPSec Policies list.'))
ipsecpolicies = []
for p in ipsecpolicies:
ipsecpolicy_id_choices.append((p.id, p.name))
self.fields['ipsecpolicy_id'].choices = ipsecpolicy_id_choices
return ipsecpolicy_id_choices
def populate_vpnservice_id_choices(self, request, context):
vpnservice_id_choices = [('', _("Select VPN Service"))]
try:
tenant_id = self.request.user.tenant_id
vpnservices = api.vpn.vpnservice_list(request, tenant_id=tenant_id)
except Exception:
exceptions.handle(request,
_('Unable to retrieve VPN Services list.'))
vpnservices = []
for s in vpnservices:
vpnservice_id_choices.append((s.id, s.name))
self.fields['vpnservice_id'].choices = vpnservice_id_choices
return vpnservice_id_choices
class Meta:
name = _("Add New IPSec Site Connection")
permissions = ('openstack.services.network',)
help_text = _("Create IPSec Site Connection for current project.\n\n"
"Assign a name and description for the "
"IPSec Site Connection. "
"All fields in this tab are required."
)
class AddIPSecSiteConnectionStep(workflows.Step):
action_class = AddIPSecSiteConnectionAction
contributes = ("name", "description",
"vpnservice_id", "ikepolicy_id", "ipsecpolicy_id",
"peer_address", "peer_id", "peer_cidrs", "psk")
class AddIPSecSiteConnectionOptionalAction(workflows.Action):
mtu = forms.IntegerField(
min_value=68,
label=_("Maximum Transmission Unit size for the connection"),
initial=1500,
help_text=_("Equal to or more than 68 if the local subnet is IPv4. "
"Equal to or more than 1280 if the local subnet is IPv6."))
dpd_action = forms.ChoiceField(label=_("Dead peer detection actions"))
dpd_interval = forms.IntegerField(
min_value=1, label=_("Dead peer detection interval"),
initial=30,
help_text=_("Valid integer"))
dpd_timeout = forms.IntegerField(
min_value=1, label=_("Dead peer detection timeout"),
initial=120,
help_text=_("Valid integer greater than the DPD interval"))
initiator = forms.ChoiceField(label=_("Initiator state"))
admin_state_up = forms.BooleanField(label=_("Admin State"),
initial=True, required=False)
def __init__(self, request, *args, **kwargs):
super(AddIPSecSiteConnectionOptionalAction, self).__init__(
request, *args, **kwargs)
initiator_choices = [("bi-directional", "bi-directional"),
("response-only", "response-only")]
self.fields['initiator'].choices = initiator_choices
def populate_dpd_action_choices(self, request, context):
dpd_action_choices = [("hold", "hold"),
("clear", "clear"),
("disabled", "disabled"),
("restart", "restart"),
("restart-by-peer", "restart-by-peer")]
self.fields['dpd_action'].choices = dpd_action_choices
return dpd_action_choices
class Meta:
name = _("Optional Parameters")
permissions = ('openstack.services.network',)
help_text = _("Fields in this tab are optional. "
"You can configure the detail of "
"IPSec site connection created."
)
class AddIPSecSiteConnectionOptionalStep(workflows.Step):
action_class = AddIPSecSiteConnectionOptionalAction
contributes = ("dpd_action", "dpd_interval", "dpd_timeout",
"initiator", "mtu", "admin_state_up")
def contribute(self, data, context):
context = super(
AddIPSecSiteConnectionOptionalStep, self).contribute(data, context)
context['dpd'] = {'action': data['dpd_action'],
'interval': data['dpd_interval'],
'timeout': data['dpd_timeout']}
context.pop('dpd_action')
context.pop('dpd_interval')
context.pop('dpd_timeout')
cidrs = context['peer_cidrs']
context['peer_cidrs'] = cidrs.replace(" ", "").split(",")
if data:
return context
class AddIPSecSiteConnection(workflows.Workflow):
slug = "addipsecsiteconnection"
name = _("Add IPSec Site Connection")
finalize_button_name = _("Add")
success_message = _('Added IPSec Site Connection "%s".')
failure_message = _('Unable to add IPSec Site Connection "%s".')
success_url = "horizon:project:vpn:index"
default_steps = (AddIPSecSiteConnectionStep,
AddIPSecSiteConnectionOptionalStep)
def format_status_message(self, message):
return message % self.context.get('name')
def handle(self, request, context):
try:
api.vpn.ipsecsiteconnection_create(request, **context)
return True
except Exception:
return False
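# Note on wiring (illustrative): Horizon instantiates these workflows
# through its WorkflowView machinery; the two steps above contribute
# into a single context dict, which handle() forwards verbatim to
# api.vpn.ipsecsiteconnection_create(request, **context).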
|
|
# Copyright (c) 2017, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import ast
import json
import uuid
from datetime import datetime
from typing import List
from pytz import timezone
from cerebralcortex.kernel.DataStoreEngine.Metadata.Metadata import Metadata
from cerebralcortex.kernel.DataStoreEngine.dataset import DataSet
from cerebralcortex.kernel.datatypes.datastream import DataStream, DataPoint
class LoadData:
def get_stream(self, stream_id: uuid, start_time: datetime = None, end_time: datetime = None,
data_type=DataSet.COMPLETE) -> DataStream:
"""
:param stream_id:
:param start_time:
:param end_time:
:param data_type: this parameter accepts only three types (i.e., all, data, metadata)
:return: DataStream object (or a list of DataPoints when data_type is ONLY_DATA)
"""
start_time = str(start_time)
end_time = str(end_time)
where_clause = "identifier='" + stream_id + "'"
if stream_id == 'None':
raise Exception("Identifier cannot be null.")
if start_time != 'None':
where_clause += " and start_time>=cast('" + start_time + "' as timestamp)"
if end_time != 'None':
where_clause += " and start_time<=cast('" + end_time + "' as timestamp)"
if data_type == DataSet.COMPLETE:
datapoints = self.map_dataframe_to_datapoint(
self.load_data_from_cassandra(self.datapointTable, where_clause))
stream = self.map_datapoint_and_metadata_to_datastream(stream_id, datapoints)
elif data_type == DataSet.ONLY_DATA:
return self.map_dataframe_to_datapoint(self.load_data_from_cassandra(self.datapointTable, where_clause))
elif data_type == DataSet.ONLY_METADATA:
datapoints = []
stream = self.map_datapoint_and_metadata_to_datastream(stream_id, datapoints)
else:
raise ValueError("Invalid type parameter.")
return stream
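# Example (illustrative; 'sid' is a hypothetical stream UUID and
# t0/t1 are datetimes):
#   dps = loader.get_stream(sid, start_time=t0, end_time=t1,
#                           data_type=DataSet.ONLY_DATA)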
def deprecated_get_annotation_stream(self, annotation_stream_id: uuid, input_stream_id: uuid, annotation: str,
start_time: datetime = None, end_time: datetime = None,
data_type=DataSet.COMPLETE) -> DataStream:
"""
This method is not optimized: it sends one query per annotation window (n queries in total) to Cassandra to retrieve the data.
In future, this will be deleted after verifying the performance of the other filtering method(s).
:param annotation_stream_id:
:param input_stream_id:
:param annotation:
:param start_time:
:param end_time:
:param data_type:
:return:
"""
datapointsList = []
start_time = str(start_time)
end_time = str(end_time)
where_clause = "identifier='" + annotation_stream_id + "'"
if annotation_stream_id == 'None':
raise Exception("Stream identifier cannot be null.")
if input_stream_id == 'None':
raise Exception("Input stream identifier cannot be null.")
if start_time != 'None':
where_clause += " and start_time>=cast('" + start_time + "' as timestamp)"
if end_time != 'None':
where_clause += " and start_time<=cast('" + end_time + "' as timestamp)"
if annotation != 'None':
where_clause += " and sample='{" + annotation + "}'"
annotation_stream = self.load_data_from_cassandra(self.datapointTable, where_clause)
rows = annotation_stream.collect()
for row in rows:
localtz = timezone(self.CC_obj.time_zone)
start_time = localtz.localize(row["start_time"])
end_time = localtz.localize(row["end_time"])
dp = self.get_stream(input_stream_id, start_time=start_time,
end_time=end_time, data_type=DataSet.ONLY_DATA)
datapointsList.append(dp)
if data_type == DataSet.COMPLETE:
annotation_stream = self.map_datapoint_and_metadata_to_datastream(annotation_stream_id, datapointsList)
elif data_type == DataSet.ONLY_DATA:
return datapointsList
elif data_type == DataSet.ONLY_METADATA:
datapoints = []
annotation_stream = self.map_datapoint_and_metadata_to_datastream(annotation_stream_id, datapoints)
else:
raise ValueError("Invalid type parameter.")
return annotation_stream
def get_annotation_stream(self, input_stream_id: uuid, annotation_stream_id: uuid, annotation: str,
start_time: datetime = None, end_time: datetime = None) -> List[DataPoint]:
"""
:param input_stream_id:
:param annotation_stream_id:
:param annotation:
:param start_time:
:param end_time:
:return:
"""
datapoints_list = []
annotation_stream_dps = self.get_stream(annotation_stream_id, start_time=start_time,
end_time=end_time, data_type=DataSet.ONLY_DATA)
data_stream_dps = self.get_stream(input_stream_id, start_time=start_time,
end_time=end_time, data_type=DataSet.ONLY_DATA)
for dp in self.map_annotation_stream_to_data_stream(annotation_stream_dps, data_stream_dps, annotation):
datapoints_list = datapoints_list + dp
return datapoints_list
@classmethod
def map_annotation_stream_to_data_stream(self, annotation_stream_dps: List[DataPoint],
data_stream_dps: List[DataPoint], annotation: str) -> List[DataPoint]:
"""
Map annotation stream to data stream.
:param annotation_stream_dps:
:param data_stream_dps:
:param annotation:
:rtype: List[DataPoint]
"""
filtered_datapoints = []
tmp = 0
for annotation_dp in annotation_stream_dps:
if annotation_dp.sample == annotation:
for index, datastream_dp in enumerate(data_stream_dps[tmp:], start=0):
if datastream_dp.start_time >= annotation_dp.start_time:
if (annotation_dp.start_time <= datastream_dp.start_time) and (
annotation_dp.end_time >= datastream_dp.end_time):
filtered_datapoints.append(datastream_dp)
if (tmp + index + 1) == len(data_stream_dps):
tmp = index + tmp + 1
yield filtered_datapoints
filtered_datapoints = []
else:
tmp = tmp + index
yield filtered_datapoints
filtered_datapoints = []
break
yield filtered_datapoints
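# The generator yields one list of DataPoints per matching annotation
# window, e.g. (illustrative; 'smoking' is a hypothetical annotation):
#   for window_dps in LoadData.map_annotation_stream_to_data_stream(
#           annotation_dps, data_dps, 'smoking'):
#       ...  # process the DataPoints covered by that window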
def map_dataframe_to_datapoint(self, dataframe: object) -> list:
"""
Converts a PySpark DataFrame into a list of datapoint objects
:param dataframe:
:return: list of datapoint objects
"""
datapointsList = []
rows = dataframe.collect()
for row in rows:
localtz = timezone(self.CC_obj.time_zone)
start_time = localtz.localize(row["start_time"])
if row["end_time"] != None:
end_time = localtz.localize(row["end_time"])
else:
end_time = ""
# ast.literal_eval converts the sample string into a Python literal (dict, list, etc.); fall back to the raw string if parsing fails
try:
smple = ast.literal_eval(row["sample"])
except (ValueError, SyntaxError):
smple = row["sample"]
dp = DataPoint(start_time, end_time, smple)
datapointsList.append(dp)
return datapointsList
def map_datapoint_and_metadata_to_datastream(self, stream_id: uuid, data: list) -> DataStream:
"""
This method will map the datapoint and metadata to datastream object
:param stream_id:
:param data: list
:return: datastream object
"""
# query datastream(mysql) for metadata
datastream_info = Metadata(self.CC_obj).get_stream_info(stream_id)
ownerID = datastream_info[0]["owner"]
name = datastream_info[0]["name"]
data_descriptor = json.loads(datastream_info[0]["data_descriptor"])
execution_context = json.loads(datastream_info[0]["execution_context"])
annotations = json.loads(datastream_info[0]["annotations"])
stream_type = datastream_info[0]["type"]
start_time = datastream_info[0]["start_time"]
end_time = datastream_info[0]["end_time"]
return DataStream(stream_id, ownerID, name, data_descriptor, execution_context, annotations,
stream_type, start_time, end_time, data)
def load_data_from_cassandra(self, table_name: str, where_clause: str) -> object:
"""
Establish connection with cassandra, load data, and filter based on the condition passed in whereClause argument
:return:
:param table_name:
:param where_clause:
:return: spark dataframe
"""
# TO-DO, replace .filter with .where() for performance
dataframe = self.sqlContext.read.format("org.apache.spark.sql.cassandra"). \
option("spark.cassandra.connection.host", self.hostIP). \
option("spark.cassandra.auth.username", self.dbUser). \
option("spark.cassandra.auth.password", self.dbPassword). \
options(table=table_name, keyspace=self.keyspaceName, pushdown=True).load(). \
select("start_time", "end_time", "sample"). \
filter(where_clause). \
orderBy('start_time', ascending=True)
return dataframe
@staticmethod
def get_epoch_time(dt: datetime) -> float:
"""
:param dt: datetime to convert
:return: milliseconds since the Unix epoch
"""
epoch = datetime.utcfromtimestamp(0)
return (dt - epoch).total_seconds() * 1000.0
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import inspect
import re
from inspect import signature
from typing import Any, Callable, Dict, Optional, Tuple, TypeVar, cast
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.models.dag import DAG, DagContext
from airflow.models.xcom_arg import XComArg
from airflow.utils.task_group import TaskGroup, TaskGroupContext
def validate_python_callable(python_callable):
"""
Validate that python callable can be wrapped by operator.
Raises exception if invalid.
:param python_callable: Python object to be validated
:raises: TypeError, AirflowException
"""
if not callable(python_callable):
raise TypeError('`python_callable` param must be callable')
if 'self' in signature(python_callable).parameters.keys():
raise AirflowException('@task does not support methods')
def get_unique_task_id(
task_id: str, dag: Optional[DAG] = None, task_group: Optional[TaskGroup] = None
) -> str:
"""
Generate unique task id given a DAG (or if run in a DAG context).
IDs are generated by appending a unique number to the end of
the original task id.
Example:
task_id
task_id__1
task_id__2
...
task_id__20
"""
dag = dag or DagContext.get_current_dag()
if not dag:
return task_id
# We need to check if we are in the context of TaskGroup as the task_id may
# already be altered
task_group = task_group or TaskGroupContext.get_current_task_group(dag)
tg_task_id = task_group.child_id(task_id) if task_group else task_id
if tg_task_id not in dag.task_ids:
return task_id
core = re.split(r'__\d+$', task_id)[0]
suffixes = sorted(
int(re.split(r'^.+__', task_id)[1])
for task_id in dag.task_ids
if re.match(rf'^{core}__\d+$', task_id)
)
if not suffixes:
return f'{core}__1'
return f'{core}__{suffixes[-1] + 1}'
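# Illustrative behaviour (assuming the current DAG already contains
# task ids 'load' and 'load__1'):
#   get_unique_task_id('load', dag)  # -> 'load__2'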
class DecoratedOperator(BaseOperator):
"""
Wraps a Python callable and captures args/kwargs when called for execution.
:param python_callable: A reference to an object that is callable
:type python_callable: python callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function (templated)
:type op_kwargs: dict
:param op_args: a list of positional arguments that will get unpacked when
calling your callable (templated)
:type op_args: list
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. Dict will unroll to XCom values
with its keys as XCom keys. Defaults to False.
:type multiple_outputs: bool
:param kwargs_to_upstream: For certain operators, we might need to upstream certain arguments
that would otherwise be absorbed by the DecoratedOperator (for example python_callable for the
PythonOperator). This gives a user the option to upstream kwargs as needed.
:type kwargs_to_upstream: dict
"""
template_fields = ('op_args', 'op_kwargs')
template_fields_renderers = {"op_args": "py", "op_kwargs": "py"}
# since we won't mutate the arguments, we should just do the shallow copy
# there are some cases we can't deepcopy the objects (e.g protobuf).
shallow_copy_attrs = ('python_callable',)
def __init__(
self,
*,
python_callable: Callable,
task_id: str,
op_args: Tuple[Any],
op_kwargs: Dict[str, Any],
multiple_outputs: bool = False,
kwargs_to_upstream: dict = None,
**kwargs,
) -> None:
kwargs['task_id'] = get_unique_task_id(task_id, kwargs.get('dag'), kwargs.get('task_group'))
self.python_callable = python_callable
kwargs_to_upstream = kwargs_to_upstream or {}
# Check that arguments can be bound
signature(python_callable).bind(*op_args, **op_kwargs)
self.multiple_outputs = multiple_outputs
self.op_args = op_args
self.op_kwargs = op_kwargs
super().__init__(**kwargs_to_upstream, **kwargs)
def execute(self, context: Dict):
return_value = super().execute(context)
return self._handle_output(return_value=return_value, context=context, xcom_push=self.xcom_push)
def _handle_output(self, return_value: Any, context: Dict, xcom_push: Callable):
"""
Handles logic for whether a decorator needs to push a single return value or multiple return values.
:param return_value:
:param context:
:param xcom_push:
"""
if not self.multiple_outputs:
return return_value
if isinstance(return_value, dict):
for key in return_value.keys():
if not isinstance(key, str):
raise AirflowException(
'Returned dictionary keys must be strings when using '
f'multiple_outputs, found {key} ({type(key)}) instead'
)
for key, value in return_value.items():
xcom_push(context, key, value)
else:
raise AirflowException(
f'Returned output was type {type(return_value)} expected dictionary for multiple_outputs'
)
return return_value
def _hook_apply_defaults(self, *args, **kwargs):
if 'python_callable' not in kwargs:
return args, kwargs
python_callable = kwargs['python_callable']
default_args = kwargs.get('default_args') or {}
op_kwargs = kwargs.get('op_kwargs') or {}
f_sig = signature(python_callable)
for arg in f_sig.parameters:
if arg not in op_kwargs and arg in default_args:
op_kwargs[arg] = default_args[arg]
kwargs['op_kwargs'] = op_kwargs
return args, kwargs
T = TypeVar("T", bound=Callable)
def task_decorator_factory(
python_callable: Optional[Callable] = None,
multiple_outputs: Optional[bool] = None,
decorated_operator_class: BaseOperator = None,
**kwargs,
) -> Callable[[T], T]:
"""
A factory that generates a wrapper that wraps a function into an Airflow operator.
Accepts kwargs for the operator. Can be reused in a single DAG.
:param python_callable: Function to decorate
:type python_callable: Optional[Callable]
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. List/Tuples will unroll to xcom values
with index as key. Dict will unroll to xcom values with keys as XCom keys.
Defaults to False.
:type multiple_outputs: bool
:param decorated_operator_class: The operator that executes the logic needed to run the python function in
the correct environment
:type decorated_operator_class: BaseDecoratedOperator
"""
# try to infer from type annotation
if python_callable and multiple_outputs is None:
sig = signature(python_callable).return_annotation
ttype = getattr(sig, "__origin__", None)
multiple_outputs = sig != inspect.Signature.empty and ttype in (dict, Dict)
def wrapper(f: T):
"""
Python wrapper to generate PythonDecoratedOperator out of simple python functions.
Used for Airflow Decorated interface
"""
validate_python_callable(f)
kwargs.setdefault('task_id', f.__name__)
@functools.wraps(f)
def factory(*args, **f_kwargs):
op = decorated_operator_class(
python_callable=f,
op_args=args,
op_kwargs=f_kwargs,
multiple_outputs=multiple_outputs,
**kwargs,
)
if f.__doc__:
op.doc_md = f.__doc__
return XComArg(op)
return cast(T, factory)
if callable(python_callable):
return wrapper(python_callable)
elif python_callable is not None:
raise AirflowException('No args allowed while using @task, use kwargs instead')
return wrapper
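# Illustrative wiring (hypothetical operator subclass; mirrors how a
# concrete @task decorator would be built on top of this factory):
#   def task(python_callable=None, multiple_outputs=None, **kwargs):
#       return task_decorator_factory(
#           python_callable=python_callable,
#           multiple_outputs=multiple_outputs,
#           decorated_operator_class=_MyDecoratedOperator,
#           **kwargs,
#       )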
|
|
# Copyright (c) 2008 Divmod. See LICENSE for details.
"""
Tests for L{epsilon.amprouter}.
"""
import six
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.python.failure import Failure
from twisted.protocols.amp import IBoxReceiver, IBoxSender
from twisted.trial.unittest import TestCase
from epsilon.amprouter import _ROUTE, RouteNotConnected, Router
@implementer(IBoxReceiver)
class SomeReceiver:
"""
A stub AMP box receiver which just keeps track of whether it has been
started or stopped and what boxes have been delivered to it.
@ivar sender: C{None} until C{startReceivingBoxes} is called, then a
reference to the L{IBoxSender} passed to that method.
@ivar reason: C{None} until C{stopReceivingBoxes} is called, then a
reference to the L{Failure} passed to that method.
@ivar started: C{False} until C{startReceivingBoxes} is called, then
C{True}.
@ivar stopped: C{False} until C{stopReceivingBoxes} is called, then
C{True}.
"""
sender = None
reason = None
started = False
stopped = False
def __init__(self):
self.boxes = []
def startReceivingBoxes(self, sender):
self.started = True
self.sender = sender
def ampBoxReceived(self, box):
if self.started and not self.stopped:
self.boxes.append(box)
def stopReceivingBoxes(self, reason):
self.stopped = True
self.reason = reason
@implementer(IBoxSender)
class CollectingSender:
"""
An L{IBoxSender} which collects and saves boxes and errors sent to it.
"""
def __init__(self):
self.boxes = []
self.errors = []
def sendBox(self, box):
"""
Reject boxes with non-string keys or values; save all the rest in
C{self.boxes}.
"""
serial_types = (six.text_type, six.binary_type)
for k, v in six.viewitems(box):
if not (isinstance(k, serial_types) and isinstance(v, serial_types)):
raise TypeError("Cannot send boxes containing non-strings")
self.boxes.append(box)
def unhandledError(self, failure):
self.errors.append(failure.getErrorMessage())
class RouteTests(TestCase):
"""
Tests for L{Route}, the L{IBoxSender} which handles adding routing
information to outgoing boxes.
"""
def setUp(self):
"""
Create a route attached to a stub sender.
"""
self.receiver = SomeReceiver()
self.sender = CollectingSender()
self.localName = u"foo"
self.remoteName = u"bar"
self.router = Router()
self.router.startReceivingBoxes(self.sender)
self.route = self.router.bindRoute(self.receiver, self.localName)
def test_interfaces(self):
"""
L{Route} instances provide L{IBoxSender}.
"""
self.assertTrue(verifyObject(IBoxSender, self.route))
def test_start(self):
"""
L{Route.start} starts its L{IBoxReceiver}.
"""
self.assertFalse(self.receiver.started)
self.route.start()
self.assertTrue(self.receiver.started)
self.assertIdentical(self.receiver.sender, self.route)
def test_stop(self):
"""
L{Route.stop} stops its L{IBoxReceiver}.
"""
self.route.start()
self.assertFalse(self.receiver.stopped)
self.route.stop(Failure(RuntimeError("foo")))
self.assertTrue(self.receiver.stopped)
self.receiver.reason.trap(RuntimeError)
def test_sendBox(self):
"""
L{Route.sendBox} adds the route name to the box before passing it on to
the underlying sender.
"""
self.route.connectTo(self.remoteName)
self.route.sendBox({"foo": "bar"})
self.assertEqual(
self.sender.boxes, [{_ROUTE: self.remoteName, "foo": "bar"}])
def test_sendUnroutedBox(self):
"""
If C{Route.connectTo} is called with C{None}, no route name is added to
the outgoing box.
"""
self.route.connectTo(None)
self.route.sendBox({"foo": "bar"})
self.assertEqual(
self.sender.boxes, [{"foo": "bar"}])
def test_sendBoxWithoutConnection(self):
"""
L{Route.sendBox} raises L{RouteNotConnected} if called before the
L{Route} is connected to a remote route name.
"""
self.assertRaises(
RouteNotConnected, self.route.sendBox, {'foo': 'bar'})
def test_unbind(self):
"""
L{Route.unbind} removes the route from its router.
"""
self.route.unbind()
self.assertRaises(
KeyError, self.router.ampBoxReceived, {_ROUTE: self.localName})
class RouterTests(TestCase):
"""
Tests for L{Router}, the L{IBoxReceiver} which directs routed AMP boxes to
the right object.
"""
def setUp(self):
"""
Create sender, router, receiver, and route objects.
"""
self.sender = CollectingSender()
self.router = Router()
self.router.startReceivingBoxes(self.sender)
self.receiver = SomeReceiver()
self.route = self.router.bindRoute(self.receiver)
self.route.connectTo(u"foo")
def test_interfaces(self):
"""
L{Router} instances provide L{IBoxReceiver}.
"""
self.assertTrue(verifyObject(IBoxReceiver, self.router))
def test_uniqueRoutes(self):
"""
L{Router.createRouteIdentifier} returns a new, different route
identifier on each call.
"""
identifiers = [self.router.createRouteIdentifier() for x in range(10)]
self.assertEqual(len(set(identifiers)), len(identifiers))
def test_bind(self):
"""
L{Router.bind} returns a new L{Route} instance which will send boxes to
the L{Route}'s L{IBoxSender} after adding a C{_ROUTE} key to them.
"""
self.route.sendBox({'foo': 'bar'})
self.assertEqual(
self.sender.boxes,
[{_ROUTE: self.route.remoteRouteName, 'foo': 'bar'}])
self.route.unhandledError(Failure(Exception("some test exception")))
self.assertEqual(
self.sender.errors, ["some test exception"])
def test_bindBeforeStart(self):
"""
If a L{Route} is created with L{Router.bind} before the L{Router} is
started with L{Router.startReceivingBoxes}, the L{Route} is created
unstarted and only started when the L{Router} is started.
"""
router = Router()
receiver = SomeReceiver()
route = router.bindRoute(receiver)
route.connectTo(u'quux')
self.assertFalse(receiver.started)
sender = CollectingSender()
router.startReceivingBoxes(sender)
self.assertTrue(receiver.started)
route.sendBox({'foo': 'bar'})
self.assertEqual(
sender.boxes, [{_ROUTE: route.remoteRouteName, 'foo': 'bar'}])
router.ampBoxReceived({_ROUTE: route.localRouteName, 'baz': 'quux'})
self.assertEqual(receiver.boxes, [{'baz': 'quux'}])
def test_bindBeforeStartFinishAfterStart(self):
"""
If a L{Route} is created with L{Router.connect} before the L{Router} is
started with L{Router.startReceivingBoxes} but the Deferred returned by
the connect thunk does not fire until after the router is started, the
L{IBoxReceiver} associated with the route is not started until that
Deferred fires and the route is associated with a remote route name.
"""
router = Router()
receiver = SomeReceiver()
route = router.bindRoute(receiver)
sender = CollectingSender()
router.startReceivingBoxes(sender)
self.assertFalse(receiver.started)
route.connectTo(u"remoteName")
self.assertTrue(receiver.started)
receiver.sender.sendBox({'foo': 'bar'})
self.assertEqual(sender.boxes, [{_ROUTE: 'remoteName', 'foo': 'bar'}])
def test_ampBoxReceived(self):
"""
L{Router.ampBoxReceived} passes on AMP boxes to the L{IBoxReceiver}
identified by the route key in the box.
"""
firstReceiver = SomeReceiver()
firstRoute = self.router.bindRoute(firstReceiver)
firstRoute.start()
secondReceiver = SomeReceiver()
secondRoute = self.router.bindRoute(secondReceiver)
secondRoute.start()
self.router.ampBoxReceived(
{_ROUTE: firstRoute.localRouteName, 'foo': 'bar'})
self.router.ampBoxReceived(
{_ROUTE: secondRoute.localRouteName, 'baz': 'quux'})
self.assertEqual(firstReceiver.boxes, [{'foo': 'bar'}])
self.assertEqual(secondReceiver.boxes, [{'baz': 'quux'}])
def test_ampBoxReceivedDefaultRoute(self):
"""
L{Router.ampBoxReceived} delivers boxes with no route to the default
box receiver.
"""
sender = CollectingSender()
receiver = SomeReceiver()
router = Router()
router.startReceivingBoxes(sender)
router.bindRoute(receiver, None).start()
router.ampBoxReceived({'foo': 'bar'})
self.assertEqual(receiver.boxes, [{'foo': 'bar'}])
def test_stopReceivingBoxes(self):
"""
L{Router.stopReceivingBoxes} calls the C{stop} method of each connected
route.
"""
sender = CollectingSender()
router = Router()
router.startReceivingBoxes(sender)
receiver = SomeReceiver()
router.bindRoute(receiver)
class DummyException(Exception):
pass
self.assertFalse(receiver.stopped)
router.stopReceivingBoxes(Failure(DummyException()))
self.assertTrue(receiver.stopped)
receiver.reason.trap(DummyException)
|
|
#!/usr/bin/env python
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate JSON for BigQuery importing."""
import argparse
import logging
import json
import os
import subprocess
import sys
import time
import traceback
try:
import defusedxml.ElementTree as ET
except ImportError:
import xml.etree.cElementTree as ET
import model
def parse_junit(xml):
"""Generate failed tests as a series of dicts. Ignore skipped tests."""
# NOTE: this is modified from gubernator/view_build.py
tree = ET.fromstring(xml)
# pylint: disable=redefined-outer-name
def make_result(name, time, failure_text):
if failure_text:
if time is None:
return {'name': name, 'failed': True, 'failure_text': failure_text}
return {'name': name, 'time': time, 'failed': True, 'failure_text': failure_text}
if time is None:
return {'name': name}
return {'name': name, 'time': time}
# Note: skipped tests are ignored because they make rows too large for BigQuery.
# Knowing that a given build could have run a test but didn't for some reason
# isn't very interesting.
if tree.tag == 'testsuite':
for child in tree.findall('testcase'):
name = child.attrib['name']
time = float(child.attrib['time'] or 0)
failure_text = None
for param in child.findall('failure'):
failure_text = param.text
skipped = child.findall('skipped')
if skipped:
continue
yield make_result(name, time, failure_text)
elif tree.tag == 'testsuites':
for testsuite in tree:
suite_name = testsuite.attrib['name']
for child in testsuite.findall('testcase'):
name = '%s %s' % (suite_name, child.attrib['name'])
time = float(child.attrib['time'] or 0)
failure_text = None
for param in child.findall('failure'):
failure_text = param.text
skipped = child.findall('skipped')
if skipped:
continue
yield make_result(name, time, failure_text)
else:
logging.error('unable to find failures, unexpected tag %s', tree.tag)
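# Example (illustrative):
#   xml = '<testsuite><testcase name="t" time="1.5"/></testsuite>'
#   list(parse_junit(xml))  # -> [{'name': 't', 'time': 1.5}]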
def buckets_yaml():
import yaml # does not support pypy
with open(os.path.dirname(os.path.abspath(__file__))+'/buckets.yaml') as fp:
return yaml.load(fp)
# pypy compatibility hack
def python_buckets_yaml(python='python2'):
return json.loads(subprocess.check_output(
[python, '-c', 'import json,yaml; print json.dumps(yaml.load(open("buckets.yaml")))'],
cwd=os.path.dirname(os.path.abspath(__file__))))
for attempt in [python_buckets_yaml, buckets_yaml, lambda: python_buckets_yaml(python='python')]:
try:
BUCKETS = attempt()
break
except (ImportError, OSError):
traceback.print_exc()
else:
# pylint: disable=misplaced-bare-raise
# This is safe because the only way we get here is by failing all attempts
raise
def path_to_job_and_number(path):
assert not path.endswith('/')
for bucket, meta in BUCKETS.iteritems():
if path.startswith(bucket):
prefix = meta['prefix']
break
else:
if path.startswith('gs://kubernetes-jenkins/pr-logs'):
prefix = 'pr:'
else:
raise ValueError('unknown build path')
build = os.path.basename(path)
job = prefix + os.path.basename(os.path.dirname(path))
try:
return job, int(build)
except ValueError:
return job, None
def row_for_build(path, started, finished, results):
tests = []
for result in results:
for test in parse_junit(result):
if '#' in test['name'] and not test.get('failed'):
continue # skip successful repeated tests
tests.append(test)
build = {
'path': path,
'test': tests,
'tests_run': len(tests),
'tests_failed': sum(t.get('failed', 0) for t in tests)
}
job, number = path_to_job_and_number(path)
build['job'] = job
if number:
build['number'] = number
if started:
build['started'] = int(started['timestamp'])
if 'node' in started:
build['executor'] = started['node']
if finished:
build['finished'] = int(finished['timestamp'])
if 'result' in finished:
build['result'] = finished['result']
build['passed'] = build['result'] == 'SUCCESS'
elif isinstance(finished.get('passed'), bool):
build['passed'] = finished['passed']
build['result'] = 'SUCCESS' if build['passed'] else 'FAILURE'
if 'version' in finished:
build['version'] = finished['version']
def get_metadata():
metadata = None
if finished and 'metadata' in finished:
metadata = finished['metadata']
elif started:
metadata = started.get('metadata')
if metadata:
# clean useless/duplicated metadata fields
if 'repo' in metadata and not metadata['repo']:
metadata.pop('repo')
build_version = build.get('version', 'N/A')
if metadata.get('job-version') == build_version:
metadata.pop('job-version')
if metadata.get('version') == build_version:
metadata.pop('version')
for key, value in metadata.items():
if not isinstance(value, basestring):
# the schema specifies a string value. force it!
metadata[key] = json.dumps(value)
if not metadata:
return None
return [{'key': k, 'value': v} for k, v in sorted(metadata.items())]
metadata = get_metadata()
if metadata:
build['metadata'] = metadata
if started and finished:
build['elapsed'] = build['finished'] - build['started']
return build
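# Shape of a row produced by row_for_build (illustrative values, not real data):
# {'path': 'gs://.../some-job/123', 'job': 'ci-some-job', 'number': 123,
#  'test': [...], 'tests_run': 2, 'tests_failed': 1,
#  'started': 1490000000, 'finished': 1490000300, 'elapsed': 300,
#  'result': 'SUCCESS', 'passed': True,
#  'metadata': [{'key': '...', 'value': '...'}]}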
def get_table(days):
if days:
return ('build_emitted_%g' % days).replace('.', '_')
return 'build_emitted'
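# Naming examples (illustrative): get_table(0) -> 'build_emitted',
# get_table(7.0) -> 'build_emitted_7', get_table(0.5) -> 'build_emitted_0_5'.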
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument('--days', type=float, default=0,
help='Grab data for builds within N days')
parser.add_argument('--assert-oldest', type=float,
help='Exit nonzero if a build older than X days was emitted previously.')
parser.add_argument('--reset-emitted', action='store_true',
help='Clear list of already-emitted builds.')
parser.add_argument('paths', nargs='*',
                        help='Optional list of gs:// paths to dump rows for.')
return parser.parse_args(args)
def make_rows(db, builds):
for rowid, path, started, finished in builds:
try:
results = db.test_results_for_build(path)
yield rowid, row_for_build(path, started, finished, results)
except IOError:
return
except: # pylint: disable=bare-except
logging.exception('error on %s', path)
def main(db, opts, outfile):
min_started = None
if opts.days:
min_started = time.time() - (opts.days or 1) * 24 * 60 * 60
incremental_table = get_table(opts.days)
if opts.assert_oldest:
oldest = db.get_oldest_emitted(incremental_table)
if oldest < time.time() - opts.assert_oldest * 24 * 60 * 60:
return 1
return 0
if opts.reset_emitted:
db.reset_emitted(incremental_table)
if opts.paths:
# When asking for rows for specific builds, use a dummy table and clear it first.
incremental_table = 'incremental_manual'
db.reset_emitted(incremental_table)
builds = list(db.get_builds_from_paths(opts.paths, incremental_table))
else:
builds = db.get_builds(min_started=min_started, incremental_table=incremental_table)
rows_emitted = set()
for rowid, row in make_rows(db, builds):
json.dump(row, outfile, sort_keys=True)
outfile.write('\n')
rows_emitted.add(rowid)
if rows_emitted:
gen = db.insert_emitted(rows_emitted, incremental_table=incremental_table)
print >>sys.stderr, 'incremental progress gen #%d' % gen
else:
print >>sys.stderr, 'no rows emitted'
return 0
if __name__ == '__main__':
DB = model.Database()
OPTIONS = parse_args(sys.argv[1:])
sys.exit(main(DB, OPTIONS, sys.stdout))
|
|
"""The image module provides basic functions for working with images in nipy.
Functions are provided to load, save and create image objects, along with
iterators to easily slice through volumes.
load : load an image from a file
save : save an image to a file
fromarray : create an image from a numpy array
Examples
--------
See documentation for load and save functions for 'working' examples.
"""
import os
import numpy as np
import nipy.io.imageformats as formats
from nipy.core.api import Image, is_image
from nifti_ref import (coordmap_from_affine, coerce_coordmap,
ijk_from_fps, fps_from_ijk)
def load(filename):
"""Load an image from the given filename.
Parameters
----------
filename : string
Should resolve to a complete filename path.
Returns
-------
image : An `Image` object
If successful, a new `Image` object is returned.
See Also
--------
save_image : function for saving images
fromarray : function for creating images from numpy arrays
Examples
--------
>>> from nipy.io.api import load_image
>>> from nipy.testing import anatfile
>>> img = load_image(anatfile)
>>> img.shape
(33, 41, 25)
"""
img = formats.load(filename)
aff = img.get_affine()
shape = img.get_shape()
hdr = img.get_header()
# Get info from NIFTI header, if present, to tell which axes are
# which. This is a NIFTI-specific kludge, that might be abstracted
# out into the image backend in a general way. Similarly for
# getting zooms
try:
fps = hdr.get_dim_info()
except (TypeError, AttributeError):
fps = (None, None, None)
ijk = ijk_from_fps(fps)
try:
zooms = hdr.get_zooms()
except AttributeError:
zooms = np.ones(len(shape))
aff = _match_affine(aff, len(shape), zooms)
coordmap = coordmap_from_affine(aff, ijk)
img = Image(img.get_data(), coordmap)
img.header = hdr
return img
def _match_affine(aff, ndim, zooms=None):
''' Fill or prune affine to given number of dimensions
>>> aff = np.arange(16).reshape(4,4)
>>> _match_affine(aff, 3)
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> _match_affine(aff, 2)
array([[ 0., 1., 3.],
[ 4., 5., 7.],
[ 0., 0., 1.]])
>>> _match_affine(aff, 4)
array([[ 0., 1., 2., 0., 3.],
[ 4., 5., 6., 0., 7.],
[ 8., 9., 10., 0., 11.],
[ 0., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 1.]])
>>> aff = np.arange(9).reshape(3,3)
>>> _match_affine(aff, 2)
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
'''
if aff.shape[0] != aff.shape[1]:
raise ValueError('Need square affine')
aff_dim = aff.shape[0] - 1
if ndim == aff_dim:
return aff
aff_diag = np.ones(ndim+1)
    if zooms is not None:
n = min(len(zooms), ndim)
aff_diag[:n] = zooms[:n]
mod_aff = np.diag(aff_diag)
n = min(ndim, aff_dim)
# rotations zooms shears
mod_aff[:n,:n] = aff[:n,:n]
# translations
mod_aff[:n,-1] = aff[:n,-1]
return mod_aff
def save(img, filename, dtype=None):
"""Write the image to a file.
Parameters
----------
img : An `Image` object
filename : string
Should be a valid filename.
Returns
-------
image : An `Image` object
See Also
--------
load_image : function for loading images
fromarray : function for creating images from numpy arrays
Examples
--------
>>> import os
>>> import numpy as np
>>> from tempfile import mkstemp
>>> from nipy.core.api import fromarray
>>> from nipy.io.api import save_image
>>> data = np.zeros((91,109,91), dtype=np.uint8)
>>> img = fromarray(data, 'kji', 'zxy')
>>> fd, fname = mkstemp(suffix='.nii.gz')
>>> saved_img = save_image(img, fname)
>>> saved_img.shape
(91, 109, 91)
>>> os.unlink(fname)
>>> fd, fname = mkstemp(suffix='.img.gz')
>>> saved_img = save_image(img, fname)
>>> saved_img.shape
(91, 109, 91)
>>> os.unlink(fname)
>>> fname = 'test.mnc'
>>> saved_image = save_image(img, fname)
Traceback (most recent call last):
...
ValueError: Cannot save file type "minc"
Notes
-----
Filetype is determined by the file extension in 'filename'. Currently the
following filetypes are supported:
* Nifti single file : ['.nii', '.nii.gz']
* Nifti file pair : ['.hdr', '.hdr.gz']
    * Analyze file pair : ['.img', '.img.gz']
"""
# Get header from image
try:
original_hdr = img.header
except AttributeError:
original_hdr = None
# Make NIFTI compatible version of image
newcmap, order = coerce_coordmap(img.coordmap)
Fimg = Image(np.transpose(np.asarray(img), order), newcmap)
# Expand or contract affine to 4x4 (3 dimensions)
rzs = Fimg.affine[:-1,:-1]
zooms = np.sqrt(np.sum(rzs * rzs, axis=0))
aff = _match_affine(Fimg.affine, 3, zooms)
ftype = _type_from_filename(filename)
if ftype.startswith('nifti1'):
klass = formats.Nifti1Image
elif ftype == 'analyze':
klass = formats.Spm2AnalyzeImage
else:
raise ValueError('Cannot save file type "%s"' % ftype)
# make new image
out_img = klass(data=np.asarray(Fimg),
affine=aff,
header=original_hdr)
hdr = out_img.get_header()
    # work out phase, frequency, slice from coordmap names
ijk = newcmap.input_coords.coord_names
fps = fps_from_ijk(ijk)
# put fps into header if possible
try:
hdr.set_dim_info(*fps)
except AttributeError:
pass
# Set zooms
hdr.set_zooms(zooms)
# save to disk
out_img.to_filename(filename)
return Fimg
def _type_from_filename(filename):
''' Return image type determined from filename
Filetype is determined by the file extension in 'filename'.
Currently the following filetypes are supported:
* Nifti single file : ['.nii', '.nii.gz']
* Nifti file pair : ['.hdr', '.hdr.gz']
* Analyze file pair : ['.img', '.img.gz']
>>> _type_from_filename('test.nii')
'nifti1single'
>>> _type_from_filename('test')
'nifti1single'
>>> _type_from_filename('test.hdr')
'nifti1pair'
>>> _type_from_filename('test.hdr.gz')
'nifti1pair'
>>> _type_from_filename('test.img.gz')
'analyze'
>>> _type_from_filename('test.mnc')
'minc'
'''
if filename.endswith('.gz'):
filename = filename[:-3]
elif filename.endswith('.bz2'):
filename = filename[:-4]
_, ext = os.path.splitext(filename)
if ext in ('', '.nii'):
return 'nifti1single'
if ext == '.hdr':
return 'nifti1pair'
if ext == '.img':
return 'analyze'
if ext == '.mnc':
return 'minc'
raise ValueError('Strange file extension "%s"' % ext)
def as_image(image_input):
''' Load image from filename or pass through image instance
Parameters
----------
image_input : str or Image instance
image or string filename of image. If a string, load image and
return. If an image, pass through without modification
Returns
-------
img : Image or Image-like instance
Input object if `image_input` seemed to be an image, loaded Image
object if `image_input` was a string.
Raises
------
TypeError : if neither string nor image-like passed
Examples
--------
>>> from nipy.testing import anatfile
>>> from nipy.io.api import load_image
>>> img = as_image(anatfile)
>>> img2 = as_image(img)
>>> img2 is img
True
'''
if is_image(image_input):
return image_input
if isinstance(image_input, basestring):
return load(image_input)
raise TypeError('Expecting an image-like object or filename string')
|
|
from os.path import join, isfile, isdir
import re
import shutil
from spark_package.spark_package import licenses,register_package_http,check_homepage
import sys
if sys.version_info >= (3, 0):
from io import StringIO
else:
from StringIO import StringIO
import subprocess
import tempfile
import unittest
import zipfile
import responses
import pexpect
def run_cmd(cmd):
return subprocess.Popen(["spark-package"] + cmd, stdout=subprocess.PIPE,
stdin=subprocess.PIPE, stderr=subprocess.PIPE, close_fds = True)
def spawn(cmd):
return pexpect.spawn(" ".join(["spark-package"] + cmd))
def input_and_expect(p, vals):
for prompt, input in vals:
if sys.version_info >= (3, 0):
p.expect(re.compile(prompt))
else:
p.expect(re.compile(prompt.decode('utf-8')))
if input:
p.sendline(input)
def communicate(p, val):
if type(val) is list:
# stdo = [p.stdout.readline().decode("utf-8")]
for input in val:
p.stdin.write(input)
p.stdin.flush()
# stdo.append(p.stdout.readline().decode("utf-8"))
return p.communicate()
if sys.version_info >= (3, 0):
return p.communicate(val.encode())
else:
return p.communicate(val)
def check_sbt_files(test, temp_dir, name, exists=True):
base_name = name.split("/")[1]
if exists:
check_exists = test.assertTrue
else:
check_exists = test.assertFalse
check_exists(isdir(join(temp_dir, base_name, "project")))
check_exists(isfile(join(temp_dir, base_name, "project", "build.properties")))
check_exists(isfile(join(temp_dir, base_name, "project", "plugins.sbt")))
check_exists(isdir(join(temp_dir, base_name, "build")))
check_exists(isfile(join(temp_dir, base_name, "build", "sbt")))
check_exists(isfile(join(temp_dir, base_name, "build", "sbt-launch-lib.bash")))
build_file = join(temp_dir, base_name, "build.sbt")
check_exists(isfile(build_file))
if exists:
with open(build_file, 'r') as f:
test.assertTrue("spName := \"%s\"" % name in f.read())
def check_scala_files(test, temp_dir, name, exists=True):
base_name = name.split("/")[1]
if exists:
check_exists = test.assertTrue
else:
check_exists = test.assertFalse
check_exists(isdir(join(temp_dir, base_name, "src", "main", "scala")))
check_exists(isdir(join(temp_dir, base_name, "src", "test", "scala")))
def check_base_files(test, temp_dir, name):
base_name = name.split("/")[1]
test.assertTrue(isfile(join(temp_dir, base_name, "LICENSE")))
test.assertTrue(isfile(join(temp_dir, base_name, "README.md")))
test.assertTrue(isfile(join(temp_dir, base_name, ".gitignore")))
def check_python_files(test, temp_dir, name, exists=True):
base_name = name.split("/")[1]
if exists:
check_exists = test.assertTrue
else:
check_exists = test.assertFalse
check_exists(isdir(join(temp_dir, base_name, "python")))
check_exists(isfile(join(temp_dir, base_name, "python", "setup.py")))
check_exists(isfile(join(temp_dir, base_name, "python", "setup.cfg")))
check_exists(isfile(join(temp_dir, base_name, "python", "MANIFEST.in")))
check_exists(isfile(join(temp_dir, base_name, "python", "requirements.txt")))
check_exists(isfile(join(temp_dir, base_name, "python", "spark-package-deps.txt")))
check_exists(isfile(join(temp_dir, base_name, "python", "tests.py")))
def check_java_files(test, temp_dir, name, exists=True):
base_name = name.split("/")[1]
if exists:
check_exists = test.assertTrue
else:
check_exists = test.assertFalse
check_exists(isdir(join(temp_dir, base_name, "src", "main", "java")))
check_exists(isdir(join(temp_dir, base_name, "src", "test", "java")))
def check_r_files(test, temp_dir, name, exists=True):
base_name = name.split("/")[1]
if exists:
check_exists = test.assertTrue
else:
check_exists = test.assertFalse
check_exists(isdir(join(temp_dir, base_name, "R", "pkg", "R")))
check_exists(isdir(join(temp_dir, base_name, "R", "pkg", "man")))
check_exists(isdir(join(temp_dir, base_name, "R", "pkg", "data")))
check_exists(isdir(join(temp_dir, base_name, "R", "pkg", "src")))
check_exists(isfile(join(temp_dir, base_name, "R", "pkg", "NAMESPACE")))
check_exists(isfile(join(temp_dir, base_name, "R", "pkg", "man", "documentation.Rd")))
check_exists(isfile(join(temp_dir, base_name, "R", "pkg", "Read-and-delete-me")))
description = join(temp_dir, base_name, "R", "pkg", "DESCRIPTION")
check_exists(isfile(description))
if exists:
with open(description, 'r') as f:
test.assertTrue("Package: %s" % base_name in f.read())
def clean_dir(test, dir):
shutil.rmtree(dir)
test.assertFalse(isdir(dir))
def check_exception(test, expect, p):
out, _ = p.communicate()
test.assertTrue(expect in out.decode('utf-8'))
def get_licenses():
first_lines = [
"Apache License, Version 2.0",
"Copyright (c) <YEAR>, <OWNER>",
"Copyright (c) <YEAR>, <OWNER>",
"The GNU General Public License (GPL-2.0)",
"GNU GENERAL PUBLIC LICENSE",
"GNU Lesser General Public License",
"GNU LESSER GENERAL PUBLIC LICENSE",
"The MIT License (MIT)",
"Mozilla Public License, version 2.0",
"Eclipse Public License, Version 1.0 (EPL-1.0)",
"# Every Spark Package must have a license in order to be published. You may"
]
return [(x[0], x[1], y) for x, y in zip(licenses, first_lines)]
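# get_licenses pairs each (name, url) entry from `licenses` with the first line
# expected in the generated LICENSE file, so the tests can spot-check each template.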
class TestCommandLineToolInit(unittest.TestCase):
def test_simple(self):
p = run_cmd(["init"])
check_exception(self, "Please specify the name of the package using -n or --name.", p)
def test_bad_name(self):
p = run_cmd(["init", "-n", "noslash"])
check_exception(self, "The name of the package must contain exactly one slash.", p)
p = run_cmd(["init", "-n", "abc/03/doubleslash"])
check_exception(self, "The name of the package must contain exactly one slash.", p)
p = run_cmd(["init", "-n", "w3!rd/ch@rs"])
check_exception(self, "The name of the package can only contain letters, numbers,", p)
def test_matrix(self):
has_lang_opts = [True, False]
i = 0
for has_scala in has_lang_opts:
for has_r in has_lang_opts:
for has_python in has_lang_opts:
for has_java in has_lang_opts:
temp_dir = tempfile.mkdtemp()
name = "test/trial-%s" % i
langs = []
if has_java:
langs.append("-j")
if has_scala:
langs.append("-s")
if has_python:
langs.append("-p")
if has_r:
langs.append("-r")
if not has_java and not has_scala and not has_python and not has_r:
has_scala = True
p = run_cmd(["init", "-n", name, "-o", temp_dir] + langs)
communicate(p, "1")
self.assertTrue(p.returncode == 0)
check_scala_files(self, temp_dir, name, exists=has_scala)
check_base_files(self, temp_dir, name)
check_sbt_files(self, temp_dir, name, exists=has_scala | has_java)
check_python_files(self, temp_dir, name, exists=has_python)
check_r_files(self, temp_dir, name, exists=has_r)
check_java_files(self, temp_dir, name, exists=has_java)
clean_dir(self, temp_dir)
i += 1
def test_license(self):
i = 1
for license_name, url, first_line in get_licenses():
temp_dir = tempfile.mkdtemp()
name = "license-%s" % i
p = run_cmd(["init", "-n", "test/" + name, "-o", temp_dir])
communicate(p, str(i))
check_base_files(self, temp_dir, "test/" + name)
if i != len(licenses):
with open(join(temp_dir, name, "build.sbt"), "r") as f:
contents = f.read()
self.assertTrue(license_name in contents)
self.assertTrue(url in contents)
with open(join(temp_dir, name, "LICENSE"), "r") as f:
self.assertTrue(first_line in f.readline())
i += 1
clean_dir(self, temp_dir)
def check_pom(test, pom, org_name, artifact_name, version, dependencies):
"""
Check the contents of the pom. Make sure the groupId, artifactId, and version are properly set.
:param org_name: organization (group) id of the package
:param artifact_name: artifact id of package
:param version: version of release
:param dependencies: List of dependencies expected in the pom
"""
contents = pom.read().decode('utf-8')
def gen_coordinate_regex(org, artifact, v):
regex = """<groupId>\\s*%s\\s*<\\/groupId>\\s*""" % org
regex += """<artifactId>\\s*%s\\s*<\\/artifactId>\\s*""" % artifact
regex += """<version>\\s*%s\\s*<\\/version>""" % v
return regex
main = gen_coordinate_regex(org_name, artifact_name, version)
test.assertTrue(len(re.findall(main, contents)) == 1)
for dep_org, dep_art, dep_version in dependencies:
dep = gen_coordinate_regex(dep_org, dep_art, dep_version)
test.assertTrue(len(re.findall(dep, contents)) == 1)
pom.close()
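# For example (illustrative), gen_coordinate_regex("org.test", "zip-test", "0.2")
# matches '<groupId>org.test</groupId><artifactId>zip-test</artifactId>'
# '<version>0.2</version>', tolerating whitespace around each value.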
def check_jar(test, jar, files):
"""
    Check the contents of the jar. Make sure the expected entries are present.
:param files: List of entries expected in the jar
"""
jar_file = zipfile.PyZipFile(jar, 'r')
entries = jar_file.namelist()
for expected in files:
test.assertTrue(expected in entries)
jar_file.close()
def check_zip(test, temp_dir, org_name, artifact_name, version, files, dependencies):
"""
Checks if the zip exists and the contents of the pom and jar are valid.
:param temp_dir: Directory where the zip should exist
:param org_name: organization (group) id of the package
:param artifact_name: artifact id of package
:param version: version of release
:param files: List of entries expected in the jar
:param dependencies: List of dependencies expected in the pom
"""
artifact_format = "%s-%s" % (artifact_name, version)
    zip_path = join(temp_dir, artifact_format + ".zip")
    test.assertTrue(isfile(zip_path))
    with zipfile.PyZipFile(zip_path, 'r') as myzip:
entries = myzip.namelist()
test.assertTrue(artifact_format + ".pom" in entries)
test.assertTrue(artifact_format + ".jar" in entries)
check_jar(test, myzip.extract(artifact_format + ".jar", temp_dir), files)
check_pom(test, myzip.open(artifact_format + ".pom"),
org_name, artifact_name, version, dependencies)
def write_file(path, contents):
with open(path, 'w') as f:
f.write(contents)
def create_jar(temp_dir, artifact_name, version, files):
jar = zipfile.PyZipFile(join(temp_dir, artifact_name,
"%s-%s.jar" % (artifact_name, version)), 'w')
for f in files:
jar.write(f, f.replace(temp_dir, ""))
jar.close()
def create_pom(temp_dir, group_id, artifact_id, version):
contents = ("""
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>%s</groupId>
<artifactId>%s</artifactId>
<version>%s</version>
</project>""" % (group_id, artifact_id, version)).strip()
write_file(join(temp_dir, "pom.xml"), contents)
class TestCommandLineToolZip(unittest.TestCase):
def test_zip_missing_args(self):
temp_dir = tempfile.mkdtemp()
name = "test/zip-test"
p = run_cmd(["init", "-n", name, "-o", temp_dir])
communicate(p, "1")
p = run_cmd(["zip"])
check_exception(self, "Please specify the name of the package using -n or --name", p)
p = run_cmd(["zip", "-n", name])
check_exception(self, "Please specify the folder of the spark package", p)
p = run_cmd(["zip", "-n", name, "-f", join(temp_dir, "zip-test")])
check_exception(self, "Please specify a version for the release", p)
clean_dir(self, temp_dir)
def test_zip_bad_names(self):
p = run_cmd(["zip", "-n", "noslash"])
check_exception(self, "The name of the package must contain exactly one slash.", p)
p = run_cmd(["zip", "-n", "abc/03/doubleslash"])
check_exception(self, "The name of the package must contain exactly one slash.", p)
p = run_cmd(["zip", "-n", "w3!rd/ch@rs"])
check_exception(self, "The name of the package can only contain letters, numbers,", p)
def test_zip_proper(self):
temp_dir = tempfile.mkdtemp()
org_name = "test"
base_name = "zip-test"
name = org_name + "/" + base_name
p = run_cmd(["init", "-n", name, "-o", temp_dir, "-p"])
out, err = communicate(p, "1")
version = "0.2"
p = run_cmd(["zip", "-n", name, "-o", temp_dir, "-v", version,
"-f", join(temp_dir, base_name)])
# p.wait()
out, err = p.communicate()
jar_contents = ["setup.pyc", "requirements.txt", "tests.pyc"]
check_zip(self, temp_dir, org_name, base_name, version, files=jar_contents, dependencies=[])
clean_dir(self, temp_dir)
def test_zip_existing_jar(self):
temp_dir = tempfile.mkdtemp()
org_name = "test"
base_name = "zip-test"
name = org_name + "/" + base_name
p = run_cmd(["init", "-n", name, "-o", temp_dir, "-p", "-s"])
communicate(p, "1")
version = "0.2"
test1 = join(temp_dir, "test.class")
test2 = join(temp_dir, "test2.class")
write_file(join(temp_dir, "test.class"), "hulahulahulahey")
write_file(join(temp_dir, "test2.class"), "hulahulahulaheyheyhey")
create_jar(temp_dir, base_name, version, [test1, test2])
self.assertTrue(isfile(join(temp_dir, base_name, "%s-%s.jar" % (base_name, version))))
create_pom(join(temp_dir, base_name), "org.test", base_name, version)
self.assertTrue(isfile(join(temp_dir, base_name, "pom.xml")))
p = run_cmd(["zip", "-n", name, "-o", temp_dir, "-v", version,
"-f", join(temp_dir, base_name)])
p.wait()
jar_contents = ["setup.pyc", "requirements.txt", "tests.pyc", "test.class", "test2.class"]
check_zip(self, temp_dir, org_name, base_name, version, files=jar_contents, dependencies=[])
clean_dir(self, temp_dir)
def test_zip_python_dependencies(self):
temp_dir = tempfile.mkdtemp()
org_name = "test"
base_name = "zip-test"
name = org_name + "/" + base_name
p = run_cmd(["init", "-n", name, "-o", temp_dir, "-p"])
communicate(p, "1")
version = "0.2"
deps_file = join(temp_dir, base_name, "python", "spark-package-deps.txt")
write_file(deps_file, """wrong/format\n""")
p = run_cmd(["zip", "-n", name, "-o", temp_dir, "-v", version,
"-f", join(temp_dir, base_name)])
check_exception(self, ":package_name==:version` in spark-package-deps.txt", p)
write_file(deps_file, """wrong:format==2\n""")
p = run_cmd(["zip", "-n", name, "-o", temp_dir, "-v", version,
"-f", join(temp_dir, base_name)])
check_exception(self, "supplied as: `:repo_owner_name/:repo_name` in", p)
write_file(deps_file, """right/format==3\n""")
p = run_cmd(["zip", "-n", name, "-o", temp_dir, "-v", version,
"-f", join(temp_dir, base_name)])
p.wait()
jar_contents = ["setup.pyc", "requirements.txt", "tests.pyc"]
check_zip(self, temp_dir, org_name, base_name, version,
files=jar_contents, dependencies=[("right", "format", "3")])
clean_dir(self, temp_dir)
class TestCommandLineToolRegister(unittest.TestCase):
def test_register_bad_args(self):
p = run_cmd(["register"])
check_exception(self, "Please specify the name of the package using -n or --name", p)
def test_ask_git_creds(self):
p = spawn(["register", "-n", "test/register"])
input_and_expect(p, [
(b"Please enter your Github username.*", "git-user"),
(b"Github Personal access token with read\:org.*", "git-password"),
(b"Please supply a short \(one line\) description of.*", None)])
p.kill(0)
@responses.activate
def test_simple_register(self):
responses.add(
responses.POST, 'https://spark-packages.org/api/submit-package',
body="",
status=201)
register_package_http("test/register", "fake", "token", "short", "long", "http://homepage")
self.assertTrue(len(responses.calls) == 1)
self.assertTrue(responses.calls[0].request.url ==
'https://spark-packages.org/api/submit-package')
if __name__ == '__main__':
unittest.main()
|
|
'''
corcle.py
'Imagine super hexagon X two cars'
Nicholas Ruggles
'''
from __future__ import division
import pygame, sys, math, random, os, time, copy
from pygame.locals import *
FPS = 60
WINDOWWIDTH = 1000
WINDOWHEIGHT = 700
# R G B
DARKTURQUOISE = ( 3, 54, 73)
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
BLUE = ( 0, 0, 255)
DARKBLUE = ( 0, 0, 127)
RED = (255, 0, 0)
PURPLE = (255, 0, 255)
GREEN = ( 0, 255, 0)
YELLOW = (255, 255, 0)
ORANGE = (255, 127, 0)
BGCOLOR = BLACK
PITCOLOR = BLACK
LINECOLOR = DARKBLUE
COLOR1 = RED
COLOR2 = GREEN
FONTCOLOR = WHITE
# Paddle attributes
PADDIAMETER = 150
PADLENGTH = math.pi/3
PADSPEED = math.pi/24
PADWIDTH = 5
# Pit attributes
PITRADIUS = 25
# Dot attributes
DOTSPEED = 3.5
BASEFREQ = 180
NUMLINES = 20
FONTSIZE = 25
WAITTIME = 500
SPAWNINTERVAL = 900 # How often we change the spawn regime, in frames
BLANKINTERVAL = 120 # Free time given between spawn regimes
def main():
global DISPLAYSURF, FPSCLOCK, GAMEFONT
# Font junk
dataDir = 'data'
fontName = 'FreeSansBold.ttf'
myFontFile = resource_path(os.path.join(dataDir, fontName))
# print myFontFile
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
GAMEFONT = pygame.font.Font( myFontFile, FONTSIZE)
pygame.display.set_caption('Corcle')
startScreen()
while True:
timeAlive = runGame()
endScreen(timeAlive)
def startScreen():
DISPLAYSURF.fill(BLACK)
textSurf = GAMEFONT.render("Use A-S and K-L to move the paddles", False, FONTCOLOR)
textRect = textSurf.get_rect()
textRect.center = (WINDOWWIDTH//2.2, WINDOWHEIGHT//4)
DISPLAYSURF.blit(textSurf, textRect)
pressSurf = GAMEFONT.render("Press any key to continue", False, FONTCOLOR)
    pressRect = pressSurf.get_rect()
pressRect.center = (WINDOWWIDTH//2, WINDOWHEIGHT//2)
DISPLAYSURF.blit(pressSurf, pressRect)
pygame.display.update()
pygame.time.wait(WAITTIME)
pygame.event.get()
while True:
checkForQuit()
if checkForKeyPress():
return
def runGame():
global SPAWNCOLOR
firstPaddle = paddle(COLOR1, PADDIAMETER, PADLENGTH, PADWIDTH, math.pi)
secondPaddle = paddle(COLOR2, PADDIAMETER + PADWIDTH*2, PADLENGTH, PADWIDTH, 0)
centerPit = pit(PITCOLOR, PITRADIUS)
dotList = []
frameCount = 0
timeAlive = frameCount / 60
lastSpawnTime = time.time()
regimeList = [spawnSimultaneous, spawnRandom, spawnSame, spawnAlternating]
dotSpawn = random.choice(regimeList)
degrees = 0
rotationSpeed = 0
while True:
# Event Code
checkForQuit()
pygame.event.get() # Clear event Queue
keys = pygame.key.get_pressed()
if keys[K_a]:
firstPaddle.move(PADSPEED)
if keys[K_s]:
firstPaddle.move(-PADSPEED)
if keys[K_k]:
secondPaddle.move(PADSPEED)
if keys[K_l]:
secondPaddle.move(-PADSPEED)
# Dot spawning functions either return None, or a list of dot objects
if frameCount%SPAWNINTERVAL == 0:
SPAWNCOLOR = random.choice((COLOR1, COLOR2))
spawnRegime = copy.copy(regimeList)
spawnRegime.remove(dotSpawn)
dotSpawn = random.choice(spawnRegime)
rotationSpeed = (random.uniform(0.25, 0.5)) * ((frameCount + 1800) / 1800) * random.choice((-1, 1))
if (frameCount%SPAWNINTERVAL < SPAWNINTERVAL - BLANKINTERVAL):
newDotList = dotSpawn(frameCount, time.time() - lastSpawnTime)
if newDotList != None:
lastSpawnTime = time.time()
for dot in newDotList:
dotList.append(dot)
# Collision Code
i = 0
for dot in dotList:
if firstPaddle.collide(dot.getPos()):
if (firstPaddle.getColor() == dot.getColor()):
dotList.pop(i)
else:
return timeAlive
elif secondPaddle.collide(dot.getPos()):
if (secondPaddle.getColor() == dot.getColor()):
dotList.pop(i)
else:
return timeAlive
elif centerPit.collide(dot.getPos()):
return timeAlive
else:
dot.move()
i += 1
# Draw Code
DISPLAYSURF.fill(BGCOLOR)
degrees += (math.sin((frameCount%1000)/1000) - 0.5) * rotationSpeed
rotAngle = degrees*math.pi/180
drawRadialLines(LINECOLOR, NUMLINES, rotAngle)
centerPit.draw()
firstPaddle.draw(rotAngle)
secondPaddle.draw(rotAngle)
# Framerate, for debug purposes
#pygame.display.set_caption('%d' % FPSCLOCK.get_fps() )
for dot in dotList:
dot.draw(rotAngle)
# Update Screen & wait for next frame
pygame.display.update()
FPSCLOCK.tick(FPS)
frameCount += 1
timeAlive = frameCount / 60
def endScreen(timeAlive):
DISPLAYSURF.fill(BLACK)
scoreSurf = GAMEFONT.render("You stayed alive for %.2f seconds" % timeAlive, False, FONTCOLOR)
scoreRect = scoreSurf.get_rect()
scoreRect.center = (WINDOWWIDTH // 2, WINDOWHEIGHT // 4)
DISPLAYSURF.blit(scoreSurf, scoreRect)
playSurf = GAMEFONT.render("Press any key to play again", False, FONTCOLOR)
playRect = playSurf.get_rect()
playRect.center = (WINDOWWIDTH // 2, WINDOWHEIGHT // 2)
DISPLAYSURF.blit(playSurf, playRect)
pygame.display.update()
pygame.time.wait(WAITTIME)
pygame.event.get()
while True:
checkForQuit()
if checkForKeyPress():
return
class paddle(object):
def __init__(self, color, diameter, arcLength, arcWidth, arcPos):
self.color = color
self.diameter = diameter
self.arcLength = arcLength
self.arcWidth = arcWidth
self.arcPos = arcPos
if self.arcPos > 2*math.pi:
self.arcPos -= 2*math.pi
if self.arcPos < 0:
self.arcPos += 2*math.pi
self.circleRect = pygame.Rect((WINDOWWIDTH - diameter)/2, (WINDOWHEIGHT - diameter)/2, diameter, diameter)
def draw(self, rotAngle=0):
pygame.draw.arc(DISPLAYSURF, self.color, self.circleRect, self.arcPos - rotAngle, self.arcPos + self.arcLength - rotAngle, self.arcWidth)
def move(self, arcDelta):
self.arcPos = self.arcPos + arcDelta
if self.arcPos > 2*math.pi:
self.arcPos -= 2*math.pi
if self.arcPos < 0:
self.arcPos += 2*math.pi
def collide(self, pos):
distance = ( (pos[0] - self.circleRect.center[0])**2 + (pos[1] - self.circleRect.center[1])**2 )**0.5
theta = getAngle(pos)
if (theta < self.arcLength) and (self.arcPos + self.arcLength > 2*math.pi) :
theta += 2*math.pi
return (distance < self.diameter/2) and (distance > self.diameter/2 - self.arcWidth) \
and (theta > self.arcPos) and (theta < self.arcPos + self.arcLength)
def getColor(self):
return self.color
class pit(object):
def __init__(self, color, radius):
self.color = color
self.radius = radius
self.pitPos = (WINDOWWIDTH//2, WINDOWHEIGHT//2)
def draw(self):
pygame.draw.circle(DISPLAYSURF, self.color, self.pitPos, self.radius)
def collide(self, pos):
# Returns True if x, y in pit
distance = ((pos[0] - self.pitPos[0])**2 + (pos[1] - self.pitPos[1])**2)**0.5
return distance < self.radius
class dot(object):
def __init__(self, color, xPos, yPos, direction, speed):
self.color = color
self.direction = direction
self.xPos = xPos - 5
self.yPos = yPos - 5
self.speed = speed
self.dotRect = (self.xPos, self.yPos, 10, 10)
def draw(self, rotAngle=0):
self.updateDrawRect(rotAngle)
pygame.draw.rect(DISPLAYSURF, self.color, self.drawRect)
def move(self):
self.xPos = self.xPos - self.speed * math.cos(self.direction)
self.yPos = self.yPos - self.speed * math.sin(self.direction)
self.dotRect = (self.xPos, self.yPos, 10, 10)
def updateDrawRect(self, rotAngle):
angle = getAngle((self.xPos, self.yPos))
centerX = WINDOWWIDTH // 2
centerY = WINDOWHEIGHT // 2
radius = ( (self.xPos + 5 - centerX )**2 + (self.yPos + 5 - centerY )**2 )**0.5
drawX = math.cos(angle - rotAngle) * radius + centerX
drawY = - math.sin(angle - rotAngle) * radius + centerY
self.drawRect = pygame.Rect(drawX - 5, drawY - 5, 10, 10)
def getPos(self):
return (self.xPos + 5, self.yPos + 5)
def getColor(self):
return self.color
def spawnDot(color, angle, speed):
    # Returns a dot object positioned at the edge of the screen, facing the center.
x = WINDOWWIDTH//2 + WINDOWHEIGHT*math.cos(angle)
y = WINDOWHEIGHT//2 - WINDOWHEIGHT*math.sin(angle)
newDot = dot(color, x, y, -angle, speed)
return newDot
def getAngle(pos):
'''
Gets angle from center of screen
'''
# Get coords relative to center
x = pos[0] - WINDOWWIDTH//2
y = pos[1] - WINDOWHEIGHT//2
if y < 0:
angle = math.atan2(-y, x)
else:
angle = math.pi - math.atan2(-y, -x)
return angle
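# Illustrative sketch (added for clarity, not called by the game): getAngle
# measures the angle counter-clockwise from the positive x axis, compensating
# for the inverted screen y axis, and always returns a value in [0, 2*pi).
def _example_get_angle():
    cx, cy = WINDOWWIDTH//2, WINDOWHEIGHT//2
    assert abs(getAngle((cx + 10, cy)) - 0.0) < 1e-6            # right of center
    assert abs(getAngle((cx, cy - 10)) - math.pi/2) < 1e-6      # above center
    assert abs(getAngle((cx - 10, cy)) - math.pi) < 1e-6        # left of center
    assert abs(getAngle((cx, cy + 10)) - 3*math.pi/2) < 1e-6    # below center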
def terminate():
pygame.quit()
sys.exit()
def checkForQuit():
for event in pygame.event.get():
if event.type == QUIT:
terminate()
if event.type == KEYUP:
if event.key == K_ESCAPE:
terminate()
pygame.event.post(event)
def checkForKeyPress():
for event in pygame.event.get():
if event.type == KEYUP:
return True
return False
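# The four spawn regimes below share one pattern: each frame they compare the
# time since the last spawn against a threshold that shrinks as frameCount
# grows, so dots spawn faster the longer the player survives.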
def spawnSimultaneous(frameCount, timeSinceSpawn):
newDotList = []
spawnThreshold = (BASEFREQ - (frameCount**0.5) )/FPS
if timeSinceSpawn > spawnThreshold:
firstSpawn = random.uniform(0, math.pi*2)
newDotList.append(spawnDot(COLOR1, firstSpawn , DOTSPEED))
newDotList.append(spawnDot(COLOR2, firstSpawn + random.uniform(PADLENGTH, math.pi*2 - PADLENGTH), DOTSPEED))
return newDotList
else:
return None
def spawnRandom(frameCount, timeSinceSpawn):
newDotList = []
spawnThreshold = (BASEFREQ/1.6 - (frameCount**0.5) )/FPS
if timeSinceSpawn > spawnThreshold:
newDotList.append(spawnDot(random.choice((COLOR1, COLOR2)), random.uniform(0, math.pi*2), DOTSPEED))
return newDotList
return None
def spawnSame(frameCount, timeSinceSpawn):
newDotList = []
spawnThreshold = (BASEFREQ/2 - (frameCount**0.5))/FPS
if timeSinceSpawn > spawnThreshold:
newDotList.append(spawnDot(SPAWNCOLOR, random.uniform(0, math.pi*2), DOTSPEED))
return newDotList
return None
def spawnAlternating(frameCount, timeSinceSpawn):
global SPAWNCOLOR
newDotList = []
spawnThreshold = ( BASEFREQ/1.7 - (frameCount**0.5)) /FPS
if timeSinceSpawn > spawnThreshold:
if SPAWNCOLOR == COLOR1:
SPAWNCOLOR = COLOR2
else:
SPAWNCOLOR = COLOR1
newDotList.append(spawnDot(SPAWNCOLOR, random.uniform(0, math.pi*2), DOTSPEED))
return newDotList
return None
def drawRadialLines(color, numLines, rotAngle=0):
    # Draw numLines lines radiating from the center of the screen
assert numLines > 0
lineDiff = 2 * math.pi / numLines
center = (WINDOWWIDTH // 2, WINDOWHEIGHT // 2)
for i in range(numLines) :
angle = lineDiff * i + rotAngle
endX = center[0] + WINDOWHEIGHT*math.cos(angle)
endY = center[1] + WINDOWHEIGHT*math.sin(angle)
pygame.draw.line(DISPLAYSURF, color, center, (endX, endY))
def resource_path(relative):
if hasattr(sys, "_MEIPASS"):
return os.path.join(sys._MEIPASS, relative)
return os.path.join(relative)
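# resource_path resolves bundled data files: PyInstaller one-file builds set
# sys._MEIPASS to the unpack directory; otherwise the relative path is used as-is.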
if __name__ == '__main__':
main()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from cinderclient import exceptions as cinder_exp
import mock
import mox
from oslo_config import cfg
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import cinder
from heat.engine.clients.os import nova
from heat.engine.resources.aws.ec2 import instance
from heat.engine.resources.aws.ec2 import volume as aws_vol
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.tests.cinder import test_volume_utils as vt_base
from heat.tests.nova import fakes as fakes_nova
from heat.tests import utils
volume_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Volume Test",
"Parameters" : {},
"Resources" : {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "foo",
"InstanceType" : "m1.large",
"KeyName" : "test",
"UserData" : "some data"
}
},
"DataVolume" : {
"Type" : "AWS::EC2::Volume",
"Properties" : {
"Size" : "1",
"AvailabilityZone" : {"Fn::GetAtt": ["WikiDatabase",
"AvailabilityZone"]},
"Tags" : [{ "Key" : "Usage", "Value" : "Wiki Data Volume" }]
}
},
"MountPoint" : {
"Type" : "AWS::EC2::VolumeAttachment",
"Properties" : {
"InstanceId" : { "Ref" : "WikiDatabase" },
"VolumeId" : { "Ref" : "DataVolume" },
"Device" : "/dev/vdc"
}
}
}
}
'''
class VolumeTest(vt_base.BaseVolumeTest):
def setUp(self):
super(VolumeTest, self).setUp()
self.t = template_format.parse(volume_template)
self.use_cinder = False
def _mock_create_volume(self, fv, stack_name, final_status='available'):
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
vol_name = utils.PhysName(stack_name, 'DataVolume')
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description=vol_name,
name=vol_name,
metadata={u'Usage': u'Wiki Data Volume'}).AndReturn(
vt_base.FakeVolume(fv))
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume(final_status, id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
return fv_ready
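    # _mock_create_volume records the expected cinder calls for a create: the
    # client plugin lookup, volumes.create with the physical resource name, and
    # two volumes.get polls, the second returning `final_status`.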
def test_volume(self):
stack_name = 'test_volume_create_stack'
# create script
fv = self._mock_create_volume(vt_base.FakeVolume('creating'),
stack_name)
# failed delete due to in-use script
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('in-use'))
# delete script
self._mock_delete_volume(fv)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'DataVolume')
ex = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.destroy))
self.assertIn("Volume in use", six.text_type(ex))
scheduler.TaskRunner(rsrc.destroy)()
self.m.VerifyAll()
def test_volume_default_az(self):
fv = vt_base.FakeVolume('creating')
stack_name = 'test_volume_defaultaz_stack'
# create script
nova.NovaClientPlugin._create().AndReturn(self.fc)
self.m.StubOutWithMock(instance.Instance, 'handle_create')
self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
self.m.StubOutWithMock(instance.Instance, '_resolve_attribute')
self.m.StubOutWithMock(aws_vol.VolumeAttachment,
'handle_create')
self.m.StubOutWithMock(aws_vol.VolumeAttachment,
'check_create_complete')
instance.Instance.handle_create().AndReturn(None)
instance.Instance.check_create_complete(None).AndReturn(True)
instance.Instance._resolve_attribute(
'AvailabilityZone').MultipleTimes().AndReturn(None)
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.stub_ImageConstraint_validate()
self.stub_ServerConstraint_validate()
self.stub_VolumeConstraint_validate()
vol_name = utils.PhysName(stack_name, 'DataVolume')
self.cinder_fc.volumes.create(
size=1, availability_zone=None,
description=vol_name,
name=vol_name,
metadata={u'Usage': u'Wiki Data Volume'}).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
aws_vol.VolumeAttachment.handle_create().AndReturn(None)
aws_vol.VolumeAttachment.check_create_complete(
None).AndReturn(True)
# delete script
self.m.StubOutWithMock(instance.Instance, 'handle_delete')
self.m.StubOutWithMock(aws_vol.VolumeAttachment, 'handle_delete')
self.m.StubOutWithMock(aws_vol.VolumeAttachment,
'check_delete_complete')
instance.Instance.handle_delete().AndReturn(None)
self.cinder_fc.volumes.get('vol-123').AndRaise(
cinder_exp.NotFound('Not found'))
cookie = object()
aws_vol.VolumeAttachment.handle_delete().AndReturn(cookie)
aws_vol.VolumeAttachment.check_delete_complete(cookie).AndReturn(True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = stack['DataVolume']
self.assertIsNone(rsrc.validate())
scheduler.TaskRunner(stack.create)()
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
scheduler.TaskRunner(stack.delete)()
self.m.VerifyAll()
def test_volume_create_error(self):
fv = vt_base.FakeVolume('creating')
stack_name = 'test_volume_create_error_stack'
cfg.CONF.set_override('action_retry_limit', 0)
self._mock_create_volume(fv, stack_name, final_status='error')
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
ex = self.assertRaises(exception.ResourceFailure,
self.create_volume, self.t, stack, 'DataVolume')
self.assertIn('Went to status error due to "Unknown"',
six.text_type(ex))
self.m.VerifyAll()
def test_volume_bad_tags(self):
stack_name = 'test_volume_bad_tags_stack'
self.t['Resources']['DataVolume']['Properties'][
'Tags'] = [{'Foo': 'bar'}]
stack = utils.parse_stack(self.t, stack_name=stack_name)
ex = self.assertRaises(exception.StackValidationFailed,
self.create_volume, self.t, stack, 'DataVolume')
self.assertEqual("Property error: "
"Resources.DataVolume.Properties.Tags[0]: "
"Unknown Property Foo", six.text_type(ex))
self.m.VerifyAll()
def test_volume_attachment_error(self):
stack_name = 'test_volume_attach_error_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'),
stack_name)
self._mock_create_server_volume_script(
vt_base.FakeVolume('attaching'), final_status='error')
self.stub_VolumeConstraint_validate()
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'DataVolume')
ex = self.assertRaises(exception.ResourceFailure,
self.create_attachment,
self.t, stack, 'MountPoint')
self.assertIn("Volume attachment failed - Unknown status error",
six.text_type(ex))
self.m.VerifyAll()
def test_volume_attachment(self):
stack_name = 'test_volume_attach_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'),
stack_name)
fva = self._mock_create_server_volume_script(
vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('detaching', id=fva.id))
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available', id=fva.id))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'DataVolume')
rsrc = self.create_attachment(self.t, stack, 'MountPoint')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_volume_detachment_err(self):
stack_name = 'test_volume_detach_err_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'),
stack_name)
fva = self._mock_create_server_volume_script(
vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception(400))
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available', id=fva.id))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'DataVolume')
rsrc = self.create_attachment(self.t, stack, 'MountPoint')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_volume_detach_non_exist(self):
fv = vt_base.FakeVolume('creating')
fva = vt_base.FakeVolume('in-use')
stack_name = 'test_volume_detach_nonexist_stack'
self._mock_create_volume(fv, stack_name)
self._mock_create_server_volume_script(fva)
self.stub_VolumeConstraint_validate()
# delete script
self.fc.volumes.delete_server_volume(u'WikiDatabase',
'vol-123').AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndRaise(
cinder_exp.NotFound('Not found'))
self.fc.volumes.get_server_volume(u'WikiDatabase', 'vol-123'
).AndRaise(
fakes_nova.fake_exception())
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'DataVolume')
rsrc = self.create_attachment(self.t, stack, 'MountPoint')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_volume_detach_deleting_volume(self):
fv = vt_base.FakeVolume('creating')
fva = vt_base.FakeVolume('deleting')
stack_name = 'test_volume_detach_deleting_volume_stack'
self._mock_create_volume(fv, stack_name)
self._mock_create_server_volume_script(fva)
self.stub_VolumeConstraint_validate()
# delete script
self.fc.volumes.delete_server_volume(u'WikiDatabase',
'vol-123').AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.get_server_volume(u'WikiDatabase', 'vol-123'
).AndRaise(
fakes_nova.fake_exception())
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'DataVolume')
rsrc = self.create_attachment(self.t, stack, 'MountPoint')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_volume_detach_with_latency(self):
stack_name = 'test_volume_detach_latency_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'),
stack_name)
fva = self._mock_create_server_volume_script(
vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('in-use', id=fva.id))
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('detaching', id=fva.id))
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available', id=fva.id))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'DataVolume')
rsrc = self.create_attachment(self.t, stack, 'MountPoint')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_volume_detach_with_error(self):
stack_name = 'test_volume_detach_werr_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'),
stack_name)
fva = self._mock_create_server_volume_script(
vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('error', id=fva.id))
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'DataVolume')
rsrc = self.create_attachment(self.t, stack, 'MountPoint')
detach_task = scheduler.TaskRunner(rsrc.delete)
ex = self.assertRaises(exception.ResourceFailure, detach_task)
self.assertIn('Volume detachment failed - Unknown status error',
six.text_type(ex))
self.m.VerifyAll()
def test_volume_delete(self):
stack_name = 'test_volume_delete_stack'
fv = vt_base.FakeVolume('creating')
self._mock_create_volume(fv, stack_name)
self.m.ReplayAll()
self.t['Resources']['DataVolume']['DeletionPolicy'] = 'Delete'
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'DataVolume')
self.m.StubOutWithMock(rsrc, "handle_delete")
rsrc.handle_delete().AndReturn(None)
self.m.StubOutWithMock(rsrc, "check_delete_complete")
rsrc.check_delete_complete(None).AndReturn(True)
self.m.ReplayAll()
scheduler.TaskRunner(rsrc.destroy)()
self.m.VerifyAll()
def test_volume_deleting_delete(self):
fv = vt_base.FakeVolume('creating')
stack_name = 'test_volume_deleting_stack'
fv = self._mock_create_volume(vt_base.FakeVolume('creating'),
stack_name)
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('deleting'))
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'DataVolume')
scheduler.TaskRunner(rsrc.destroy)()
self.m.VerifyAll()
def test_volume_update_not_supported(self):
stack_name = 'test_volume_updnotsup_stack'
fv = vt_base.FakeVolume('creating')
self._mock_create_volume(fv, stack_name)
self.m.ReplayAll()
t = template_format.parse(volume_template)
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = self.create_volume(t, stack, 'DataVolume')
props = copy.deepcopy(rsrc.properties.data)
props['Size'] = 2
props['Tags'] = None
props['AvailabilityZone'] = 'other'
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
updater = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, updater)
self.assertIn("NotSupported: resources.DataVolume: "
"Update to properties "
"AvailabilityZone, Size, Tags of DataVolume "
"(AWS::EC2::Volume) is not supported",
six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
def test_volume_check(self):
stack = utils.parse_stack(self.t, stack_name='volume_check')
res = stack['DataVolume']
fake_volume = vt_base.FakeVolume('available')
cinder = mock.Mock()
cinder.volumes.get.return_value = fake_volume
self.patchobject(res, 'client', return_value=cinder)
scheduler.TaskRunner(res.check)()
self.assertEqual((res.CHECK, res.COMPLETE), res.state)
fake_volume = vt_base.FakeVolume('in-use')
res.cinder().volumes.get.return_value = fake_volume
scheduler.TaskRunner(res.check)()
self.assertEqual((res.CHECK, res.COMPLETE), res.state)
def test_volume_check_not_available(self):
stack = utils.parse_stack(self.t, stack_name='volume_check_na')
res = stack['DataVolume']
cinder = mock.Mock()
fake_volume = vt_base.FakeVolume('foobar')
cinder.volumes.get.return_value = fake_volume
self.patchobject(res, 'client', return_value=cinder)
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(res.check))
self.assertEqual((res.CHECK, res.FAILED), res.state)
self.assertIn('foobar', res.status_reason)
def test_volume_check_fail(self):
stack = utils.parse_stack(self.t, stack_name='volume_check_fail')
res = stack['DataVolume']
cinder = mock.Mock()
cinder.volumes.get.side_effect = Exception('boom')
self.patchobject(res, 'client', return_value=cinder)
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(res.check))
self.assertEqual((res.CHECK, res.FAILED), res.state)
self.assertIn('boom', res.status_reason)
def test_snapshot(self):
stack_name = 'test_volume_snapshot_stack'
fv = self._mock_create_volume(vt_base.FakeVolume('creating'),
stack_name)
# snapshot script
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
fb = vt_base.FakeBackup('available')
self.cinder_fc.backups.create(fv.id).AndReturn(fb)
self.cinder_fc.backups.get(fb.id).AndReturn(fb)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self._mock_delete_volume(fv)
self.m.ReplayAll()
self.t['Resources']['DataVolume']['DeletionPolicy'] = 'Snapshot'
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'DataVolume')
scheduler.TaskRunner(rsrc.destroy)()
self.m.VerifyAll()
def test_snapshot_error(self):
stack_name = 'test_volume_snapshot_err_stack'
fv = self._mock_create_volume(vt_base.FakeVolume('creating'),
stack_name)
# snapshot script
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
fb = vt_base.FakeBackup('error')
self.cinder_fc.backups.create(fv.id).AndReturn(fb)
self.cinder_fc.backups.get(fb.id).AndReturn(fb)
self.m.ReplayAll()
self.t['Resources']['DataVolume']['DeletionPolicy'] = 'Snapshot'
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'DataVolume')
ex = self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.destroy))
self.assertIn('Unknown status error', six.text_type(ex))
self.m.VerifyAll()
def test_snapshot_no_volume(self):
"""Test that backup does not start for failed resource."""
stack_name = 'test_volume_snapshot_novol_stack'
cfg.CONF.set_override('action_retry_limit', 0)
fv = self._mock_create_volume(vt_base.FakeVolume('creating'),
stack_name,
final_status='error')
self._mock_delete_volume(fv)
self.m.ReplayAll()
self.t['Resources']['DataVolume']['DeletionPolicy'] = 'Snapshot'
self.t['Resources']['DataVolume']['Properties'][
'AvailabilityZone'] = 'nova'
stack = utils.parse_stack(self.t, stack_name=stack_name)
resource_defns = stack.t.resource_definitions(stack)
rsrc = aws_vol.Volume('DataVolume',
resource_defns['DataVolume'],
stack)
create = scheduler.TaskRunner(rsrc.create)
ex = self.assertRaises(exception.ResourceFailure, create)
self.assertIn('Went to status error due to "Unknown"',
six.text_type(ex))
scheduler.TaskRunner(rsrc.destroy)()
self.m.VerifyAll()
def test_create_from_snapshot(self):
stack_name = 'test_volume_create_from_snapshot_stack'
fv = vt_base.FakeVolume('restoring-backup')
fvbr = vt_base.FakeBackupRestore('vol-123')
# create script
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.m.StubOutWithMock(self.cinder_fc.restores, 'restore')
self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr)
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
vol_name = utils.PhysName(stack_name, 'DataVolume')
self.cinder_fc.volumes.update('vol-123',
description=vol_name, name=vol_name)
fv.status = 'available'
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.m.ReplayAll()
self.t['Resources']['DataVolume']['Properties'][
'SnapshotId'] = 'backup-123'
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'DataVolume')
self.m.VerifyAll()
def test_create_from_snapshot_error(self):
stack_name = 'test_volume_create_from_snap_err_stack'
cfg.CONF.set_override('action_retry_limit', 0)
fv = vt_base.FakeVolume('restoring-backup')
fvbr = vt_base.FakeBackupRestore('vol-123')
# create script
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.m.StubOutWithMock(self.cinder_fc.restores, 'restore')
self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr)
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
vol_name = utils.PhysName(stack_name, 'DataVolume')
self.cinder_fc.volumes.update(fv.id,
description=vol_name, name=vol_name)
fv.status = 'error'
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.m.ReplayAll()
self.t['Resources']['DataVolume']['Properties'][
'SnapshotId'] = 'backup-123'
stack = utils.parse_stack(self.t, stack_name=stack_name)
ex = self.assertRaises(exception.ResourceFailure,
self.create_volume, self.t, stack, 'DataVolume')
self.assertIn('Went to status error due to "Unknown"',
six.text_type(ex))
self.m.VerifyAll()
def test_volume_size_constraint(self):
self.t['Resources']['DataVolume']['Properties']['Size'] = '0'
stack = utils.parse_stack(self.t)
error = self.assertRaises(exception.StackValidationFailed,
self.create_volume,
self.t, stack, 'DataVolume')
self.assertEqual(
"Property error: Resources.DataVolume.Properties.Size: "
"0 is out of range (min: 1, max: None)", six.text_type(error))
def test_volume_attachment_updates_not_supported(self):
self.m.StubOutWithMock(nova.NovaClientPlugin, 'get_server')
nova.NovaClientPlugin.get_server(mox.IgnoreArg()).AndReturn(
mox.MockAnything())
fv = vt_base.FakeVolume('creating')
fva = vt_base.FakeVolume('attaching')
stack_name = 'test_volume_attach_updnotsup_stack'
self._mock_create_volume(fv, stack_name)
self._mock_create_server_volume_script(fva)
self.stub_VolumeConstraint_validate()
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'DataVolume')
rsrc = self.create_attachment(self.t, stack, 'MountPoint')
props = copy.deepcopy(rsrc.properties.data)
props['InstanceId'] = 'some_other_instance_id'
props['VolumeId'] = 'some_other_volume_id'
props['Device'] = '/dev/vdz'
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertIn('NotSupported: resources.MountPoint: '
'Update to properties Device, InstanceId, '
'VolumeId of MountPoint (AWS::EC2::VolumeAttachment)',
six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
|
|
from __future__ import unicode_literals
import mock
import os
import shutil
import subprocess
import tempfile
from django.conf import settings
from django.test import SimpleTestCase, override_settings
from semantic_version import Version
from systemjs.base import System, BundleError
from .helpers import mock_Popen
from .test_management import _bundle
@override_settings(STATIC_ROOT=tempfile.mkdtemp())
class BundleTests(SimpleTestCase):
def setUp(self):
super(BundleTests, self).setUp()
self.patcher = mock.patch.object(System, 'get_jspm_version')
mocked = self.patcher.start()
mocked.return_value = Version('0.15.0')
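# jspm < 0.16 is assumed by default, so bundle commands are built without the
# --log flag; tests that exercise the newer behaviour patch the version again.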
def tearDown(self):
super(BundleTests, self).tearDown()
self.patcher.stop()
try:
shutil.rmtree(settings.STATIC_ROOT)
except (OSError, IOError):
pass
@override_settings(SYSTEMJS_OUTPUT_DIR='SYSJS')
@mock.patch('subprocess.Popen')
def test_bundle_result(self, mock_subproc_popen):
"""
Test that bundling an app returns the correct relative path.
"""
system = System()
def side_effect(*args, **kwargs):
_bundle('app/dummy')
return ('output', 'error')
# mock Popen/communicate
mock_Popen(mock_subproc_popen, side_effect=side_effect)
path = system.bundle('app/dummy')
expected_path = os.path.join(settings.SYSTEMJS_OUTPUT_DIR, 'app/dummy.js')
self.assertEqual(path, expected_path)
@mock.patch('subprocess.Popen')
def test_bundle_subprocess(self, mock_subproc_popen):
"""
Test that bundling calls the correct subprocess command.
"""
app_name = 'app/dummy'
def side_effect(*args, **kwargs):
_bundle(app_name)
return ('output', 'error')
# mock Popen/communicate
process_mock = mock_Popen(mock_subproc_popen, side_effect=side_effect)
# Bundle app/dummy
system = System()
system.bundle(app_name)
self.assertEqual(mock_subproc_popen.call_count, 1)
command = mock_subproc_popen.call_args[0][0]
outfile = os.path.join(settings.STATIC_ROOT, 'SYSTEMJS/{0}.js'.format(app_name))
self.assertEqual(command, 'jspm bundle {0} {1}'.format(app_name, outfile))
with open(outfile, 'r') as of:
js = of.read()
self.assertEqual(js, "alert('foo')\nSystem.import('app/dummy.js');\n")
self.assertEqual(process_mock.communicate.call_count, 1)
@mock.patch('subprocess.Popen')
def test_bundlesfx_subprocess(self, mock_subproc_popen):
"""
Test that self-executing (sfx) bundling calls the correct bundle-sfx subprocess command.
"""
# mock Popen/communicate
process_mock = mock_Popen(mock_subproc_popen)
# Bundle app/dummy
system = System(sfx=True)
system.bundle('app/dummy')
self.assertEqual(mock_subproc_popen.call_count, 1)
command = mock_subproc_popen.call_args[0][0]
outfile = os.path.join(settings.STATIC_ROOT, 'SYSTEMJS/app/dummy.js')
self.assertEqual(command, 'jspm bundle-sfx app/dummy {0}'.format(outfile))
self.assertEqual(process_mock.communicate.call_count, 1)
@mock.patch('subprocess.Popen')
def test_bundle_minify_subprocess(self, mock_subproc_popen):
"""
Test that bundling with minification appends --minify to the subprocess command.
"""
app_name = 'app/dummy'
def side_effect(*args, **kwargs):
_bundle(app_name)
return ('output', 'error')
# mock Popen/communicate
process_mock = mock_Popen(mock_subproc_popen, side_effect=side_effect)
# Bundle app/dummy
system = System(minify=True)
system.bundle('app/dummy')
self.assertEqual(mock_subproc_popen.call_count, 1)
command = mock_subproc_popen.call_args[0][0]
outfile = os.path.join(settings.STATIC_ROOT, 'SYSTEMJS/app/dummy.js')
self.assertEqual(command, 'jspm bundle app/dummy {0} --minify'.format(outfile))
self.assertEqual(process_mock.communicate.call_count, 1)
@mock.patch('subprocess.Popen')
def test_bundle_skip_source_maps_subprocess(self, mock_subproc_popen):
"""
Test that bundling with skip_source_maps appends --skip-source-maps to the command.
"""
app_name = 'app/dummy'
def side_effect(*args, **kwargs):
_bundle(app_name)
return ('output', 'error')
# mock Popen/communicate
process_mock = mock_Popen(mock_subproc_popen, side_effect=side_effect)
# Bundle app/dummy
system = System(skip_source_maps=True)
system.bundle('app/dummy')
self.assertEqual(mock_subproc_popen.call_count, 1)
command = mock_subproc_popen.call_args[0][0]
outfile = os.path.join(settings.STATIC_ROOT, 'SYSTEMJS/app/dummy.js')
self.assertEqual(command,
'jspm bundle app/dummy {0} --skip-source-maps'.format(outfile))
outfile = os.path.join(settings.STATIC_ROOT,
'SYSTEMJS/{0}.js'.format(app_name))
with open(outfile, 'r') as of:
js = of.read()
self.assertEqual(js, "alert('foo')\nSystem.import('app/dummy.js');\n")
self.assertEqual(process_mock.communicate.call_count, 1)
@mock.patch('subprocess.Popen')
def test_oserror_caught(self, mock):
def oserror():
raise OSError('Error')
mock_Popen(mock, side_effect=oserror)
with self.assertRaises(BundleError):
system = System()
system.bundle('app/dummy')
@mock.patch('subprocess.Popen')
def test_ioerror_caught(self, mock):
def ioerror():
raise IOError('Error')
mock_Popen(mock, side_effect=ioerror)
with self.assertRaises(BundleError):
system = System()
system.bundle('app/dummy')
class JSPMIntegrationTests(SimpleTestCase):
@mock.patch('subprocess.Popen')
def test_jspm_version_subprocess(self, mock_subproc_popen):
"""
Test that JSPM version discovery works.
"""
# mock Popen/communicate
return_value = (b'0.15.7\nRunning against global jspm install.\n', '')
process_mock = mock_Popen(mock_subproc_popen, return_value=return_value)
system = System()
# Call version
version = system.get_jspm_version({'jspm': 'jspm'})
self.assertEqual(mock_subproc_popen.call_count, 1)
self.assertEqual(version, Version('0.15.7'))
command = mock_subproc_popen.call_args[0][0]
self.assertEqual(command, 'jspm --version')
self.assertEqual(process_mock.communicate.call_count, 1)
@mock.patch('subprocess.Popen')
def test_jspm_version_subprocess_error(self, mock_subproc_popen):
"""
Test that a jspm error during version discovery raises BundleError.
"""
# mock Popen/communicate
return_value = (b'gibberish', 'a jspm error')
process_mock = mock_Popen(mock_subproc_popen, return_value=return_value)
system = System()
# Call version
with self.assertRaises(BundleError):
system.get_jspm_version({'jspm': 'jspm'})
self.assertEqual(mock_subproc_popen.call_count, 1)
command = mock_subproc_popen.call_args[0][0]
self.assertEqual(command, 'jspm --version')
self.assertEqual(process_mock.communicate.call_count, 1)
@mock.patch('subprocess.Popen')
@mock.patch.object(System, 'get_jspm_version')
def test_jspm_016_log(self, mock_version, mock_subproc_popen):
"""
Test that bundles are generated with --log=err.
JSPM > 0.16.0 has the --log option that suppresses levels of
output.
"""
mock_version.return_value = Version('0.16.3')
app_name = 'app/dummy'
def side_effect(*args, **kwargs):
_bundle(app_name)
return ('', '') # no stdout, no stderr -> success
# mock Popen/communicate
process_mock = mock_Popen(mock_subproc_popen, side_effect=side_effect)
# Bundle app/dummy
system = System()
system.bundle(app_name)
self.assertEqual(mock_subproc_popen.call_count, 1)
command = mock_subproc_popen.call_args
outfile = os.path.join(settings.STATIC_ROOT, 'SYSTEMJS/{0}.js'.format(app_name))
self.assertEqual(
command,
mock.call(
'jspm bundle {0} {1} --log err'.format(app_name, outfile),
stderr=subprocess.PIPE, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
shell=True, cwd=None
)
)
with open(outfile, 'r') as of:
js = of.read()
self.assertEqual(js, "alert('foo')\nSystem.import('app/dummy.js');\n")
self.assertEqual(process_mock.communicate.call_count, 1)
@mock.patch('subprocess.Popen')
@mock.patch.object(System, 'get_jspm_version')
def test_jspm_016_log_error(self, mock_version, mock_subproc_popen):
"""
Test that a non-empty stderr while bundling with --log err raises BundleError.
JSPM > 0.16.0 has the --log option that suppresses levels of
output.
"""
mock_version.return_value = Version('0.16.3')
app_name = 'app/dummy'
def side_effect(*args, **kwargs):
_bundle(app_name)
return ('', 'Something went wrong')  # stderr present -> bundling must fail
# mock Popen/communicate
process_mock = mock_Popen(mock_subproc_popen, side_effect=side_effect)
# Bundle app/dummy
with self.assertRaises(BundleError) as ctx:
system = System()
system.bundle(app_name)
self.assertEqual(ctx.exception.args[0], "Could not bundle \'app/dummy\': \nSomething went wrong")
self.assertEqual(mock_subproc_popen.call_count, 1)
self.assertEqual(process_mock.communicate.call_count, 1)
@mock.patch('subprocess.Popen')
@mock.patch.object(System, 'get_jspm_version')
def test_sourcemap_comment(self, mock_version, mock_subproc_popen):
"""
Asserts that the sourcemap comment is still at the end.
"""
mock_version.return_value = Version('0.15.7')
app_name = 'app/dummy'
def side_effect(*args, **kwargs):
content = 'alert(\'foo\')\n//# sourceMappingURL=dummy.js.map'
_bundle(app_name, content=content)
return ('output', 'error')
# mock Popen/communicate
mock_Popen(mock_subproc_popen, side_effect=side_effect)
# Bundle app/dummy
system = System()
system.bundle(app_name)
outfile = os.path.join(settings.STATIC_ROOT, 'SYSTEMJS/{0}.js'.format(app_name))
with open(outfile, 'r') as of:
js = of.read()
self.assertEqual(js, "alert('foo')\nSystem.import('app/dummy.js');\n"
"//# sourceMappingURL=dummy.js.map")
@mock.patch('subprocess.Popen')
@mock.patch.object(System, 'get_jspm_version')
def test_sourcemap_comment_end_newline(self, mock_version, mock_subproc_popen):
"""
Asserts that the sourcemap comment is still at the end - with ending newline
"""
mock_version.return_value = Version('0.15.7')
app_name = 'app/dummy'
def side_effect(*args, **kwargs):
content = 'alert(\'foo\')\n//# sourceMappingURL=dummy.js.map\n'
_bundle(app_name, content=content)
return ('output', 'error')
# mock Popen/communicate
mock_Popen(mock_subproc_popen, side_effect=side_effect)
# Bundle app/dummy
system = System()
system.bundle(app_name)
outfile = os.path.join(settings.STATIC_ROOT, 'SYSTEMJS/{0}.js'.format(app_name))
with open(outfile, 'r') as of:
js = of.read()
self.assertEqual(js, "alert('foo')\nSystem.import('app/dummy.js');\n"
"//# sourceMappingURL=dummy.js.map")
@mock.patch('subprocess.Popen')
@mock.patch.object(System, 'get_jspm_version')
def test_sourcemap_comment_large_file(self, mock_version, mock_subproc_popen):
"""
Same test as test_sourcemap_comment, except with a 'file' that's more
than 100 bytes (to read multiple blocks).
"""
mock_version.return_value = Version('0.15.7')
app_name = 'app/dummy'
lorem = '''
Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,
quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse
cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non
proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
'''
def side_effect(*args, **kwargs):
content = 'alert(\'{}\')\n//# sourceMappingURL=dummy.js.map'.format(lorem)
_bundle(app_name, content=content)
return ('output', 'error')
# mock Popen/communicate
mock_Popen(mock_subproc_popen, side_effect=side_effect)
# Bundle app/dummy
system = System()
system.bundle(app_name)
outfile = os.path.join(settings.STATIC_ROOT, 'SYSTEMJS/{0}.js'.format(app_name))
with open(outfile, 'r') as of:
js = of.read()
self.assertEqual(js, "alert('{}')\nSystem.import('app/dummy.js');\n"
"//# sourceMappingURL=dummy.js.map".format(lorem))
|
|
# -*- coding: utf-8 -*-
"""
This file is part of the PROPheT tool.
Copyright (C) 2016: MKLab <pmitzias@iti.gr; mriga@iti.gr; skontopo@iti.gr>
http://mklab.iti.gr/project/prophet-ontology-populator
https://github.com/MKLab-ITI/prophet
Licensed under the Apache License, Version 2.0 (the "License").
You may use this file in compliance with the License.
For more details, see LICENCE file.
"""
# Form implementation generated from reading ui file 'ui\Preferences.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Preferences(object):
def setupUi(self, Preferences):
Preferences.setObjectName(_fromUtf8("Preferences"))
Preferences.resize(685, 441)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/preferences.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
Preferences.setWindowIcon(icon)
self.gridLayout = QtGui.QGridLayout(Preferences)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.progressBarLoad = QtGui.QProgressBar(Preferences)
self.progressBarLoad.setMinimumSize(QtCore.QSize(100, 10))
self.progressBarLoad.setMaximumSize(QtCore.QSize(140, 12))
self.progressBarLoad.setProperty("value", 40)
self.progressBarLoad.setTextVisible(False)
self.progressBarLoad.setObjectName(_fromUtf8("progressBarLoad"))
self.gridLayout.addWidget(self.progressBarLoad, 1, 0, 1, 1)
self.tabWidgetPreferences = QtGui.QTabWidget(Preferences)
self.tabWidgetPreferences.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
self.tabWidgetPreferences.setTabPosition(QtGui.QTabWidget.North)
self.tabWidgetPreferences.setIconSize(QtCore.QSize(16, 12))
self.tabWidgetPreferences.setObjectName(_fromUtf8("tabWidgetPreferences"))
self.tabGeneral = QtGui.QWidget()
self.tabGeneral.setObjectName(_fromUtf8("tabGeneral"))
self.gridLayout_7 = QtGui.QGridLayout(self.tabGeneral)
self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
self.lblCheckForUpdatesAtStartup = QtGui.QLabel(self.tabGeneral)
self.lblCheckForUpdatesAtStartup.setObjectName(_fromUtf8("lblCheckForUpdatesAtStartup"))
self.gridLayout_7.addWidget(self.lblCheckForUpdatesAtStartup, 0, 0, 1, 1)
self.checkBoxCheckForUpdatesAtStartupOption = QtGui.QCheckBox(self.tabGeneral)
self.checkBoxCheckForUpdatesAtStartupOption.setToolTip(_fromUtf8(""))
self.checkBoxCheckForUpdatesAtStartupOption.setLayoutDirection(QtCore.Qt.RightToLeft)
self.checkBoxCheckForUpdatesAtStartupOption.setText(_fromUtf8(""))
self.checkBoxCheckForUpdatesAtStartupOption.setChecked(False)
self.checkBoxCheckForUpdatesAtStartupOption.setObjectName(_fromUtf8("checkBoxCheckForUpdatesAtStartupOption"))
self.gridLayout_7.addWidget(self.checkBoxCheckForUpdatesAtStartupOption, 0, 3, 1, 1)
self.line_4 = QtGui.QFrame(self.tabGeneral)
self.line_4.setFrameShape(QtGui.QFrame.HLine)
self.line_4.setFrameShadow(QtGui.QFrame.Sunken)
self.line_4.setObjectName(_fromUtf8("line_4"))
self.gridLayout_7.addWidget(self.line_4, 1, 0, 1, 4)
self.lblEMInstancesLimit = QtGui.QLabel(self.tabGeneral)
self.lblEMInstancesLimit.setObjectName(_fromUtf8("lblEMInstancesLimit"))
self.gridLayout_7.addWidget(self.lblEMInstancesLimit, 2, 0, 1, 1)
self.spinBoxEMInstancesLimit = QtGui.QSpinBox(self.tabGeneral)
self.spinBoxEMInstancesLimit.setMaximumSize(QtCore.QSize(85, 16777215))
self.spinBoxEMInstancesLimit.setMinimum(1)
self.spinBoxEMInstancesLimit.setMaximum(100000)
self.spinBoxEMInstancesLimit.setObjectName(_fromUtf8("spinBoxEMInstancesLimit"))
self.gridLayout_7.addWidget(self.spinBoxEMInstancesLimit, 2, 2, 1, 2, QtCore.Qt.AlignRight)
self.line = QtGui.QFrame(self.tabGeneral)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.gridLayout_7.addWidget(self.line, 3, 0, 1, 4)
self.lblSameAsOption = QtGui.QLabel(self.tabGeneral)
self.lblSameAsOption.setObjectName(_fromUtf8("lblSameAsOption"))
self.gridLayout_7.addWidget(self.lblSameAsOption, 4, 0, 2, 1)
self.comboBoxSameAsOption = QtGui.QComboBox(self.tabGeneral)
self.comboBoxSameAsOption.setMaximumSize(QtCore.QSize(85, 16777215))
self.comboBoxSameAsOption.setObjectName(_fromUtf8("comboBoxSameAsOption"))
self.comboBoxSameAsOption.addItem(_fromUtf8(""))
self.comboBoxSameAsOption.addItem(_fromUtf8(""))
self.comboBoxSameAsOption.addItem(_fromUtf8(""))
self.gridLayout_7.addWidget(self.comboBoxSameAsOption, 5, 1, 1, 3, QtCore.Qt.AlignRight)
self.line_2 = QtGui.QFrame(self.tabGeneral)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.gridLayout_7.addWidget(self.line_2, 6, 0, 1, 4)
self.lblPropertyEquivalentToOption = QtGui.QLabel(self.tabGeneral)
self.lblPropertyEquivalentToOption.setObjectName(_fromUtf8("lblPropertyEquivalentToOption"))
self.gridLayout_7.addWidget(self.lblPropertyEquivalentToOption, 7, 0, 2, 1)
self.checkBoxPropertyEquivalentToOption = QtGui.QCheckBox(self.tabGeneral)
self.checkBoxPropertyEquivalentToOption.setToolTip(_fromUtf8(""))
self.checkBoxPropertyEquivalentToOption.setLayoutDirection(QtCore.Qt.RightToLeft)
self.checkBoxPropertyEquivalentToOption.setText(_fromUtf8(""))
self.checkBoxPropertyEquivalentToOption.setChecked(False)
self.checkBoxPropertyEquivalentToOption.setObjectName(_fromUtf8("checkBoxPropertyEquivalentToOption"))
self.gridLayout_7.addWidget(self.checkBoxPropertyEquivalentToOption, 8, 3, 1, 1)
self.line_5 = QtGui.QFrame(self.tabGeneral)
self.line_5.setFrameShape(QtGui.QFrame.HLine)
self.line_5.setFrameShadow(QtGui.QFrame.Sunken)
self.line_5.setObjectName(_fromUtf8("line_5"))
self.gridLayout_7.addWidget(self.line_5, 9, 0, 1, 4)
self.lblLabelOption = QtGui.QLabel(self.tabGeneral)
self.lblLabelOption.setObjectName(_fromUtf8("lblLabelOption"))
self.gridLayout_7.addWidget(self.lblLabelOption, 10, 0, 2, 1)
self.checkBoxLabelOption = QtGui.QCheckBox(self.tabGeneral)
self.checkBoxLabelOption.setToolTip(_fromUtf8(""))
self.checkBoxLabelOption.setLayoutDirection(QtCore.Qt.RightToLeft)
self.checkBoxLabelOption.setText(_fromUtf8(""))
self.checkBoxLabelOption.setChecked(False)
self.checkBoxLabelOption.setObjectName(_fromUtf8("checkBoxLabelOption"))
self.gridLayout_7.addWidget(self.checkBoxLabelOption, 11, 3, 1, 1)
self.line_3 = QtGui.QFrame(self.tabGeneral)
self.line_3.setFrameShape(QtGui.QFrame.HLine)
self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
self.line_3.setObjectName(_fromUtf8("line_3"))
self.gridLayout_7.addWidget(self.line_3, 12, 0, 1, 4)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_7.addItem(spacerItem, 13, 3, 1, 1)
self.btnSaveChangesInGeneralTab = QtGui.QPushButton(self.tabGeneral)
self.btnSaveChangesInGeneralTab.setMinimumSize(QtCore.QSize(100, 0))
self.btnSaveChangesInGeneralTab.setMaximumSize(QtCore.QSize(100, 16777215))
self.btnSaveChangesInGeneralTab.setObjectName(_fromUtf8("btnSaveChangesInGeneralTab"))
self.gridLayout_7.addWidget(self.btnSaveChangesInGeneralTab, 14, 1, 1, 3, QtCore.Qt.AlignRight)
self.tabWidgetPreferences.addTab(self.tabGeneral, icon, _fromUtf8(""))
self.tabMyModel = QtGui.QWidget()
self.tabMyModel.setObjectName(_fromUtf8("tabMyModel"))
self.gridLayout_6 = QtGui.QGridLayout(self.tabMyModel)
self.gridLayout_6.setObjectName(_fromUtf8("gridLayout_6"))
self.btnAddMyModel = QtGui.QPushButton(self.tabMyModel)
self.btnAddMyModel.setMaximumSize(QtCore.QSize(100, 16777215))
self.btnAddMyModel.setObjectName(_fromUtf8("btnAddMyModel"))
self.gridLayout_6.addWidget(self.btnAddMyModel, 1, 1, 1, 1)
self.tableWidgetMyModels = QtGui.QTableWidget(self.tabMyModel)
self.tableWidgetMyModels.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.tableWidgetMyModels.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.tableWidgetMyModels.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.tableWidgetMyModels.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.tableWidgetMyModels.setShowGrid(True)
self.tableWidgetMyModels.setObjectName(_fromUtf8("tableWidgetMyModels"))
self.tableWidgetMyModels.setColumnCount(2)
self.tableWidgetMyModels.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tableWidgetMyModels.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetMyModels.setHorizontalHeaderItem(1, item)
self.tableWidgetMyModels.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidgetMyModels.horizontalHeader().setStretchLastSection(True)
self.gridLayout_6.addWidget(self.tableWidgetMyModels, 0, 0, 1, 4)
self.btnDeleteMyModel = QtGui.QPushButton(self.tabMyModel)
self.btnDeleteMyModel.setMaximumSize(QtCore.QSize(100, 16777215))
self.btnDeleteMyModel.setObjectName(_fromUtf8("btnDeleteMyModel"))
self.gridLayout_6.addWidget(self.btnDeleteMyModel, 1, 2, 1, 1)
self.btnLoadMyModel = QtGui.QPushButton(self.tabMyModel)
self.btnLoadMyModel.setMaximumSize(QtCore.QSize(100, 16777215))
self.btnLoadMyModel.setObjectName(_fromUtf8("btnLoadMyModel"))
self.gridLayout_6.addWidget(self.btnLoadMyModel, 1, 3, 1, 1)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/load_model.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidgetPreferences.addTab(self.tabMyModel, icon1, _fromUtf8(""))
self.tabEndpoints = QtGui.QWidget()
self.tabEndpoints.setObjectName(_fromUtf8("tabEndpoints"))
self.gridLayout_2 = QtGui.QGridLayout(self.tabEndpoints)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.tableWidgetKnownEndpoints = QtGui.QTableWidget(self.tabEndpoints)
self.tableWidgetKnownEndpoints.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.tableWidgetKnownEndpoints.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.tableWidgetKnownEndpoints.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.tableWidgetKnownEndpoints.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.tableWidgetKnownEndpoints.setShowGrid(True)
self.tableWidgetKnownEndpoints.setObjectName(_fromUtf8("tableWidgetKnownEndpoints"))
self.tableWidgetKnownEndpoints.setColumnCount(2)
self.tableWidgetKnownEndpoints.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tableWidgetKnownEndpoints.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetKnownEndpoints.setHorizontalHeaderItem(1, item)
self.tableWidgetKnownEndpoints.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidgetKnownEndpoints.horizontalHeader().setStretchLastSection(True)
self.gridLayout_2.addWidget(self.tableWidgetKnownEndpoints, 0, 0, 1, 4)
self.btnAddEndpoint = QtGui.QPushButton(self.tabEndpoints)
self.btnAddEndpoint.setMaximumSize(QtCore.QSize(100, 16777215))
self.btnAddEndpoint.setObjectName(_fromUtf8("btnAddEndpoint"))
self.gridLayout_2.addWidget(self.btnAddEndpoint, 1, 1, 1, 1)
self.btnSelectEndpoint = QtGui.QPushButton(self.tabEndpoints)
self.btnSelectEndpoint.setMaximumSize(QtCore.QSize(100, 16777215))
self.btnSelectEndpoint.setObjectName(_fromUtf8("btnSelectEndpoint"))
self.gridLayout_2.addWidget(self.btnSelectEndpoint, 1, 3, 1, 1)
self.btnDeleteEndpoint = QtGui.QPushButton(self.tabEndpoints)
self.btnDeleteEndpoint.setMaximumSize(QtCore.QSize(100, 16777215))
self.btnDeleteEndpoint.setObjectName(_fromUtf8("btnDeleteEndpoint"))
self.gridLayout_2.addWidget(self.btnDeleteEndpoint, 1, 2, 1, 1)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/endpoint.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidgetPreferences.addTab(self.tabEndpoints, icon2, _fromUtf8(""))
self.tabNamespaces = QtGui.QWidget()
self.tabNamespaces.setObjectName(_fromUtf8("tabNamespaces"))
self.gridLayout_4 = QtGui.QGridLayout(self.tabNamespaces)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.btnDeleteNamespace = QtGui.QPushButton(self.tabNamespaces)
self.btnDeleteNamespace.setMaximumSize(QtCore.QSize(100, 16777215))
self.btnDeleteNamespace.setObjectName(_fromUtf8("btnDeleteNamespace"))
self.gridLayout_4.addWidget(self.btnDeleteNamespace, 1, 2, 1, 1)
self.btnRestoreDefaultNamespaces = QtGui.QPushButton(self.tabNamespaces)
self.btnRestoreDefaultNamespaces.setMaximumSize(QtCore.QSize(100, 16777215))
self.btnRestoreDefaultNamespaces.setObjectName(_fromUtf8("btnRestoreDefaultNamespaces"))
self.gridLayout_4.addWidget(self.btnRestoreDefaultNamespaces, 1, 3, 1, 1)
self.btnAddNamespace = QtGui.QPushButton(self.tabNamespaces)
self.btnAddNamespace.setMaximumSize(QtCore.QSize(100, 16777215))
self.btnAddNamespace.setObjectName(_fromUtf8("btnAddNamespace"))
self.gridLayout_4.addWidget(self.btnAddNamespace, 1, 1, 1, 1)
self.tableWidgetKnownNamespaces = QtGui.QTableWidget(self.tabNamespaces)
self.tableWidgetKnownNamespaces.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.tableWidgetKnownNamespaces.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.tableWidgetKnownNamespaces.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.tableWidgetKnownNamespaces.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.tableWidgetKnownNamespaces.setShowGrid(True)
self.tableWidgetKnownNamespaces.setObjectName(_fromUtf8("tableWidgetKnownNamespaces"))
self.tableWidgetKnownNamespaces.setColumnCount(2)
self.tableWidgetKnownNamespaces.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tableWidgetKnownNamespaces.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetKnownNamespaces.setHorizontalHeaderItem(1, item)
self.tableWidgetKnownNamespaces.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidgetKnownNamespaces.horizontalHeader().setStretchLastSection(True)
self.gridLayout_4.addWidget(self.tableWidgetKnownNamespaces, 0, 0, 1, 4)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/namespaces.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidgetPreferences.addTab(self.tabNamespaces, icon3, _fromUtf8(""))
self.tabMapping = QtGui.QWidget()
self.tabMapping.setObjectName(_fromUtf8("tabMapping"))
self.gridLayout_5 = QtGui.QGridLayout(self.tabMapping)
self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5"))
self.btnDeleteAllMappings = QtGui.QPushButton(self.tabMapping)
self.btnDeleteAllMappings.setMaximumSize(QtCore.QSize(100, 16777215))
self.btnDeleteAllMappings.setObjectName(_fromUtf8("btnDeleteAllMappings"))
self.gridLayout_5.addWidget(self.btnDeleteAllMappings, 1, 2, 1, 1)
self.tableWidgetKnownMappings = QtGui.QTableWidget(self.tabMapping)
self.tableWidgetKnownMappings.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.tableWidgetKnownMappings.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.tableWidgetKnownMappings.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.tableWidgetKnownMappings.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.tableWidgetKnownMappings.setShowGrid(True)
self.tableWidgetKnownMappings.setObjectName(_fromUtf8("tableWidgetKnownMappings"))
self.tableWidgetKnownMappings.setColumnCount(4)
self.tableWidgetKnownMappings.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tableWidgetKnownMappings.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetKnownMappings.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetKnownMappings.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetKnownMappings.setHorizontalHeaderItem(3, item)
self.tableWidgetKnownMappings.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidgetKnownMappings.horizontalHeader().setDefaultSectionSize(160)
self.tableWidgetKnownMappings.horizontalHeader().setMinimumSectionSize(70)
self.tableWidgetKnownMappings.horizontalHeader().setStretchLastSection(False)
self.gridLayout_5.addWidget(self.tableWidgetKnownMappings, 0, 0, 1, 3)
self.btnDeleteMapping = QtGui.QPushButton(self.tabMapping)
self.btnDeleteMapping.setMaximumSize(QtCore.QSize(100, 16777215))
self.btnDeleteMapping.setObjectName(_fromUtf8("btnDeleteMapping"))
self.gridLayout_5.addWidget(self.btnDeleteMapping, 1, 1, 1, 1)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/mapping.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidgetPreferences.addTab(self.tabMapping, icon4, _fromUtf8(""))
self.tabLog = QtGui.QWidget()
self.tabLog.setObjectName(_fromUtf8("tabLog"))
self.gridLayout_3 = QtGui.QGridLayout(self.tabLog)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.btnClearLog = QtGui.QPushButton(self.tabLog)
self.btnClearLog.setMaximumSize(QtCore.QSize(100, 16777215))
self.btnClearLog.setObjectName(_fromUtf8("btnClearLog"))
self.gridLayout_3.addWidget(self.btnClearLog, 1, 1, 1, 1)
self.tableWidgetLogEntries = QtGui.QTableWidget(self.tabLog)
self.tableWidgetLogEntries.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
self.tableWidgetLogEntries.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.tableWidgetLogEntries.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.tableWidgetLogEntries.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.tableWidgetLogEntries.setShowGrid(True)
self.tableWidgetLogEntries.setObjectName(_fromUtf8("tableWidgetLogEntries"))
self.tableWidgetLogEntries.setColumnCount(2)
self.tableWidgetLogEntries.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tableWidgetLogEntries.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetLogEntries.setHorizontalHeaderItem(1, item)
self.tableWidgetLogEntries.horizontalHeader().setCascadingSectionResizes(False)
self.tableWidgetLogEntries.horizontalHeader().setDefaultSectionSize(115)
self.tableWidgetLogEntries.horizontalHeader().setMinimumSectionSize(100)
self.tableWidgetLogEntries.horizontalHeader().setStretchLastSection(True)
self.tableWidgetLogEntries.verticalHeader().setDefaultSectionSize(30)
self.gridLayout_3.addWidget(self.tableWidgetLogEntries, 0, 0, 1, 2)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/log.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidgetPreferences.addTab(self.tabLog, icon5, _fromUtf8(""))
self.tabDatabase = QtGui.QWidget()
self.tabDatabase.setObjectName(_fromUtf8("tabDatabase"))
self.gridLayout_8 = QtGui.QGridLayout(self.tabDatabase)
self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
self.tableWidgetDatabaseStats = QtGui.QTableWidget(self.tabDatabase)
self.tableWidgetDatabaseStats.setMaximumSize(QtCore.QSize(16777215, 182))
self.tableWidgetDatabaseStats.setContextMenuPolicy(QtCore.Qt.PreventContextMenu)
self.tableWidgetDatabaseStats.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.tableWidgetDatabaseStats.setObjectName(_fromUtf8("tableWidgetDatabaseStats"))
self.tableWidgetDatabaseStats.setColumnCount(1)
self.tableWidgetDatabaseStats.setRowCount(6)
item = QtGui.QTableWidgetItem()
self.tableWidgetDatabaseStats.setVerticalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDatabaseStats.setVerticalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDatabaseStats.setVerticalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDatabaseStats.setVerticalHeaderItem(3, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDatabaseStats.setVerticalHeaderItem(4, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDatabaseStats.setVerticalHeaderItem(5, item)
item = QtGui.QTableWidgetItem()
self.tableWidgetDatabaseStats.setHorizontalHeaderItem(0, item)
self.tableWidgetDatabaseStats.horizontalHeader().setVisible(False)
self.tableWidgetDatabaseStats.horizontalHeader().setDefaultSectionSize(200)
self.tableWidgetDatabaseStats.horizontalHeader().setStretchLastSection(True)
self.gridLayout_8.addWidget(self.tableWidgetDatabaseStats, 0, 0, 1, 5)
self.btnExportDatabase = QtGui.QPushButton(self.tabDatabase)
self.btnExportDatabase.setMaximumSize(QtCore.QSize(110, 16777215))
self.btnExportDatabase.setObjectName(_fromUtf8("btnExportDatabase"))
self.gridLayout_8.addWidget(self.btnExportDatabase, 2, 2, 1, 1)
spacerItem1 = QtGui.QSpacerItem(20, 115, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_8.addItem(spacerItem1, 1, 1, 1, 1)
self.btnImportDatabase = QtGui.QPushButton(self.tabDatabase)
self.btnImportDatabase.setMaximumSize(QtCore.QSize(110, 16777215))
self.btnImportDatabase.setObjectName(_fromUtf8("btnImportDatabase"))
self.gridLayout_8.addWidget(self.btnImportDatabase, 2, 3, 1, 1)
self.btnResetToDefaultDatabase = QtGui.QPushButton(self.tabDatabase)
self.btnResetToDefaultDatabase.setMaximumSize(QtCore.QSize(110, 16777215))
self.btnResetToDefaultDatabase.setObjectName(_fromUtf8("btnResetToDefaultDatabase"))
self.gridLayout_8.addWidget(self.btnResetToDefaultDatabase, 2, 4, 1, 1)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/database.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.tabWidgetPreferences.addTab(self.tabDatabase, icon6, _fromUtf8(""))
self.gridLayout.addWidget(self.tabWidgetPreferences, 0, 0, 1, 4)
self.btnClose = QtGui.QPushButton(Preferences)
self.btnClose.setMaximumSize(QtCore.QSize(75, 16777215))
self.btnClose.setObjectName(_fromUtf8("btnClose"))
self.gridLayout.addWidget(self.btnClose, 1, 3, 1, 1)
self.retranslateUi(Preferences)
self.tabWidgetPreferences.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(Preferences)
def retranslateUi(self, Preferences):
Preferences.setWindowTitle(_translate("Preferences", "Preferences", None))
Preferences.setWhatsThis(_translate("Preferences", "Preferences window", None))
self.progressBarLoad.setToolTip(_translate("Preferences", "Loading..", None))
self.lblCheckForUpdatesAtStartup.setToolTip(_translate("Preferences", "Automatically check for updates when starting PROPheT", None))
self.lblCheckForUpdatesAtStartup.setText(_translate("Preferences", "Check for PROPheT updates at startup", None))
self.lblEMInstancesLimit.setToolTip(_translate("Preferences", "Define the maximum number of results that will be fetched from the External Model (values: 1-100000)", None))
self.lblEMInstancesLimit.setText(_translate("Preferences", "Limit query results for instances from External Model to:", None))
self.spinBoxEMInstancesLimit.setToolTip(_translate("Preferences", "Maximum number of instances fetched from the External Model", None))
self.spinBoxEMInstancesLimit.setWhatsThis(_translate("Preferences", "Maximum number of instances fetched from the External Model", None))
self.lblSameAsOption.setToolTip(_translate("Preferences", "When preference is checked, a triple like the following is inserted: <my_instance owl:sameAs external_instance>", None))
self.lblSameAsOption.setText(_translate("Preferences", "Connect new instances with original External Model instances with property:", None))
self.comboBoxSameAsOption.setItemText(0, _translate("Preferences", "owl:sameAs", None))
self.comboBoxSameAsOption.setItemText(1, _translate("Preferences", "rdfs:seeAlso", None))
self.comboBoxSameAsOption.setItemText(2, _translate("Preferences", "None", None))
self.lblPropertyEquivalentToOption.setToolTip(_translate("Preferences", "When preference is checked, a triple like the following is inserted: <my_model_property owl:equivalentProperty external_model_property>", None))
self.lblPropertyEquivalentToOption.setText(_translate("Preferences", "Connect My Model data properties to External Model data properties with owl:equivalentTo", None))
self.lblLabelOption.setToolTip(_translate("Preferences", "When preference is checked, rdfs:label annotations of the original External Model instances are imported to the newly created instances", None))
self.lblLabelOption.setText(_translate("Preferences", "Import rdfs:label annotations to new instances", None))
self.btnSaveChangesInGeneralTab.setToolTip(_translate("Preferences", "Save changes made in General tab", None))
self.btnSaveChangesInGeneralTab.setWhatsThis(_translate("Preferences", "Save changes made in General tab", None))
self.btnSaveChangesInGeneralTab.setText(_translate("Preferences", "Save changes", None))
self.tabWidgetPreferences.setTabText(self.tabWidgetPreferences.indexOf(self.tabGeneral), _translate("Preferences", "General", None))
self.tabWidgetPreferences.setTabToolTip(self.tabWidgetPreferences.indexOf(self.tabGeneral), _translate("Preferences", "General preferences", None))
self.btnAddMyModel.setToolTip(_translate("Preferences", "Add a new model", None))
self.btnAddMyModel.setWhatsThis(_translate("Preferences", "Add a new model", None))
self.btnAddMyModel.setText(_translate("Preferences", "Add model...", None))
self.tableWidgetMyModels.setToolTip(_translate("Preferences", "List of all initial (source) models", None))
self.tableWidgetMyModels.setWhatsThis(_translate("Preferences", "A list of my models", None))
item = self.tableWidgetMyModels.horizontalHeaderItem(0)
item.setText(_translate("Preferences", "Name", None))
item = self.tableWidgetMyModels.horizontalHeaderItem(1)
item.setText(_translate("Preferences", "URI", None))
self.btnDeleteMyModel.setToolTip(_translate("Preferences", "Delete selected model", None))
self.btnDeleteMyModel.setWhatsThis(_translate("Preferences", "Delete selected model", None))
self.btnDeleteMyModel.setText(_translate("Preferences", "Delete model", None))
self.btnLoadMyModel.setToolTip(_translate("Preferences", "Load selected model", None))
self.btnLoadMyModel.setWhatsThis(_translate("Preferences", "Load selected model", None))
self.btnLoadMyModel.setText(_translate("Preferences", "Load", None))
self.tabWidgetPreferences.setTabText(self.tabWidgetPreferences.indexOf(self.tabMyModel), _translate("Preferences", "My models", None))
self.tabWidgetPreferences.setTabToolTip(self.tabWidgetPreferences.indexOf(self.tabMyModel), _translate("Preferences", "My model preferences", None))
self.tableWidgetKnownEndpoints.setToolTip(_translate("Preferences", "List of all stored endpoints", None))
self.tableWidgetKnownEndpoints.setWhatsThis(_translate("Preferences", "A list of all known endpoints", None))
item = self.tableWidgetKnownEndpoints.horizontalHeaderItem(0)
item.setText(_translate("Preferences", "Name", None))
item = self.tableWidgetKnownEndpoints.horizontalHeaderItem(1)
item.setText(_translate("Preferences", "Endpoint URI", None))
self.btnAddEndpoint.setToolTip(_translate("Preferences", "Add a new endpoint", None))
self.btnAddEndpoint.setWhatsThis(_translate("Preferences", "Add a new endpoint", None))
self.btnAddEndpoint.setText(_translate("Preferences", "Add endpoint...", None))
self.btnSelectEndpoint.setToolTip(_translate("Preferences", "Set selected endpoint as default", None))
self.btnSelectEndpoint.setWhatsThis(_translate("Preferences", "Set selected endpoint as default", None))
self.btnSelectEndpoint.setText(_translate("Preferences", "Select", None))
self.btnDeleteEndpoint.setToolTip(_translate("Preferences", "Delete selected endpoint", None))
self.btnDeleteEndpoint.setWhatsThis(_translate("Preferences", "Delete selected endpoint", None))
self.btnDeleteEndpoint.setText(_translate("Preferences", "Delete endpoint", None))
self.tabWidgetPreferences.setTabText(self.tabWidgetPreferences.indexOf(self.tabEndpoints), _translate("Preferences", "Endpoints", None))
self.tabWidgetPreferences.setTabToolTip(self.tabWidgetPreferences.indexOf(self.tabEndpoints), _translate("Preferences", "Endpoint preferences", None))
self.btnDeleteNamespace.setToolTip(_translate("Preferences", "Delete selected namespace", None))
self.btnDeleteNamespace.setWhatsThis(_translate("Preferences", "Delete selected namespace", None))
self.btnDeleteNamespace.setText(_translate("Preferences", "Delete namespace", None))
self.btnRestoreDefaultNamespaces.setToolTip(_translate("Preferences", "Restore default namespaces", None))
self.btnRestoreDefaultNamespaces.setWhatsThis(_translate("Preferences", "Restore default namespaces", None))
self.btnRestoreDefaultNamespaces.setText(_translate("Preferences", "Restore default", None))
self.btnAddNamespace.setToolTip(_translate("Preferences", "Add a new namespace", None))
self.btnAddNamespace.setWhatsThis(_translate("Preferences", "Add a new namespace", None))
self.btnAddNamespace.setText(_translate("Preferences", "Add namespace...", None))
self.tableWidgetKnownNamespaces.setToolTip(_translate("Preferences", "List of all stored namespaces", None))
self.tableWidgetKnownNamespaces.setWhatsThis(_translate("Preferences", "A list of all known namespaces", None))
item = self.tableWidgetKnownNamespaces.horizontalHeaderItem(0)
item.setText(_translate("Preferences", "Prefix", None))
item = self.tableWidgetKnownNamespaces.horizontalHeaderItem(1)
item.setText(_translate("Preferences", "URI", None))
self.tabWidgetPreferences.setTabText(self.tabWidgetPreferences.indexOf(self.tabNamespaces), _translate("Preferences", "Namespaces", None))
self.tabWidgetPreferences.setTabToolTip(self.tabWidgetPreferences.indexOf(self.tabNamespaces), _translate("Preferences", "Namespace preferences", None))
self.btnDeleteAllMappings.setToolTip(_translate("Preferences", "Delete all stored mappings", None))
self.btnDeleteAllMappings.setWhatsThis(_translate("Preferences", "Delete all stored mappings", None))
self.btnDeleteAllMappings.setText(_translate("Preferences", "Delete all", None))
self.tableWidgetKnownMappings.setToolTip(_translate("Preferences", "List of all stored mappings", None))
self.tableWidgetKnownMappings.setWhatsThis(_translate("Preferences", "A list of all known mappings", None))
item = self.tableWidgetKnownMappings.horizontalHeaderItem(0)
item.setText(_translate("Preferences", "My model", None))
item = self.tableWidgetKnownMappings.horizontalHeaderItem(1)
item.setText(_translate("Preferences", "My property", None))
item = self.tableWidgetKnownMappings.horizontalHeaderItem(2)
item.setText(_translate("Preferences", "External Model\'s property", None))
item = self.tableWidgetKnownMappings.horizontalHeaderItem(3)
item.setText(_translate("Preferences", "External model", None))
self.btnDeleteMapping.setToolTip(_translate("Preferences", "Delete selected mapping", None))
self.btnDeleteMapping.setWhatsThis(_translate("Preferences", "Delete selected mapping", None))
self.btnDeleteMapping.setText(_translate("Preferences", "Delete mapping", None))
self.tabWidgetPreferences.setTabText(self.tabWidgetPreferences.indexOf(self.tabMapping), _translate("Preferences", "Mapping", None))
self.tabWidgetPreferences.setTabToolTip(self.tabWidgetPreferences.indexOf(self.tabMapping), _translate("Preferences", "Mapping preferences", None))
self.btnClearLog.setToolTip(_translate("Preferences", "Clear log entries", None))
self.btnClearLog.setWhatsThis(_translate("Preferences", "Delete log entries", None))
self.btnClearLog.setText(_translate("Preferences", "Clear log", None))
self.tableWidgetLogEntries.setToolTip(_translate("Preferences", "List of log entries", None))
self.tableWidgetLogEntries.setWhatsThis(_translate("Preferences", "List of log entries", None))
item = self.tableWidgetLogEntries.horizontalHeaderItem(0)
item.setText(_translate("Preferences", "Time", None))
item = self.tableWidgetLogEntries.horizontalHeaderItem(1)
item.setText(_translate("Preferences", "Description", None))
self.tabWidgetPreferences.setTabText(self.tabWidgetPreferences.indexOf(self.tabLog), _translate("Preferences", "Log", None))
self.tabWidgetPreferences.setTabToolTip(self.tabWidgetPreferences.indexOf(self.tabLog), _translate("Preferences", "Log preferences", None))
item = self.tableWidgetDatabaseStats.verticalHeaderItem(0)
item.setText(_translate("Preferences", "My Models", None))
item = self.tableWidgetDatabaseStats.verticalHeaderItem(1)
item.setText(_translate("Preferences", "Endpoints", None))
item = self.tableWidgetDatabaseStats.verticalHeaderItem(2)
item.setText(_translate("Preferences", "Known namespaces", None))
item = self.tableWidgetDatabaseStats.verticalHeaderItem(3)
item.setText(_translate("Preferences", "Property mappings", None))
item = self.tableWidgetDatabaseStats.verticalHeaderItem(4)
item.setText(_translate("Preferences", "Distinct pairs MM/EM in mappings", None))
item = self.tableWidgetDatabaseStats.verticalHeaderItem(5)
item.setText(_translate("Preferences", "Log entries", None))
item = self.tableWidgetDatabaseStats.horizontalHeaderItem(0)
item.setText(_translate("Preferences", "Value", None))
self.btnExportDatabase.setToolTip(_translate("Preferences", "Export current database", None))
self.btnExportDatabase.setWhatsThis(_translate("Preferences", "Export current database", None))
self.btnExportDatabase.setText(_translate("Preferences", "Export database...", None))
self.btnImportDatabase.setToolTip(_translate("Preferences", "Select a database to import", None))
self.btnImportDatabase.setWhatsThis(_translate("Preferences", "Select a database to import", None))
self.btnImportDatabase.setText(_translate("Preferences", "Import database...", None))
self.btnResetToDefaultDatabase.setToolTip(_translate("Preferences", "Reset to default database", None))
self.btnResetToDefaultDatabase.setWhatsThis(_translate("Preferences", "Reset to default database", None))
self.btnResetToDefaultDatabase.setText(_translate("Preferences", "Reset to default", None))
self.tabWidgetPreferences.setTabText(self.tabWidgetPreferences.indexOf(self.tabDatabase), _translate("Preferences", "Database", None))
self.tabWidgetPreferences.setTabToolTip(self.tabWidgetPreferences.indexOf(self.tabDatabase), _translate("Preferences", "Database preferences", None))
self.btnClose.setToolTip(_translate("Preferences", "Close Preferences window", None))
self.btnClose.setWhatsThis(_translate("Preferences", "Close Preferences window", None))
self.btnClose.setText(_translate("Preferences", "Close", None))
import preferences_resources_rc
|
|
"""Light platform support for yeelight."""
import logging
import voluptuous as vol
import yeelight
from yeelight import (
BulbException,
Flow,
RGBTransition,
SleepTransition,
transitions as yee_transitions,
)
from yeelight.enums import BulbType, LightType, PowerMode, SceneClass
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_KELVIN,
ATTR_RGB_COLOR,
ATTR_TRANSITION,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
Light,
)
from homeassistant.const import ATTR_ENTITY_ID, ATTR_MODE, CONF_HOST, CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.service import extract_entity_ids
import homeassistant.util.color as color_util
from homeassistant.util.color import (
color_temperature_kelvin_to_mired as kelvin_to_mired,
color_temperature_mired_to_kelvin as mired_to_kelvin,
)
from . import (
ACTION_RECOVER,
ATTR_ACTION,
ATTR_COUNT,
ATTR_TRANSITIONS,
CONF_CUSTOM_EFFECTS,
CONF_FLOW_PARAMS,
CONF_MODE_MUSIC,
CONF_NIGHTLIGHT_SWITCH_TYPE,
CONF_SAVE_ON_CHANGE,
CONF_TRANSITION,
DATA_UPDATED,
DATA_YEELIGHT,
DOMAIN,
NIGHTLIGHT_SWITCH_TYPE_LIGHT,
YEELIGHT_FLOW_TRANSITION_SCHEMA,
YEELIGHT_SERVICE_SCHEMA,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_DATA_KEY = f"{DATA_YEELIGHT}_lights"
SUPPORT_YEELIGHT = (
SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION | SUPPORT_FLASH | SUPPORT_EFFECT
)
SUPPORT_YEELIGHT_WHITE_TEMP = SUPPORT_YEELIGHT | SUPPORT_COLOR_TEMP
SUPPORT_YEELIGHT_RGB = SUPPORT_YEELIGHT_WHITE_TEMP | SUPPORT_COLOR
ATTR_MINUTES = "minutes"
SERVICE_SET_MODE = "set_mode"
SERVICE_START_FLOW = "start_flow"
SERVICE_SET_COLOR_SCENE = "set_color_scene"
SERVICE_SET_HSV_SCENE = "set_hsv_scene"
SERVICE_SET_COLOR_TEMP_SCENE = "set_color_temp_scene"
SERVICE_SET_COLOR_FLOW_SCENE = "set_color_flow_scene"
SERVICE_SET_AUTO_DELAY_OFF_SCENE = "set_auto_delay_off_scene"
EFFECT_DISCO = "Disco"
EFFECT_TEMP = "Slow Temp"
EFFECT_STROBE = "Strobe epilepsy!"
EFFECT_STROBE_COLOR = "Strobe color"
EFFECT_ALARM = "Alarm"
EFFECT_POLICE = "Police"
EFFECT_POLICE2 = "Police2"
EFFECT_CHRISTMAS = "Christmas"
EFFECT_RGB = "RGB"
EFFECT_RANDOM_LOOP = "Random Loop"
EFFECT_FAST_RANDOM_LOOP = "Fast Random Loop"
EFFECT_LSD = "LSD"
EFFECT_SLOWDOWN = "Slowdown"
EFFECT_WHATSAPP = "WhatsApp"
EFFECT_FACEBOOK = "Facebook"
EFFECT_TWITTER = "Twitter"
EFFECT_STOP = "Stop"
YEELIGHT_TEMP_ONLY_EFFECT_LIST = [EFFECT_TEMP, EFFECT_STOP]
YEELIGHT_MONO_EFFECT_LIST = [
EFFECT_DISCO,
EFFECT_STROBE,
EFFECT_ALARM,
EFFECT_POLICE2,
EFFECT_WHATSAPP,
EFFECT_FACEBOOK,
EFFECT_TWITTER,
*YEELIGHT_TEMP_ONLY_EFFECT_LIST,
]
YEELIGHT_COLOR_EFFECT_LIST = [
EFFECT_STROBE_COLOR,
EFFECT_POLICE,
EFFECT_CHRISTMAS,
EFFECT_RGB,
EFFECT_RANDOM_LOOP,
EFFECT_FAST_RANDOM_LOOP,
EFFECT_LSD,
EFFECT_SLOWDOWN,
*YEELIGHT_MONO_EFFECT_LIST,
]
MODEL_TO_DEVICE_TYPE = {
"mono": BulbType.White,
"mono1": BulbType.White,
"color": BulbType.Color,
"color1": BulbType.Color,
"color2": BulbType.Color,
"strip1": BulbType.Color,
"bslamp1": BulbType.Color,
"bslamp2": BulbType.Color,
"RGBW": BulbType.Color,
"lamp1": BulbType.WhiteTemp,
"ceiling1": BulbType.WhiteTemp,
"ceiling2": BulbType.WhiteTemp,
"ceiling3": BulbType.WhiteTemp,
"ceiling4": BulbType.WhiteTempMood,
}
EFFECTS_MAP = {
EFFECT_DISCO: yee_transitions.disco,
EFFECT_TEMP: yee_transitions.temp,
EFFECT_STROBE: yee_transitions.strobe,
EFFECT_STROBE_COLOR: yee_transitions.strobe_color,
EFFECT_ALARM: yee_transitions.alarm,
EFFECT_POLICE: yee_transitions.police,
EFFECT_POLICE2: yee_transitions.police2,
EFFECT_CHRISTMAS: yee_transitions.christmas,
EFFECT_RGB: yee_transitions.rgb,
EFFECT_RANDOM_LOOP: yee_transitions.randomloop,
EFFECT_LSD: yee_transitions.lsd,
EFFECT_SLOWDOWN: yee_transitions.slowdown,
}
VALID_BRIGHTNESS = vol.All(vol.Coerce(int), vol.Range(min=1, max=100))
SERVICE_SCHEMA_SET_MODE = YEELIGHT_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_MODE): vol.In([mode.name.lower() for mode in PowerMode])}
)
SERVICE_SCHEMA_START_FLOW = YEELIGHT_SERVICE_SCHEMA.extend(
YEELIGHT_FLOW_TRANSITION_SCHEMA
)
SERVICE_SCHEMA_SET_COLOR_SCENE = YEELIGHT_SERVICE_SCHEMA.extend(
{
vol.Required(ATTR_RGB_COLOR): vol.All(
vol.ExactSequence((cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple)
),
vol.Required(ATTR_BRIGHTNESS): VALID_BRIGHTNESS,
}
)
SERVICE_SCHEMA_SET_HSV_SCENE = YEELIGHT_SERVICE_SCHEMA.extend(
{
vol.Required(ATTR_HS_COLOR): vol.All(
vol.ExactSequence(
(
vol.All(vol.Coerce(float), vol.Range(min=0, max=359)),
vol.All(vol.Coerce(float), vol.Range(min=0, max=100)),
)
),
vol.Coerce(tuple),
),
vol.Required(ATTR_BRIGHTNESS): VALID_BRIGHTNESS,
}
)
SERVICE_SCHEMA_SET_COLOR_TEMP_SCENE = YEELIGHT_SERVICE_SCHEMA.extend(
{
vol.Required(ATTR_KELVIN): vol.All(
vol.Coerce(int), vol.Range(min=1700, max=6500)
),
vol.Required(ATTR_BRIGHTNESS): VALID_BRIGHTNESS,
}
)
SERVICE_SCHEMA_SET_COLOR_FLOW_SCENE = YEELIGHT_SERVICE_SCHEMA.extend(
YEELIGHT_FLOW_TRANSITION_SCHEMA
)
SERVICE_SCHEMA_SET_AUTO_DELAY_OFF = YEELIGHT_SERVICE_SCHEMA.extend(
{
vol.Required(ATTR_MINUTES): vol.All(vol.Coerce(int), vol.Range(min=1, max=60)),
vol.Required(ATTR_BRIGHTNESS): VALID_BRIGHTNESS,
}
)
def _transitions_config_parser(transitions):
"""Parse transitions config into initialized objects."""
transition_objects = []
for transition_config in transitions:
transition, params = list(transition_config.items())[0]
transition_objects.append(getattr(yeelight, transition)(*params))
return transition_objects
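# A rough sketch of the config shape handled above (assumed; each key must be a
# transition class exported by the yeelight package, each value its positional args):
#   [{"RGBTransition": [255, 0, 0, 500, 100]}, {"SleepTransition": [500]}]
# would produce [RGBTransition(255, 0, 0, 500, 100), SleepTransition(500)].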
def _parse_custom_effects(effects_config):
effects = {}
for config in effects_config:
params = config[CONF_FLOW_PARAMS]
action = Flow.actions[params[ATTR_ACTION]]
transitions = _transitions_config_parser(params[ATTR_TRANSITIONS])
effects[config[CONF_NAME]] = {
ATTR_COUNT: params[ATTR_COUNT],
ATTR_ACTION: action,
ATTR_TRANSITIONS: transitions,
}
return effects
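# Hypothetical example of one effects_config entry (keys follow the CONF_/ATTR_
# constants imported above): an effect named "pulse" with ATTR_COUNT 0,
# ATTR_ACTION "recover" and a single RGBTransition would map to
#   {"pulse": {ATTR_COUNT: 0, ATTR_ACTION: Flow.actions.recover, ATTR_TRANSITIONS: [...]}}
# ready to be turned into a yeelight Flow.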
def _cmd(func):
"""Define a wrapper to catch exceptions from the bulb."""
def _wrap(self, *args, **kwargs):
try:
_LOGGER.debug("Calling %s with %s %s", func, args, kwargs)
return func(self, *args, **kwargs)
except BulbException as ex:
_LOGGER.error("Error when calling %s: %s", func, ex)
return _wrap
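# Typical (assumed) usage further down in this module: decorate methods that talk
# to the bulb so a transient BulbException is logged instead of raised, e.g.
#   @_cmd
#   def set_brightness(self, brightness, duration) -> None: ...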
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Yeelight bulbs."""
if not discovery_info:
return
if PLATFORM_DATA_KEY not in hass.data:
hass.data[PLATFORM_DATA_KEY] = []
device = hass.data[DATA_YEELIGHT][discovery_info[CONF_HOST]]
_LOGGER.debug("Adding %s", device.name)
custom_effects = _parse_custom_effects(discovery_info[CONF_CUSTOM_EFFECTS])
nl_switch_light = (
discovery_info.get(CONF_NIGHTLIGHT_SWITCH_TYPE) == NIGHTLIGHT_SWITCH_TYPE_LIGHT
)
lights = []
if device.model:
device_type = MODEL_TO_DEVICE_TYPE.get(device.model, None)
else:
device_type = device.type
def _lights_setup_helper(klass):
lights.append(klass(device, custom_effects=custom_effects))
if device_type == BulbType.White:
_lights_setup_helper(YeelightGenericLight)
elif device_type == BulbType.Color:
if nl_switch_light and device.is_nightlight_supported:
_lights_setup_helper(YeelightColorLightWithNightlightSwitch)
_lights_setup_helper(YeelightNightLightModeWithWithoutBrightnessControl)
else:
_lights_setup_helper(YeelightColorLightWithoutNightlightSwitch)
elif device_type == BulbType.WhiteTemp:
if nl_switch_light and device.is_nightlight_supported:
_lights_setup_helper(YeelightWithNightLight)
_lights_setup_helper(YeelightNightLightMode)
else:
_lights_setup_helper(YeelightWhiteTempWithoutNightlightSwitch)
elif device_type == BulbType.WhiteTempMood:
if nl_switch_light and device.is_nightlight_supported:
_lights_setup_helper(YeelightNightLightModeWithAmbientSupport)
_lights_setup_helper(YeelightWithAmbientAndNightlight)
else:
_lights_setup_helper(YeelightWithAmbientWithoutNightlight)
_lights_setup_helper(YeelightAmbientLight)
else:
_lights_setup_helper(YeelightGenericLight)
_LOGGER.warning(
"Cannot determine device type for %s, %s. Falling back to white only",
device.ipaddr,
device.name,
)
hass.data[PLATFORM_DATA_KEY] += lights
add_entities(lights, True)
setup_services(hass)
def setup_services(hass):
"""Set up the service listeners."""
def service_call(func):
def service_to_entities(service):
"""Return the known entities that a service call mentions."""
entity_ids = extract_entity_ids(hass, service)
target_devices = [
light
for light in hass.data[PLATFORM_DATA_KEY]
if light.entity_id in entity_ids
]
return target_devices
def service_to_params(service):
"""Return service call params, without entity_id."""
return {
key: value
for key, value in service.data.items()
if key != ATTR_ENTITY_ID
}
def wrapper(service):
params = service_to_params(service)
target_devices = service_to_entities(service)
for device in target_devices:
func(device, params)
return wrapper
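# Each handler below is invoked once per targeted light with the service data
# (entity_id stripped); the service_call decorator above performs the fan-out.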
@service_call
def service_set_mode(target_device, params):
target_device.set_mode(**params)
@service_call
def service_start_flow(target_device, params):
params[ATTR_TRANSITIONS] = _transitions_config_parser(params[ATTR_TRANSITIONS])
target_device.start_flow(**params)
@service_call
def service_set_color_scene(target_device, params):
target_device.set_scene(
SceneClass.COLOR, *[*params[ATTR_RGB_COLOR], params[ATTR_BRIGHTNESS]]
)
@service_call
def service_set_hsv_scene(target_device, params):
target_device.set_scene(
SceneClass.HSV, *[*params[ATTR_HS_COLOR], params[ATTR_BRIGHTNESS]]
)
@service_call
def service_set_color_temp_scene(target_device, params):
target_device.set_scene(
SceneClass.CT, params[ATTR_KELVIN], params[ATTR_BRIGHTNESS]
)
@service_call
def service_set_color_flow_scene(target_device, params):
flow = Flow(
count=params[ATTR_COUNT],
action=Flow.actions[params[ATTR_ACTION]],
transitions=_transitions_config_parser(params[ATTR_TRANSITIONS]),
)
target_device.set_scene(SceneClass.CF, flow)
@service_call
def service_set_auto_delay_off_scene(target_device, params):
target_device.set_scene(
SceneClass.AUTO_DELAY_OFF, params[ATTR_BRIGHTNESS], params[ATTR_MINUTES]
)
hass.services.register(
DOMAIN, SERVICE_SET_MODE, service_set_mode, schema=SERVICE_SCHEMA_SET_MODE
)
hass.services.register(
DOMAIN, SERVICE_START_FLOW, service_start_flow, schema=SERVICE_SCHEMA_START_FLOW
)
hass.services.register(
DOMAIN,
SERVICE_SET_COLOR_SCENE,
service_set_color_scene,
schema=SERVICE_SCHEMA_SET_COLOR_SCENE,
)
hass.services.register(
DOMAIN,
SERVICE_SET_HSV_SCENE,
service_set_hsv_scene,
schema=SERVICE_SCHEMA_SET_HSV_SCENE,
)
hass.services.register(
DOMAIN,
SERVICE_SET_COLOR_TEMP_SCENE,
service_set_color_temp_scene,
schema=SERVICE_SCHEMA_SET_COLOR_TEMP_SCENE,
)
hass.services.register(
DOMAIN,
SERVICE_SET_COLOR_FLOW_SCENE,
service_set_color_flow_scene,
schema=SERVICE_SCHEMA_SET_COLOR_FLOW_SCENE,
)
hass.services.register(
DOMAIN,
SERVICE_SET_AUTO_DELAY_OFF_SCENE,
service_set_auto_delay_off_scene,
schema=SERVICE_SCHEMA_SET_AUTO_DELAY_OFF,
)
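# Illustrative sketch (not part of the platform): once setup_services() has run, the
# custom services registered above can be invoked like any other Home Assistant
# service. The entity id below is hypothetical; ATTR_RGB_COLOR and ATTR_BRIGHTNESS are
# the data keys consumed by service_set_color_scene.
#
#     hass.services.call(
#         DOMAIN,
#         SERVICE_SET_COLOR_SCENE,
#         {
#             "entity_id": "light.yeelight_bedroom",
#             ATTR_RGB_COLOR: [255, 0, 0],
#             ATTR_BRIGHTNESS: 128,
#         },
#     )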
class YeelightGenericLight(Light):
"""Representation of a Yeelight generic light."""
def __init__(self, device, custom_effects=None):
"""Initialize the Yeelight light."""
self.config = device.config
self._device = device
self._brightness = None
self._color_temp = None
self._hs = None
self._effect = None
model_specs = self._bulb.get_model_specs()
self._min_mireds = kelvin_to_mired(model_specs["color_temp"]["max"])
self._max_mireds = kelvin_to_mired(model_specs["color_temp"]["min"])
self._light_type = LightType.Main
if custom_effects:
self._custom_effects = custom_effects
else:
self._custom_effects = {}
@callback
def _schedule_immediate_update(self):
self.async_schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Handle entity which will be added."""
async_dispatcher_connect(
self.hass,
DATA_UPDATED.format(self._device.ipaddr),
self._schedule_immediate_update,
)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def available(self) -> bool:
"""Return if bulb is available."""
return self.device.available
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_YEELIGHT
@property
def effect_list(self):
"""Return the list of supported effects."""
return self._predefined_effects + self.custom_effects_names
@property
def color_temp(self) -> int:
"""Return the color temperature."""
temp_in_k = self._get_property("ct")
if temp_in_k:
self._color_temp = kelvin_to_mired(int(temp_in_k))
return self._color_temp
@property
def name(self) -> str:
"""Return the name of the device if any."""
return self.device.name
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._get_property(self._power_property) == "on"
@property
def brightness(self) -> int:
"""Return the brightness of this light between 1..255."""
temp = self._get_property(self._brightness_property)
if temp:
self._brightness = temp
return round(255 * (int(self._brightness) / 100))
@property
def min_mireds(self):
"""Return minimum supported color temperature."""
return self._min_mireds
@property
def max_mireds(self):
"""Return maximum supported color temperature."""
return self._max_mireds
@property
def custom_effects(self):
"""Return dict with custom effects."""
return self._custom_effects
@property
def custom_effects_names(self):
"""Return list with custom effects names."""
return list(self.custom_effects.keys())
@property
def light_type(self):
"""Return light type."""
return self._light_type
@property
def hs_color(self) -> tuple:
"""Return the color property."""
return self._hs
@property
def effect(self):
"""Return the current effect."""
return self._effect
# F821: https://github.com/PyCQA/pyflakes/issues/373
@property
def _bulb(self) -> "Bulb": # noqa: F821
return self.device.bulb
@property
def _properties(self) -> dict:
if self._bulb is None:
return {}
return self._bulb.last_properties
def _get_property(self, prop, default=None):
return self._properties.get(prop, default)
@property
def _brightness_property(self):
return "bright"
@property
def _power_property(self):
return "power"
@property
def _turn_on_power_mode(self):
return PowerMode.LAST
@property
def _predefined_effects(self):
return YEELIGHT_MONO_EFFECT_LIST
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attributes = {"flowing": self.device.is_color_flow_enabled}
if self.device.is_nightlight_supported:
attributes["night_light"] = self.device.is_nightlight_enabled
return attributes
@property
def device(self):
"""Return yeelight device."""
return self._device
def update(self):
"""Update light properties."""
self._hs = self._get_hs_from_properties()
if not self.device.is_color_flow_enabled:
self._effect = None
def _get_hs_from_properties(self):
rgb = self._get_property("rgb")
color_mode = self._get_property("color_mode")
if not rgb or not color_mode:
return None
color_mode = int(color_mode)
if color_mode == 2: # color temperature
temp_in_k = mired_to_kelvin(self.color_temp)
return color_util.color_temperature_to_hs(temp_in_k)
if color_mode == 3: # hsv
hue = int(self._get_property("hue"))
sat = int(self._get_property("sat"))
return (hue / 360 * 65536, sat / 100 * 255)
rgb = int(rgb)
blue = rgb & 0xFF
green = (rgb >> 8) & 0xFF
red = (rgb >> 16) & 0xFF
return color_util.color_RGB_to_hs(red, green, blue)
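# Example (illustrative, not part of the platform): a bulb reporting rgb == 0x00FF00
# (65280) decodes to red=0, green=255, blue=0, which color_RGB_to_hs maps to roughly
# (120.0, 100.0), i.e. pure green at full saturation.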
def set_music_mode(self, mode) -> None:
"""Set the music mode on or off."""
if mode:
self._bulb.start_music()
else:
self._bulb.stop_music()
@_cmd
def set_brightness(self, brightness, duration) -> None:
"""Set bulb brightness."""
if brightness:
_LOGGER.debug("Setting brightness: %s", brightness)
self._bulb.set_brightness(
brightness / 255 * 100, duration=duration, light_type=self.light_type
)
@_cmd
def set_rgb(self, rgb, duration) -> None:
"""Set bulb's color."""
if rgb and self.supported_features & SUPPORT_COLOR:
_LOGGER.debug("Setting RGB: %s", rgb)
self._bulb.set_rgb(
rgb[0], rgb[1], rgb[2], duration=duration, light_type=self.light_type
)
@_cmd
def set_colortemp(self, colortemp, duration) -> None:
"""Set bulb's color temperature."""
if colortemp and self.supported_features & SUPPORT_COLOR_TEMP:
temp_in_k = mired_to_kelvin(colortemp)
_LOGGER.debug("Setting color temp: %s K", temp_in_k)
self._bulb.set_color_temp(
temp_in_k, duration=duration, light_type=self.light_type
)
@_cmd
def set_default(self) -> None:
"""Set current options as default."""
self._bulb.set_default()
@_cmd
def set_flash(self, flash) -> None:
"""Activate flash."""
if flash:
if self._bulb.last_properties["color_mode"] != 1:
_LOGGER.error("Flash supported currently only in RGB mode.")
return
transition = int(self.config[CONF_TRANSITION])
if flash == FLASH_LONG:
count = 1
duration = transition * 5
elif flash == FLASH_SHORT:
count = 1
duration = transition * 2
red, green, blue = color_util.color_hs_to_RGB(*self._hs)
transitions = list()
transitions.append(
RGBTransition(255, 0, 0, brightness=10, duration=duration)
)
transitions.append(SleepTransition(duration=transition))
transitions.append(
RGBTransition(
red, green, blue, brightness=self.brightness, duration=duration
)
)
flow = Flow(count=count, transitions=transitions)
try:
self._bulb.start_flow(flow, light_type=self.light_type)
except BulbException as ex:
_LOGGER.error("Unable to set flash: %s", ex)
@_cmd
def set_effect(self, effect) -> None:
"""Activate effect."""
if not effect:
return
if effect == EFFECT_STOP:
self._bulb.stop_flow(light_type=self.light_type)
return
if effect in self.custom_effects_names:
flow = Flow(**self.custom_effects[effect])
elif effect in EFFECTS_MAP:
flow = Flow(count=0, transitions=EFFECTS_MAP[effect]())
elif effect == EFFECT_FAST_RANDOM_LOOP:
flow = Flow(count=0, transitions=yee_transitions.randomloop(duration=250))
elif effect == EFFECT_WHATSAPP:
flow = Flow(count=2, transitions=yee_transitions.pulse(37, 211, 102))
elif effect == EFFECT_FACEBOOK:
flow = Flow(count=2, transitions=yee_transitions.pulse(59, 89, 152))
elif effect == EFFECT_TWITTER:
flow = Flow(count=2, transitions=yee_transitions.pulse(0, 172, 237))
else:
return
try:
self._bulb.start_flow(flow, light_type=self.light_type)
self._effect = effect
except BulbException as ex:
_LOGGER.error("Unable to set effect: %s", ex)
def turn_on(self, **kwargs) -> None:
"""Turn the bulb on."""
brightness = kwargs.get(ATTR_BRIGHTNESS)
colortemp = kwargs.get(ATTR_COLOR_TEMP)
hs_color = kwargs.get(ATTR_HS_COLOR)
rgb = color_util.color_hs_to_RGB(*hs_color) if hs_color else None
flash = kwargs.get(ATTR_FLASH)
effect = kwargs.get(ATTR_EFFECT)
duration = int(self.config[CONF_TRANSITION]) # in ms
if ATTR_TRANSITION in kwargs: # passed kwarg overrides config
duration = int(kwargs.get(ATTR_TRANSITION) * 1000) # kwarg in s
self.device.turn_on(
duration=duration,
light_type=self.light_type,
power_mode=self._turn_on_power_mode,
)
if self.config[CONF_MODE_MUSIC] and not self._bulb.music_mode:
try:
self.set_music_mode(self.config[CONF_MODE_MUSIC])
except BulbException as ex:
_LOGGER.error(
"Unable to turn on music mode, consider disabling it: %s", ex
)
try:
# values checked for none in methods
self.set_rgb(rgb, duration)
self.set_colortemp(colortemp, duration)
self.set_brightness(brightness, duration)
self.set_flash(flash)
self.set_effect(effect)
except BulbException as ex:
_LOGGER.error("Unable to set bulb properties: %s", ex)
return
# save the current state if we had a manual change.
if self.config[CONF_SAVE_ON_CHANGE] and (brightness or colortemp or rgb):
try:
self.set_default()
except BulbException as ex:
_LOGGER.error("Unable to set the defaults: %s", ex)
return
self.device.update()
def turn_off(self, **kwargs) -> None:
"""Turn off."""
duration = int(self.config[CONF_TRANSITION]) # in ms
if ATTR_TRANSITION in kwargs: # passed kwarg overrides config
duration = int(kwargs.get(ATTR_TRANSITION) * 1000) # kwarg in s
self.device.turn_off(duration=duration, light_type=self.light_type)
self.device.update()
def set_mode(self, mode: str):
"""Set a power mode."""
try:
self._bulb.set_power_mode(PowerMode[mode.upper()])
self.device.update()
except BulbException as ex:
_LOGGER.error("Unable to set the power mode: %s", ex)
def start_flow(self, transitions, count=0, action=ACTION_RECOVER):
"""Start flow."""
try:
flow = Flow(
count=count, action=Flow.actions[action], transitions=transitions
)
self._bulb.start_flow(flow, light_type=self.light_type)
self.device.update()
except BulbException as ex:
_LOGGER.error("Unable to set effect: %s", ex)
def set_scene(self, scene_class, *args):
"""
Set the light directly to the specified state.
If the light is off, it will first be turned on.
"""
try:
self._bulb.set_scene(scene_class, *args)
self.device.update()
except BulbException as ex:
_LOGGER.error("Unable to set scene: %s", ex)
class YeelightColorLightSupport:
"""Representation of a Color Yeelight light support."""
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_YEELIGHT_RGB
@property
def _predefined_effects(self):
return YEELIGHT_COLOR_EFFECT_LIST
class YeelightWhiteTempLightSupport:
"""Representation of a Color Yeelight light."""
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_YEELIGHT_WHITE_TEMP
@property
def _predefined_effects(self):
return YEELIGHT_TEMP_ONLY_EFFECT_LIST
class YeelightNightLightSupport:
"""Representation of a Yeelight nightlight support."""
@property
def _turn_on_power_mode(self):
return PowerMode.NORMAL
class YeelightColorLightWithoutNightlightSwitch(
YeelightColorLightSupport, YeelightGenericLight
):
"""Representation of a Color Yeelight light."""
@property
def _brightness_property(self):
return "current_brightness"
class YeelightColorLightWithNightlightSwitch(
YeelightNightLightSupport, YeelightColorLightSupport, YeelightGenericLight
):
"""Representation of a Yeelight with rgb support and nightlight.
It represents the case when the nightlight switch is set to light.
"""
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return super().is_on and not self.device.is_nightlight_enabled
class YeelightWhiteTempWithoutNightlightSwitch(
YeelightWhiteTempLightSupport, YeelightGenericLight
):
"""White temp light, when nightlight switch is not set to light."""
@property
def _brightness_property(self):
return "current_brightness"
class YeelightWithNightLight(
YeelightNightLightSupport, YeelightWhiteTempLightSupport, YeelightGenericLight
):
"""Representation of a Yeelight with temp only support and nightlight.
It represents the case when the nightlight switch is set to light.
"""
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return super().is_on and not self.device.is_nightlight_enabled
class YeelightNightLightMode(YeelightGenericLight):
"""Representation of a Yeelight when in nightlight mode."""
@property
def name(self) -> str:
"""Return the name of the device if any."""
return f"{self.device.name} nightlight"
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return "mdi:weather-night"
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return super().is_on and self.device.is_nightlight_enabled
@property
def _brightness_property(self):
return "nl_br"
@property
def _turn_on_power_mode(self):
return PowerMode.MOONLIGHT
@property
def _predefined_effects(self):
return YEELIGHT_TEMP_ONLY_EFFECT_LIST
class YeelightNightLightModeWithAmbientSupport(YeelightNightLightMode):
"""Representation of a Yeelight, with ambient support, when in nightlight mode."""
@property
def _power_property(self):
return "main_power"
class YeelightNightLightModeWithWithoutBrightnessControl(YeelightNightLightMode):
"""Representation of a Yeelight, when in nightlight mode.
It represents the case when nightlight mode brightness control is not supported.
"""
@property
def supported_features(self):
"""Flag no supported features."""
return 0
class YeelightWithAmbientWithoutNightlight(YeelightWhiteTempWithoutNightlightSwitch):
"""Representation of a Yeelight which has ambilight support.
It represents the case when the nightlight switch type is none.
"""
@property
def _power_property(self):
return "main_power"
class YeelightWithAmbientAndNightlight(YeelightWithNightLight):
"""Representation of a Yeelight which has ambilight support.
It represents the case when the nightlight switch type is set to light.
"""
@property
def _power_property(self):
return "main_power"
class YeelightAmbientLight(YeelightColorLightWithoutNightlightSwitch):
"""Representation of a Yeelight ambient light."""
PROPERTIES_MAPPING = {"color_mode": "bg_lmode"}
def __init__(self, *args, **kwargs):
"""Initialize the Yeelight Ambient light."""
super().__init__(*args, **kwargs)
self._min_mireds = kelvin_to_mired(6500)
self._max_mireds = kelvin_to_mired(1700)
self._light_type = LightType.Ambient
@property
def name(self) -> str:
"""Return the name of the device if any."""
return f"{self.device.name} ambilight"
@property
def _brightness_property(self):
return "bright"
def _get_property(self, prop, default=None):
bg_prop = self.PROPERTIES_MAPPING.get(prop)
if not bg_prop:
bg_prop = f"bg_{prop}"
return super()._get_property(bg_prop, default)
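# Illustrative sketch (not part of the platform): the mired bounds used by
# YeelightAmbientLight above follow from the reciprocal relationship
# mired = 1,000,000 / kelvin, so the 1700-6500 K range maps to roughly 588-153 mireds
# (the warmest colour temperature has the largest mired value).
def _example_kelvin_to_mired(kelvin):
    """Hypothetical stand-in for Home Assistant's kelvin-to-mired helper."""
    return 1000000 / kelvin
assert 153 <= _example_kelvin_to_mired(6500) <= 154
assert 588 <= _example_kelvin_to_mired(1700) <= 589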
|
|
# coding=utf-8
# Copyright 2021 Google Health Research.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utilities to handle EHR predictive task."""
import abc
import copy
from typing import Any, Dict, List, Mapping, Optional, Union
from ehr_prediction_modeling import mask_manager
from ehr_prediction_modeling.tasks import mlp_task_layer
from ehr_prediction_modeling.tasks import task_data
from ehr_prediction_modeling.tasks import task_layers
from ehr_prediction_modeling.tasks import task_masks
from ehr_prediction_modeling.utils import batches
from ehr_prediction_modeling.utils import mask_utils
import tensorflow.compat.v1 as tf
from ehr_prediction_modeling import configdict
class Task(metaclass=abc.ABCMeta):
"""Interface for dealing with tasks."""
task_type = ""
def __init__(self, config, label_keys=None):
self._config = config
self.mask_manager = None
self._config.eval_masks = self._update_eval_mask_names_list(
self._config.eval_masks)
self._label_keys = label_keys if label_keys else []
self._task_layer = task_layers.get_task_layer(config, self.num_targets)
self._init_mask_manager()
def _init_mask_manager(self):
self.mask_manager = mask_manager.MaskManager(
task_config=self._config,
label_keys=self._label_keys,
window_hours=self.window_hours,
supported_train_masks=self._supported_train_masks,
supported_eval_masks=self._supported_eval_masks,
)
@property
@abc.abstractmethod
def default_masks(self) -> List[str]:
"""A list of masks that are used in all this task's composite masks."""
@property
def _supported_train_masks(self) -> Dict[str, List[str]]:
return {
task_masks.Train.BASE:
self.default_masks,
}
@property
@abc.abstractmethod
def _unformatted_supported_eval_masks(self) -> Mapping[str, List[str]]:
"""Returns mapping of supported eval masks without task_type prepended..
Returns:
Map the names of masks (without task_type prepended) available during
evaluation to their components.
"""
def _update_eval_mask_names_list(self, eval_masks: List[str]) -> List[str]:
"""Updates eval mask names to include type and expand hours since event.
Args:
eval_masks: Eval masks to update the names of.
Returns:
An updated list of eval mask names. See the Returns section of
_update_eval_mask_names for a complete description of how mask names are updated.
"""
# Convert to a dict so the same update fn can be applied to a list.
empty_values = [None] * len(eval_masks)
eval_mask_dict = dict(zip(eval_masks, empty_values))
updated_eval_masks = self._update_eval_mask_names(eval_mask_dict)
return list(updated_eval_masks)
def _update_eval_mask_names(self, eval_masks: Dict[str, Optional[List[str]]]):
"""Updates eval mask names to include type and expand hours since event.
Args:
eval_masks: Eval masks to update the names of.
Returns:
A dict of eval masks with the keys updated. Keys will have task_type added
and any mask with 'since_event_eval' in the name will be expanded based on
time_since_event_hours_list. For example, if since_event_eval mask is a
key in eval_masks and config.time_since_event_hours_list = [24, 48]. The
resulting dict will have one entry for 24 hours after event and one
entry for 48 hours after event, but no entry for the bare
since_event_eval mask. If config.time_since_event_hours_list is empty, any
since_event_eval mask will be removed.
"""
since_event_masks = [
mask_name for mask_name in eval_masks.keys()
if task_masks.Eval.SINCE_EVENT in mask_name
]
for mask_name in since_event_masks:
for hours in self._config.get("time_since_event_hours_list", []):
new_mask_name = mask_name.replace(
task_masks.Eval.SINCE_EVENT,
f"{hours}_{mask_utils.SINCE_EVENT_MASK_SUFFIX}")
eval_masks[new_mask_name] = eval_masks[mask_name]
del eval_masks[mask_name]
return {
mask_utils.get_unique_mask_name(self.task_type, mask_name): components
for mask_name, components in eval_masks.items()
}
@property
def _supported_eval_masks(self) -> Dict[str, List[str]]:
"""Returns mapping of all supported eval masks with task_type prepended.
Expands since event masks to have one entry per time_since_event_hours_list.
Returns:
Map the names of masks (with task_type prepended) available during
evaluation to their components.
"""
unformatted_eval_masks = self._unformatted_supported_eval_masks
return self._update_eval_mask_names(unformatted_eval_masks)
@property
def _all_supported_masks(self) -> Dict[str, List[str]]:
masks = copy.copy(self._supported_train_masks)
masks.update(self._supported_eval_masks)
return masks
@property
def layer(self) -> task_layers.TaskLayers:
return self._task_layer
@property
def name(self) -> str:
return self._config.name
@property
@abc.abstractmethod
def prediction_task_type(self) -> str:
"""Returns one of the values defined in {types.TaskType}."""
@abc.abstractmethod
def get_label_dicts(
self) -> Dict[str, Union[tf.FixedLenSequenceFeature, tf.FixedLenFeature]]:
"""Returns a dictionary of labels to tensors that are used for the task."""
@property
@abc.abstractmethod
def num_targets(self) -> int:
"""Total number of targets for the task."""
@property
@abc.abstractmethod
def target_names(self) -> List[str]:
"""Names of targets for the task."""
@property
@abc.abstractmethod
def window_hours(self) -> List[int]:
"""The total number of time horizons.
Note that this list possibly contains dupplicated values, e.g. with the Labs
task. It is because there are several labs with the same time horizons,
corresponding to several different targets that may have different mask
values (see mask_utils.TIME_CUTOFF_MASK).
Returns:
A list of the time horizons (in hours) of all the targets. If several
targets have the same time horizon, the values are duplicated.
"""
@abc.abstractmethod
def _get_all_task_variables(
self, batch: batches.TFBatch,
model_output: tf.Tensor) -> task_data.TaskVariables:
"""Fetches all variables used by the task."""
def _get_targets_and_masks(self,
batch: batches.TFBatch) -> task_data.TaskVariables:
targets = self.get_targets(batch)
train_loss_mask = self.get_train_mask(batch)
eval_mask_dict = self.get_eval_mask_dict(batch)
return task_data.TaskVariables(
targets=targets,
train_mask=train_loss_mask,
eval_mask_dict=eval_mask_dict,
)
def get_task_variables(
self, batch: batches.TFBatch,
model_output: Union[tf.Tensor, None]) -> task_data.TaskVariables:
"""Computes variables for task.
Args:
batch: A tf.NextQueuedSequenceBatch or batches.TFBatch containing a batch
of data.
model_output: A Tensor with the output from the model, in shape wnt
[num_unroll, batch_size, ndim_model_output], or None.
Returns:
task_data.TaskVariables with all the variables from this task.
"""
if model_output is not None:
return self._get_all_task_variables(batch, model_output)
else:
return self._get_targets_and_masks(batch)
def get_targets(self, batch: batches.TFBatch) -> tf.Tensor:
return self._extract_labels(batch, self._label_keys)
def get_train_mask(self, batch: batches.TFBatch) -> tf.Tensor:
"""Computes the mask to be used to mask the training loss.
Args:
batch: tf.NextQueuedSequenceBatch, containing a batch of data.
Returns:
Tensor, the loss mask to be used in training, in time-major
shape wnct [num_unroll, batch_size, channels, num_targets].
"""
train_mask = self._config.get("train_mask", task_masks.Train.BASE)
if train_mask not in self._supported_train_masks:
raise ValueError(
"Train mask {mask} is not supported".format(mask=train_mask))
return self.mask_manager.get_masks([train_mask], batch)[train_mask]
def get_eval_mask_dict(
self, batch: batches.TFBatch) -> Dict[str, tf.Tensor]:
"""Computes the dict of loss masks to be used to mask evaluation.
Args:
batch: tf.NextQueuedSequenceBatch, containing a batch of data.
Returns:
dict of string mask name to Tensors, the loss masks to be used in
evaluation, in time-major shape wnct [num_unroll, batch_size, channels,
num_targets].
"""
for eval_mask in self._config.eval_masks:
if eval_mask not in self._supported_eval_masks:
raise ValueError(
"Eval mask {mask} is not supported".format(mask=eval_mask))
return self.mask_manager.get_masks(
self._config.eval_masks,
batch,
)
@property
def loss_weight(self) -> float:
return self._config.loss_weight
@property
def eval_masks(self) -> List[str]:
return self._config.eval_masks
@property
def task_layer_sizes(self) -> List[int]:
return self._config.get("task_layer_sizes", []).copy()
def _extract_labels(self, batch: batches.TFBatch,
label_keys: List[str]) -> tf.Tensor:
"""Extracts the labels denoted by label_keys from the data.
Args:
batch: tf.NextQueuedSequenceBatch, containing a batch of data.
label_keys: list of keys used to extract labels from the batch.
Returns:
Tensor in time-major shape wnct
[num_unroll, batch_size, channels, num_targets] with the labels for each
key given in label_keys.
"""
return tf.stack(
[batch.sequences[label_key] for label_key in label_keys], axis=3)
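# Shape sketch (illustrative): with three label keys, each batch.sequences[key] holding
# a tensor of shape wnc [num_unroll, batch_size, channels], tf.stack(..., axis=3) above
# yields a single labels tensor of shape wnct [num_unroll, batch_size, channels, 3].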
def get_hidden_layer(self) -> mlp_task_layer.HiddenTaskLayerType:
"""Returns the underlying modeling layer from this tasks layer."""
return self.layer.get_hidden_layer()
# pytype: disable=bad-return-type
@classmethod
@abc.abstractmethod
def config(cls) -> configdict.ConfigDict:
"""Config creation for the task."""
# pytype: enable=bad-return-type
# pytype: disable=bad-return-type
@classmethod
@abc.abstractmethod
def default_configs(cls) -> List[configdict.ConfigDict]:
"""Default task config."""
# pytype: enable=bad-return-type
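# Illustrative sketch (not part of the library): a dependency-free version of the
# eval-mask name expansion documented in Task._update_eval_mask_names. The
# "since_event_eval" literal and the "{task_type}_{mask_name}" unique-name scheme are
# assumptions made only for this example.
def _example_expand_eval_masks(task_type, eval_masks, hours_list,
                               since_event="since_event_eval"):
    masks = dict(eval_masks)
    for name in [n for n in masks if since_event in n]:
        # One entry per configured number of hours since the event ...
        for hours in hours_list:
            masks[name.replace(since_event, f"{hours}_hours_since_event")] = masks[name]
        # ... and the bare since-event mask is dropped (also when hours_list is empty).
        del masks[name]
    return {f"{task_type}_{name}": components for name, components in masks.items()}
assert sorted(_example_expand_eval_masks("mortality", {"since_event_eval": None},
                                         [24, 48])) == [
    "mortality_24_hours_since_event", "mortality_48_hours_since_event"]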
|
|
from __future__ import print_function
import uuid
import random
import socket
import sys
import os
import traceback
from threading import Thread, Lock, Event
from multiprocessing.pool import ThreadPool
from contextlib import contextmanager
from datetime import datetime
from time import time, sleep
try:
import cPickle as pickle
except ImportError:
import pickle
import zmq
from ..compatibility import Queue, unicode
from .. import core
def pickle_dumps(obj):
return pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL)
MAX_DEALERS = 100
with open('log.workers', 'w') as f:  # truncate any existing log file
pass
def log(*args):
with open('log.workers', 'a') as f:
print('\n', *args, file=f)
log('Hello from worker.py')
@contextmanager
def logerrors():
try:
yield
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = ''.join(traceback.format_tb(exc_traceback))
log('Error!', str(e))
log('Traceback', str(tb))
raise
class Worker(object):
""" Asynchronous worker in a distributed dask computation pool
Parameters
----------
scheduler: string
Address of scheduler
hostname: string
A visible hostname/IP of this worker to the network
port_to_workers: int
Port on which to listen to worker connections
bind_to_workers: string
Addresses from which we accept worker connections, defaults to *
heartbeat: int, bool
The time between heartbeats in seconds, or False to turn off
heartbeats, defaults to 5
State
-----
status: string
Status of worker, either 'run' or 'closed'
to_workers: zmq.Socket
Router socket to serve requests from other workers
to_scheduler: zmq.Socket
Dealer socket to communicate with scheduler
See Also
--------
dask.distributed.scheduler.Scheduler
"""
def __init__(self, scheduler, data=None, nthreads=100,
hostname=None, port_to_workers=None, bind_to_workers='*',
block=False, heartbeat=5):
if isinstance(scheduler, unicode):
scheduler = scheduler.encode()
self.data = data if data is not None else dict()
self.pool = ThreadPool(nthreads)
self.scheduler = scheduler
self.heartbeat = heartbeat
self.status = 'run'
self.context = zmq.Context()
self.hostname = hostname or socket.gethostname()
self.to_workers = self.context.socket(zmq.ROUTER)
if port_to_workers is None:
port_to_workers = self.to_workers.bind_to_random_port('tcp://' + bind_to_workers)
else:
self.to_workers.bind('tcp://%s:%d' % (bind_to_workers, port_to_workers))
self.address = ('tcp://%s:%s' % (self.hostname, port_to_workers)).encode()
self.dealers = dict()
self.lock = Lock()
self.queues = dict()
self.to_scheduler = self.context.socket(zmq.DEALER)
self.to_scheduler.setsockopt(zmq.IDENTITY, self.address)
self.to_scheduler.connect(scheduler)
self.send_to_scheduler({'function': 'register'}, {'pid': os.getpid()})
self.scheduler_functions = {'status': self.status_to_scheduler,
'compute': self.compute,
'getitem': self.getitem_scheduler,
'delitem': self.delitem,
'setitem': self.setitem,
'close': self.close_from_scheduler}
self.worker_functions = {'getitem': self.getitem_worker,
'getitem-ack': self.getitem_ack,
'status': self.status_to_worker}
log(self.address, 'Start up', self.scheduler)
self._listen_scheduler_thread = Thread(target=self.listen_to_scheduler)
self._listen_scheduler_thread.start()
self._listen_workers_thread = Thread(target=self.listen_to_workers)
self._listen_workers_thread.start()
if self.heartbeat:
self._heartbeat_thread = Thread(target=self.heart,
kwargs={'pulse': heartbeat})
self._heartbeat_thread.event = Event()
self._heartbeat_thread.start()
if block:
self.block()
def status_to_scheduler(self, header, payload):
out_header = {'jobid': header.get('jobid')}
log(self.address, 'Status check', header['address'])
self.send_to_scheduler(out_header, 'OK')
def status_to_worker(self, header, payload):
out_header = {'jobid': header.get('jobid')}
log(self.address, 'Status check', header['address'])
self.send_to_worker(header['address'], out_header, 'OK')
def getitem_worker(self, header, payload):
""" Get data and send to another worker
See also:
Worker.collect
"""
loads = header.get('loads', pickle.loads)
payload = loads(payload)
log(self.address, "Getitem for worker", header, payload)
header2 = {'function': 'getitem-ack',
'jobid': header.get('jobid')}
try:
result = self.data[payload['key']]
header2['status'] = 'OK'
except KeyError as e:
result = e
header2['status'] = 'Bad key'
payload = {'key': payload['key'],
'value': result,
'queue': payload['queue']}
self.send_to_worker(header['address'], header2, payload)
def getitem_ack(self, header, payload):
""" Receive data after sending a getitem request
See also:
Worker.getitem_worker
Worker.collect
"""
with logerrors():
loads = header.get('loads', pickle.loads)
payload = loads(payload)
log(self.address, 'Getitem ack', payload)
assert header['status'] == 'OK'
self.data[payload['key']] = payload['value']
self.queues[payload['queue']].put(payload['key'])
def getitem_scheduler(self, header, payload):
""" Send local data to scheduler
See also:
Scheduler.gather
Scheduler.getitem_ack
"""
loads = header.get('loads', pickle.loads)
payload = loads(payload)
log(self.address, 'Get from scheduler', payload)
key = payload['key']
header2 = {'jobid': header.get('jobid')}
try:
result = self.data[key]
header2['status'] = 'OK'
except KeyError as e:
result = e
header2['status'] = 'Bad key'
header2['function'] = 'getitem-ack'
payload2 = {'key': key, 'value': result, 'queue': payload['queue']}
self.send_to_scheduler(header2, payload2)
def setitem(self, header, payload):
""" Assign incoming data to local dictionary
See also:
Scheduler.scatter
Scheduler.send_data
Scheduler.setitem_ack
"""
loads = header.get('loads', pickle.loads)
payload = loads(payload)
log(self.address, 'Setitem', payload['key'])
key = payload['key']
value = payload['value']
self.data[key] = value
queue = payload.get('queue', False)
if queue:
header2 = {'jobid': header.get('jobid'),
'function': 'setitem-ack'}
payload2 = {'key': key, 'queue': queue}
log(self.address, 'Setitem send ack to scheduler',
header2, payload2)
self.send_to_scheduler(header2, payload2)
def delitem(self, header, payload):
""" Remove item from local data """
loads = header.get('loads', pickle.loads)
payload = loads(payload)
log(self.address, 'Delitem', payload)
key = payload['key']
del self.data[key]
# TODO: this should be replaced with a delitem-ack call
if payload.get('reply', False):
self.send_to_scheduler({'jobid': header.get('jobid')}, 'OK')
def send_to_scheduler(self, header, payload):
""" Send data to scheduler """
log(self.address, 'Send to scheduler', header)
header['address'] = self.address
header['timestamp'] = datetime.utcnow()
dumps = header.get('dumps', pickle_dumps)
with self.lock:
self.to_scheduler.send_multipart([pickle_dumps(header),
dumps(payload)])
def send_to_worker(self, address, header, payload):
""" Send data to workers
This is a bit tricky. We want to have one DEALER socket per worker.
We cache these in ``self.dealers``. If the number of worker peers is
high then we might run into having too many file descriptors open.
Currently we flush the cache of dealers periodically. This has yet to
be tested.
"""
if address not in self.dealers:
if len(self.dealers) > MAX_DEALERS:
for sock in self.dealers.values():
sock.close()
self.dealers.clear()
sock = self.context.socket(zmq.DEALER)
sock.connect(address)
self.dealers[address] = sock
header['address'] = self.address
header['timestamp'] = datetime.utcnow()
log(self.address, 'Send to worker', address, header)
dumps = header.get('dumps', pickle_dumps)
with self.lock:
self.dealers[address].send_multipart([pickle_dumps(header),
dumps(payload)])
def listen_to_scheduler(self):
"""
Event loop listening to commands from scheduler
Header and Payload should deserialize into dicts of the following form:
Header
{'function': name of function to call, see self.functions,
'jobid': job identifier, defaults to None,
'address': name of sender, defaults to zmq identity}
Payload
--Function specific, for setitem might include the following--
{'key': 'x',
'value': 10}
So the minimal request would be as follows:
>>> sock = context.socket(zmq.DEALER) # doctest: +SKIP
>>> sock.connect('tcp://my-address') # doctest: +SKIP
>>> header = {'function': 'status'}
>>> payload = {}
>>> sock.send_multipart([dumps(header), dumps(payload)]) # doctest: +SKIP
Or a more complex packet might be as follows:
>>> header = {'function': 'setitem', 'jobid': 1}
>>> payload = {'key': 'x', 'value': 10}
>>> sock.send_multipart([dumps(header), dumps(payload)]) # doctest: +SKIP
We match the function string against ``self.scheduler_functions`` to
pull out the actual function. We then execute this function with the
provided arguments in another thread from ``self.pool``. That function
may then choose to send results back to the sender.
See Also:
listen_to_workers
send_to_scheduler
"""
while self.status != 'closed':
# Wait on request
try:
if not self.to_scheduler.poll(100):
continue
except zmq.ZMQError:
break
with logerrors():
with self.lock:
header, payload = self.to_scheduler.recv_multipart()
header = pickle.loads(header)
log(self.address, 'Receive job from scheduler', header)
try:
function = self.scheduler_functions[header['function']]
except KeyError:
log(self.address, 'Unknown function', header)
else:
future = self.pool.apply_async(function, args=(header, payload))
def listen_to_workers(self):
""" Listen to communications from workers
See ``listen_to_scheduler`` for more in depth docstring
"""
while self.status != 'closed':
# Wait on request
try:
if not self.to_workers.poll(100):
continue
except zmq.ZMQError:
break
with logerrors():
address, header, payload = self.to_workers.recv_multipart()
header = pickle.loads(header)
if 'address' not in header:
header['address'] = address
log(self.address, 'Receive job from worker', address, header)
try:
function = self.worker_functions[header['function']]
except KeyError:
log(self.address, 'Unknown function', header)
else:
future = self.pool.apply_async(function, args=(header, payload))
def block(self):
""" Block until listener threads close
Warning: unless some other thread calls `.close()`, there is usually no
easy way to escape from this.
"""
self._listen_workers_thread.join()
self._listen_scheduler_thread.join()
if self.heartbeat:
self._heartbeat_thread.join()
log('Unblocked')
def collect(self, locations):
""" Collect data from peers
Given a dictionary of desired data and who holds that data
This fires off getitem requests to one of the hosts for each piece of
data then blocks on all of the responses, then inserts this data into
``self.data``.
Example
-------
>>> locations = {'x': ['tcp://alice:5000', 'tcp://bob:5000'],
... 'y': ['tcp://bob:5000']}
>>> worker.collect(locations) # doctest: +SKIP
Protocol
--------
1. Worker creates unique queue
2. For each data this worker chooses a worker at random that holds
that data and fires off a 'getitem' request
{'key': ..., 'queue': ...}
3. Recipient worker handles the request and fires back a 'getitem-ack'
with the data
{'key': ..., 'value': ..., 'queue': ...}
4. Local getitem_ack function adds the value to the local dict and
puts the key in the queue
5. Once all keys have run through the queue the collect function wakes
up again, releases the queue, and returns control
6. This is often called from Worker.compute; control often ends there.
See also:
Worker.getitem
Worker.getitem_ack
Worker.compute
Scheduler.trigger_task
"""
socks = []
qkey = str(uuid.uuid1())
queue = Queue()
self.queues[qkey] = queue
# Send out requests for data
log(self.address, 'Collect data from peers', locations)
start = time()
counter = 0
with logerrors():
for key, locs in locations.items():
if key in self.data: # already have this locally
continue
worker = random.choice(tuple(locs)) # randomly select one peer
header = {'jobid': key,
'function': 'getitem'}
payload = {'function': 'getitem',
'key': key,
'queue': qkey}
self.send_to_worker(worker, header, payload)
counter += 1
for i in range(counter):
queue.get()
del self.queues[qkey]
log(self.address, 'Collect finishes', time() - start, 'seconds')
def compute(self, header, payload):
""" Compute dask task
Given a key, task, and locations of data
>>> from operator import add
>>> payload = {'key': 'z',
... 'task': (add, 'x', 'y'),
... 'locations': {'x': ['tcp://alice:5000']},
... 'queue': 'unique-identifier'}
Collect necessary data from locations (see ``collect``),
then compute task and store result into ``self.data``. Finally report
back to the scheduler that we're free.
"""
with logerrors():
# Unpack payload
loads = header.get('loads', pickle.loads)
payload = loads(payload)
locations = payload['locations']
key = payload['key']
task = payload['task']
# Grab data from peers
if locations:
self.collect(locations)
# Do actual work
start = time()
status = "OK"
log(self.address, "Start computation", key, task)
try:
result = core.get(self.data, task)
end = time()
except Exception as e:
status = e
end = time()
else:
self.data[key] = result
log(self.address, "End computation", key, task, status)
# Report finished to scheduler
header2 = {'function': 'finished-task'}
result = {'key': key,
'duration': end - start,
'status': status,
'dependencies': list(locations),
'queue': payload['queue']}
self.send_to_scheduler(header2, result)
def close_from_scheduler(self, header, payload):
log(self.address, 'Close signal from scheduler')
self.close()
def close(self):
with self.lock:
if self.status != 'closed':
self.status = 'closed'
do_close = True
else:
do_close = False
if do_close:
log(self.address, 'Close')
self.status = 'closed'
if self.heartbeat:
self._heartbeat_thread.event.set() # stop heartbeat
for sock in self.dealers.values():
sock.close(linger=1)
self.to_workers.close(linger=1)
self.to_scheduler.close(linger=1)
self.pool.close()
self.pool.join()
self.block()
self.context.destroy(linger=3)
def __del__(self):
self.close()
def heart(self, pulse=5):
"""Send a message to scheduler at a given interval"""
while self.status != 'closed':
header = {'function': 'heartbeat'}
payload = {}
self.send_to_scheduler(header, payload)
self._heartbeat_thread.event.wait(pulse)
def status():
return 'OK'
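# Illustrative usage sketch (hypothetical addresses; requires a running scheduler):
#
#   >>> w = Worker('tcp://scheduler-host:4444', nthreads=10)  # doctest: +SKIP
#   >>> w.address                                             # doctest: +SKIP
#   b'tcp://worker-host:12345'
#   >>> w.close()                                             # doctest: +SKIP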
|
|
#!/usr/bin/env python
#
#===- run-clang-tidy.py - Parallel clang-tidy runner ---------*- python -*--===#
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===------------------------------------------------------------------------===#
# FIXME: Integrate with clang-tidy-diff.py
"""
Parallel clang-tidy runner
==========================
Runs clang-tidy over all files in a compilation database. Requires clang-tidy
and clang-apply-replacements in $PATH.
Example invocations.
- Run clang-tidy on all files in the current working directory with a default
set of checks and show warnings in the cpp files and all project headers.
run-clang-tidy.py $PWD
- Fix all header guards.
run-clang-tidy.py -fix -checks=-*,llvm-header-guard
- Fix all header guards included from clang-tidy and header guards
for clang-tidy headers.
run-clang-tidy.py -fix -checks=-*,llvm-header-guard extra/clang-tidy \
-header-filter=extra/clang-tidy
Compilation database setup:
http://clang.llvm.org/docs/HowToSetupToolingForLLVM.html
"""
from __future__ import print_function
import argparse
import glob
import json
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import traceback
import yaml
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue as queue
else:
import queue as queue
def find_compilation_database(path):
"""Adjusts the directory until a compilation database is found."""
result = './'
while not os.path.isfile(os.path.join(result, path)):
if os.path.realpath(result) == '/':
print('Error: could not find compilation database.', file=sys.stderr)
sys.exit(1)
result += '../'
return os.path.realpath(result)
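# For example (illustrative): if the current directory is /src/project/sub and the
# database lives at /src/project/compile_commands.json, the walk tries './', then
# './../', and returns the real path '/src/project'.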
def make_absolute(f, directory):
if os.path.isabs(f):
return f
return os.path.normpath(os.path.join(directory, f))
def get_tidy_invocation(f, clang_tidy_binary, checks, tmpdir, build_path,
header_filter, extra_arg, extra_arg_before, quiet,
config):
"""Gets a command line for clang-tidy."""
start = [clang_tidy_binary]
if header_filter is not None:
start.append('-header-filter=' + header_filter)
else:
# Show warnings in all in-project headers by default.
start.append('-header-filter=^' + build_path + '/.*')
if checks:
start.append('-checks=' + checks)
if tmpdir is not None:
start.append('-export-fixes')
# Get a temporary file. We immediately close the handle so clang-tidy can
# overwrite it.
(handle, name) = tempfile.mkstemp(suffix='.yaml', dir=tmpdir)
os.close(handle)
start.append(name)
for arg in extra_arg:
start.append('-extra-arg=%s' % arg)
for arg in extra_arg_before:
start.append('-extra-arg-before=%s' % arg)
start.append('-p=' + build_path)
if quiet:
start.append('-quiet')
if config:
start.append('-config=' + config)
start.append(f)
return start
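# For example (hypothetical paths), get_tidy_invocation('foo.cpp', 'clang-tidy',
# '-*,llvm-*', None, '/build', None, [], [], False, None) returns:
#   ['clang-tidy', '-header-filter=^/build/.*', '-checks=-*,llvm-*', '-p=/build', 'foo.cpp']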
def merge_replacement_files(tmpdir, mergefile):
"""Merge all replacement files in a directory into a single file"""
# The fixes suggested by clang-tidy >= 4.0.0 are given under
# the top level key 'Diagnostics' in the output yaml files
mergekey="Diagnostics"
merged=[]
for replacefile in glob.iglob(os.path.join(tmpdir, '*.yaml')):
content = yaml.safe_load(open(replacefile, 'r'))
if not content:
continue # Skip empty files.
merged.extend(content.get(mergekey, []))
if merged:
# MainSourceFile: The key is required by the definition inside
# include/clang/Tooling/ReplacementsYaml.h, but the value
# is actually never used inside clang-apply-replacements,
# so we set it to '' here.
output = { 'MainSourceFile': '', mergekey: merged }
with open(mergefile, 'w') as out:
yaml.safe_dump(output, out)
else:
# Empty the file:
open(mergefile, 'w').close()
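# The merged file written above has the schematic form (illustrative):
#   MainSourceFile: ''
#   Diagnostics:
#     - ...every entry found under the 'Diagnostics' key of the per-file *.yaml fixes...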
def check_clang_apply_replacements_binary(args):
"""Checks if invoking supplied clang-apply-replacements binary works."""
try:
subprocess.check_call([args.clang_apply_replacements_binary, '--version'])
except:
print('Unable to run clang-apply-replacements. Is clang-apply-replacements '
'binary correctly specified?', file=sys.stderr)
traceback.print_exc()
sys.exit(1)
def apply_fixes(args, tmpdir):
"""Calls clang-apply-fixes on a given directory."""
invocation = [args.clang_apply_replacements_binary]
if args.format:
invocation.append('-format')
if args.style:
invocation.append('-style=' + args.style)
invocation.append(tmpdir)
subprocess.call(invocation)
def run_tidy(args, tmpdir, build_path, queue, lock, failed_files):
"""Takes filenames out of queue and runs clang-tidy on them."""
while True:
name = queue.get()
invocation = get_tidy_invocation(name, args.clang_tidy_binary, args.checks,
tmpdir, build_path, args.header_filter,
args.extra_arg, args.extra_arg_before,
args.quiet, args.config)
proc = subprocess.Popen(invocation, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = proc.communicate()
if proc.returncode != 0:
failed_files.append(name)
with lock:
sys.stdout.write(' '.join(invocation) + '\n' + output.decode('utf-8') + '\n')
if len(err) > 0:
sys.stderr.write(err.decode('utf-8') + '\n')
queue.task_done()
def main():
parser = argparse.ArgumentParser(description='Runs clang-tidy over all files '
'in a compilation database. Requires '
'clang-tidy and clang-apply-replacements in '
'$PATH.')
parser.add_argument('-clang-tidy-binary', metavar='PATH',
default='clang-tidy',
help='path to clang-tidy binary')
parser.add_argument('-clang-apply-replacements-binary', metavar='PATH',
default='clang-apply-replacements',
help='path to clang-apply-replacements binary')
parser.add_argument('-checks', default=None,
help='checks filter, when not specified, use clang-tidy '
'default')
parser.add_argument('-config', default=None,
help='Specifies a configuration in YAML/JSON format: '
' -config="{Checks: \'*\', '
' CheckOptions: [{key: x, '
' value: y}]}" '
'When the value is empty, clang-tidy will '
'attempt to find a file named .clang-tidy for '
'each source file in its parent directories.')
parser.add_argument('-header-filter', default=None,
help='regular expression matching the names of the '
'headers to output diagnostics from. Diagnostics from '
'the main file of each translation unit are always '
'displayed.')
parser.add_argument('-export-fixes', metavar='filename', dest='export_fixes',
help='Create a yaml file to store suggested fixes in, '
'which can be applied with clang-apply-replacements.')
parser.add_argument('-j', type=int, default=0,
help='number of tidy instances to be run in parallel.')
parser.add_argument('files', nargs='*', default=['.*'],
help='files to be processed (regex on path)')
parser.add_argument('-fix', action='store_true', help='apply fix-its')
parser.add_argument('-format', action='store_true', help='Reformat code '
'after applying fixes')
parser.add_argument('-style', default='file', help='The style to use when '
'reformatting code after applying fixes')
parser.add_argument('-p', dest='build_path',
help='Path used to read a compile command database.')
parser.add_argument('-extra-arg', dest='extra_arg',
action='append', default=[],
help='Additional argument to append to the compiler '
'command line.')
parser.add_argument('-extra-arg-before', dest='extra_arg_before',
action='append', default=[],
help='Additional argument to prepend to the compiler '
'command line.')
parser.add_argument('-quiet', action='store_true',
help='Run clang-tidy in quiet mode')
args = parser.parse_args()
db_path = 'compile_commands.json'
if args.build_path is not None:
build_path = args.build_path
else:
# Find our database
build_path = find_compilation_database(db_path)
try:
invocation = [args.clang_tidy_binary, '-list-checks']
invocation.append('-p=' + build_path)
if args.checks:
invocation.append('-checks=' + args.checks)
invocation.append('-')
subprocess.check_call(invocation)
except:
print('Unable to run clang-tidy.', file=sys.stderr)
sys.exit(1)
# Load the database and extract all files.
database = json.load(open(os.path.join(build_path, db_path)))
files = [make_absolute(entry['file'], entry['directory'])
for entry in database]
max_task = args.j
if max_task == 0:
max_task = multiprocessing.cpu_count()
tmpdir = None
if args.fix or args.export_fixes:
check_clang_apply_replacements_binary(args)
tmpdir = tempfile.mkdtemp()
# Build up a big regexy filter from all command line arguments.
file_name_re = re.compile('|'.join(args.files))
return_code = 0
try:
# Spin up a bunch of tidy-launching threads.
task_queue = queue.Queue(max_task)
# List of files with a non-zero return code.
failed_files = []
lock = threading.Lock()
for _ in range(max_task):
t = threading.Thread(target=run_tidy,
args=(args, tmpdir, build_path, task_queue, lock, failed_files))
t.daemon = True
t.start()
# Fill the queue with files.
for name in files:
if file_name_re.search(name):
task_queue.put(name)
# Wait for all threads to be done.
task_queue.join()
if len(failed_files):
return_code = 1
except KeyboardInterrupt:
# This is a sad hack. Unfortunately subprocess goes
# bonkers with ctrl-c and we start forking merrily.
print('\nCtrl-C detected, goodbye.')
if tmpdir:
shutil.rmtree(tmpdir)
os.kill(0, 9)
if args.export_fixes:
print('Writing fixes to ' + args.export_fixes + '...')
try:
merge_replacement_files(tmpdir, args.export_fixes)
except:
print('Error exporting fixes.\n', file=sys.stderr)
traceback.print_exc()
return_code=1
if args.fix:
print('Applying fixes...')
try:
apply_fixes(args, tmpdir)
except:
print('Error applying fixes.\n', file=sys.stderr)
traceback.print_exc()
return_code=1
if tmpdir:
shutil.rmtree(tmpdir)
sys.exit(return_code)
if __name__ == '__main__':
main()
|
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Tests for Layer1 of DynamoDB v2
"""
import time
from tests.unit import unittest
from boto.dynamodb2 import exceptions
from boto.dynamodb2.layer1 import DynamoDBConnection
class DynamoDBv2Layer1Test(unittest.TestCase):
dynamodb = True
def setUp(self):
self.dynamodb = DynamoDBConnection()
self.table_name = 'test-%d' % int(time.time())
self.hash_key_name = 'username'
self.hash_key_type = 'S'
self.range_key_name = 'date_joined'
self.range_key_type = 'N'
self.read_units = 5
self.write_units = 5
self.attributes = [
{
'AttributeName': self.hash_key_name,
'AttributeType': self.hash_key_type,
},
{
'AttributeName': self.range_key_name,
'AttributeType': self.range_key_type,
}
]
self.schema = [
{
'AttributeName': self.hash_key_name,
'KeyType': 'HASH',
},
{
'AttributeName': self.range_key_name,
'KeyType': 'RANGE',
},
]
self.provisioned_throughput = {
'ReadCapacityUnits': self.read_units,
'WriteCapacityUnits': self.write_units,
}
self.lsi = [
{
'IndexName': 'MostRecentIndex',
'KeySchema': [
{
'AttributeName': self.hash_key_name,
'KeyType': 'HASH',
},
{
'AttributeName': self.range_key_name,
'KeyType': 'RANGE',
},
],
'Projection': {
'ProjectionType': 'KEYS_ONLY',
}
}
]
def create_table(self, table_name, attributes, schema,
provisioned_throughput, lsi=None, wait=True):
# Note: create_table takes its arguments in a slightly different (and less intuitive) order than this helper.
result = self.dynamodb.create_table(
attributes,
table_name,
schema,
provisioned_throughput,
local_secondary_indexes=lsi
)
self.addCleanup(self.dynamodb.delete_table, table_name)
if wait:
while True:
description = self.dynamodb.describe_table(table_name)
if description['Table']['TableStatus'].lower() == 'active':
return result
else:
time.sleep(5)
else:
return result
def test_integrated(self):
result = self.create_table(
self.table_name,
self.attributes,
self.schema,
self.provisioned_throughput,
self.lsi
)
self.assertEqual(
result['TableDescription']['TableName'],
self.table_name
)
description = self.dynamodb.describe_table(self.table_name)
self.assertEqual(description['Table']['ItemCount'], 0)
# Create some records.
record_1_data = {
'username': {'S': 'johndoe'},
'first_name': {'S': 'John'},
'last_name': {'S': 'Doe'},
'date_joined': {'N': '1366056668'},
'friend_count': {'N': '3'},
'friends': {'SS': ['alice', 'bob', 'jane']},
}
r1_result = self.dynamodb.put_item(self.table_name, record_1_data)
# Get the data.
record_1 = self.dynamodb.get_item(self.table_name, key={
'username': {'S': 'johndoe'},
'date_joined': {'N': '1366056668'},
}, consistent_read=True)
self.assertEqual(record_1['Item']['username']['S'], 'johndoe')
self.assertEqual(record_1['Item']['first_name']['S'], 'John')
self.assertEqual(record_1['Item']['friends']['SS'], [
'alice', 'bob', 'jane'
])
# Now in a batch.
self.dynamodb.batch_write_item({
self.table_name: [
{
'PutRequest': {
'Item': {
'username': {'S': 'jane'},
'first_name': {'S': 'Jane'},
'last_name': {'S': 'Doe'},
'date_joined': {'N': '1366056789'},
'friend_count': {'N': '1'},
'friends': {'SS': ['johndoe']},
},
},
},
]
})
# Now a query.
lsi_results = self.dynamodb.query(
self.table_name,
index_name='MostRecentIndex',
key_conditions={
'username': {
'AttributeValueList': [
{'S': 'johndoe'},
],
'ComparisonOperator': 'EQ',
},
},
consistent_read=True
)
self.assertEqual(lsi_results['Count'], 1)
results = self.dynamodb.query(self.table_name, key_conditions={
'username': {
'AttributeValueList': [
{'S': 'jane'},
],
'ComparisonOperator': 'EQ',
},
'date_joined': {
'AttributeValueList': [
{'N': '1366050000'}
],
'ComparisonOperator': 'GT',
}
}, consistent_read=True)
self.assertEqual(results['Count'], 1)
# Now a scan.
results = self.dynamodb.scan(self.table_name)
self.assertEqual(results['Count'], 2)
s_items = sorted([res['username']['S'] for res in results['Items']])
self.assertEqual(s_items, ['jane', 'johndoe'])
self.dynamodb.delete_item(self.table_name, key={
'username': {'S': 'johndoe'},
'date_joined': {'N': '1366056668'},
})
results = self.dynamodb.scan(self.table_name)
self.assertEqual(results['Count'], 1)
# Parallel scan (minus client-side threading).
self.dynamodb.batch_write_item({
self.table_name: [
{
'PutRequest': {
'Item': {
'username': {'S': 'johndoe'},
'first_name': {'S': 'Johann'},
'last_name': {'S': 'Does'},
'date_joined': {'N': '1366058000'},
'friend_count': {'N': '1'},
'friends': {'SS': ['jane']},
},
},
},
{
'PutRequest': {
'Item': {
'username': {'S': 'alice'},
'first_name': {'S': 'Alice'},
'last_name': {'S': 'Expert'},
'date_joined': {'N': '1366056800'},
'friend_count': {'N': '2'},
'friends': {'SS': ['johndoe', 'jane']},
},
},
},
]
})
time.sleep(20)
results = self.dynamodb.scan(self.table_name, segment=0, total_segments=2)
self.assertTrue(results['Count'] in [1, 2])
results = self.dynamodb.scan(self.table_name, segment=1, total_segments=2)
self.assertTrue(results['Count'] in [1, 2])
def test_without_range_key(self):
result = self.create_table(
self.table_name,
[
{
'AttributeName': self.hash_key_name,
'AttributeType': self.hash_key_type,
},
],
[
{
'AttributeName': self.hash_key_name,
'KeyType': 'HASH',
},
],
self.provisioned_throughput
)
self.assertEqual(
result['TableDescription']['TableName'],
self.table_name
)
description = self.dynamodb.describe_table(self.table_name)
self.assertEqual(description['Table']['ItemCount'], 0)
# Create some records.
record_1_data = {
'username': {'S': 'johndoe'},
'first_name': {'S': 'John'},
'last_name': {'S': 'Doe'},
'date_joined': {'N': '1366056668'},
'friend_count': {'N': '3'},
'friends': {'SS': ['alice', 'bob', 'jane']},
}
r1_result = self.dynamodb.put_item(self.table_name, record_1_data)
# Now try a range-less get.
johndoe = self.dynamodb.get_item(self.table_name, key={
'username': {'S': 'johndoe'},
}, consistent_read=True)
self.assertEqual(johndoe['Item']['username']['S'], 'johndoe')
self.assertEqual(johndoe['Item']['first_name']['S'], 'John')
self.assertEqual(johndoe['Item']['friends']['SS'], [
'alice', 'bob', 'jane'
])
def test_throughput_exceeded_regression(self):
tiny_tablename = 'TinyThroughput'
tiny = self.create_table(
tiny_tablename,
self.attributes,
self.schema,
{
'ReadCapacityUnits': 1,
'WriteCapacityUnits': 1,
}
)
self.dynamodb.put_item(tiny_tablename, {
'username': {'S': 'johndoe'},
'first_name': {'S': 'John'},
'last_name': {'S': 'Doe'},
'date_joined': {'N': '1366056668'},
})
self.dynamodb.put_item(tiny_tablename, {
'username': {'S': 'jane'},
'first_name': {'S': 'Jane'},
'last_name': {'S': 'Doe'},
'date_joined': {'N': '1366056669'},
})
self.dynamodb.put_item(tiny_tablename, {
'username': {'S': 'alice'},
'first_name': {'S': 'Alice'},
'last_name': {'S': 'Expert'},
'date_joined': {'N': '1366057000'},
})
time.sleep(20)
for i in range(100):
# This would cause an exception due to a non-existent instance variable.
self.dynamodb.scan(tiny_tablename)
def test_recursive(self):
result = self.create_table(
self.table_name,
self.attributes,
self.schema,
self.provisioned_throughput,
self.lsi
)
self.assertEqual(
result['TableDescription']['TableName'],
self.table_name
)
description = self.dynamodb.describe_table(self.table_name)
self.assertEqual(description['Table']['ItemCount'], 0)
# Create some records with one being a recursive shape.
record_1_data = {
'username': {'S': 'johndoe'},
'first_name': {'S': 'John'},
'last_name': {'S': 'Doe'},
'date_joined': {'N': '1366056668'},
'friend_count': {'N': '3'},
'friend_data': {'M': {'username': {'S': 'alice'},
'friend_count': {'N': '4'}}}
}
r1_result = self.dynamodb.put_item(self.table_name, record_1_data)
# Get the data.
record_1 = self.dynamodb.get_item(self.table_name, key={
'username': {'S': 'johndoe'},
'date_joined': {'N': '1366056668'},
}, consistent_read=True)
self.assertEqual(record_1['Item']['username']['S'], 'johndoe')
self.assertEqual(record_1['Item']['first_name']['S'], 'John')
recursive_data = record_1['Item']['friend_data']['M']
self.assertEqual(recursive_data['username']['S'], 'alice')
self.assertEqual(recursive_data['friend_count']['N'], '4')
|
|
# yellowbrick.classifier.class_prediction_error
# Shows the balance of classes and their associated predictions.
#
# Author: Larry Gray
# Author: Benjamin Bengfort
# Created: Fri Jul 20 10:26:25 2018 -0400
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: class_prediction_error.py [] lwgray@gmail.com $
"""
Shows the balance of classes and their associated predictions.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from sklearn.utils.multiclass import unique_labels
from yellowbrick.draw import bar_stack
from yellowbrick.classifier.base import ClassificationScoreVisualizer
from yellowbrick.exceptions import ModelError, YellowbrickValueError, NotFitted
try:
# See #1124: this allows compatibility for scikit-learn >= 0.20
from sklearn.metrics._classification import _check_targets
except ImportError:
from sklearn.metrics.classification import _check_targets
##########################################################################
## Class Prediction Error Chart
##########################################################################
class ClassPredictionError(ClassificationScoreVisualizer):
"""
Class Prediction Error chart that shows the support for each class in the
fitted classification model displayed as a stacked bar. Each bar is segmented
to show the distribution of predicted classes for each class. It is initialized
with a fitted model and generates a class prediction error chart on draw.
Parameters
----------
estimator : estimator
A scikit-learn estimator that should be a classifier. If the model is
not a classifier, an exception is raised. If the internal model is not
fitted, it is fit when the visualizer is fitted, unless otherwise specified
by ``is_fitted``.
ax : matplotlib Axes, default: None
The axes to plot the figure on. If not specified the current axes will be
used (or generated if required).
    classes : list of str, default: None
The class labels to use for the legend ordered by the index of the sorted
classes discovered in the ``fit()`` method. Specifying classes in this
manner is used to change the class names to a more specific format or
to label encoded integer classes. Some visualizers may also use this
field to filter the visualization for specific classes. For more advanced
usage specify an encoder rather than class labels.
encoder : dict or LabelEncoder, default: None
A mapping of classes to human readable labels. Often there is a mismatch
between desired class labels and those contained in the target variable
passed to ``fit()`` or ``score()``. The encoder disambiguates this mismatch
ensuring that classes are labeled correctly in the visualization.
is_fitted : bool or str, default="auto"
Specify if the wrapped estimator is already fitted. If False, the estimator
will be fit when the visualizer is fit, otherwise, the estimator will not be
modified. If "auto" (default), a helper method will check if the estimator
is fitted before fitting it again.
force_model : bool, default: False
Do not check to ensure that the underlying estimator is a classifier. This
will prevent an exception when the visualizer is initialized but may result
in unexpected or unintended behavior.
kwargs : dict
Keyword arguments passed to the visualizer base classes.
Attributes
----------
classes_ : ndarray of shape (n_classes,)
The class labels observed while fitting.
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting.
score_ : float
An evaluation metric of the classifier on test data produced when
``score()`` is called. This metric is between 0 and 1 -- higher scores are
generally better. For classifiers, this score is usually accuracy, but
ensure you check the underlying model for more details about the score.
predictions_ : ndarray
An ndarray of predictions whose rows are the true classes and
whose columns are the predicted classes
"""
def __init__(
self,
estimator,
ax=None,
classes=None,
encoder=None,
is_fitted="auto",
force_model=False,
**kwargs
):
super(ClassPredictionError, self).__init__(
estimator,
ax=ax,
classes=classes,
encoder=encoder,
is_fitted=is_fitted,
force_model=force_model,
**kwargs
)
def score(self, X, y):
"""
        Generates a 2D array whose rows correspond to the true classes and
        whose columns hold the counts of predicted classes for each true class
Parameters
----------
X : ndarray or DataFrame of shape n x m
A matrix of n instances with m features
y : ndarray or Series of length n
An array or series of target or class values
Returns
-------
score_ : float
Global accuracy score
"""
# Must be computed before calling super
# We're relying on predict to raise NotFitted
y_pred = self.predict(X)
y_type, y_true, y_pred = _check_targets(y, y_pred)
if y_type not in ("binary", "multiclass"):
raise YellowbrickValueError("{} is not supported".format(y_type))
# Get the indices of the unique labels
indices = unique_labels(y_true, y_pred)
labels = self._labels()
# Call super to compute self.score_ and verify classes
try:
super(ClassPredictionError, self).score(X, y)
except ModelError as e:
# raise visualizer-specific errors
if labels is not None and len(labels) < len(indices):
raise NotImplementedError(
"filtering classes is currently not supported"
)
else:
raise e
# Ensure all labels are used
if labels is not None and len(labels) > len(indices):
raise ModelError(
"y and y_pred contain zero values for one of the specified classes"
)
# Create a table of predictions whose rows are the true classes
# and whose columns are the predicted classes; each element
# is the count of predictions for that class that match the true
# value of that class.
self.predictions_ = np.array(
[
[(y_pred[y == label_t] == label_p).sum() for label_p in indices]
for label_t in indices
]
)
self.draw()
return self.score_
def draw(self):
"""
Renders the class prediction error across the axis.
Returns
-------
ax : Matplotlib Axes
The axes on which the figure is plotted
"""
if not hasattr(self, "predictions_") or not hasattr(self, "classes_"):
raise NotFitted.from_estimator(self, "draw")
legend_kws = {"bbox_to_anchor": (1.04, 0.5), "loc": "center left"}
bar_stack(
self.predictions_,
self.ax,
labels=list(self.classes_),
ticks=self.classes_,
colors=self.class_colors_,
legend_kws=legend_kws,
)
return self.ax
def finalize(self, **kwargs):
"""
Adds a title and axis labels to the visualizer, ensuring that the
y limits zoom the visualization in to the area of interest. Finalize
also calls tight layout to ensure that no parts of the figure are
cut off.
Notes
-----
Generally this method is called from show and not directly by the user.
"""
# Set the title
self.set_title("Class Prediction Error for {}".format(self.name))
# Set the axes labels
self.ax.set_xlabel("actual class")
self.ax.set_ylabel("number of predicted class")
# Compute the ceiling for the y limit
cmax = max([sum(predictions) for predictions in self.predictions_])
self.ax.set_ylim(0, cmax + cmax * 0.1)
# Ensure the legend fits on the figure
self.fig.tight_layout(rect=[0, 0, 0.90, 1])
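# NOTE: The helper below is a minimal, self-contained sketch added for
# illustration (it is not part of the visualizer API). It shows how the
# ``predictions_`` table computed in ``score()`` is laid out: rows are true
# classes, columns are predicted classes, and each cell counts how many
# instances of the true class received that prediction. Only numpy is used,
# which is already imported in this module.
def _example_prediction_table():
    y_true = np.array([0, 0, 1, 1, 2, 2])
    y_pred = np.array([0, 1, 1, 1, 2, 0])
    labels = np.unique(np.concatenate([y_true, y_pred]))
    return np.array(
        [
            [(y_pred[y_true == t] == p).sum() for p in labels]
            for t in labels
        ]
    )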
##########################################################################
## Quick Method
##########################################################################
def class_prediction_error(
estimator,
X_train,
y_train,
X_test=None,
y_test=None,
ax=None,
classes=None,
encoder=None,
is_fitted="auto",
force_model=False,
show=True,
**kwargs
):
"""Class Prediction Error
Divides the dataset X and y into train and test splits, fits the model on the train
split, then scores the model on the test split. The visualizer displays the support
for each class in the fitted classification model displayed as a stacked bar plot.
Each bar is segmented to show the distribution of predicted classes for each class.
Parameters
----------
estimator : estimator
A scikit-learn estimator that should be a classifier. If the model is
not a classifier, an exception is raised. If the internal model is not
fitted, it is fit when the visualizer is fitted, unless otherwise specified
by ``is_fitted``.
X_train : ndarray or DataFrame of shape n x m
A feature array of n instances with m features the model is trained on.
Used to fit the visualizer and also to score the visualizer if test splits are
not directly specified.
y_train : ndarray or Series of length n
An array or series of target or class values. Used to fit the visualizer and
also to score the visualizer if test splits are not specified.
X_test : ndarray or DataFrame of shape n x m, default: None
An optional feature array of n instances with m features that the model
is scored on if specified, using X_train as the training data.
y_test : ndarray or Series of length n, default: None
An optional array or series of target or class values that serve as actual
labels for X_test for scoring purposes.
ax : matplotlib Axes, default: None
The axes to plot the figure on. If not specified the current axes will be
used (or generated if required).
    classes : list of str, default: None
The class labels to use for the legend ordered by the index of the sorted
classes discovered in the ``fit()`` method. Specifying classes in this
manner is used to change the class names to a more specific format or
to label encoded integer classes. Some visualizers may also use this
field to filter the visualization for specific classes. For more advanced
usage specify an encoder rather than class labels.
encoder : dict or LabelEncoder, default: None
A mapping of classes to human readable labels. Often there is a mismatch
between desired class labels and those contained in the target variable
passed to ``fit()`` or ``score()``. The encoder disambiguates this mismatch
ensuring that classes are labeled correctly in the visualization.
is_fitted : bool or str, default="auto"
Specify if the wrapped estimator is already fitted. If False, the estimator
will be fit when the visualizer is fit, otherwise, the estimator will not be
modified. If "auto" (default), a helper method will check if the estimator
is fitted before fitting it again.
force_model : bool, default: False
Do not check to ensure that the underlying estimator is a classifier. This
will prevent an exception when the visualizer is initialized but may result
in unexpected or unintended behavior.
show: bool, default: True
        If True, calls ``show()``, which in turn calls ``plt.show()``; however,
        you cannot call ``plt.savefig`` from this signature, nor
        ``clear_figure``. If False, simply calls ``finalize()``.
kwargs: dict
Keyword arguments passed to the visualizer base classes.
Returns
-------
viz : ClassPredictionError
Returns the fitted, finalized visualizer
"""
# Instantiate the visualizer
viz = ClassPredictionError(
estimator=estimator,
ax=ax,
classes=classes,
encoder=encoder,
is_fitted=is_fitted,
force_model=force_model,
**kwargs
)
# Fit the visualizer (calls draw)
viz.fit(X_train, y_train, **kwargs)
# Score the visualizer
if X_test is not None and y_test is not None:
viz.score(X_test, y_test)
elif X_test is not None or y_test is not None:
raise YellowbrickValueError("must specify both X_test and y_test or neither")
else:
viz.score(X_train, y_train)
# Draw the final visualization
if show:
viz.show()
else:
viz.finalize()
# Return the visualizer
return viz
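if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the library):
    # run the quick method on a toy multiclass problem. Assumes scikit-learn
    # and matplotlib are installed, which yellowbrick already requires; the
    # dataset and estimator below are arbitrary choices.
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression
    from sklearn.model_selection import train_test_split
    X, y = make_classification(
        n_samples=500, n_classes=3, n_informative=4, random_state=42
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    class_prediction_error(
        LogisticRegression(max_iter=1000), X_train, y_train, X_test, y_test
    )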
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ssd_mobilenet_v2_feature_extractor."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from object_detection.models import ssd_feature_extractor_test
from object_detection.models import ssd_mobilenet_v2_feature_extractor
from object_detection.models import ssd_mobilenet_v2_keras_feature_extractor
slim = tf.contrib.slim
@parameterized.parameters(
{'use_keras': False},
{'use_keras': True},
)
class SsdMobilenetV2FeatureExtractorTest(
ssd_feature_extractor_test.SsdFeatureExtractorTestBase):
def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
use_explicit_padding=False, use_keras=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
Returns:
an ssd_meta_arch.SSDFeatureExtractor object.
"""
min_depth = 32
if use_keras:
return (ssd_mobilenet_v2_keras_feature_extractor.
SSDMobileNetV2KerasFeatureExtractor(
is_training=False,
depth_multiplier=depth_multiplier,
min_depth=min_depth,
pad_to_multiple=pad_to_multiple,
conv_hyperparams=self._build_conv_hyperparams(),
freeze_batchnorm=False,
inplace_batchnorm_update=False,
use_explicit_padding=use_explicit_padding,
name='MobilenetV2'))
else:
return ssd_mobilenet_v2_feature_extractor.SSDMobileNetV2FeatureExtractor(
False,
depth_multiplier,
min_depth,
pad_to_multiple,
self.conv_hyperparams_fn,
use_explicit_padding=use_explicit_padding)
def test_extract_features_returns_correct_shapes_128(self, use_keras):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=use_keras)
def test_extract_features_returns_correct_shapes_128_explicit_padding(
self, use_keras):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_explicit_padding=True,
use_keras=use_keras)
def test_extract_features_returns_correct_shapes_with_dynamic_inputs(
self, use_keras):
image_height = 128
image_width = 128
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 8, 8, 576), (2, 4, 4, 1280),
(2, 2, 2, 512), (2, 1, 1, 256),
(2, 1, 1, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shapes_with_dynamic_inputs(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=use_keras)
def test_extract_features_returns_correct_shapes_299(self, use_keras):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 576), (2, 10, 10, 1280),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=use_keras)
def test_extract_features_returns_correct_shapes_enforcing_min_depth(
self, use_keras):
image_height = 299
image_width = 299
depth_multiplier = 0.5**12
pad_to_multiple = 1
expected_feature_map_shape = [(2, 19, 19, 192), (2, 10, 10, 32),
(2, 5, 5, 32), (2, 3, 3, 32),
(2, 2, 2, 32), (2, 1, 1, 32)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=use_keras)
def test_extract_features_returns_correct_shapes_with_pad_to_multiple(
self, use_keras):
image_height = 299
image_width = 299
depth_multiplier = 1.0
pad_to_multiple = 32
expected_feature_map_shape = [(2, 20, 20, 576), (2, 10, 10, 1280),
(2, 5, 5, 512), (2, 3, 3, 256),
(2, 2, 2, 256), (2, 1, 1, 128)]
self.check_extract_features_returns_correct_shape(
2, image_height, image_width, depth_multiplier, pad_to_multiple,
expected_feature_map_shape, use_keras=use_keras)
def test_extract_features_raises_error_with_invalid_image_size(
self, use_keras):
image_height = 32
image_width = 32
depth_multiplier = 1.0
pad_to_multiple = 1
self.check_extract_features_raises_error_with_invalid_image_size(
image_height, image_width, depth_multiplier, pad_to_multiple,
use_keras=use_keras)
def test_preprocess_returns_correct_value_range(self, use_keras):
image_height = 128
image_width = 128
depth_multiplier = 1
pad_to_multiple = 1
test_image = np.random.rand(4, image_height, image_width, 3)
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
use_keras=use_keras)
preprocessed_image = feature_extractor.preprocess(test_image)
self.assertTrue(np.all(np.less_equal(np.abs(preprocessed_image), 1.0)))
def test_variables_only_created_in_scope(self, use_keras):
depth_multiplier = 1
pad_to_multiple = 1
scope_name = 'MobilenetV2'
self.check_feature_extractor_variables_under_scope(
depth_multiplier, pad_to_multiple, scope_name, use_keras=use_keras)
def test_variable_count(self, use_keras):
depth_multiplier = 1
pad_to_multiple = 1
variables = self.get_feature_extractor_variables(
depth_multiplier, pad_to_multiple, use_keras=use_keras)
self.assertEqual(len(variables), 292)
def test_has_fused_batchnorm(self, use_keras):
image_height = 40
image_width = 40
depth_multiplier = 1
pad_to_multiple = 1
image_placeholder = tf.placeholder(tf.float32,
[1, image_height, image_width, 3])
feature_extractor = self._create_feature_extractor(depth_multiplier,
pad_to_multiple,
use_keras=use_keras)
preprocessed_image = feature_extractor.preprocess(image_placeholder)
if use_keras:
_ = feature_extractor(preprocessed_image)
else:
_ = feature_extractor.extract_features(preprocessed_image)
self.assertTrue(any(op.type == 'FusedBatchNorm'
for op in tf.get_default_graph().get_operations()))
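# A small arithmetic sketch added for illustration (not part of the original
# test suite): it shows why ``pad_to_multiple=32`` changes the first expected
# feature map above from 19x19 to 20x20. A 299x299 input padded up to the next
# multiple of 32 becomes 320x320, and at an assumed stride of 16 the first
# feature map is 320 / 16 = 20.
def _padded_feature_map_size(image_size, pad_to_multiple, stride=16):
  padded = int(np.ceil(image_size / float(pad_to_multiple)) * pad_to_multiple)
  return padded // stride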
if __name__ == '__main__':
tf.test.main()
|
|
# A comprehensive implementation of Attention Mechanism for Neural Networks
# Supporting:
# * Multi-head attention
# * Self-attention mechanism
# * Using odin.backend functions to make it easier to transfer between
#   TensorFlow and PyTorch
#
# Some suggestions for designing attention-based models:
# * Attention First, Feedforward Later (the sandwich design) is suggested in
#   (Press et al. 2019): more attention layers at the bottom, and more
#   feed-forward layers at the end.
# * Balancing the number of self-attention and feedforward sublayers appears
#   to be a desirable property.
# * Use scale `1/sqrt(dim)` for dot-product attention (Vaswani et al. 2017).
#
# References:
# Bahdanau, D., et al., 2014. Neural Machine Translation by Jointly Learning
# to Align and Translate. arXiv:1409.0473 [cs, stat].
# Graves, A., et al., 2014. Neural Turing Machines.
# arXiv:1410.5401 [cs].
# Xu, K., et al., 2015. Show, Attend and Tell: Neural Image Caption Generation
# with Visual Attention. arXiv:1502.03044 [cs].
# Luong, M.T., et al., 2015. Effective Approaches to Attention-based Neural
# Machine Translation. arXiv:1508.04025 [cs].
# Cheng, J., et al., 2016. Long Short-Term Memory-Networks for Machine Reading.
# arXiv:1601.06733 [cs].
# Kim, Y., et al., 2017. Structured Attention Networks.
# arXiv:1702.00887 [cs].
# Vaswani, A., et al., 2017. Attention Is All You Need.
# arXiv:1706.03762 [cs].
# Mishra, N., et al., 2018. A Simple Neural Attentive Meta-Learner.
# arXiv:1707.03141 [cs, stat].
# Wang, Y., et al., 2019. Transformer-based Acoustic Modeling for Hybrid
# Speech Recognition. arXiv:1910.09799 [cs, eess].
# Park, K., 2019. github.com/Kyubyong/transformer
# Alexander H. Liu, 2019. github.com/Alexander-H-Liu/End-to-end-ASR-Pytorch
# Macar O.U., 2019. https://github.com/uzaymacar/attention-mechanisms
# Press, O., Smith, N.A., Levy, O., 2019. Improving Transformer Models by
#   Reordering their Sublayers.
from __future__ import absolute_import, division, print_function
import warnings
from enum import IntFlag
from enum import auto as enum_auto
from functools import partial
from odin import backend as bk
from odin import bay
from odin.utils import as_tuple
def _split_and_concat(x, num_heads):
return bk.stack(bk.split(x, num_heads, axis=-1), axis=0)
def _get_num_heads(query):
r""" return the number of attention heads.
return 0 if no multi-heads attention applied """
ndim = query.ndim if hasattr(query, 'ndim') else query.shape.ndims
if ndim == 3:
num_heads = 0
else: # multi-heads attention
num_heads = query.shape[0]
return num_heads
def create_attention_heads(input_dim,
num_heads=2,
depth=1,
use_bias=True,
activation='relu'):
r""" Create multi-heads attention projection
`[batch_size, Tq, dim]` to `[num_heads, batch_size, Tq, dim]`
"""
num_heads = int(num_heads)
depth = int(depth)
if num_heads > 1 and depth > 0:
use_bias = as_tuple(use_bias, N=depth, t=bool)
activation = as_tuple(activation, N=depth)
layers = [
bk.nn.Dense(input_dim * num_heads, use_bias=bias, activation=activ)
for bias, activ in zip(use_bias, activation)
]
layers.append(bk.nn.Lambda(partial(_split_and_concat, num_heads=num_heads)))
return bk.nn.Sequential(layers)
else:
return bk.nn.Identity()
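# A shape-only sketch added for illustration (not part of the original module):
# the Lambda layer built above turns a projected tensor of shape
# `[batch_size, Tq, input_dim * num_heads]` into
# `[num_heads, batch_size, Tq, input_dim]` by splitting the last axis and
# stacking the pieces along a new leading axis, mirroring `_split_and_concat`.
# Plain numpy is used here so the shapes can be checked without the backend.
def _example_multihead_split(num_heads=2, batch_size=3, Tq=5, input_dim=4):
  import numpy as np
  x = np.zeros((batch_size, Tq, input_dim * num_heads))
  heads = np.stack(np.split(x, num_heads, axis=-1), axis=0)
  return heads.shape  # (num_heads, batch_size, Tq, input_dim)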
class AttentionMechanism(IntFlag):
r""" The taxomony of all attention
The meaning of `query`, `value` and `key` depend on the application. In the
case of text similarity, for example, `query` is the sequence embeddings of
the first piece of text and `value` is the sequence embeddings of the second
piece of text. Hence, the attention determines alignment between `query` and
`value`, `key` is usually the same tensor as value.
A mapping from `query` to `key` will be learned during the attention.
To use this method in your attention layer, follow the steps:
* Use `query` tensor of shape `[batch_size, Tq]` and `key` tensor of shape
`[batch_size, Tv]` to calculate the attention `scores`.
* Pass `scores` and `value` tensors to this method. The method applies
`scores_mask`, calculates `attention_distribution = softmax(scores)`, then
    returns `matmul(attention_distribution, value)`.
* Apply `query_mask` and return the result.
The following method call order is recommended:
  * `validate`: make sure no duplicated steps are stored in the `AttentionMechanism`
* `prepare`: prepare the query, key, value and masks according to the given
mechanism.
* `score`: get the attention scores given the query and the key
* `normalize` (optional): normalize the multi-heads attention scores.
* `align`: create attention distribution, use this distribution to align
the query and the value
  All families of attention mechanisms are summarized into the following
  hierarchical structure, in this order:
  The input space of the attention mechanism:
- `Intra` (a.k.a. self-attention):
- `Inter`:
The attending positions within the input space:
- `PosGlobal`: global attention
- `PosLocalM`: local monotonic positioning
- `PosLocalP`: local predictive positioning
The alignment of the position:
- `AlignSoft`:
  - `AlignRelax`: using Gumbel softmax for "relaxed" hard attention
- `AlignHard`:
The score function in which the attention logits are calculated:
- `ScoreLocation`:
- `ScoreAdditive`:
- `ScoreDotProd`:
- `ScoreCosine`:
- `ScoreGeneral`:
  Since many studies try to group attention algorithms into categories, we take
  a more flexible approach that allows an arbitrary path through each stage
  to create the final algorithm, e.g.
- `Intra` to `PosGlobal` to `AlignSoft` to `ScoreLocation`
- `Inter` to `PosGlobal` to `AlignHard` to `ScoreConcat`
and so on.
# TODO:
* Down sampled multihead attention
* Sparse attention
"""
# ====== input space ====== #
Intra = enum_auto() # a.k.a. self-attention
Inter = enum_auto() # a.k.a. inter-attention
# ====== attending positions ====== #
PosGlobal = enum_auto()
PosLocalM = enum_auto() # local monotonic
PosLocalP = enum_auto() # local predictive
# ====== alignment function ====== #
AlignSoft = enum_auto()
AlignHard = enum_auto()
AlignRelax = enum_auto()
# ====== alignment score function ====== #
ScoreLocation = enum_auto()
ScoreAdditive = enum_auto()
ScoreDotProd = enum_auto()
ScoreCosine = enum_auto()
ScoreGeneral = enum_auto()
def __or__(self, other):
# delete the duplicated bit, then setting the new bit
att = super().__or__(other)
for group in _GROUPS:
if other in group:
for g in group:
if g == other:
continue
att = att & ~g
break
return att
def __str__(self):
text = super().__str__()
text = text.replace(self.__class__.__name__ + '.', '')
return text
@property
def is_self_attention(self):
r""" self-attention is intra-attention, in contrast to inter-attention
which determines the alignment between two different sequences. """
self.validate()
if Intra in self:
return True
return False
@property
def is_soft_attention(self):
return AlignSoft in self
@property
def is_hard_attention(self):
return AlignSoft not in self
def validate(self):
def count_and_check(groups):
duplication = [g for g in groups if g in self]
c = len(duplication)
if c == 0:
raise ValueError(
"The created mechanism must contain one of the following: %s" %
', '.join([str(g) for g in groups]))
elif c > 1:
raise ValueError(
"The created mechanism contain duplicated methods of the same stage: %s"
% ', '.join([str(g) for g in duplication]))
for g in _GROUPS:
count_and_check(g)
return self
def prepare(self, query, key=None, value=None, mask=None):
r""" Preparing input for attention model
Returns:
query: Query (or target sequence) tensor of shape `[batch_size, Tq, dim]`.
key: Key (or source sequence) tensor of shape `[batch_size, Tv, dim]`.
value: Value (or source sequence) tensor of shape `[batch_size, Tv, dim]`.
mask: list of the following
* query_mask: A boolean mask `Tensor` of shape `[batch_size, Tq]`.
If given, the output will be zero at the positions where
`mask==False`.
* value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
If given, will apply the mask such that values at positions where
`mask==False` do not contribute to the result.
"""
    # by default, if key is not provided, use the value
query = bk.array(query, ignore_none=True)
key = bk.array(key, ignore_none=True)
value = bk.array(value, ignore_none=True)
# ====== check if intra-attention ====== #
if self.is_self_attention:
if (key is not None or value is not None):
warnings.warn(
"Self-attention (intra-attention) need only query, "
"ignore provided key and value",
category=UserWarning)
if key is not None:
key = query
if value is not None:
value = query
### inter-attention
else:
if key is None:
key = value
      if value is None: # value must always be provided
        raise RuntimeError("value must be given for inter-sequence attention.")
# ====== masks ====== #
    if self.is_self_attention: # only 1 mask is needed
if isinstance(mask, (tuple, list)):
q_mask = mask[0]
else:
q_mask = mask
v_mask = None
else:
q_mask = mask[0] if mask else None
v_mask = mask[1] if mask else None
if v_mask is not None:
if v_mask.shape[1] != value.shape[1]:
raise RuntimeError(
"Value mask has time dimension %d, but value has time dimension %d"
% (v_mask.shape[1], value.shape[1]))
# ====== return ====== #
return query, key, value, \
bk.array(q_mask, ignore_none=True), bk.array(v_mask, ignore_none=True)
def normalize(self, scores):
r""" Normalize attention scores using "fro"-norm that encouraging diversity
among attention heads math::`P = ||A^T*A - I||_2^2` (Kim et al. 2017)
Arguments:
scores: Tensor with shape `[batch_size * num_heads, Tq, Tv]`
"""
# it is easier to assume there is always 1-head at least
num_heads = _get_num_heads(scores)
if num_heads == 0:
return bk.cast(0., scores.dtype)
# [batch_size, num_heads, Tq * Tv]
scoresT = bk.swapaxes(bk.reshape(scores, shape=([0], [1], -1)), 0, 1)
# [batch_size, Tq * Tv, num_heads]
scores = bk.swapaxes(scoresT, 1, 2)
# [batch_size, num_heads, num_heads]
A = bk.matmul(scoresT, scores)
# [batch_size, num_heads, num_heads]
I = bk.eye(num_heads, dtype=A.dtype)
I = bk.expand_dims(I, axis=0)
I = bk.tile(I, reps=A.shape[0], axis=0)
# normalized
P = bk.norm(A - I, p="fro")**2
return P
def score(self,
query,
key=None,
scale=1,
window_width=None,
q_proj=None,
target_proj=None):
r"""
Arguments:
query: Query (or target sequence) tensor of shape
`[batch_size, Tq, dim]` or `[num_heads, batch_size, Tq, dim]` in case
of multi-heads attention.
key: Key (or source sequence) tensor of shape
`[batch_size, Tv, dim]` or `[num_heads, batch_size, Tv, dim]` in case
of multi-heads attention.
scale: single `Scalar` or `Tensor` of shape `[dim]` for scaling
the attention scores, suggested `1/sqrt(dim)` in (Vaswani et al. 2017).
window_width : `None`, `Integer` or `Float` ([0, 1]). The total number of
frames for a single window in local attention (i.e. `left + 1 + right`)
Can be given as a fixed number of frames (`int`), or percentage of
the sequence length (`float`). If `None`, use `Tq`
q_proj : `Dense`, instance of dense or fully connected layer
- for `ScoreLocation`, the number of hidden unit is `1`
- for `ScoreGeneral`, the number of hidden unit is `dim`
target_proj : `Dense`, for predictive local attention, applying
a fully connected network on target sequence (i.e. the query) to
predict the position on source sequence (i.e. the key).
        The layer must have an output dimension equal to 1 and return logit values.
Returns:
Tensor of shape `[num_heads, batch_size, Tq, Tv]`, or
`[num_heads, batch_size, Tq, 1]` if `ScoreLocation`
"""
### Check if multi-head attention is used
num_heads = _get_num_heads(query)
if num_heads > 0:
query = bk.reshape(query, [-1] + [i for i in query.shape[2:]])
if key is not None:
key = bk.reshape(key, [-1] + [i for i in key.shape[2:]])
Tq = query.shape[1]
Tv = Tq if key is None else key.shape[1]
# scale shape is `[]` or `[dim]`
scale = bk.array(scale, dtype=query.dtype)
### Check the window width
if window_width is None:
window_width = Tq
elif window_width < 1:
window_width = window_width * Tv
window_width = int(window_width)
### Locative attention
if AttentionMechanism.ScoreLocation in self:
if PosLocalM in self or PosLocalP in self:
raise NotImplementedError(
"ScoreLocation only support Global attention, but given: %s" %
str(self))
# [batch_size * num_heads, Tq, dim]
scores = bk.reduce_mean(scale) * q_proj(query)
assert scores.shape[-1] == 1, \
" q_proj must have only 1 hidden unit, but given %d" % scores.shape[-1]
### Other score mode need the key tensor
else:
if key is None:
raise ValueError("key must be provided for attention type: %s" %
str(self))
### Attention position (local or global)
if PosLocalM in self:
key = key[:, -window_width:]
elif PosLocalP in self:
pt = bk.sigmoid(target_proj(bk.reshape(query, ([0], -1))))
assert pt.shape[-1] == 1, \
"target_proj must project the query [., Tq * dim] to [., 1], i.e. " + \
"predicting the attention position on source sequence using " + \
"knowledge from target sequence."
pt = Tv * pt # `[batch_size * num_heads, 1]`
# `[batch_size * num_heads, Tv]`
# Eq (10) (Luong et al. 2015)
gauss_est = bk.exp(-bk.square(bk.arange(Tv, dtype=pt.dtype) - pt) /
(2 * bk.square(window_width / 2)))
# `[batch_size * num_heads, 1, Tv]`
gauss_est = bk.expand_dims(gauss_est, axis=1)
### Additive or concat method
if AttentionMechanism.ScoreAdditive in self:
# [batch_size * num_heads, Tq, 1, dim]
q = bk.expand_dims(query, axis=2)
# [batch_size * num_heads, 1, Tv, dim]
k = bk.expand_dims(key, axis=1)
# [batch_size * num_heads, Tq, Tv]
scores = bk.reduce_sum(scale * bk.tanh(q + k), axis=-1)
### Dot product or multiplicative scoring
elif AttentionMechanism.ScoreDotProd in self:
# this is a trick to make attention_scale broadcastable when
# scale_tied=False
scores = bk.matmul(scale * query, bk.swapaxes(key, 1, 2))
### cosine scoring
elif AttentionMechanism.ScoreCosine in self:
# [batch_size * num_heads, Tq, 1, dim]
q = bk.expand_dims(query, axis=2)
# [batch_size * num_heads, 1, Tv, dim]
k = bk.expand_dims(key, axis=1)
# [batch_size * num_heads, Tq, Tv, dim]
scores = (q * k) / (bk.norm(q, p=2) * bk.norm(k, p=2))
scores = bk.reduce_sum(scale * scores, axis=-1, keepdims=False)
### general method with only project on the query
elif AttentionMechanism.ScoreGeneral in self:
query = q_proj(query)
assert query.shape[-1] == key.shape[-1], \
" q_proj must have %d hidden units, but given %d units" % \
(key.shape[-1], query.shape[-1])
scores = bk.matmul(scale * query, bk.swapaxes(key, 1, 2))
else:
raise NotImplementedError("No support for attention_type='%s'" %
str(self))
### applying the local-predictive attention
if PosLocalP in self:
scores = scores * gauss_est
### get back the multi-heads shape
if num_heads > 0:
scores = bk.reshape(scores,
shape=[num_heads, -1] + [i for i in scores.shape[1:]])
return scores
def align(self,
scores,
value,
query=None,
q_mask=None,
v_mask=None,
causal=False,
residual=False,
dropout=0,
temporal_dropout=False,
sample_shape=1,
temperature=0.5,
training=None):
r"""Applies attention scores to the given value tensor.
Arguments:
scores: Attention Scores float tensor of shape
`[num_heads, batch_size, Tq, Tv]`.
value: Value (or source sequence) tensor of shape
`[num_heads, batch_size, Tv, dim]`.
query: Query (or target sequence) tensor of shape
`[num_heads, batch_size, Tq, dim]`.
q_mask: A boolean query mask `Tensor` of shape `[batch_size, Tq]`.
If given, the output will be zero at the positions where
`mask==False`.
v_mask: A boolean value mask `Tensor` of shape `[batch_size, Tv]`.
If given, will apply the mask such that values at positions where
`mask==False` do not contribute to the result.
dropout : Float. Dropout probability of the attention scores.
temporal_dropout : Boolean. If `True`, using the same dropout mask along
temporal axis (i.e. the 1-st dimension)
      sample_shape (`Integer`) : number of MCMC samples for estimating the gradient
of hard attention
temperature: An 0-D `Tensor`, representing the temperature
of a set of RelaxedOneHotCategorical distributions. The temperature
should be positive.
Returns:
attended sequence: Tensor of shape
* `[sample_shape, num_heads, batch_size, Tq, dim]` for (hard + multi-heads)
* `[sample_shape, batch_size, Tq, dim]` for (hard + no-head)
* `[num_heads, batch_size, Tq, dim]` for (soft + multi-heads)
* `[batch_size, Tq, dim]` for (soft + no-head)
attention distribution : for soft attention, return Tensor of shape
* `[num_heads, batch_size, Tq]` for self-attention
* `[num_heads, batch_size, Tq, Tv]` for inter-attention.
for hard attention, return one-hot categorical distribution of shape
* `[sample_shape, num_heads, batch_size, Tq]` for self-attention
* `[sample_shape, num_heads, batch_size, Tq, Tv]` for inter-attention.
      if multi-head attention wasn't used, the `[num_heads]` dimension is omitted.
"""
num_heads = _get_num_heads(scores)
if num_heads == 0:
Tq = scores.shape[1]
Tv = scores.shape[2]
else:
Tq = scores.shape[2]
Tv = scores.shape[3]
if value is None:
if query is None:
raise ValueError("both query and value are None, "
"at least one of them must be given")
value = query
# ====== Causal mask ====== #
if causal:
# Creates a lower triangular mask, so position i cannot attend to
# positions j>i. This prevents the flow of information from the future
# into the past.
scores_shape = scores.shape
# causal_mask_shape = [1, Tq, Tv].
causal_mask_shape = bk.concatenate(
[bk.ones_like(scores_shape[:-2]), scores_shape[-2:]], axis=0)
causal_mask = bk.tril_mask(causal_mask_shape)
else:
causal_mask = None
if v_mask is not None:
# LocalM applied
if PosLocalM in self:
v_mask = v_mask[:, -Tv:]
# Mask of shape [batch_size, 1, Tv].
v_mask = bk.expand_dims(v_mask, axis=-2)
v_mask = bk.cast(v_mask, 'bool')
if num_heads > 0:
v_mask = bk.expand_dims(v_mask, axis=0)
scores_mask = bk.logical_and(v_mask, causal_mask)
### applying the scores mask
if scores_mask is not None:
padding_mask = bk.logical_not(scores_mask)
# Bias so padding positions do not contribute to attention distribution.
scores -= 1.e9 * bk.cast(padding_mask, dtype=scores.dtype)
# ====== convert attention score to distribution ====== #
# if the last dimension is 1, no point for applying softmax, hence,
# softmax to the second last dimension
### soft attention
if AlignSoft in self:
attention_distribution = bk.softmax(
scores, axis=-2 if scores.shape[-1] == 1 else -1)
### relaxed hard attention
elif AlignRelax in self:
attention_distribution = bay.distributions.RelaxedOneHotCategorical(
temperature=temperature,
logits=bk.squeeze(scores, axis=-1)
if scores.shape[-1] == 1 else scores)
fsample = partial(bay.Distribution.sample, sample_shape=sample_shape)
attention_distribution = bay.coercible_tensor(
attention_distribution, convert_to_tensor_fn=fsample)
### hard attention
elif AlignHard in self:
attention_distribution = bay.distributions.OneHotCategorical(
logits=bk.squeeze(scores, axis=-1)
if scores.shape[-1] == 1 else scores,
dtype=value.dtype)
fsample = partial(bay.Distribution.sample, sample_shape=sample_shape)
attention_distribution = bay.coercible_tensor(
attention_distribution, convert_to_tensor_fn=fsample)
# ====== dropout the attention scores ====== #
attention = bk.dropout(attention_distribution,
p_drop=dropout,
axis=1 if temporal_dropout else None,
training=training and dropout > 0)
# ====== applying the attention ====== #
if self.is_self_attention and ScoreLocation in self:
result = bk.expand_dims(bk.array(attention), axis=-1) * value \
if attention.shape[-1] != 1 else \
attention * value
else:
if PosLocalM in self:
value = value[:, -Tv:] if num_heads == 0 else value[:, :, -Tv:]
result = bk.matmul(attention, value)
# ====== applying the Query mask ====== #
if q_mask is not None:
assert q_mask.shape[1] == Tq,\
"Query mask has time dimension %d, but query has time dimension %d" \
% (q_mask.shape[1], Tq)
# Mask of shape [batch_size, Tq, 1].
q_mask = bk.expand_dims(q_mask, axis=-1)
result *= bk.cast(q_mask, dtype=result.dtype)
# ====== residual connection ====== #
if residual:
if query is None:
raise ValueError("query must be given for residual connection")
result += query
# ====== return ====== #
return result, attention_distribution
def compute_mask(self, mask=None):
if mask:
q_mask = mask[0] if isinstance(mask, (tuple, list)) else mask
return bk.array(q_mask)
# shortcut to make it easier
Intra = AttentionMechanism.Intra
Inter = AttentionMechanism.Inter
PosGlobal = AttentionMechanism.PosGlobal
PosLocalM = AttentionMechanism.PosLocalM
PosLocalP = AttentionMechanism.PosLocalP
AlignSoft = AttentionMechanism.AlignSoft
AlignRelax = AttentionMechanism.AlignRelax
AlignHard = AttentionMechanism.AlignHard
ScoreLocation = AttentionMechanism.ScoreLocation
ScoreAdditive = AttentionMechanism.ScoreAdditive
ScoreDotProd = AttentionMechanism.ScoreDotProd
ScoreCosine = AttentionMechanism.ScoreCosine
ScoreGeneral = AttentionMechanism.ScoreGeneral
_GROUPS = [
(Intra, Inter), \
(PosGlobal, PosLocalM, PosLocalP), \
(AlignSoft, AlignHard, AlignRelax), \
(ScoreLocation, ScoreAdditive, ScoreDotProd, ScoreCosine, ScoreGeneral)
]
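# A minimal composition sketch added for illustration (not part of the original
# module): a complete mechanism is built by OR-ing exactly one flag from each
# group in `_GROUPS`, and `validate()` rejects combinations that miss a stage
# or mix flags from the same stage.
if __name__ == '__main__':
  luong_style = Inter | PosGlobal | AlignSoft | ScoreDotProd
  luong_style.validate()  # passes: exactly one flag per stage
  print(luong_style)  # prints the combined flags, e.g. "Inter|PosGlobal|..."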
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.platform import test
def _weights_type_combinations():
return combinations.combine(weights_type=["list", "tensor", "dataset"])
def _get_weights_of_type(weights_list, weights_type):
if weights_type == "list":
return weights_list
if weights_type == "tensor":
return ops.convert_to_tensor(weights_list, name="weights")
return dataset_ops.Dataset.from_tensors(weights_list).repeat()
class DirectedInterleaveDatasetTest(test_base.DatasetTestBase,
parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testBasic(self):
selector_dataset = dataset_ops.Dataset.range(10).repeat(100)
input_datasets = [
dataset_ops.Dataset.from_tensors(i).repeat(100) for i in range(10)
]
dataset = dataset_ops._DirectedInterleaveDataset(selector_dataset,
input_datasets)
next_element = self.getNext(dataset)
for _ in range(100):
for i in range(10):
self.assertEqual(i, self.evaluate(next_element()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
def _normalize(self, vec):
return vec / vec.sum()
def _chi2(self, expected, actual):
actual = np.asarray(actual)
expected = np.asarray(expected)
diff = actual - expected
chi2 = np.sum(diff * diff / expected, axis=0)
return chi2
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_weights_type_combinations()))
def testSampleFromDatasets(self, weights_type):
random_seed.set_random_seed(1619)
num_samples = 5000
rand_probs = self._normalize(np.random.random_sample((5,)))
# Use chi-squared test to assert that the observed distribution matches the
# expected distribution. Based on the implementation in
# "third_party/tensorflow/python/kernel_tests/multinomial_op_test.py".
for probs in [[.85, .05, .1], rand_probs, [1.]]:
weights = _get_weights_of_type(np.asarray(probs), weights_type)
classes = len(probs)
# Create a dataset that samples each integer in `[0, num_datasets)`
# with probability given by `weights[i]`.
dataset = dataset_ops.Dataset.sample_from_datasets([
dataset_ops.Dataset.from_tensors(i).repeat() for i in range(classes)
], weights)
dataset = dataset.take(num_samples)
next_element = self.getNext(dataset)
freqs = np.zeros([classes])
for _ in range(num_samples):
freqs[self.evaluate(next_element())] += 1
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
self.assertLess(self._chi2(probs, freqs / num_samples), 1e-2)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_weights_type_combinations()))
def testSampleFromDatasetsStoppingOnEmptyDataset(self, weights_type):
# Sampling stops when the first dataset is exhausted.
weights = _get_weights_of_type(np.asarray([.5, .1, .4]), weights_type)
datasets = [
dataset_ops.Dataset.from_tensors(np.int64(-1)),
dataset_ops.Dataset.from_tensors(np.int64(1)).repeat(),
dataset_ops.Dataset.range(10).repeat()
]
sample_dataset = dataset_ops.Dataset.sample_from_datasets(
datasets, weights=weights, stop_on_empty_dataset=True)
samples_list = self.getIteratorOutput(self.getNext(sample_dataset))
self.assertEqual(samples_list.count(-1), 1)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_weights_type_combinations()))
def testSampleFromDatasetsSkippingEmptyDataset(self, weights_type):
# Sampling skips the first dataset after it becomes empty.
weights = _get_weights_of_type(np.asarray([.5, .1, .4]), weights_type)
datasets = [
dataset_ops.Dataset.from_tensors(np.int64(-1)),
dataset_ops.Dataset.from_tensors(np.int64(1)).repeat(),
dataset_ops.Dataset.range(10).repeat()
]
sample_dataset = dataset_ops.Dataset.sample_from_datasets(
datasets, weights=weights, stop_on_empty_dataset=False).take(100)
samples_list = self.getIteratorOutput(self.getNext(sample_dataset))
self.assertLen(samples_list, 100)
self.assertEqual(samples_list.count(-1), 1)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_weights_type_combinations()))
def testSampleFromDatasetsWithZeroWeight(self, weights_type):
# Sampling stops when the second dataset is exhausted.
weights = _get_weights_of_type(np.asarray([0., 1.]), weights_type)
datasets = [
dataset_ops.Dataset.from_tensors(-1).repeat(2),
dataset_ops.Dataset.from_tensors(1).repeat(2)
]
sample_dataset = dataset_ops.Dataset.sample_from_datasets(
datasets, weights=weights, stop_on_empty_dataset=True)
self.assertDatasetProduces(sample_dataset, [1, 1])
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
_weights_type_combinations()))
def testSampleFromEmptyDataset(self, weights_type):
weights = _get_weights_of_type(np.asarray([1., 0.]), weights_type)
datasets = [
dataset_ops.Dataset.range(0),
dataset_ops.Dataset.range(1).repeat()
]
sample_dataset = dataset_ops.Dataset.sample_from_datasets(
datasets, weights=weights, stop_on_empty_dataset=True)
self.assertDatasetProduces(sample_dataset, [])
@combinations.generate(test_base.default_test_combinations())
def testSampleFromDatasetsSkippingDatasetsWithZeroWeight(self):
# Sampling skips the first dataset.
weights = np.asarray([0., 1.])
datasets = [
dataset_ops.Dataset.from_tensors(-1).repeat(),
dataset_ops.Dataset.from_tensors(1)
]
sample_dataset = dataset_ops.Dataset.sample_from_datasets(
datasets, weights=weights, stop_on_empty_dataset=False)
self.assertDatasetProduces(sample_dataset, [1])
@combinations.generate(test_base.default_test_combinations())
def testSampleFromDatasetsAllWeightsAreZero(self):
# Sampling skips both datasets.
weights = np.asarray([0., 0.])
datasets = [
dataset_ops.Dataset.from_tensors(-1).repeat(),
dataset_ops.Dataset.from_tensors(1).repeat()
]
sample_dataset = dataset_ops.Dataset.sample_from_datasets(
datasets, weights=weights, stop_on_empty_dataset=False)
self.assertDatasetProduces(sample_dataset, [])
@combinations.generate(test_base.default_test_combinations())
def testSampleFromDatasetsCardinality(self):
ds1 = dataset_ops.Dataset.from_tensors([1.0]).repeat()
ds2 = dataset_ops.Dataset.from_tensors([2.0]).repeat()
ds = dataset_ops.Dataset.sample_from_datasets([ds1, ds2])
self.assertEqual(self.evaluate(ds.cardinality()), dataset_ops.INFINITE)
@combinations.generate(test_base.default_test_combinations())
def testSampleFromDatasetsNested(self):
ds1 = dataset_ops.Dataset.range(10).window(2)
ds2 = dataset_ops.Dataset.range(10, 20).window(2)
ds = dataset_ops.Dataset.sample_from_datasets([ds1, ds2],
weights=[0.3, 0.7])
ds = ds.flat_map(lambda x: x)
next_element = self.getNext(ds)
self.evaluate(next_element())
@combinations.generate(test_base.default_test_combinations())
def testChooseFromDatasets(self):
words = [b"foo", b"bar", b"baz"]
datasets = [dataset_ops.Dataset.from_tensors(w).repeat() for w in words]
choice_array = np.random.randint(3, size=(15,), dtype=np.int64)
choice_dataset = dataset_ops.Dataset.from_tensor_slices(choice_array)
dataset = dataset_ops.Dataset.choose_from_datasets(datasets, choice_dataset)
next_element = self.getNext(dataset)
for i in choice_array:
self.assertEqual(words[i], self.evaluate(next_element()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(test_base.default_test_combinations())
def testChooseFromDatasetsStoppingOnEmptyDataset(self):
datasets = [
dataset_ops.Dataset.from_tensors(b"foo").repeat(2),
dataset_ops.Dataset.from_tensors(b"bar").repeat(),
dataset_ops.Dataset.from_tensors(b"baz").repeat(),
]
choice_array = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int64)
choice_dataset = dataset_ops.Dataset.from_tensor_slices(choice_array)
dataset = dataset_ops.Dataset.choose_from_datasets(
datasets, choice_dataset, stop_on_empty_dataset=True)
self.assertDatasetProduces(dataset, [b"foo", b"foo"])
@combinations.generate(test_base.default_test_combinations())
def testChooseFromDatasetsSkippingEmptyDatasets(self):
datasets = [
dataset_ops.Dataset.from_tensors(b"foo").repeat(2),
dataset_ops.Dataset.from_tensors(b"bar").repeat(),
dataset_ops.Dataset.from_tensors(b"baz").repeat(),
]
choice_array = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int64)
choice_dataset = dataset_ops.Dataset.from_tensor_slices(choice_array)
dataset = dataset_ops.Dataset.choose_from_datasets(
datasets, choice_dataset, stop_on_empty_dataset=False)
# Chooses 2 elements from the first dataset while the selector specifies 3.
self.assertDatasetProduces(
dataset,
[b"foo", b"foo", b"bar", b"bar", b"bar", b"baz", b"baz", b"baz"])
@combinations.generate(test_base.default_test_combinations())
def testChooseFromDatasetsChoiceDatasetIsEmpty(self):
datasets = [
dataset_ops.Dataset.from_tensors(b"foo").repeat(),
dataset_ops.Dataset.from_tensors(b"bar").repeat(),
dataset_ops.Dataset.from_tensors(b"baz").repeat(),
]
dataset = dataset_ops.Dataset.choose_from_datasets(
datasets,
choice_dataset=dataset_ops.Dataset.range(0),
stop_on_empty_dataset=False)
self.assertDatasetProduces(dataset, [])
@combinations.generate(test_base.default_test_combinations())
def testChooseFromDatasetsNested(self):
ds1 = dataset_ops.Dataset.range(10).window(2)
ds2 = dataset_ops.Dataset.range(10, 20).window(2)
choice_dataset = dataset_ops.Dataset.range(2).repeat(5)
ds = dataset_ops.Dataset.choose_from_datasets([ds1, ds2], choice_dataset)
ds = ds.flat_map(lambda x: x)
expected = []
for i in range(5):
for j in range(2):
expected.extend([10*j + 2*i, 10*j + 2*i + 1])
self.assertDatasetProduces(ds, expected)
@combinations.generate(test_base.default_test_combinations())
def testErrors(self):
with self.assertRaisesRegex(ValueError, r"should have the same length"):
dataset_ops.Dataset.sample_from_datasets(
[dataset_ops.Dataset.range(10),
dataset_ops.Dataset.range(20)],
weights=[0.25, 0.25, 0.25, 0.25])
with self.assertRaisesRegex(TypeError, "`tf.float32` or `tf.float64`"):
dataset_ops.Dataset.sample_from_datasets(
[dataset_ops.Dataset.range(10),
dataset_ops.Dataset.range(20)],
weights=[1, 1])
with self.assertRaisesRegex(TypeError, "must have the same type"):
dataset_ops.Dataset.sample_from_datasets([
dataset_ops.Dataset.from_tensors(0),
dataset_ops.Dataset.from_tensors(0.0)
])
with self.assertRaisesRegex(
ValueError, r"Invalid `datasets`. `datasets` should not be empty."):
dataset_ops.Dataset.sample_from_datasets(datasets=[], weights=[])
with self.assertRaisesRegex(TypeError, "tf.int64"):
dataset_ops.Dataset.choose_from_datasets(
[
dataset_ops.Dataset.from_tensors(0),
dataset_ops.Dataset.from_tensors(1)
],
choice_dataset=dataset_ops.Dataset.from_tensors(1.0))
with self.assertRaisesRegex(TypeError, "scalar"):
dataset_ops.Dataset.choose_from_datasets(
[
dataset_ops.Dataset.from_tensors(0),
dataset_ops.Dataset.from_tensors(1)
],
choice_dataset=dataset_ops.Dataset.from_tensors([1.0]))
with self.assertRaisesRegex(errors.InvalidArgumentError, "out of range"):
dataset = dataset_ops.Dataset.choose_from_datasets(
[dataset_ops.Dataset.from_tensors(0)],
choice_dataset=dataset_ops.Dataset.from_tensors(
constant_op.constant(1, dtype=dtypes.int64)))
next_element = self.getNext(dataset)
self.evaluate(next_element())
with self.assertRaisesRegex(
ValueError, r"Invalid `datasets`. `datasets` should not be empty."):
dataset_ops.Dataset.choose_from_datasets(
datasets=[], choice_dataset=dataset_ops.Dataset.from_tensors(1.0))
with self.assertRaisesRegex(
TypeError, r"`choice_dataset` should be a `tf.data.Dataset`"):
datasets = [dataset_ops.Dataset.range(42)]
dataset_ops.Dataset.choose_from_datasets(datasets, choice_dataset=None)
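# A small standalone sketch added for illustration (not part of the original
# test suite) of the chi-squared check used in `testSampleFromDatasets` above:
# observed sampling frequencies close to the expected probabilities yield a
# statistic well below the 1e-2 threshold asserted by the test.
def _example_chi2_check():
  expected = np.array([0.85, 0.05, 0.10])
  observed = np.array([0.84, 0.06, 0.10])  # hypothetical sampled frequencies
  diff = observed - expected
  return np.sum(diff * diff / expected)  # ~0.0021, below the 1e-2 bound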
class SampleFromDatasetsCheckpointTest(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_dataset(self, probs, num_samples):
datasets = [
dataset_ops.Dataset.from_tensors(i).repeat(None)
for i in range(len(probs))
]
dataset = dataset_ops.Dataset.sample_from_datasets(
datasets, probs, seed=1813)
return dataset.take(num_samples)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations()))
def test(self, verify_fn):
verify_fn(
self, lambda: self._build_dataset([0.5, 0.5], 100), num_outputs=100)
if __name__ == "__main__":
test.main()
|
|
import contextlib
import datetime
import functools
import mock
from django.http import HttpRequest
from django.utils import timezone
from nose import SkipTest
from nose.tools import assert_equal, assert_not_equal
from framework.auth import Auth
from framework.celery_tasks.handlers import celery_teardown_request
from osf.models import Sanction
from tests.base import get_default_metaschema
from website.archiver import ARCHIVER_SUCCESS
from website.archiver import listeners as archiver_listeners
def requires_module(module):
def decorator(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
try:
__import__(module)
except ImportError:
raise SkipTest()
return fn(*args, **kwargs)
return wrapper
return decorator
def assert_logs(log_action, node_key, index=-1):
"""A decorator to ensure a log is added during a unit test.
:param str log_action: NodeLog action
:param str node_key: key to get Node instance from self
:param int index: list index of log to check against
Example usage:
@assert_logs(NodeLog.UPDATED_FIELDS, 'node')
def test_update_node(self):
self.node.update({'title': 'New Title'}, auth=self.auth)
TODO: extend this decorator to check log param correctness?
"""
def outer_wrapper(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
node = getattr(self, node_key)
last_log = node.logs.latest()
func(self, *args, **kwargs)
node.reload()
new_log = node.logs.order_by('-date')[-index - 1]
assert_not_equal(last_log._id, new_log._id)
assert_equal(new_log.action, log_action)
node.save()
return wrapper
return outer_wrapper
def assert_preprint_logs(log_action, preprint_key, index=-1):
"""A decorator to ensure a log is added during a unit test.
:param str log_action: PreprintLog action
:param str preprint_key: key to get Preprint instance from self
:param int index: list index of log to check against
Example usage:
@assert_logs(PreprintLog.UPDATED_FIELDS, 'preprint')
def test_update_preprint(self):
self.preprint.update({'title': 'New Title'}, auth=self.auth)
TODO: extend this decorator to check log param correctness?
"""
def outer_wrapper(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
preprint = getattr(self, preprint_key)
last_log = preprint.logs.latest()
func(self, *args, **kwargs)
preprint.reload()
new_log = preprint.logs.order_by('-created')[-index - 1]
assert_not_equal(last_log._id, new_log._id)
assert_equal(new_log.action, log_action)
preprint.save()
return wrapper
return outer_wrapper
def assert_not_logs(log_action, node_key, index=-1):
def outer_wrapper(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
node = getattr(self, node_key)
last_log = node.logs.latest()
func(self, *args, **kwargs)
node.reload()
new_log = node.logs.order_by('-date')[-index - 1]
assert_not_equal(new_log.action, log_action)
assert_equal(last_log._id, new_log._id)
node.save()
return wrapper
return outer_wrapper
def assert_equals(item_one, item_two):
item_one.sort()
item_two.sort()
assert item_one == item_two
@contextlib.contextmanager
def assert_latest_log(log_action, node_key, index=0):
node = node_key
last_log = node.logs.latest()
node.reload()
yield
new_log = node.logs.order_by('-date')[index] if hasattr(last_log, 'date') else node.logs.order_by('-created')[index]
assert last_log._id != new_log._id
assert new_log.action == log_action
@contextlib.contextmanager
def assert_latest_log_not(log_action, node_key, index=0):
node = node_key
last_log = node.logs.latest()
node.reload()
yield
new_log = node.logs.order_by('-date')[index] if hasattr(last_log, 'date') else node.logs.order_by('-created')[index]
assert new_log.action != log_action
assert last_log._id == new_log._id
@contextlib.contextmanager
def mock_archive(project, schema=None, auth=None, data=None, parent=None,
embargo=False, embargo_end_date=None,
retraction=False, justification=None, autoapprove_retraction=False,
autocomplete=True, autoapprove=False):
""" A context manager for registrations. When you want to call Node#register_node in
a test but do not want to deal with any of this side effects of archiver, this
helper allows for creating a registration in a safe fashion.
:param bool embargo: embargo the registration (rather than RegistrationApproval)
:param bool autocomplete: automatically finish archival?
:param bool autoapprove: automatically approve registration approval?
:param bool retraction: retract the registration?
:param str justification: a justification for the retraction
:param bool autoapprove_retraction: automatically approve retraction?
Example use:
project = ProjectFactory()
with mock_archive(project) as registration:
assert_true(registration.is_registration)
assert_true(registration.archiving)
assert_true(registration.is_pending_registration)
with mock_archive(project, autocomplete=True) as registration:
assert_true(registration.is_registration)
assert_false(registration.archiving)
assert_true(registration.is_pending_registration)
with mock_archive(project, autocomplete=True, autoapprove=True) as registration:
assert_true(registration.is_registration)
assert_false(registration.archiving)
assert_false(registration.is_pending_registration)
"""
schema = schema or get_default_metaschema()
auth = auth or Auth(project.creator)
data = data or ''
with mock.patch('framework.celery_tasks.handlers.enqueue_task'):
registration = project.register_node(
schema=schema,
auth=auth,
data=data,
parent=parent,
)
if embargo:
embargo_end_date = embargo_end_date or (
timezone.now() + datetime.timedelta(days=20)
)
registration.root.embargo_registration(
project.creator,
embargo_end_date
)
else:
registration.root.require_approval(project.creator)
if autocomplete:
root_job = registration.root.archive_job
root_job.status = ARCHIVER_SUCCESS
root_job.sent = False
root_job.done = True
root_job.save()
sanction = registration.root.sanction
        with mock.patch.object(root_job, 'archive_tree_finished', mock.Mock(return_value=True)):
            with mock.patch('website.archiver.tasks.archive_success.delay', mock.Mock()):
                archiver_listeners.archive_callback(registration)
if autoapprove:
sanction = registration.root.sanction
sanction.state = Sanction.APPROVED
# save or _on_complete no worky
sanction.save()
sanction._on_complete(project.creator)
sanction.save()
if retraction:
justification = justification or 'Because reasons'
retraction = registration.retract_registration(project.creator, justification=justification)
if autoapprove_retraction:
retraction.state = Sanction.APPROVED
retraction._on_complete(project.creator)
retraction.save()
registration.save()
yield registration
def make_drf_request(*args, **kwargs):
from rest_framework.request import Request
http_request = HttpRequest()
# The values here don't matter; they just need
# to be present
http_request.META['SERVER_NAME'] = 'localhost'
http_request.META['SERVER_PORT'] = 8000
# A DRF Request wraps a Django HttpRequest
return Request(http_request, *args, **kwargs)
def make_drf_request_with_version(version='2.0', *args, **kwargs):
req = make_drf_request(*args, **kwargs)
req.parser_context['kwargs'] = {'version': 'v2'}
req.version = version
return req
class MockAuth(object):
def __init__(self, user):
self.user = user
self.logged_in = True
self.private_key = None
self.private_link = None
mock_auth = lambda user: mock.patch('framework.auth.Auth.from_kwargs', mock.Mock(return_value=MockAuth(user)))
def unique(factory):
"""
    Turns a factory function into a new factory function that guarantees unique return
values. Note this uses regular item equivalence to check uniqueness, so this may not
behave as expected with factories with complex return values.
Example use:
unique_name_factory = unique(fake.name)
unique_name = unique_name_factory()
"""
used = []
@functools.wraps(factory)
def wrapper():
item = factory()
over = 0
while item in used:
if over > 100:
                raise RuntimeError('Tried 100 times to generate a unique value, stopping.')
item = factory()
over += 1
used.append(item)
return item
return wrapper
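# A small self-contained sketch of unique() with a plain Python factory
# (random.choice over a tiny pool), independent of the faker-style factories
# used elsewhere in the tests.
def _example_unique_usage():
    import random
    pool = ['alpha', 'beta', 'gamma', 'delta']
    unique_word = unique(lambda: random.choice(pool))
    # Each call returns a value not seen before; at most len(pool) distinct
    # values can be produced before the retry limit is hit.
    return [unique_word() for _ in range(3)]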
@contextlib.contextmanager
def run_celery_tasks():
yield
celery_teardown_request()
|
|
from SCons.Script import ARGUMENTS
import os
def noColors():
try:
return int(ARGUMENTS['colors']) == 0
except KeyError:
return False
def xterm_color(string, color):
colors = {'none': "0",
'black': "0;30",
'red': "0;31",
'green': "0;32",
'brown': "0;33",
'blue': "0;34",
'purple': "0;35",
'cyan': "0;36",
'light-gray': "0;37",
              'dark-gray': "1;30",
'light-red': "1;31",
'light-green': "1;32",
'yellow': "1;33",
'light-blue': "1;34",
'light-purple': "1;35",
'light-cyan': "1;36",
'white': "1;37"}
return "\033[%sm%s\033[0m" % (colors[color], string)
def isPlatform(platform):
import sys
return platform in sys.platform
def isWindows():
return isPlatform("win32")
def isLinux():
return isPlatform("linux")
def isOSX104():
import platform
return isPlatform("darwin") and platform.processor() == 'powerpc'
# Assume 10.6 and up
def isOSX():
return isPlatform("darwin") and not isOSX104()
# todo: figure out when we are on an xterm
def isXterm():
# assume linux and osx are ok
return not isWindows()
def colorize(string, color):
if noColors():
return string
if isXterm():
return xterm_color(string, color)
return string
def colorResult(what):
if what != 0:
return colorize('yes', 'light-green')
else:
return colorize('no', 'light-red')
def peg_to_cpp(target, source, env):
import sys
sys.path.append("src/mugen/parser")
sys.path.append(".")
import peg, re, cpp_generator
name = source[0].name
parser = peg.make_peg_parser(re.sub('\..*', '', name))
fout = open(target[0].path, 'w')
fout.write(cpp_generator.generate(parser(source[0].path)))
fout.write('\n')
fout.close()
# Build a cpp file from a peg definition
def pegBuilder(environment):
from SCons.Builder import Builder
from SCons.Action import Action
return Builder(action = Action(peg_to_cpp, environment['PEG_MAKE']),
suffix = '.cpp',
src_suffix = '.peg')
def readExec(program):
import os
try:
return os.popen(program).readline().replace("\n",'')
except OSError:
return ""
# Try to execute a script that will produce some compiler flags but fail
# gracefully if the script dies or can't be found
def safeParseConfig(environment, config):
# redirects stderr, not super safe
def version1():
import sys
out = open('fail.log', 'w')
old_stderr = sys.stderr
try:
sys.stderr = out
environment.ParseConfig(config)
out.close()
sys.stderr = old_stderr
except Exception, e:
out.close()
sys.stderr = old_stderr
raise e
# use the subprocess module to pass the output of stdout directly
# to mergeflags and trash stderr
# Not done yet!! This requires python 2.4
def version2():
import subprocess
process = subprocess.Popen(config.split(' '), stdout = subprocess.PIPE)
# p = subprocess.Popen(["ruby", "-e", code], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
        out = process.stdout.readline().strip()
environment.MergeFlags(out)
version1()
# Create a function that pulls out some key from the shell environment
def makeUseEnvironment(key, default):
def use():
import os
try:
return int(os.environ[key]) == 1
except KeyError:
return default
return use
def makeUseArgument(key, default):
def use():
try:
return int(ARGUMENTS[key]) == 1
except KeyError:
return default
return use
useGch = makeUseArgument('gch', True)
usePrx = makeUseEnvironment('prx', False)
isVerbose = makeUseArgument('verbose', False)
useIntel = makeUseEnvironment('intel', False)
useMinpspw = makeUseEnvironment('minpspw', False)
useAndroid = makeUseEnvironment('android', False)
useAndroidX86 = makeUseEnvironment('androidx86', False)
useIos = makeUseEnvironment('ios', False)
usePs3 = makeUseEnvironment('ps3', False)
useNDS = makeUseEnvironment('nds', False)
useDingoo = makeUseEnvironment('dingoo', False)
useXenon = makeUseEnvironment('xenon', False)
usePandora = makeUseEnvironment('pandora', False)
useWii = makeUseEnvironment('wii', False)
useLLVM = makeUseEnvironment('llvm', False)
useNacl = makeUseEnvironment('nacl', False)
useMpg123 = makeUseEnvironment('mpg123', False)
useMad = makeUseEnvironment('mad', False)
useGCW = makeUseEnvironment('gcw', False)
nativeCompile = makeUseEnvironment('native', False)
enableProfiled = makeUseEnvironment('PROFILE', False)
showTiming = makeUseEnvironment('timing', False)
useAllegro4 = makeUseEnvironment('allegro4', False)
def useAllegro():
def byEnv():
try:
return os.environ['ALLEGRO'] == '1'
except KeyError:
return False
def byArgument():
try:
return int(ARGUMENTS['allegro']) == 1
except KeyError:
return False
return byEnv() or byArgument()
def useAllegro5():
def byEnv():
try:
return os.environ['ALLEGRO5'] == '1'
except KeyError:
return False
def byArgument():
try:
return int(ARGUMENTS['allegro5']) == 1
except KeyError:
return False
# FIXME: hack to specify android here
return True or byEnv() or byArgument() or useAndroid() or useAndroidX86()
def useSDL():
return not useAllegro() and not useAllegro5()
# Replace standard tool invocations with nice colored text
def lessVerbose(env):
link_color = 'light-red'
ar_color = 'yellow'
ranlib_color = 'light-purple'
peg_color = 'light-cyan'
env['CCCOMSTR'] = "%s %s" % (colorize('Compiling c file', 'light-green'), colorize('$SOURCE', 'light-blue'))
env['SHCCCOMSTR'] = "%s %s" % (colorize('Compiling c file', 'light-green'), colorize('$SOURCE', 'light-blue'))
env['CXXCOMSTR'] = "%s %s" % (colorize('Compiling c++ file', 'light-green'), colorize('$SOURCE', 'light-blue'))
env['SHCXXCOMSTR'] = "%s %s" % (colorize('Compiling c++ file', 'light-green'), colorize('$SOURCE', 'light-blue'))
env['LINKCOMSTR'] = "%s %s" % (colorize('Linking', link_color), colorize('$TARGET', 'light-blue'))
env['SHLINKCOMSTR'] = "%s %s" % (colorize('Linking', link_color), colorize('$TARGET', 'light-blue'))
env['ARCOMSTR'] = "%s %s" % (colorize('Building library', ar_color), colorize('$TARGET', 'light-blue'))
env['RANLIBCOMSTR'] = "%s %s" % (colorize('Indexing library', ranlib_color), colorize('$TARGET', 'light-blue'))
env['PEG_MAKE'] = "%s %s" % (colorize('Creating peg parser', peg_color), colorize('$TARGET', 'light-blue'))
return env
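# A minimal sketch of how the quieter output is typically enabled from an
# SConstruct: only replace the tool command strings when the 'verbose'
# argument is off. The bare Environment() construction here is illustrative;
# the real build scripts configure many more variables.
def exampleQuietEnvironment():
    from SCons.Environment import Environment
    env = Environment()
    if not isVerbose():
        env = lessVerbose(env)
    return env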
def configure_backend(environment, backends, custom_tests):
config = environment.Configure(custom_tests = custom_tests)
if not config.CheckCompiler():
config.Finish()
raise Exception("No c++ compiler found. Install gcc or clang")
class OkBackend(Exception):
pass
class NoBackend(Exception):
pass
try:
for backend in backends:
if backend == 'SDL' and config.CheckSDL():
environment.Append(CPPDEFINES = ['USE_SDL'])
environment['PAINTOWN_BACKEND'] = 'sdl'
environment.Append(PAINTOWN_PLATFORM = ['sdl'])
raise OkBackend()
if backend == 'Allegro4' and config.CheckAllegro4():
environment.Append(CPPDEFINES = ['USE_ALLEGRO'])
environment['PAINTOWN_BACKEND'] = 'allegro4'
environment.Append(PAINTOWN_PLATFORM = ['allegro4'])
raise OkBackend()
if backend == 'Allegro5' and config.CheckAllegro5():
environment.Append(CPPDEFINES = ['USE_ALLEGRO5'])
environment['PAINTOWN_BACKEND'] = 'allegro5'
environment.Append(PAINTOWN_PLATFORM = ['allegro5'])
raise OkBackend()
config.Finish()
raise NoBackend()
except OkBackend:
pass
return config.Finish()
def checkCompiler(context):
context.Message("Checking for a compiler (%s) ... " % context.env['CXX'])
ok = context.TryCompile("""
int main(int argc, char ** argv){
return 0;
}
""", ".cpp")
context.Result(colorResult(ok))
return ok
def detectCPUs():
    """
    Detects the number of CPUs on a system. Cribbed from pp.
    """
    import os
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if "SC_NPROCESSORS_ONLN" in os.sysconf_names:
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
# Windows:
if "NUMBER_OF_PROCESSORS" in os.environ:
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"]);
if ncpus > 0:
return ncpus
return 1 # Default
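# A minimal sketch of the usual consumer of detectCPUs(): wiring the CPU count
# into SCons as the default -j value. SetOption comes from the SCons.Script
# API; calling this from an SConstruct/SConscript is assumed here.
def exampleParallelJobs():
    from SCons.Script import SetOption
    SetOption('num_jobs', detectCPUs())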
|
|
# Copyright 2011,2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
This is a messenger service for working with the log.
It does two things:
a) Listen on the "log" channel. You can send messages to this
     channel with keys lowerLevels/raiseLevels/setLevels to adjust
global log levels. See _process_commands() for more info.
b) You can join any channel named log_<something> (your session
ID is a good choice for the something), and a LogBot will
join it. This will result in receiving log messages. In
your join message (or afterwards), you can configure levels,
the message formats, etc. See LogService for more details.
"""
from pox.core import core
from pox.messenger import *
from pox.lib.revent.revent import autoBindEvents
import logging
import traceback
log = core.getLogger()
# These attributes are copied verbatim from the log record
_attributes = [
'created','filename','funcName','levelname','levelno','lineno',
'module','msecs','name','pathname','process','processName',
'relativeCreated','thread','threadName','args',
]
class LogFilter (object):
"""
Filters messages from the web server component
It's a nasty situation when you're using the HTTP messenger transport
to view the log when in debug mode, as every webserver log message
creates a messenger message which creates a webserver message, ...
This just turns off debug messages from the webserver.
"""
def filter (self, record):
if record.levelno != logging.DEBUG: return True
if record.name == "web.webcore.server": return False
return True
class LogHandler (logging.Handler):
"""
A Python logging.Handler for the messenger
Accepts dictionaries with configuration info:
KEY VALUE
level Minimum log level to output (probably one of CRITICAL,
ERROR, WARNING, INFO or DEBUG)
format fmt argument to logging.Formatter
dateFormat datefmt argument to logging.Formatter
json true if you want a bunch of attributes from the LogRecord to
be included. In some cases these are stringized since the
originals are objects and we don't pickle/jsonpickle them.
subsystems A list of logger names to listen to. A "null"/None entry in
the list means the root logger (which is also the default).
add_subsystems A list of ADDITIONAL subsystems to listen to.
"""
#NOTE: We take advantage of the fact that the default value for the
# argument to getLogger() is None. This is currently true, but
# isn't documented, so it might change in the future (though I
# don't see why it would!). Doing this "the right way" results
# in much uglier code.
def __init__ (self, channel, params):
logging.Handler.__init__(self)
self._channel = channel
self.addFilter(LogFilter())
self._json = False
self._format = False # Not valid, should never be set
self._dateFormat = None
self.subsystems = []
if "format" not in params:
params["format"] = None # Force update
if 'subsystems' not in params:
self._add_subsystems([None])
self._process_parameters(params)
def _add_subsystems (self, subsystems):
"""
Add log subsystems to listen to
"""
for subsystem in subsystems:
if subsystem in self.subsystems: continue
try:
logging.getLogger(subsystem).addHandler(self)
self.subsystems.append(subsystem)
except:
pass
def _drop_subsystems (self):
"""
Stop listening to all log subsystems
"""
for subsystem in self.subsystems:
logging.getLogger(subsystem).removeHandler(self)
self.subsystems = []
def _process_parameters (self, params):
if "level" in params:
self.setLevel(params["level"])
if "subsystems" in params:
self._drop_subsystems()
self._add_subsystems(params['subsystems'])
if 'add_subsystems' in params:
self._add_subsystems(params['add_subsystems'])
if 'remove_subsystems' in params:
#TODO
log.error('remove_subsystems unimplemented')
if "json" in params:
self._json = params['json']
if "setLevels" in params:
levels = params['setLevels']
if isinstance(levels, dict):
for k,v in levels.iteritems():
l = core.getLogger(k)
l.setLevel(v)
else:
core.getLogger().setLevel(levels)
doFormat = False
if "format" in params:
fmt = params['format']
if fmt is not self._format:
self._format = fmt
doFormat = True
if "dateFormat" in params:
dateFormat = params['dateFormat']
if dateFormat is not self._dateFormat:
self._dateFormat = dateFormat
doFormat = True
if doFormat:
self.setFormatter(logging.Formatter(self._format, self._dateFormat))
def _close (self):
self._drop_subsystems()
def emit (self, record):
o = {'message' : self.format(record)}
#o['message'] = record.getMessage()
if self._json:
for attr in _attributes:
o[attr] = getattr(record, attr)
o['asctime'] = self.formatter.formatTime(record, self._dateFormat)
if record.exc_info:
o['exc_info'] = [str(record.exc_info[0]),
str(record.exc_info[1]),
traceback.format_tb(record.exc_info[2],1)]
o['exc'] = traceback.format_exception(*record.exc_info)
self._channel.send(o)
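# A minimal sketch of the configuration dictionary a messenger client might
# send when joining a log_<something> channel (keys as documented on
# LogHandler above; the 'openflow' subsystem name is just an example).
def _example_log_handler_params ():
  return {
    'level': 'INFO',
    'format': '%(asctime)s %(name)s %(levelname)s: %(message)s',
    'dateFormat': '%H:%M:%S',
    'json': True,
    'subsystems': [None, 'openflow'],  # root logger plus one named subsystem
  }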
def _process_commands (msg):
"""
Processes logger commands
"""
def get (key):
r = msg.get(key)
if r is not None:
      if not isinstance(r, dict):
r = {None:r}
else:
return {}
return r
lowerLevels = get("lowerLevels") # less verbose
raiseLevels = get("raiseLevels") # more verbose
setLevels = get("setLevels")
for k,v in lowerLevels.iteritems():
logger = core.getLogger(k)
level = logging._checkLevel(v)
    if not logger.isEnabledFor(level+1):
logger.setLevel(v)
for k,v in raiseLevels.iteritems():
logger = core.getLogger(k)
    if not logger.isEnabledFor(v):
logger.setLevel(v)
for k,v in setLevels.iteritems():
logger = core.getLogger(k)
logger.setLevel(v)
message = msg.get("message", None)
if message:
level = msg.get("level", "DEBUG")
if isinstance(level, basestring):
import logging
if not level.isalpha():
level = logging.DEBUG
else:
level = level.upper()
level = getattr(logging, level, logging.DEBUG)
sub = msg.get("subsystem", "<external>")
logging.getLogger(sub).log(level, message)
class LogBot (ChannelBot):
def _init (self, extra):
self._handler = None
def _join (self, event, con, msg):
#self.reply(event, hello = "Hello, %s!" % (con,))
if self._handler is not None:
log.warning("Multiple clients on channel " + self.channel.name)
else:
self._handler = LogHandler(self.channel, msg)
def _leave (self, con, empty):
if empty:
self._handler._close()
self._handler = None
def _unhandled (self, event):
_process_commands(event.msg)
self._handler._process_parameters(event.msg)
def _handle_new_channel (event):
if event.channel.name.startswith("log_"):
# New channel named log_<something>? Add a log bot.
LogBot(event.channel)
def launch (nexus = "MessengerNexus"):
def start (nexus):
# One bot for default log channel
real_nexus = core.components[nexus]
LogBot(real_nexus.get_channel('log'))
# This will create new channels on demand
real_nexus.addListener(ChannelCreate, _handle_new_channel)
core.call_when_ready(start, nexus, args=[nexus])
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# This reads a file of lines containing 'key:value' pairs.
# The file contains entries like the following:
# plat:Win32
# prod:Chromium
# ptype:nacl-loader
# rept:crash svc
def ReadDumpTxtFile(filename):
dump_info = {}
fh = open(filename, 'r')
for line in fh:
if ':' in line:
key, value = line.rstrip().split(':', 1)
dump_info[key] = value
fh.close()
return dump_info
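# A small self-contained sketch of the 'key:value' format ReadDumpTxtFile()
# expects, using a temporary file. The field values mirror the example in the
# comment above; real dump .txt files contain more entries.
def ExampleReadDumpTxtFile():
  fh = tempfile.NamedTemporaryFile(mode='w', suffix='.txt', delete=False)
  fh.write('plat:Win32\nprod:Chromium\nptype:nacl-loader\n')
  fh.close()
  dump_info = ReadDumpTxtFile(fh.name)
  os.remove(fh.name)
  # Expected: {'plat': 'Win32', 'prod': 'Chromium', 'ptype': 'nacl-loader'}
  return dump_info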
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, crash_service_exe,
skip_if_missing=False):
# Find crash_service.exe relative to chrome.exe. This is a bit icky.
browser_dir = os.path.dirname(browser_path)
crash_service_path = os.path.join(browser_dir, crash_service_exe)
if skip_if_missing and not os.path.exists(crash_service_path):
return
proc = subprocess.Popen([crash_service_path,
'--v=1', # Verbose output for debugging failures
'--dumps-dir=%s' % dumps_dir,
'--pipe-name=%s' % windows_pipe_name])
def Cleanup():
# Note that if the process has already exited, this will raise
# an 'Access is denied' WindowsError exception, but
# crash_service.exe is not supposed to do this and such
# behaviour should make the test fail.
proc.terminate()
status = proc.wait()
sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
% (crash_service_exe, status))
cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
if os.path.exists(dir_path):
return [os.path.join(dir_path, name)
for name in os.listdir(dir_path)]
else:
return []
def GetDumpFiles(dumps_dirs):
all_files = [filename
for dumps_dir in dumps_dirs
for filename in ListPathsInDir(dumps_dir)]
sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files))
for dump_file in all_files:
sys.stdout.write(' %s (size %i)\n'
% (dump_file, os.stat(dump_file).st_size))
return [dump_file for dump_file in all_files
if dump_file.endswith('.dmp')]
def Main(cleanup_funcs):
parser = browser_tester.BuildArgParser()
parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
type=int, default=0,
help='The number of crash dumps that we should expect')
parser.add_option('--expected_process_type_for_crash',
dest='expected_process_type_for_crash',
type=str, default='nacl-loader',
help='The type of Chromium process that we expect the '
'crash dump to be for')
# Ideally we would just query the OS here to find out whether we are
# running x86-32 or x86-64 Windows, but Python's win32api module
# does not contain a wrapper for GetNativeSystemInfo(), which is
# what NaCl uses to check this, or for IsWow64Process(), which is
# what Chromium uses. Instead, we just rely on the build system to
# tell us.
parser.add_option('--win64', dest='win64', action='store_true',
help='Pass this if we are running tests for x86-64 Windows')
options, args = parser.parse_args()
temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
def CleanUpTempDir():
browsertester.browserlauncher.RemoveDirectory(temp_dir)
cleanup_funcs.append(CleanUpTempDir)
# To get a guaranteed unique pipe name, use the base name of the
# directory we just created.
windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)
# This environment variable enables Breakpad crash dumping in
# non-official builds of Chromium.
os.environ['CHROME_HEADLESS'] = '1'
if sys.platform == 'win32':
dumps_dir = temp_dir
# Override the default (global) Windows pipe name that Chromium will
# use for out-of-process crash reporting.
os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
# Launch the x86-32 crash service so that we can handle crashes in
# the browser process.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service.exe')
if options.win64:
# Launch the x86-64 crash service so that we can handle crashes
# in the NaCl loader process (nacl64.exe).
# Skip if missing, since in win64 builds crash_service.exe is 64-bit
# and crash_service64.exe does not exist.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service64.exe',
skip_if_missing=True)
# We add a delay because there is probably a race condition:
# crash_service.exe might not have finished doing
# CreateNamedPipe() before NaCl does a crash dump and tries to
# connect to that pipe.
# TODO(mseaborn): We could change crash_service.exe to report when
# it has successfully created the named pipe.
time.sleep(1)
elif sys.platform == 'darwin':
dumps_dir = temp_dir
os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
elif sys.platform.startswith('linux'):
# The "--user-data-dir" option is not effective for the Breakpad
# setup in Linux Chromium, because Breakpad is initialized before
# "--user-data-dir" is read. So we set HOME to redirect the crash
# dumps to a temporary directory.
home_dir = temp_dir
os.environ['HOME'] = home_dir
options.enable_crash_reporter = True
result = browser_tester.Run(options.url, options)
# Find crash dump results.
if sys.platform.startswith('linux'):
# Look in "~/.config/*/Crash Reports". This will find crash
# reports under ~/.config/chromium or ~/.config/google-chrome, or
# under other subdirectories in case the branding is changed.
dumps_dirs = [os.path.join(path, 'Crash Reports')
for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
else:
dumps_dirs = [dumps_dir]
dmp_files = GetDumpFiles(dumps_dirs)
failed = False
msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
(len(dmp_files), options.expected_crash_dumps))
if len(dmp_files) != options.expected_crash_dumps:
sys.stdout.write(msg)
failed = True
for dump_file in dmp_files:
# Sanity check: Make sure dumping did not fail after opening the file.
msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
if os.stat(dump_file).st_size == 0:
sys.stdout.write(msg)
failed = True
# On Windows, the crash dumps should come in pairs of a .dmp and
# .txt file.
if sys.platform == 'win32':
second_file = dump_file[:-4] + '.txt'
msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
'%r file\n' % (dump_file, second_file))
if not os.path.exists(second_file):
sys.stdout.write(msg)
failed = True
continue
# Check that the crash dump comes from the NaCl process.
dump_info = ReadDumpTxtFile(second_file)
if 'ptype' in dump_info:
msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
% (dump_info['ptype'], options.expected_process_type_for_crash))
if dump_info['ptype'] != options.expected_process_type_for_crash:
sys.stdout.write(msg)
failed = True
else:
sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
failed = True
# TODO(mseaborn): Ideally we would also check that a backtrace
# containing an expected function name can be extracted from the
# crash dump.
if failed:
sys.stdout.write('crash_dump_tester: FAILED\n')
result = 1
else:
sys.stdout.write('crash_dump_tester: PASSED\n')
return result
def MainWrapper():
cleanup_funcs = []
try:
return Main(cleanup_funcs)
finally:
for func in cleanup_funcs:
func()
if __name__ == '__main__':
sys.exit(MainWrapper())
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities used by both the GRU and LSTM classes."""
# pylint: disable=g-direct-tensorflow-import
import uuid
import tensorflow.compat.v2 as tf
from tensorflow.python.eager.context import get_device_name
# The following string constants are used by Defun approach for unified backend
# of LSTM and GRU.
_FUNCTION_API_NAME_ATTRIBUTE = 'api_implements'
_FUNCTION_DEVICE_ATTRIBUTE = 'api_preferred_device'
CPU_DEVICE_NAME = 'CPU'
GPU_DEVICE_NAME = 'GPU'
# The following number constants are used to represent the runtime of the defun
# backend function. Since the CPU and GPU implementations are mathematically the
# same, we need some signal for the function to indicate which one was executed.
# This is for testing purposes, to verify the correctness of swapping the backend
# function.
RUNTIME_UNKNOWN = 0
RUNTIME_CPU = 1
RUNTIME_GPU = 2
CUDNN_AVAILABLE_MSG = 'Layer %s will use cuDNN kernels when running on GPU.'
CUDNN_NOT_AVAILABLE_MSG = ('Layer %s will not use cuDNN kernels since it '
'doesn\'t meet the criteria. It will '
'use a generic GPU kernel as fallback when running '
'on GPU.')
def use_new_gru_lstm_impl():
return False
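# A tiny illustrative helper (not part of the Keras API) that maps the runtime
# constants above to readable names, e.g. when inspecting the runtime tensor
# returned by the defun backends in tests.
def _runtime_constant_to_name(value):
  return {
      RUNTIME_UNKNOWN: 'unknown',
      RUNTIME_CPU: CPU_DEVICE_NAME,
      RUNTIME_GPU: GPU_DEVICE_NAME,
  }.get(value, 'unknown')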
# TODO(b/169707691): The wrapper can be removed if TFLite doesn't need to rely
# on supportive attributes from LSTM/GRU.
class DefunWrapper:
"""A wrapper with no deep copy of the Defun in LSTM/GRU layer."""
def __init__(self, time_major, go_backwards, layer_name):
self.time_major = time_major
self.go_backwards = go_backwards
self.layer_name = layer_name
if self.layer_name not in ['lstm', 'gru']:
raise ValueError('Defun wrapper only applies to LSTM and GRU layer, '
'but given {}'.format(self.layer_name))
# The first two attributes are added to support TFLite use case.
supportive_attributes = {
'time_major': self.time_major,
'go_backwards': self.go_backwards,
_FUNCTION_API_NAME_ATTRIBUTE: self.layer_name + '_' + str(uuid.uuid4())
}
if self.layer_name == 'lstm':
from keras.layers.rnn import lstm # pylint: disable=g-import-not-at-top
layer_func = lstm.lstm_with_backend_selection
else:
from keras.layers.rnn import gru # pylint: disable=g-import-not-at-top
layer_func = gru.gru_with_backend_selection
self.defun_layer = tf.__internal__.function.defun_with_attributes(
layer_func,
attributes=supportive_attributes,
autograph=False)
def __deepcopy__(self, memo):
new_wrapper = type(self)(
self.time_major, self.go_backwards, self.layer_name)
memo[id(self)] = new_wrapper
return new_wrapper
def canonical_to_params(weights, biases, shape, transpose_weights=False):
"""Utility function convert variable to cuDNN compatible parameter.
Note that Keras weights for kernels are different from the cuDNN format. Eg.:
```
Keras cuDNN
[[0, 1, 2], <---> [[0, 2, 4],
[3, 4, 5]] [1, 3, 5]]
```
If the input weights need to be in a unified format, then set
`transpose_weights=True` to convert the weights.
Args:
weights: list of weights for the individual kernels and recurrent kernels.
biases: list of biases for individual gate.
    shape: the shape for the converted variables that will be fed to cuDNN.
transpose_weights: boolean, whether to transpose the weights.
Returns:
    The converted weights that can be fed to cuDNN ops as params.
"""
def convert(w):
return tf.transpose(w) if transpose_weights else w
weights = [tf.reshape(convert(x), shape) for x in weights]
biases = [tf.reshape(x, shape) for x in biases]
return tf.concat(weights + biases, axis=0)
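# A minimal sketch of canonical_to_params() on toy 2x3 kernels, mirroring the
# Keras-vs-cuDNN layout example in the docstring (values are illustrative).
def _example_canonical_to_params():
  kernel = tf.constant([[0., 1., 2.], [3., 4., 5.]])
  recurrent = tf.constant([[6., 7., 8.], [9., 10., 11.]])
  bias = tf.constant([0.1, 0.2, 0.3])
  # With transpose_weights=True each kernel is transposed before being
  # flattened into the single concatenated cuDNN parameter vector.
  return canonical_to_params(
      weights=[kernel, recurrent],
      biases=[bias],
      shape=[-1],
      transpose_weights=True)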
def is_sequence_right_padded(mask):
"""Check the mask tensor and see if it right padded.
For cuDNN kernel, it uses the sequence length param to skip the tailing
timestep. If the data is left padded, or not a strict right padding (has
masked value in the middle of the sequence), then cuDNN kernel won't be work
properly in those cases.
Left padded data: [[False, False, True, True, True]].
Right padded data: [[True, True, True, False, False]].
Mixture of mask/unmasked data: [[True, False, True, False, False]].
Note that for the mixed data example above, the actually data RNN should see
are those 2 Trues (index 0 and 2), the index 1 False should be ignored and not
pollute the internal states.
Args:
mask: the Boolean tensor with shape [batch, timestep]
Returns:
boolean scalar tensor, whether the mask is strictly right padded.
"""
max_seq_length = tf.shape(mask)[1]
count_of_true = tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)
right_padded_mask = tf.sequence_mask(
count_of_true, maxlen=max_seq_length)
return tf.reduce_all(tf.equal(mask, right_padded_mask))
def has_fully_masked_sequence(mask):
# See https://github.com/tensorflow/tensorflow/issues/33148 for more details.
  # The cuDNN kernel will error out if the input sequence contains any fully
  # masked data. We work around this issue by rerouting the computation to the
  # standard kernel, until the issue on the cuDNN side has been fixed.
  # A fully masked sequence contains all Falses. To make it easy to check, we
  # invert the boolean mask and check whether any sequence is all True.
return tf.reduce_any(
tf.reduce_all(
tf.logical_not(mask),
axis=1))
def is_cudnn_supported_inputs(mask, time_major):
if time_major:
mask = tf.transpose(mask)
return tf.logical_and(
is_sequence_right_padded(mask),
tf.logical_not(has_fully_masked_sequence(mask)))
def calculate_sequence_by_mask(mask, time_major):
"""Calculate the sequence length tensor (1-D) based on the masking tensor.
The masking tensor is a 2D boolean tensor with shape [batch, timestep]. For
any timestep that should be masked, the corresponding field will be False.
Consider the following example:
a = [[True, True, False, False],
[True, True, True, False]]
It is a (2, 4) tensor, and the corresponding sequence length result should be
  a 1D tensor with value [2, 3]. Note that the masking tensor must be right
  padded, which can be checked by, e.g., `is_sequence_right_padded()`.
Args:
mask: Boolean tensor with shape [batch, timestep] or [timestep, batch] if
time_major=True.
time_major: Boolean, which indicates whether the mask is time major or batch
major.
Returns:
sequence_length: 1D int32 tensor.
"""
timestep_index = 0 if time_major else 1
return tf.reduce_sum(tf.cast(mask, tf.int32), axis=timestep_index)
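# A small sketch of calculate_sequence_by_mask() on the docstring example; the
# expected result is a 1D length tensor with value [2, 3].
def _example_calculate_sequence_by_mask():
  mask = tf.constant([[True, True, False, False],
                      [True, True, True, False]])
  return calculate_sequence_by_mask(mask, time_major=False)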
def generate_defun_backend(unique_api_name, preferred_device, func,
supportive_attributes):
function_attributes = {
_FUNCTION_API_NAME_ATTRIBUTE: unique_api_name,
_FUNCTION_DEVICE_ATTRIBUTE: preferred_device,
}
function_attributes.update(supportive_attributes)
return tf.__internal__.function.defun_with_attributes(
func=func, attributes=function_attributes, autograph=False)
def get_context_device_type():
"""Parse the current context and return the device type, eg CPU/GPU."""
current_device = get_device_name()
if current_device is None:
return None
return tf.compat.v1.DeviceSpec.from_string(current_device).device_type
def runtime(runtime_name):
with tf.device('/cpu:0'):
return tf.constant(
runtime_name, dtype=tf.float32, name='runtime')
def read_variable_value(v):
"""Read the value of a variable if it is variable."""
if isinstance(v, tf.Variable):
return v.read_value()
return v
def function_register(func, *args, **kwargs):
"""Register a specialization of a `Function` into the graph.
  This won't actually call the function with the inputs; it only puts the
  function definition into the graph. Registering the function with different
  input parameters will result in multiple versions being registered in the
  graph.
Args:
func: the `Function` instance that generated by a @defun
*args: input arguments for the Python function.
**kwargs: input keyword arguments for the Python function.
Returns:
a `ConcreteFunction` object specialized to inputs and execution context.
Raises:
ValueError: When the input function is not a defun wrapped python function.
"""
concrete_func = func.get_concrete_function(*args, **kwargs)
concrete_func.add_to_graph()
concrete_func.add_gradient_functions_to_graph()
return concrete_func
|
|
# -*- coding:utf-8 -*-
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import json
from django import forms
from django.core import validators
from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import string_concat
from django_mysql.validators import (
ListMaxLengthValidator, ListMinLengthValidator, SetMaxLengthValidator,
SetMinLengthValidator
)
class SimpleListField(forms.CharField):
default_error_messages = {
'item_n_invalid': _('Item %(nth)s in the list did not validate: '),
'no_double_commas': _('No leading, trailing, or double commas.'),
}
def __init__(self, base_field, max_length=None, min_length=None,
*args, **kwargs):
self.base_field = base_field
super(SimpleListField, self).__init__(*args, **kwargs)
if max_length is not None:
self.max_length = max_length
self.validators.append(ListMaxLengthValidator(int(max_length)))
if min_length is not None:
self.min_length = min_length
self.validators.append(ListMinLengthValidator(int(min_length)))
def prepare_value(self, value):
if isinstance(value, list):
return ",".join(
six.text_type(self.base_field.prepare_value(v))
for v in value
)
return value
def to_python(self, value):
if value and len(value):
items = value.split(",")
else:
items = []
errors = []
values = []
for i, item in enumerate(items, start=1):
if not len(item):
errors.append(ValidationError(
self.error_messages['no_double_commas'],
code='no_double_commas',
))
continue
try:
value = self.base_field.to_python(item)
except ValidationError as e:
for error in e.error_list:
errors.append(ValidationError(
string_concat(self.error_messages['item_n_invalid'],
error.message),
code='item_n_invalid',
params={'nth': i},
))
values.append(value)
if errors:
raise ValidationError(errors)
return values
def validate(self, value):
super(SimpleListField, self).validate(value)
errors = []
for i, item in enumerate(value, start=1):
try:
self.base_field.validate(item)
except ValidationError as e:
for error in e.error_list:
for message in error.messages:
errors.append(ValidationError(
string_concat(
self.error_messages['item_n_invalid'],
message),
code='item_invalid',
params={'nth': i}
))
if errors:
raise ValidationError(errors)
def run_validators(self, value):
super(SimpleListField, self).run_validators(value)
errors = []
for i, item in enumerate(value, start=1):
try:
self.base_field.run_validators(item)
except ValidationError as e:
for error in e.error_list:
for message in error.messages:
errors.append(ValidationError(
string_concat(
self.error_messages['item_n_invalid'],
message),
code='item_n_invalid',
params={'nth': i}
))
if errors:
raise ValidationError(errors)
class SimpleSetField(forms.CharField):
empty_values = list(validators.EMPTY_VALUES) + [set()]
default_error_messages = {
'item_invalid': _('Item "%(item)s" in the set did not validate: '),
'item_n_invalid': _('Item %(nth)s in the set did not validate: '),
'no_double_commas': _('No leading, trailing, or double commas.'),
'no_duplicates': _("Duplicates are not supported. "
"'%(item)s' appears twice or more.")
}
def __init__(self, base_field, max_length=None, min_length=None,
*args, **kwargs):
self.base_field = base_field
super(SimpleSetField, self).__init__(*args, **kwargs)
if max_length is not None:
self.max_length = max_length
self.validators.append(SetMaxLengthValidator(int(max_length)))
if min_length is not None:
self.min_length = min_length
self.validators.append(SetMinLengthValidator(int(min_length)))
def prepare_value(self, value):
if isinstance(value, set):
return ",".join(
six.text_type(self.base_field.prepare_value(v))
for v in value
)
return value
def to_python(self, value):
if value and len(value):
items = value.split(",")
else:
items = []
errors = []
values = set()
for i, item in enumerate(items, start=1):
if not len(item):
errors.append(ValidationError(
self.error_messages['no_double_commas'],
code='no_double_commas',
))
continue
try:
value = self.base_field.to_python(item)
except ValidationError as e:
for error in e.error_list:
errors.append(ValidationError(
string_concat(self.error_messages['item_n_invalid'],
error.message),
code='item_n_invalid',
params={'nth': i},
))
if value in values:
errors.append(ValidationError(
self.error_messages['no_duplicates'],
code='no_duplicates',
params={'item': item}
))
else:
values.add(value)
if errors:
raise ValidationError(errors)
return values
def validate(self, value):
super(SimpleSetField, self).validate(value)
errors = []
for item in value:
try:
self.base_field.validate(item)
except ValidationError as e:
for error in e.error_list:
for message in error.messages:
errors.append(ValidationError(
string_concat(self.error_messages['item_invalid'],
message),
code='item_invalid',
params={'item': item}
))
if errors:
raise ValidationError(errors)
def run_validators(self, value):
super(SimpleSetField, self).run_validators(value)
errors = []
for item in value:
try:
self.base_field.run_validators(item)
except ValidationError as e:
for error in e.error_list:
for message in error.messages:
errors.append(ValidationError(
string_concat(self.error_messages['item_invalid'],
message),
code='item_invalid',
params={'item': item}
))
if errors:
raise ValidationError(errors)
class JSONField(forms.CharField):
default_error_messages = {
'invalid': _("'%(value)s' value must be valid JSON."),
}
def __init__(self, **kwargs):
kwargs.setdefault('widget', forms.Textarea)
super(JSONField, self).__init__(**kwargs)
def to_python(self, value):
if value in self.empty_values:
return None
try:
return json.loads(value)
except ValueError:
raise forms.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={'value': value},
)
def prepare_value(self, value):
return json.dumps(value)
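# A minimal sketch of how these form fields might be combined in a Django
# form; the form and field names are illustrative and not part of
# django_mysql itself.
def _example_form_class():
    class ExampleForm(forms.Form):
        tags = SimpleListField(forms.CharField(max_length=32), max_length=5)
        labels = SimpleSetField(forms.CharField(max_length=32), required=False)
        payload = JSONField(required=False)
    return ExampleForm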
|
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFMA v2 benchmark."""
import copy
import time
import apache_beam as beam
import numpy as np
import tensorflow as tf
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis import constants
from tensorflow_model_analysis.evaluators import metrics_plots_and_validations_evaluator
from tensorflow_model_analysis.evaluators import poisson_bootstrap
from tensorflow_model_analysis.extractors import example_weights_extractor
from tensorflow_model_analysis.extractors import features_extractor
from tensorflow_model_analysis.extractors import labels_extractor
from tensorflow_model_analysis.extractors import legacy_input_extractor
from tensorflow_model_analysis.extractors import predictions_extractor
from tensorflow_model_analysis.extractors import unbatch_extractor
from tensorflow_model_analysis.metrics import metric_specs as metric_specs_util
from tensorflow_model_analysis.metrics import metric_types
from tensorflow_model_analysis.utils import model_util
from tensorflow_model_analysis.utils import util
import tfx
from tfx.benchmarks import benchmark_utils
from tfx.benchmarks import benchmark_base
from tfx_bsl.coders import example_coder
from tfx_bsl.tfxio import record_based_tfxio
from tfx_bsl.tfxio import test_util
# Maximum number of examples within a record batch.
_BATCH_SIZE = 1000
# Number of iterations.
_ITERS = 1
# TODO(b/147827582): Also add "TF-level" Keras benchmarks for how TFMAv2
# gets predictions / computes metrics.
class TFMAV2BenchmarkBase(benchmark_base.BenchmarkBase):
"""TFMA benchmark."""
def __init__(self, dataset, **kwargs):
# Benchmark runners may pass extraneous arguments we don't care about.
del kwargs
super().__init__()
self._dataset = dataset
def _init_model(self, multi_model, validation):
# The benchmark runner will instantiate this class twice - once to determine
    # the benchmarks to run, and once to actually run them. However, Keras
# freezes if we try to load the same model twice. As such, we have to pull
# the model loading out of the constructor into a separate method which we
# call before each benchmark.
if multi_model:
metric_specs = metric_specs_util.specs_from_metrics(
[tf.keras.metrics.AUC(name="auc", num_thresholds=10000)],
model_names=["candidate", "baseline"])
if validation:
# Only one metric, adding a threshold for all slices.
metric_specs[0].metrics[0].threshold.CopyFrom(
tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={"value": 0.5}, upper_bound={"value": 0.5}),
change_threshold=tfma.GenericChangeThreshold(
absolute={"value": -0.001},
direction=tfma.MetricDirection.HIGHER_IS_BETTER)))
self._eval_config = tfma.EvalConfig(
model_specs=[
tfma.ModelSpec(name="candidate", label_key="tips"),
tfma.ModelSpec(
name="baseline", label_key="tips", is_baseline=True)
],
metrics_specs=metric_specs)
self._eval_shared_models = {
"candidate":
tfma.default_eval_shared_model(
self._dataset.trained_saved_model_path(),
eval_config=self._eval_config,
model_name="candidate"),
"baseline":
tfma.default_eval_shared_model(
self._dataset.trained_saved_model_path(),
eval_config=self._eval_config,
model_name="baseline")
}
else:
metric_specs = metric_specs_util.specs_from_metrics(
[tf.keras.metrics.AUC(name="auc", num_thresholds=10000)])
if validation:
# Only one metric, adding a threshold for all slices.
metric_specs[0].metrics[0].threshold.CopyFrom(
tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={"value": 0.5}, upper_bound={"value": 0.5})))
self._eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(label_key="tips")],
metrics_specs=metric_specs)
self._eval_shared_models = {
"":
tfma.default_eval_shared_model(
self._dataset.trained_saved_model_path(),
eval_config=self._eval_config)
}
def _max_num_examples(self):
# TFMA is slower than TFT, so use a smaller number of examples from the
# dataset.
limit = 100000
parent_max = super()._max_num_examples()
if parent_max is None:
return limit
return min(parent_max, limit)
def report_benchmark(self, **kwargs):
if "extras" not in kwargs:
kwargs["extras"] = {}
# Note that the GIT_COMMIT_ID is not included in the packages themselves:
# it must be injected by an external script.
kwargs["extras"]["commit_tfx"] = (
getattr(tfx, "GIT_COMMIT_ID", None) or
getattr(tfx, "__version__", None))
kwargs["extras"]["commit_tfma"] = (
getattr(tfma, "GIT_COMMIT_ID", None) or
getattr(tfma, "__version__", None))
# Stdout for use in tools which read the benchmark results from stdout.
print(self._get_name(), kwargs["wall_time"],
"({}x)".format(kwargs["iters"]))
super().report_benchmark(**kwargs)
def _runMiniPipeline(self, multi_model):
"""Benchmark a "mini" TFMA - predict, slice and compute metrics.
Runs a "mini" version of TFMA in a Beam pipeline. Records the wall time
taken for the whole pipeline.
Args:
multi_model: True if multiple models should be used in the benchmark.
"""
self._init_model(multi_model, validation=False)
pipeline = self._create_beam_pipeline()
tfx_io = test_util.InMemoryTFExampleRecord(
schema=benchmark_utils.read_schema(
self._dataset.tf_metadata_schema_path()),
raw_record_column_name=constants.ARROW_INPUT_COLUMN)
raw_data = (
pipeline
| "Examples" >> beam.Create(
self._dataset.read_raw_dataset(
deserialize=False, limit=self._max_num_examples()))
| "BatchExamples" >> tfx_io.BeamSource()
| "InputsToExtracts" >> tfma.BatchedInputsToExtracts())
def rescale_labels(extracts):
# Transform labels to [0, 1] so we can test metrics that require labels in
# that range.
result = copy.copy(extracts)
result[constants.LABELS_KEY] = self._transform_labels(
extracts[constants.LABELS_KEY])
return result
_ = (
raw_data
| "FeaturesExtractor" >> features_extractor.FeaturesExtractor(
eval_config=self._eval_config).ptransform
| "LabelsExtractor" >> labels_extractor.LabelsExtractor(
eval_config=self._eval_config).ptransform
| "RescaleLabels" >> beam.Map(rescale_labels)
| "ExampleWeightsExtractor" >> example_weights_extractor
.ExampleWeightsExtractor(eval_config=self._eval_config).ptransform
| "PredictionsExtractor" >> predictions_extractor.PredictionsExtractor(
eval_config=self._eval_config,
eval_shared_model=self._eval_shared_models).ptransform
| "UnbatchExtractor" >> unbatch_extractor.UnbatchExtractor().ptransform
| "SliceKeyExtractor" >> tfma.extractors.SliceKeyExtractor().ptransform
| "ComputeMetricsPlotsAndValidations" >>
metrics_plots_and_validations_evaluator
.MetricsPlotsAndValidationsEvaluator(
eval_config=self._eval_config,
eval_shared_model=self._eval_shared_models).ptransform)
start = time.time()
for _ in range(_ITERS):
result = pipeline.run()
result.wait_until_finish()
end = time.time()
delta = end - start
self.report_benchmark(
iters=_ITERS,
wall_time=delta,
extras={
"num_examples":
self._dataset.num_examples(limit=self._max_num_examples())
})
def benchmarkMiniPipeline(self):
self._runMiniPipeline(False)
def benchmarkMiniPipelineMultiModel(self):
self._runMiniPipeline(True)
def _readDatasetIntoExtracts(self):
"""Read the raw dataset and massage examples into Extracts."""
records = []
for x in self._dataset.read_raw_dataset(
deserialize=False, limit=self._max_num_examples()):
records.append({tfma.INPUT_KEY: x, tfma.SLICE_KEY_TYPES_KEY: ()})
return records
def _readDatasetIntoBatchedExtracts(self):
"""Read the raw dataset and massage examples into batched Extracts."""
serialized_examples = list(
self._dataset.read_raw_dataset(
deserialize=False, limit=self._max_num_examples()))
# TODO(b/153996019): Once the TFXIO interface that returns an iterator of
# RecordBatch is available, clean this up.
coder = example_coder.ExamplesToRecordBatchDecoder(
serialized_schema=benchmark_utils.read_schema(
self._dataset.tf_metadata_schema_path()).SerializeToString())
batches = []
for i in range(0, len(serialized_examples), _BATCH_SIZE):
example_batch = serialized_examples[i:i + _BATCH_SIZE]
record_batch = record_based_tfxio.AppendRawRecordColumn(
coder.DecodeBatch(example_batch), constants.ARROW_INPUT_COLUMN,
example_batch)
batches.append({constants.ARROW_RECORD_BATCH_KEY: record_batch})
return batches
def _transform_labels(self, labels):
# Transform labels to [0, 1] so we can test metrics that require labels in
# that range.
if len(self._eval_config.model_specs) > 1:
updated_labels = {}
for s in self._eval_config.model_specs:
updated_labels[s.name] = np.array(
[1.0 / (1.0 + x) for x in labels[s.name]])
return updated_labels
else:
return np.array([1.0 / (1.0 + x) for x in labels])
def _extract_features_and_labels(self, batched_extract):
"""Extract features from extracts containing arrow table."""
# This function is a combination of
# _ExtractFeatures.extract_features in extractors/features_extractor.py
# and _ExtractLabels.extract_labels in extractors/labels_extractor.py
result = copy.copy(batched_extract)
(record_batch, serialized_examples) = (
features_extractor._drop_unsupported_columns_and_fetch_raw_data_column( # pylint: disable=protected-access
batched_extract[constants.ARROW_RECORD_BATCH_KEY]))
features = result[
constants.FEATURES_KEY] if constants.FEATURES_KEY in result else {}
features.update(util.record_batch_to_tensor_values(record_batch))
result[constants.FEATURES_KEY] = features
result[constants.INPUT_KEY] = serialized_examples
labels = (
model_util.get_feature_values_for_model_spec_field(
list(self._eval_config.model_specs), "label_key", "label_keys",
result, True))
result[constants.LABELS_KEY] = self._transform_labels(labels)
return result
def _runInputExtractorManualActuation(self, multi_model):
"""Benchmark InputExtractor "manually"."""
self._init_model(multi_model, validation=False)
records = self._readDatasetIntoExtracts()
extracts = []
start = time.time()
for _ in range(_ITERS):
for elem in records:
extracts.append(
legacy_input_extractor._ParseExample(elem, self._eval_config)) # pylint: disable=protected-access
end = time.time()
delta = end - start
self.report_benchmark(
iters=_ITERS, wall_time=delta, extras={"num_examples": len(records)})
# "Manual" micro-benchmarks
def benchmarkInputExtractorManualActuation(self):
self._runInputExtractorManualActuation(False)
# "Manual" micro-benchmarks
def benchmarkInputExtractorManualActuationMultiModel(self):
self._runInputExtractorManualActuation(True)
def _runFeaturesExtractorManualActuation(self, multi_model):
"""Benchmark FeaturesExtractor "manually"."""
self._init_model(multi_model, validation=False)
extracts = self._readDatasetIntoBatchedExtracts()
num_examples = sum(
[e[constants.ARROW_RECORD_BATCH_KEY].num_rows for e in extracts])
result = []
start = time.time()
for _ in range(_ITERS):
for e in extracts:
result.append(self._extract_features_and_labels(e))
end = time.time()
delta = end - start
self.report_benchmark(
iters=_ITERS, wall_time=delta, extras={"num_examples": num_examples})
# "Manual" micro-benchmarks
def benchmarkFeaturesExtractorManualActuation(self):
self._runFeaturesExtractorManualActuation(False)
# "Manual" micro-benchmarks
def benchmarkFeaturesExtractorManualActuationMultiModel(self):
self._runFeaturesExtractorManualActuation(True)
def _runPredictionsExtractorManualActuation(self, multi_model):
"""Benchmark PredictionsExtractor "manually"."""
self._init_model(multi_model, validation=False)
extracts = self._readDatasetIntoBatchedExtracts()
num_examples = sum(
[e[constants.ARROW_RECORD_BATCH_KEY].num_rows for e in extracts])
extracts = [self._extract_features_and_labels(e) for e in extracts]
prediction_do_fn = model_util.ModelSignaturesDoFn(
eval_config=self._eval_config,
eval_shared_models=self._eval_shared_models,
signature_names={
constants.PREDICTIONS_KEY: {
name: [None] for name in self._eval_shared_models
}
},
prefer_dict_outputs=False)
prediction_do_fn.setup()
start = time.time()
for _ in range(_ITERS):
predict_result = []
for e in extracts:
predict_result.extend(prediction_do_fn.process(e))
end = time.time()
delta = end - start
self.report_benchmark(
iters=_ITERS, wall_time=delta, extras={"num_examples": num_examples})
# "Manual" micro-benchmarks
def benchmarkPredictionsExtractorManualActuation(self):
self._runPredictionsExtractorManualActuation(False)
# "Manual" micro-benchmarks
def benchmarkPredictionsExtractorManualActuationMultiModel(self):
self._runPredictionsExtractorManualActuation(True)
def _runMetricsPlotsAndValidationsEvaluatorManualActuation(
self,
with_confidence_intervals,
multi_model,
metrics_specs=None,
validation=False):
"""Benchmark MetricsPlotsAndValidationsEvaluator "manually"."""
self._init_model(multi_model, validation)
if not metrics_specs:
metrics_specs = self._eval_config.metrics_specs
extracts = self._readDatasetIntoBatchedExtracts()
num_examples = sum(
[e[constants.ARROW_RECORD_BATCH_KEY].num_rows for e in extracts])
extracts = [self._extract_features_and_labels(e) for e in extracts]
prediction_do_fn = model_util.ModelSignaturesDoFn(
eval_config=self._eval_config,
eval_shared_models=self._eval_shared_models,
signature_names={
constants.PREDICTIONS_KEY: {
name: [None] for name in self._eval_shared_models
}
},
prefer_dict_outputs=False)
prediction_do_fn.setup()
# Have to predict first
predict_result = []
for e in extracts:
predict_result.extend(prediction_do_fn.process(e))
# Unbatch extracts
unbatched_extracts = []
for e in predict_result:
unbatched_extracts.extend(unbatch_extractor._extract_unbatched_inputs(e)) # pylint: disable=protected-access
# Add global slice key.
for e in unbatched_extracts:
e[tfma.SLICE_KEY_TYPES_KEY] = ()
# Now Evaluate
inputs_per_accumulator = 1000
start = time.time()
for _ in range(_ITERS):
computations, _, _, _ = (
# pylint: disable=protected-access
metrics_plots_and_validations_evaluator
._filter_and_separate_computations(
metric_specs_util.to_computations(
metrics_specs, eval_config=self._eval_config)))
# pylint: enable=protected-access
processed = []
for elem in unbatched_extracts:
processed.append(
next(
metrics_plots_and_validations_evaluator._PreprocessorDoFn( # pylint: disable=protected-access
computations).process(elem)))
combiner = metrics_plots_and_validations_evaluator._ComputationsCombineFn( # pylint: disable=protected-access
computations=computations)
if with_confidence_intervals:
combiner = poisson_bootstrap._BootstrapCombineFn(combiner) # pylint: disable=protected-access
combiner.setup()
accumulators = []
for batch in benchmark_utils.batched_iterator(processed,
inputs_per_accumulator):
accumulator = combiner.create_accumulator()
for elem in batch:
accumulator = combiner.add_input(accumulator, elem)
accumulators.append(accumulator)
final_accumulator = combiner.merge_accumulators(accumulators)
final_output = combiner.extract_output(final_accumulator)
end = time.time()
delta = end - start
# Sanity check the example count. This is not timed.
example_count_key = metric_types.MetricKey(
name="example_count", model_name="candidate" if multi_model else "")
if example_count_key in final_output:
example_count = final_output[example_count_key]
else:
raise ValueError("example_count_key ({}) was not in the final list of "
"metrics. metrics were: {}".format(
example_count_key, final_output))
if with_confidence_intervals:
# If we're computing using confidence intervals, the example count will
# not be exact.
lower_bound = int(0.9 * num_examples)
upper_bound = int(1.1 * num_examples)
if example_count < lower_bound or example_count > upper_bound:
raise ValueError("example count out of bounds: expecting "
"%d < example_count < %d, but got %d" %
(lower_bound, upper_bound, example_count))
else:
# If we're not using confidence intervals, we expect the example count to
# be exact.
if example_count != num_examples:
raise ValueError("example count mismatch: expecting %d got %d" %
(num_examples, example_count))
self.report_benchmark(
iters=_ITERS,
wall_time=delta,
extras={
"inputs_per_accumulator": inputs_per_accumulator,
"num_examples": num_examples
})
# "Manual" micro-benchmarks
def benchmarkMetricsPlotsAndValidationsEvaluatorManualActuationNoConfidenceIntervals(
self):
self._runMetricsPlotsAndValidationsEvaluatorManualActuation(
with_confidence_intervals=False, multi_model=False, validation=True)
# "Manual" micro-benchmarks
def benchmarkMetricsPlotsAndValidationsEvaluatorManualActuationNoConfidenceIntervalsMultiModel(
self):
self._runMetricsPlotsAndValidationsEvaluatorManualActuation(
with_confidence_intervals=False, multi_model=True, validation=True)
# "Manual" micro-benchmarks
def benchmarkMetricsPlotsAndValidationsEvaluatorManualActuationWithConfidenceIntervals(
self):
self._runMetricsPlotsAndValidationsEvaluatorManualActuation(
with_confidence_intervals=True, multi_model=False, validation=True)
# "Manual" micro-benchmarks
def benchmarkMetricsPlotsAndValidationsEvaluatorManualActuationWithConfidenceIntervalsMultiModel(
self):
self._runMetricsPlotsAndValidationsEvaluatorManualActuation(
with_confidence_intervals=True, multi_model=True, validation=True)
# "Manual" micro-benchmarks
def benchmarkMetricsPlotsAndValidationsEvaluatorAUC10k(self):
self._runMetricsPlotsAndValidationsEvaluatorManualActuation(
with_confidence_intervals=False,
multi_model=False,
metrics_specs=metric_specs_util.specs_from_metrics([
tf.keras.metrics.AUC(name="auc", num_thresholds=10000),
]),
validation=True)
# "Manual" micro-benchmarks
def benchmarkMetricsPlotsAndValidationsEvaluatorAUC10kMultiModel(self):
self._runMetricsPlotsAndValidationsEvaluatorManualActuation(
with_confidence_intervals=False,
multi_model=True,
metrics_specs=metric_specs_util.specs_from_metrics(
[
tf.keras.metrics.AUC(name="auc", num_thresholds=10000),
],
model_names=["candidate", "baseline"]),
validation=True)
# "Manual" micro-benchmarks
def benchmarkMetricsPlotsAndValidationsEvaluatorBinaryClassification(self):
self._runMetricsPlotsAndValidationsEvaluatorManualActuation(
with_confidence_intervals=False,
multi_model=False,
metrics_specs=metric_specs_util.specs_from_metrics([
tf.keras.metrics.BinaryAccuracy(name="accuracy"),
tf.keras.metrics.AUC(name="auc", num_thresholds=10000),
tf.keras.metrics.AUC(
name="auc_precison_recall", curve="PR", num_thresholds=10000),
tf.keras.metrics.Precision(name="precision"),
tf.keras.metrics.Recall(name="recall"),
tfma.metrics.MeanLabel(name="mean_label"),
tfma.metrics.MeanPrediction(name="mean_prediction"),
tfma.metrics.Calibration(name="calibration"),
tfma.metrics.ConfusionMatrixPlot(name="confusion_matrix_plot"),
tfma.metrics.CalibrationPlot(name="calibration_plot"),
]),
validation=True)
# "Manual" micro-benchmarks
def benchmarkMetricsPlotsAndValidationsEvaluatorBinaryClassificationMultiModel(
self):
self._runMetricsPlotsAndValidationsEvaluatorManualActuation(
with_confidence_intervals=False,
multi_model=True,
metrics_specs=metric_specs_util.specs_from_metrics([
tf.keras.metrics.BinaryAccuracy(name="accuracy"),
tf.keras.metrics.AUC(name="auc", num_thresholds=10000),
tf.keras.metrics.AUC(
name="auc_precison_recall", curve="PR", num_thresholds=10000),
tf.keras.metrics.Precision(name="precision"),
tf.keras.metrics.Recall(name="recall"),
tfma.metrics.MeanLabel(name="mean_label"),
tfma.metrics.MeanPrediction(name="mean_prediction"),
tfma.metrics.Calibration(name="calibration"),
tfma.metrics.ConfusionMatrixPlot(name="confusion_matrix_plot"),
tfma.metrics.CalibrationPlot(name="calibration_plot"),
],
model_names=[
"candidate",
"baseline"
]),
validation=True)
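# --- Hedged usage sketch (not part of the original benchmark) ---
# The timed loop above drives a Beam CombineFn by hand:
# create_accumulator -> add_input per element -> merge_accumulators ->
# extract_output. The toy mean combiner below is hypothetical and only
# illustrates that lifecycle, standing in for _ComputationsCombineFn.
def _manual_combine_demo(values, batch_size=3):
  """Average `values` via the same manual CombineFn actuation pattern."""
  import apache_beam as beam
  class _MeanCombineFn(beam.CombineFn):
    def create_accumulator(self):
      return (0.0, 0)  # (running sum, element count)
    def add_input(self, accumulator, element):
      total, count = accumulator
      return (total + element, count + 1)
    def merge_accumulators(self, accumulators):
      totals, counts = zip(*accumulators)
      return (sum(totals), sum(counts))
    def extract_output(self, accumulator):
      total, count = accumulator
      return total / count if count else float("nan")
  combiner = _MeanCombineFn()
  combiner.setup()
  accumulators = []
  for start in range(0, len(values), batch_size):
    acc = combiner.create_accumulator()
    for elem in values[start:start + batch_size]:
      acc = combiner.add_input(acc, elem)
    accumulators.append(acc)
  return combiner.extract_output(combiner.merge_accumulators(accumulators))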
|
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Libs
from salt.states import win_dism as dism
# Import Salt Testing Libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch
)
ensure_in_syspath('../../')
dism.__salt__ = {}
dism.__opts__ = {}
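# __salt__ and __opts__ are normally injected by the Salt loader at runtime;
# they are stubbed out here so each test can patch.dict in the mocks it needs.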
class WinDismTestCase(TestCase):
    '''
    Test cases for salt.states.win_dism
    '''
def test_capability_installed(self):
'''
Test capability installed state
'''
expected = {
'comment': "Installed Capa2",
'changes': {'capability': {'new': 'Capa2'},
'retcode': 0},
'name': 'Capa2',
'result': True}
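        # The first installed_capabilities() call models the pre-install
        # state; the second models the post-install verification.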
mock_installed = MagicMock(
side_effect=[['Capa1'], ['Capa1', 'Capa2']])
mock_add = MagicMock(
return_value={'retcode': 0})
with patch.dict(
dism.__salt__, {'dism.installed_capabilities': mock_installed,
'dism.add_capability': mock_add}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.capability_installed('Capa2', 'somewhere', True)
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Capa2', 'somewhere', True, None, False)
self.assertEqual(out, expected)
def test_capability_installed_failure(self):
'''
Test installing a capability which fails with DISM
'''
expected = {
'comment': "Failed to install Capa2: Failed",
'changes': {},
'name': 'Capa2',
'result': False}
mock_installed = MagicMock(
side_effect=[['Capa1'], ['Capa1']])
mock_add = MagicMock(
return_value={'retcode': 67, 'stdout': 'Failed'})
with patch.dict(
dism.__salt__, {'dism.installed_capabilities': mock_installed,
'dism.add_capability': mock_add}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.capability_installed('Capa2', 'somewhere', True)
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Capa2', 'somewhere', True, None, False)
self.assertEqual(out, expected)
def test_capability_installed_installed(self):
'''
Test installing a capability already installed
'''
expected = {
'comment': "The capability Capa2 is already installed",
'changes': {},
'name': 'Capa2',
'result': True}
mock_installed = MagicMock(
return_value=["Capa1", "Capa2"])
mock_add = MagicMock()
with patch.dict(
dism.__salt__, {'dism.installed_capabilities': mock_installed,
'dism.add_capability': mock_add}):
out = dism.capability_installed('Capa2', 'somewhere', True)
mock_installed.assert_called_once_with()
assert not mock_add.called
self.assertEqual(out, expected)
def test_capability_removed(self):
'''
Test capability removed state
'''
expected = {
'comment': "Removed Capa2",
'changes': {'capability': {'old': 'Capa2'},
'retcode': 0},
'name': 'Capa2',
'result': True}
mock_removed = MagicMock(
side_effect=[['Capa1', 'Capa2'], ['Capa1']])
mock_remove = MagicMock(
return_value={'retcode': 0})
with patch.dict(
dism.__salt__, {'dism.installed_capabilities': mock_removed,
'dism.remove_capability': mock_remove}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.capability_removed('Capa2')
mock_removed.assert_called_with()
mock_remove.assert_called_once_with('Capa2', None, False)
self.assertEqual(out, expected)
def test_capability_removed_failure(self):
'''
Test removing a capability which fails with DISM
'''
expected = {
'comment': "Failed to remove Capa2: Failed",
'changes': {},
'name': 'Capa2',
'result': False}
mock_removed = MagicMock(
side_effect=[['Capa1', 'Capa2'], ['Capa1', 'Capa2']])
mock_remove = MagicMock(
return_value={'retcode': 67, 'stdout': 'Failed'})
with patch.dict(
dism.__salt__, {'dism.installed_capabilities': mock_removed,
'dism.remove_capability': mock_remove}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.capability_removed('Capa2')
mock_removed.assert_called_with()
mock_remove.assert_called_once_with(
'Capa2', None, False)
self.assertEqual(out, expected)
def test_capability_removed_removed(self):
'''
Test removing a capability already removed
'''
expected = {
'comment': "The capability Capa2 is already removed",
'changes': {},
'name': 'Capa2',
'result': True}
mock_removed = MagicMock(
return_value=["Capa1"])
mock_remove = MagicMock()
with patch.dict(
dism.__salt__, {'dism.installed_capabilities': mock_removed,
                                'dism.remove_capability': mock_remove}):
out = dism.capability_removed('Capa2', 'somewhere', True)
mock_removed.assert_called_once_with()
assert not mock_remove.called
self.assertEqual(out, expected)
def test_feature_installed(self):
'''
Test installing a feature with DISM
'''
expected = {
'comment': "Installed Feat2",
'changes': {'feature': {'new': 'Feat2'},
'retcode': 0},
'name': 'Feat2',
'result': True}
mock_installed = MagicMock(
side_effect=[['Feat1'], ['Feat1', 'Feat2']])
mock_add = MagicMock(
return_value={'retcode': 0})
with patch.dict(
dism.__salt__, {'dism.installed_features': mock_installed,
'dism.add_feature': mock_add}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.feature_installed('Feat2')
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Feat2', None, None, False, False, None, False)
self.assertEqual(out, expected)
def test_feature_installed_failure(self):
'''
Test installing a feature which fails with DISM
'''
expected = {
'comment': "Failed to install Feat2: Failed",
'changes': {},
'name': 'Feat2',
'result': False}
mock_installed = MagicMock(
side_effect=[['Feat1'], ['Feat1']])
mock_add = MagicMock(
return_value={'retcode': 67, 'stdout': 'Failed'})
with patch.dict(
dism.__salt__, {'dism.installed_features': mock_installed,
'dism.add_feature': mock_add}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.feature_installed('Feat2')
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Feat2', None, None, False, False, None, False)
self.assertEqual(out, expected)
def test_feature_installed_installed(self):
'''
Test installing a feature already installed
'''
expected = {
'comment': "The feature Feat1 is already installed",
'changes': {},
'name': 'Feat1',
'result': True}
mock_installed = MagicMock(
side_effect=[['Feat1', 'Feat2'], ['Feat1', 'Feat2']])
mock_add = MagicMock()
with patch.dict(
dism.__salt__, {'dism.installed_features': mock_installed,
'dism.add_feature': mock_add}):
out = dism.feature_installed('Feat1')
mock_installed.assert_called_once_with()
assert not mock_add.called
self.assertEqual(out, expected)
def test_feature_removed(self):
'''
Test removing a feature with DISM
'''
expected = {
'comment': "Removed Feat2",
'changes': {'feature': {'old': 'Feat2'},
'retcode': 0},
'name': 'Feat2',
'result': True}
mock_removed = MagicMock(
side_effect=[['Feat1', 'Feat2'], ['Feat1']])
mock_remove = MagicMock(
return_value={'retcode': 0})
with patch.dict(
dism.__salt__, {'dism.installed_features': mock_removed,
'dism.remove_feature': mock_remove}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.feature_removed('Feat2')
mock_removed.assert_called_with()
mock_remove.assert_called_once_with(
'Feat2', False, None, False)
self.assertEqual(out, expected)
def test_feature_removed_failure(self):
'''
Test removing a feature which fails with DISM
'''
expected = {
'comment': "Failed to remove Feat2: Failed",
'changes': {},
'name': 'Feat2',
'result': False}
mock_removed = MagicMock(
side_effect=[['Feat1', 'Feat2'], ['Feat1', 'Feat2']])
mock_remove = MagicMock(
return_value={'retcode': 67, 'stdout': 'Failed'})
with patch.dict(
dism.__salt__, {'dism.installed_features': mock_removed,
'dism.remove_feature': mock_remove}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.feature_removed('Feat2')
mock_removed.assert_called_with()
mock_remove.assert_called_once_with(
'Feat2', False, None, False)
self.assertEqual(out, expected)
def test_feature_removed_removed(self):
'''
Test removing a feature already removed
'''
expected = {
'comment': "The feature Feat2 is already removed",
'changes': {},
'name': 'Feat2',
'result': True}
mock_removed = MagicMock(
side_effect=[['Feat1'], ['Feat1']])
mock_remove = MagicMock()
with patch.dict(
dism.__salt__, {'dism.installed_features': mock_removed,
'dism.remove_feature': mock_remove}):
out = dism.feature_removed('Feat2')
mock_removed.assert_called_once_with()
assert not mock_remove.called
self.assertEqual(out, expected)
def test_package_installed(self):
'''
Test installing a package with DISM
'''
expected = {
'comment': "Installed Pack2",
'changes': {'package': {'new': 'Pack2'},
'retcode': 0},
'name': 'Pack2',
'result': True}
mock_installed = MagicMock(
side_effect=[['Pack1'], ['Pack1', 'Pack2']])
mock_add = MagicMock(
return_value={'retcode': 0})
mock_info = MagicMock(
return_value={'Package Identity': 'Pack2'})
with patch.dict(
dism.__salt__, {'dism.installed_packages': mock_installed,
'dism.add_package': mock_add,
'dism.package_info': mock_info}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.package_installed('Pack2')
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Pack2', False, False, None, False)
self.assertEqual(out, expected)
def test_package_installed_failure(self):
'''
Test installing a package which fails with DISM
'''
expected = {
'comment': "Failed to install Pack2: Failed",
'changes': {},
'name': 'Pack2',
'result': False}
mock_installed = MagicMock(
side_effect=[['Pack1'], ['Pack1']])
mock_add = MagicMock(
return_value={'retcode': 67, 'stdout': 'Failed'})
mock_info = MagicMock(
return_value={'Package Identity': 'Pack2'})
with patch.dict(
dism.__salt__, {'dism.installed_packages': mock_installed,
'dism.add_package': mock_add,
'dism.package_info': mock_info}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.package_installed('Pack2')
mock_installed.assert_called_with()
mock_add.assert_called_once_with(
'Pack2', False, False, None, False)
self.assertEqual(out, expected)
def test_package_installed_installed(self):
'''
Test installing a package already installed
'''
expected = {
'comment': "The package Pack2 is already installed: Pack2",
'changes': {},
'name': 'Pack2',
'result': True}
mock_installed = MagicMock(
side_effect=[['Pack1', 'Pack2'], ['Pack1', 'Pack2']])
mock_add = MagicMock()
mock_info = MagicMock(
return_value={'Package Identity': 'Pack2'})
with patch.dict(
dism.__salt__, {'dism.installed_packages': mock_installed,
'dism.add_package': mock_add,
'dism.package_info': mock_info}):
out = dism.package_installed('Pack2')
mock_installed.assert_called_once_with()
assert not mock_add.called
self.assertEqual(out, expected)
def test_package_removed(self):
'''
Test removing a package with DISM
'''
expected = {
'comment': "Removed Pack2",
'changes': {'package': {'old': 'Pack2'},
'retcode': 0},
'name': 'Pack2',
'result': True}
mock_removed = MagicMock(
side_effect=[['Pack1', 'Pack2'], ['Pack1']])
mock_remove = MagicMock(
return_value={'retcode': 0})
mock_info = MagicMock(
return_value={'Package Identity': 'Pack2'})
with patch.dict(
dism.__salt__, {'dism.installed_packages': mock_removed,
'dism.remove_package': mock_remove,
'dism.package_info': mock_info}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.package_removed('Pack2')
mock_removed.assert_called_with()
mock_remove.assert_called_once_with(
'Pack2', None, False)
self.assertEqual(out, expected)
def test_package_removed_failure(self):
'''
Test removing a package which fails with DISM
'''
expected = {
'comment': "Failed to remove Pack2: Failed",
'changes': {},
'name': 'Pack2',
'result': False}
mock_removed = MagicMock(
side_effect=[['Pack1', 'Pack2'], ['Pack1', 'Pack2']])
mock_remove = MagicMock(
return_value={'retcode': 67, 'stdout': 'Failed'})
mock_info = MagicMock(
return_value={'Package Identity': 'Pack2'})
with patch.dict(
dism.__salt__, {'dism.installed_packages': mock_removed,
'dism.remove_package': mock_remove,
'dism.package_info': mock_info}):
with patch.dict(dism.__opts__, {'test': False}):
out = dism.package_removed('Pack2')
mock_removed.assert_called_with()
mock_remove.assert_called_once_with(
'Pack2', None, False)
self.assertEqual(out, expected)
def test_package_removed_removed(self):
'''
Test removing a package already removed
'''
expected = {
'comment': "The package Pack2 is already removed",
'changes': {},
'name': 'Pack2',
'result': True}
mock_removed = MagicMock(
side_effect=[['Pack1'], ['Pack1']])
mock_remove = MagicMock()
mock_info = MagicMock(
return_value={'Package Identity': 'Pack2'})
with patch.dict(
dism.__salt__, {'dism.installed_packages': mock_removed,
'dism.remove_package': mock_remove,
'dism.package_info': mock_info}):
out = dism.package_removed('Pack2')
mock_removed.assert_called_once_with()
assert not mock_remove.called
self.assertEqual(out, expected)
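# --- Hedged sketch (not part of the original suite) ---
# Every success/failure test above follows the same shape: a "list" mock
# whose two-element side_effect models the state before and after the
# change, and an "action" mock whose retcode decides success. A hypothetical
# helper capturing that pattern:
def _before_after_mock(before, after):
    '''
    Return a MagicMock yielding `before` on its first call and `after` on
    its second, e.g. _before_after_mock(['Feat1'], ['Feat1', 'Feat2']).
    '''
    return MagicMock(side_effect=[before, after])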
if __name__ == '__main__':
from integration import run_tests
run_tests(WinDismTestCase, needs_daemon=False)
|