input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
#!/usr/bin/env python
""" ngc - n-grams count
License: 3-clause BSD (see https://opensource.org/licenses/BSD-3-Clause)
Author: <NAME>
"""
import getopt
import logging
import os
import re
import string
import sys
import unicode2ascii
# Version string used by the what(1) and ident(1) commands:
ID = "@(#) $Id: ngc - n-grams count v1.0.2 (September 26, 2021) by <NAME> $"
# Default parameters. Can be superseded by command line options
parameters = {
    # -c|--convert flags: input transformations applied before counting
    "Convert": {
        "Unicode to ASCII": False,
        "Upper to lower case": False,
        "Lower to upper case": False,
        "Spaces to one space": False,
    },
    # -d|--discard flags: character classes removed from the input
    "Discard": {
        "Unicode characters": False,
        "Upper case letters": False,
        "Lower case letters": False,
        "Connection symbols": False,  # ' -
        "Digits": False,
        "Punctuation": False,  # . , ; : ! ?
        "Other printable symbols": False,
        "Spaces": False,  # space tab return formfeed vtab
        "Control characters": False,
    },
    # -l|--length: number of characters per n-gram
    "Length": 1,
    "Fixed block": False,  # Sliding-window mode by default; -b|--block flips this
    "Word boundary": False,  # -w|--word: respect space-delimited word boundaries
    # -p|--partial: policy for blocks shorter than Length (exactly one applies)
    "Partial": {
        "Discard": False,
        "Keep": True,  # default policy
        "Justify": False,
    },
    # Output selection: -t|--text, -q|--quiet (clears N-grams), -s|--summary
    "Show": {
        "Text": False,
        "N-grams": True,
        "Summary": False,
    },
}
# n-gram -> occurrence count, filled in by frequency_analysis()
occurrences = {}
# Per-character-class counters reported when -s|--summary is active
summary = {
    "Upper case letters": 0,
    "Lower case letters": 0,
    "Connection symbols": 0,
    "Digits": 0,
    "Punctuation": 0,
    "Other printable symbols": 0,
    "Spaces": 0,
    "Other spaces": 0,
    "Control characters": 0,
    "Unicode letters": 0,
    "Unicode marks": 0,
    "Unicode numbers": 0,
    "Unicode punctuations": 0,
    "Unicode symbols": 0,
    "Unicode separators": 0,
    "Unicode others": 0,
    "All unicode characters": 0,
    "All characters": 0,
    "All n-grams": 0
}
################################################################################
def initialize_debugging(program_name):
    """Configure console logging for the whole program.

    Messages are prefixed with *program_name* and the level name.  The
    root logger is opened at DEBUG, then everything up to INFO is muted;
    --debug / NGC_DEBUG later lift the mute with logging.disable(NOTSET).
    """
    logging.basicConfig(
        format=program_name + ": %(levelname)s: %(message)s",
        level=logging.DEBUG,
    )
    logging.disable(logging.INFO)
################################################################################
def display_help():
    """Displays usage and help.

    Everything is written to stderr so that stdout stays reserved for
    the n-gram results themselves.
    """
    print("usage: ngc [-b|--block] [-c|--convert ARGS] [--debug]", file=sys.stderr)
    print(" [-d|--discard ARGS] [--help|-?] [-l|--length ARG]", file=sys.stderr)
    print(" [-p|--partial ARG] [-q|--quiet] [-s|--summary] [-t|--text]", file=sys.stderr)
    print(" [--version] [-w|--word] [--] [filename ...]", file=sys.stderr)
    print(" ----------------- ----------------------------------------------------",
        file=sys.stderr
    )
    print(" -b|--block Use fixed- instead of sliding-windows blocks", file=sys.stderr)
    print(" -c|--convert ARGS Convert text input. A combination of:", file=sys.stderr)
    print(" ARG = a - Unicode characters to ASCII (remove accents)", file=sys.stderr)
    print(" ARG = l - Upper case letters to lower", file=sys.stderr)
    print(" ARG = u - Lower case letters to upper", file=sys.stderr)
    print(" ARG = s - Spaces-like characters to 1 space", file=sys.stderr)
    print(" ARGS l and u can't be used at the same time", file=sys.stderr)
    print(" -d|--discard ARGS Discard characters. A combination of:", file=sys.stderr)
    print(" ARG = U - Unicode characters", file=sys.stderr)
    print(" ARG = u - Upper case letters", file=sys.stderr)
    print(" ARG = l - Lower case letters", file=sys.stderr)
    print(" ARG = L - All letters", file=sys.stderr)
    print(" ARG = c - Connection symbols ('-)", file=sys.stderr)
    print(" ARG = d - Digits", file=sys.stderr)
    print(" ARG = p - Punctuation (.,;:!?)", file=sys.stderr)
    print(" ARG = o - Other printable symbols", file=sys.stderr)
    print(" ARG = s - Spaces (space, tab, return, formfeed, vtab)", file=sys.stderr)
    print(" ARG = n - Non printable Control characters", file=sys.stderr)
    print(" -l|--length ARG Length of the n-gram. Defaults to 1", file=sys.stderr)
    print(" -p|--partial ARG What to do with partial blocks? One among:", file=sys.stderr)
    print(" ARG = d - Discard", file=sys.stderr)
    print(" ARG = k - Keep as-is", file=sys.stderr)
    print(" ARG = j - Keep but right-justify with spaces", file=sys.stderr)
    print(" -q|--quiet Don't show occurrences and frequency by n-gram", file=sys.stderr)
    print(" -s|--summary Show a summary of what was processed", file=sys.stderr)
    print(" -t|--text Show modified text input", file=sys.stderr)
    print(" -w|--word Respect Word boundaries (delimited by spaces)", file=sys.stderr)
    print(" --debug Enable debug mode", file=sys.stderr)
    print(" --help|-? Print usage and this help message and exit", file=sys.stderr)
    print(" --version Print version and exit", file=sys.stderr)
    print(" -- Options processing terminator", file=sys.stderr)
    print(file=sys.stderr)
################################################################################
def process_environment_variables():
    """Process environment variables.

    If NGC_DEBUG is set (to any value), re-enable the log levels that
    initialize_debugging() muted.
    """
    # Membership tests work directly on os.environ; .keys() was redundant.
    if "NGC_DEBUG" in os.environ:
        logging.disable(logging.NOTSET)
################################################################################
def process_command_line():
    """Process command line options.

    Updates the module-level ``parameters`` dictionary in place and
    returns the remaining (file name) arguments.  Exits with status 1 on
    invalid options, with status 0 after --help or --version.
    """
    # pylint: disable=C0103
    global parameters
    # pylint: enable=C0103
    # option letters followed by : expect an argument
    # same for option strings followed by =
    character_options = "bc:d:l:p:qstw?"
    string_options = [
        "block",
        "convert=",
        "debug",
        "discard=",
        "help",
        "length=",
        "partial=",
        "quiet",
        "summary",
        "text",
        "version",
        "word",
    ]
    try:
        options, remaining_arguments = getopt.getopt(
            sys.argv[1:], character_options, string_options
        )
    except getopt.GetoptError as error:
        logging.critical(error)
        display_help()
        sys.exit(1)
    for option, argument in options:
        if option in ("-b", "--block"):
            parameters["Fixed block"] = True
        elif option in ("-c", "--convert"):
            # l and u are mutually exclusive; everything else combines freely
            if 'l' in argument and 'u' in argument:
                logging.critical("-c|--convert parameter can't contain [lu] at the same time")
                sys.exit(1)
            if 'a' in argument:
                parameters["Convert"]["Unicode to ASCII"] = True
            if 'l' in argument:
                parameters["Convert"]["Upper to lower case"] = True
            if 'u' in argument:
                parameters["Convert"]["Lower to upper case"] = True
            if 's' in argument:
                parameters["Convert"]["Spaces to one space"] = True
        elif option in ("-d", "--discard"):
            if 'U' in argument:
                parameters["Discard"]["Unicode characters"] = True
            if 'u' in argument:
                parameters["Discard"]["Upper case letters"] = True
            if 'l' in argument:
                parameters["Discard"]["Lower case letters"] = True
            if 'L' in argument:
                # Shorthand for both letter classes
                parameters["Discard"]["Upper case letters"] = True
                parameters["Discard"]["Lower case letters"] = True
            if 'c' in argument:
                parameters["Discard"]["Connection symbols"] = True
            if 'd' in argument:
                parameters["Discard"]["Digits"] = True
            if 'p' in argument:
                parameters["Discard"]["Punctuation"] = True
            if 'o' in argument:
                parameters["Discard"]["Other printable symbols"] = True
            if 's' in argument:
                parameters["Discard"]["Spaces"] = True
            if 'n' in argument:
                parameters["Discard"]["Control characters"] = True
        elif option in ("-l", "--length"):
            # BUG FIX: the previous check used ">= 0", accepting a length
            # of 0 despite the "strictly positive" error message.  A zero
            # length would make frequency_analysis() count empty n-grams
            # and loop forever in fixed-block mode (the window would never
            # advance).
            if argument.isdigit() and int(argument) > 0:
                parameters["Length"] = int(argument)
            else:
                logging.critical("-l|--length parameter must be a strictly positive integer")
                sys.exit(1)
        elif option in ("-p", "--partial"):
            # Membership in the tuple of 1-character strings already
            # rejects multi-character arguments.
            if argument not in ('d', 'k', 'j'):
                logging.critical("-p|--partial parameter must be a single character among [dkj]")
                sys.exit(1)
            # "Keep" is the default; d and j override it
            if argument == 'd':
                parameters["Partial"]["Discard"] = True
                parameters["Partial"]["Keep"] = False
            elif argument == 'j':
                parameters["Partial"]["Justify"] = True
                parameters["Partial"]["Keep"] = False
        elif option in ("-q", "--quiet"):
            parameters["Show"]["N-grams"] = False
        elif option in ("-s", "--summary"):
            parameters["Show"]["Summary"] = True
        elif option in ("-t", "--text"):
            parameters["Show"]["Text"] = True
        elif option in ("-w", "--word"):
            parameters["Word boundary"] = True
        elif option == "--debug":
            logging.disable(logging.NOTSET)
        elif option in ("--help", "-?"):
            display_help()
            sys.exit(0)
        elif option == "--version":
            # The split-up literal keeps ident(1) from matching this line
            print(ID.replace("@(" + "#)" + " $" + "Id" + ": ", "").replace(" $", ""))
            sys.exit(0)
    logging.debug("process_command_line(): parameters:")
    logging.debug(parameters)
    logging.debug("process_command_line(): remaining_arguments:")
    logging.debug(remaining_arguments)
    return remaining_arguments
################################################################################
def handle_partial_n_gram(text):
    """Count a partial n-gram (shorter than parameters["Length"]).

    Applies the -p|--partial policy: discard it, keep it as-is, or pad
    it with spaces to the configured length before counting it in the
    module-level ``occurrences`` and ``summary`` dictionaries.
    """
    # pylint: disable=C0103
    global occurrences, summary
    # pylint: enable=C0103
    if parameters["Partial"]["Discard"]:
        return
    if parameters["Partial"]["Justify"]:
        # str.ljust replaces the former one-space-at-a-time append loop.
        # NOTE(review): the help text says "right-justify", but padding is
        # appended (text ends up left-justified in the field) — behavior
        # preserved as-is; confirm intent.
        text = text.ljust(parameters["Length"])
    occurrences[text] = occurrences.get(text, 0) + 1
    summary["All n-grams"] += 1
################################################################################
def frequency_analysis(text):
    """Analyze n-grams frequency in a string.

    Updates the module-level ``occurrences`` and ``summary`` counters.
    When -s|--summary is active every character is first classified
    (explicit ASCII classes, then Unicode categories via unicode2ascii).
    The string is then cut into windows of parameters["Length"]
    characters, advancing by one character (sliding mode, default) or by
    a whole window (-b|--block fixed mode); any leftover tail shorter
    than Length goes through handle_partial_n_gram().
    """
    # pylint: disable=C0103
    global occurrences, summary
    # pylint: enable=C0103
    if parameters["Show"]["Summary"]:
        for character in text:
            if ord(character) < 128:
                # ASCII character: bucket into explicit classes
                if character in string.ascii_uppercase:
                    summary["Upper case letters"] += 1
                elif character in string.ascii_lowercase:
                    summary["Lower case letters"] += 1
                elif character in ("'", "-"):
                    summary["Connection symbols"] += 1
                elif character in string.digits:
                    summary["Digits"] += 1
                elif character in (".", ",", ";", ":", "!", "?"):
                    summary["Punctuation"] += 1
                elif character == " ":
                    summary["Spaces"] += 1
                elif character in string.whitespace:
                    # tab, newline, return, formfeed, vtab (plain space
                    # was caught by the previous branch)
                    summary["Other spaces"] += 1
                elif (ord(character) < 32 and ord(character) not in (9, 11, 12, 13)) \
                        or ord(character) == 127:
                    # Control codes other than the whitespace ones above
                    summary["Control characters"] += 1
                else:
                    summary["Other printable symbols"] += 1
            else:
                # Non-ASCII: classify with the unicode2ascii helpers
                summary["All unicode characters"] += 1
                if unicode2ascii.is_unicode_letter(character):
                    summary["Unicode letters"] += 1
                elif unicode2ascii.is_unicode_mark(character):
                    summary["Unicode marks"] += 1
                elif unicode2ascii.is_unicode_number(character):
                    summary["Unicode numbers"] += 1
                elif unicode2ascii.is_unicode_punctuation(character):
                    summary["Unicode punctuations"] += 1
                elif unicode2ascii.is_unicode_symbol(character):
                    summary["Unicode symbols"] += 1
                elif unicode2ascii.is_unicode_separator(character):
                    summary["Unicode separators"] += 1
                else:
                    summary["Unicode others"] += 1
    if len(text) <= parameters["Length"]:
        # The whole string is at most one (possibly partial) n-gram
        if text:
            handle_partial_n_gram(text)
    else:
        i = 0
        # The last full window starts at len(text) - Length
        while i < len(text) + 1 - parameters["Length"]:
            sequence = text[i:i + parameters["Length"]]
            if sequence in occurrences:
                occurrences[sequence] += 1
            else:
                occurrences[sequence] = 1
            summary["All n-grams"] += 1
            if parameters["Fixed block"]:
                i += parameters["Length"]  # jump a whole block
            else:
                i += 1  # slide one character
        if i < len(text):
            # Leftover tail shorter than Length: the trailing remainder in
            # fixed-block mode, or the final Length-1 characters in
            # sliding mode.  Counted per the -p|--partial policy.
            handle_partial_n_gram(text[i:])
################################################################################
def process_line(line):
"""Process a text line"""
# pylint: disable=C0103
global summary
# pylint: enable=C0103
line = line.rstrip(os.linesep)
# Conversions:
if parameters["Convert"]["Unicode to ASCII"]:
line = unicode2ascii.unicode_to_ascii_string(line)
if parameters["Convert"]["Upper to lower case"]:
line = line.lower()
if parameters["Convert"]["Lower to upper case"]:
line = line.upper()
# Discards:
if parameters["Discard"]["Unicode characters"]:
line = "".join([c for c in line if ord(c) < 128])
if parameters["Discard"]["Upper case letters"]:
line = re.sub(r"[A-Z]+", "", line)
if parameters["Discard"]["Lower case letters"]:
line = re.sub(r"[a-z]+", "", line)
if parameters["Discard"]["Connection symbols"]:
| |
<reponame>vovanz/invoke
import json
import os
import sys
from invoke.util import six
from mock import patch, Mock, ANY
import pytest
from pytest import skip
from pytest_relaxed import trap
from invoke import (
Program, Collection, Task, FilesystemLoader, Executor, Config,
UnexpectedExit, Result,
)
from invoke import main
from invoke.util import cd
from _util import (
load, expect, skip_if_windows, run, support_file, support_path,
)
ROOT = os.path.abspath(os.path.sep)
pytestmark = pytest.mark.usefixtures("integration")
class Program_:
    class init:
        "__init__"
        # Each constructor keyword argument should land on the attribute
        # of the same name, with a documented default when omitted.
        def may_specify_version(self):
            assert Program(version='1.2.3').version == '1.2.3'
        def default_version_is_unknown(self):
            assert Program().version == 'unknown'
        def may_specify_namespace(self):
            # Identity (is), not equality: the exact collection object is kept
            foo = load('foo')
            assert Program(namespace=foo).namespace is foo
        def may_specify_name(self):
            assert Program(name='Myapp').name == 'Myapp'
        def may_specify_binary(self):
            assert Program(binary='myapp').binary == 'myapp'
        def loader_class_defaults_to_FilesystemLoader(self):
            assert Program().loader_class is FilesystemLoader
        def may_specify_loader_class(self):
            klass = object()
            assert Program(loader_class=klass).loader_class == klass
        def executor_class_defaults_to_Executor(self):
            assert Program().executor_class is Executor
        def may_specify_executor_class(self):
            klass = object()
            assert Program(executor_class=klass).executor_class == klass
        def config_class_defaults_to_Config(self):
            assert Program().config_class is Config
        def may_specify_config_class(self):
            klass = object()
            assert Program(config_class=klass).config_class == klass
    class miscellaneous:
        "miscellaneous behaviors"
        def debug_flag_activates_logging(self):
            # Have to patch our logger to get in before logcapture kicks in.
            with patch('invoke.util.debug') as debug:
                Program().run("invoke -d -c debugging foo")
            debug.assert_called_with('my-sentinel')
        def bytecode_skipped_by_default(self):
            expect('-c foo mytask')
            assert sys.dont_write_bytecode
        def write_pyc_explicitly_enables_bytecode_writing(self):
            expect('--write-pyc -c foo mytask')
            assert not sys.dont_write_bytecode
    class normalize_argv:
        # run() should accept argv as None (use sys.argv), list, or string.
        @patch('invoke.program.sys')
        def defaults_to_sys_argv(self, mock_sys):
            argv = ['inv', '--version']
            mock_sys.argv = argv
            p = Program()
            p.print_version = Mock()
            p.run(exit=False)
            p.print_version.assert_called()
        def uses_a_list_unaltered(self):
            p = Program()
            p.print_version = Mock()
            p.run(['inv', '--version'], exit=False)
            p.print_version.assert_called()
        def splits_a_string(self):
            p = Program()
            p.print_version = Mock()
            p.run("inv --version", exit=False)
            p.print_version.assert_called()
    class name:
        def defaults_to_capitalized_binary_when_None(self):
            # No explicit name: derived from the binary ("myapp" -> "Myapp")
            expect("myapp --version", out="Myapp unknown\n", invoke=False)
        def benefits_from_binary_absolute_behavior(self):
            "benefits from binary()'s absolute path behavior"
            expect("/usr/local/bin/myapp --version", out="Myapp unknown\n",
                invoke=False)
        def uses_overridden_value_when_given(self):
            p = Program(name='NotInvoke')
            expect("--version", out="NotInvoke unknown\n", program=p)
    class binary:
        def defaults_to_argv_when_None(self):
            stdout, _ = run("myapp --help", invoke=False)
            assert "myapp [--core-opts]" in stdout
        def uses_overridden_value_when_given(self):
            stdout, _ = run(
                "myapp --help", invoke=False, program=Program(binary='nope'),
            )
            assert "nope [--core-opts]" in stdout
        @trap
        def use_binary_basename_when_invoked_absolutely(self):
            # Usage text should show only the basename, not the full path
            Program().run("/usr/local/bin/myapp --help", exit=False)
            stdout = sys.stdout.getvalue()
            assert "myapp [--core-opts]" in stdout
            assert "/usr/local/bin" not in stdout
    class print_version:
        def displays_name_and_version(self):
            expect(
                "--version",
                program=Program(name="MyProgram", version='0.1.0'),
                out="MyProgram 0.1.0\n"
            )
    class initial_context:
        def contains_truly_core_arguments_regardless_of_namespace_value(self):
            # Spot check. See integration-style --help tests for full argument
            # checkup.
            for program in (Program(), Program(namespace=Collection())):
                for arg in ('--complete', '--debug', '--warn-only', '--list'):
                    stdout, _ = run("--help", program=program)
                    assert arg in stdout
        def null_namespace_triggers_task_related_args(self):
            # No bundled namespace -> task CLI flags must appear in --help
            program = Program(namespace=None)
            for arg in program.task_args():
                stdout, _ = run("--help", program=program)
                assert arg.name in stdout
        def non_null_namespace_does_not_trigger_task_related_args(self):
            # Bundled namespace -> task-loading flags are hidden
            for arg in Program().task_args():
                program = Program(namespace=Collection(mytask=Task(Mock())))
                stdout, _ = run("--help", program=program)
                assert arg.name not in stdout
    class load_collection:
        def complains_when_default_collection_not_found(self):
            # NOTE: assumes system under test has no tasks.py in root. Meh.
            with cd(ROOT):
                expect("-l", err="Can't find any collection named 'tasks'!\n")
        def complains_when_explicit_collection_not_found(self):
            expect(
                "-c huhwhat -l",
                err="Can't find any collection named 'huhwhat'!\n",
            )
        @trap
        def uses_loader_class_given(self):
            klass = Mock(side_effect=FilesystemLoader)
            Program(loader_class=klass).run("myapp --help foo", exit=False)
            klass.assert_called_with(start=ANY, config=ANY)
    class execute:
        def uses_executor_class_given(self):
            klass = Mock()
            Program(executor_class=klass).run("myapp foo", exit=False)
            klass.assert_called_with(ANY, ANY, ANY)
            klass.return_value.execute.assert_called_with(ANY)
        def executor_is_given_access_to_core_args_and_remainder(self):
            klass = Mock()
            cmd = "myapp -e foo -- myremainder"
            Program(executor_class=klass).run(cmd, exit=False)
            # Third positional arg to the executor is the core parse result
            core = klass.call_args[0][2]
            assert core[0].args['echo'].value
            assert core.remainder == "myremainder"
    class core_args:
        def returns_core_args_list(self):
            # Mostly so we encode explicity doc'd public API member in tests.
            # Spot checks good enough, --help tests include the full deal.
            core_args = Program().core_args()
            core_arg_names = [x.names[0] for x in core_args]
            for name in ('complete', 'help', 'pty', 'version'):
                assert name in core_arg_names
            # Also make sure it's a list for easier tweaking/appending
            assert isinstance(core_args, list)
    class run:
        # NOTE: some of these are integration-style tests, but they are still
        # fast tests (so not needing to go into the integration suite) and
        # touch on transformations to the command line that occur above, or
        # around, the actual parser classes/methods (thus not being suitable
        # for the parser's own unit tests).
        def seeks_and_loads_tasks_module_by_default(self):
            expect('foo', out="Hm\n")
        def does_not_seek_tasks_module_if_namespace_was_given(self):
            # With a bundled (empty) namespace, 'foo' is unknown, not loaded
            expect(
                'foo',
                err="No idea what 'foo' is!\n",
                program=Program(namespace=Collection('blank'))
            )
        def explicit_namespace_works_correctly(self):
            # Regression-ish test re #288
            ns = Collection.from_module(load('integration'))
            expect(
                'print-foo',
                out='foo\n',
                program=Program(namespace=ns),
            )
        def allows_explicit_task_module_specification(self):
            expect("-c integration print-foo", out="foo\n")
        def handles_task_arguments(self):
            expect("-c integration print-name --name inigo", out="inigo\n")
        def can_change_collection_search_root(self):
            # Both short and long spellings of the flag
            for flag in ('-r', '--search-root'):
                expect(
                    "{} branch/ alt-root".format(flag),
                    out="Down with the alt-root!\n",
                )
        def can_change_collection_search_root_with_explicit_module_name(self):
            for flag in ('-r', '--search-root'):
                expect(
                    "{} branch/ -c explicit lyrics".format(flag),
                    out="Don't swear!\n",
                )
        @trap
        @patch('invoke.program.sys.exit')
        def ParseErrors_display_message_and_exit_1(self, mock_exit):
            p = Program()
            # Run with a definitely-parser-angering incorrect input; the fact
            # that this line doesn't raise an exception and thus fail the
            # test, is what we're testing...
            nah = 'nopenotvalidsorry'
            p.run("myapp {}".format(nah))
            # Expect that we did print the core body of the ParseError (e.g.
            # "no idea what foo is!") and exit 1. (Intent is to display that
            # info w/o a full traceback, basically.)
            stderr = sys.stderr.getvalue()
            assert stderr == "No idea what '{}' is!\n".format(nah)
            mock_exit.assert_called_with(1)
        @trap
        @patch('invoke.program.sys.exit')
        def UnexpectedExit_exits_with_code_when_no_hiding(self, mock_exit):
            p = Program()
            oops = UnexpectedExit(Result(
                command='meh',
                exited=17,
                hide=tuple(),
            ))
            p.execute = Mock(side_effect=oops)
            p.run("myapp foo")
            # Expect NO repr printed, because stdout/err were not hidden, so we
            # don't want to add extra annoying verbosity - we want to be more
            # Make-like here.
            assert sys.stderr.getvalue() == ""
            # But we still exit with expected code (vs e.g. 1 or 0)
            mock_exit.assert_called_with(17)
        @trap
        @patch('invoke.program.sys.exit')
        def shows_UnexpectedExit_str_when_streams_hidden(self, mock_exit):
            p = Program()
            oops = UnexpectedExit(Result(
                command='meh',
                exited=54,
                stdout='things!',
                stderr='ohnoz!',
                encoding='utf-8',
                hide=('stdout', 'stderr'),
            ))
            p.execute = Mock(side_effect=oops)
            p.run("myapp foo")
            # Expect repr() of exception prints to stderr
            # NOTE: this partially duplicates a test in runners.py; whatever.
            stderr = sys.stderr.getvalue()
            assert stderr == """Encountered a bad command exit code!
Command: 'meh'
Exit code: 54
Stdout:
things!
Stderr:
ohnoz!
"""
            # And exit with expected code (vs e.g. 1 or 0)
            mock_exit.assert_called_with(54)
        @trap
        @patch('invoke.program.sys.exit')
        def UnexpectedExit_str_encodes_stdout_and_err(self, mock_exit):
            p = Program()
            oops = UnexpectedExit(Result(
                command='meh',
                exited=54,
                stdout=u'this is not ascii: \u1234',
                stderr=u'this is also not ascii: \u4321',
                encoding='utf-8',
                hide=('stdout', 'stderr'),
            ))
            p.execute = Mock(side_effect=oops)
            p.run("myapp foo")
            # NOTE: using explicit binary ASCII here, & accessing raw
            # getvalue() of the faked sys.stderr (spec.trap auto-decodes it
            # normally) to have a not-quite-tautological test. otherwise we'd
            # just be comparing unicode to unicode. shrug?
            expected = b"""Encountered a bad command exit code!
Command: 'meh'
Exit code: 54
Stdout:
this is not ascii: \xe1\x88\xb4
Stderr:
this is also not ascii: \xe4\x8c\xa1
"""
            got = six.BytesIO.getvalue(sys.stderr)
            assert got == expected
        def should_show_core_usage_on_core_parse_failures(self):
            skip()
        def should_show_context_usage_on_context_parse_failures(self):
            skip()
        @trap
        @patch('invoke.program.sys.exit')
        def turns_KeyboardInterrupt_into_exit_code_1(self, mock_exit):
            # Ctrl-C maps to the conventional exit status 1
            p = Program()
            p.execute = Mock(side_effect=KeyboardInterrupt)
            p.run("myapp -c foo mytask")
            mock_exit.assert_called_with(1)
class help_:
"--help"
class core:
def empty_invocation_with_no_default_task_prints_help(self):
stdout, _ = run("-c foo")
assert "Core options:" in stdout
# TODO: On Windows, we don't get a pty, so we don't get a
# guaranteed terminal size of 80x24. Skip for now, but maybe
# a suitable fix would be to just strip all whitespace from the
# returned and expected values before testing. Then terminal
# size is ignored.
@skip_if_windows
def core_help_option_prints_core_help(self):
# TODO: change dynamically based on parser contents?
# e.g. no core args == no [--core-opts],
# no tasks == no task stuff?
# NOTE: test will trigger default pty size of 80x24, so the
# below string is formatted appropriately.
# TODO: add more unit-y tests for specific behaviors:
# * fill terminal w/ columns + spacing
# * line-wrap help text in its own column
expected = """
Usage: inv[oke] [--core-opts] task1 [--task1-opts] ... taskN [--taskN-opts]
Core options:
--complete Print tab-completion candidates for given
parse remainder.
--hide=STRING Set default value of run()'s 'hide' kwarg.
--no-dedupe Disable task deduplication.
--prompt-for-sudo-password Prompt user at start of session for the
sudo.password config value.
--write-pyc Enable creation of .pyc files.
-c STRING, --collection=STRING Specify collection name to load.
-d, --debug Enable debug output.
-D INT, --list-depth=INT When listing tasks, only show the first INT
levels.
-e, --echo Echo executed commands before running.
-f STRING, --config=STRING Runtime configuration file to use.
-F STRING, --list-format=STRING Change the display format used when listing
tasks. Should be one of: flat (default),
nested, json.
-h [STRING], --help[=STRING] Show core or per-task help and exit.
-l [STRING], --list[=STRING] List available tasks, optionally limited to
a namespace.
-p, --pty Use a pty when | |
<reponame>ProfFan/maestral
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
This module provides user configuration file management and is mostly copied from the
config module of the Spyder IDE.
"""
import ast
import os
import os.path as osp
import re
import shutil
import time
import configparser as cp
from threading import RLock
import logging
from typing import Optional, List, Tuple, Dict, Union, Any
logger = logging.getLogger(__name__)
DefaultsType = List[Tuple[str, Dict[str, Any]]]
InputDefaultsType = Union[DefaultsType, Dict[str, Any], None]
# =============================================================================
# Auxiliary classes
# =============================================================================
class NoDefault:
    """Sentinel type: marks a config option as having no default value."""
    pass
# =============================================================================
# Defaults class
# =============================================================================
class DefaultsConfig(cp.ConfigParser):
    """
    Class used to save defaults to a file and as base class for UserConfig.

    The target path is split into folder (``_path``), base name (``_name``)
    and extension (``_suffix``); the actual ini file lives at
    ``get_config_fpath()``.
    """

    # Serializes concurrent save() calls across all instances
    _lock = RLock()

    def __init__(self, path: str) -> None:
        """Initialize parser (interpolation disabled so raw '%' survives)."""
        super(DefaultsConfig, self).__init__(interpolation=None)
        dirname, basename = osp.split(path)
        filename, ext = osp.splitext(basename)
        self._path = dirname
        self._name = filename
        self._suffix = ext
        # BUG FIX: this used to create osp.dirname(self._path) — the *parent*
        # of the target folder — leaving self._path itself absent until the
        # first save().  Create the folder the config file is written into,
        # race-free via exist_ok (empty _path means the cwd; nothing to do).
        if self._path:
            os.makedirs(self._path, exist_ok=True)

    def _set(self, section: str, option: str, value: Any) -> None:
        """Private set method: creates the section and stringifies values."""
        if not self.has_section(section):
            self.add_section(section)
        if not isinstance(value, str):
            # repr() round-trips with the ast.literal_eval-style readers
            value = repr(value)
        super(DefaultsConfig, self).set(section, option, value)

    def save(self) -> None:
        """Save config into the associated file."""
        fpath = self.get_config_fpath()
        # See spyder-ide/spyder#1086 and spyder-ide/spyder#1242 for background
        # on why this method contains all the exception handling.
        with self._lock:
            try:
                # The "easy" way
                self.__write_file(fpath)
            except EnvironmentError:
                try:
                    # The "delete and sleep" way
                    if osp.isfile(fpath):
                        os.remove(fpath)
                    time.sleep(0.05)
                    self.__write_file(fpath)
                except Exception:
                    # Deliberately best-effort: a failed save is logged,
                    # never raised to the caller.
                    logger.warning(
                        "Failed to write user configuration to disk", exc_info=True
                    )

    def __write_file(self, fpath: str) -> None:
        """Write the current parser state to *fpath*, creating the folder."""
        os.makedirs(self._path, exist_ok=True)
        with open(fpath, "w", encoding="utf-8") as configfile:
            self.write(configfile)

    def get_config_fpath(self) -> str:
        """Return the ini file where this configuration is stored."""
        return osp.join(self._path, self._name + self._suffix)

    def set_defaults(self, defaults: "DefaultsType") -> None:
        """Set default values and save to defaults folder location."""
        for section, options in defaults:
            for option in options:
                new_value = options[option]
                self._set(section, option, new_value)
# =============================================================================
# User config class
# =============================================================================
class UserConfig(DefaultsConfig):
"""
UserConfig class, based on ConfigParser.
Parameters
----------
path: str
Configuration file will be saved to this path.
defaults: {} or [(str, {}),]
Dictionary containing options *or* list of tuples (sec_name, options)
load: bool
If a previous configuration file is found, load will take the values
from this existing file, instead of using default values.
version: str
version of the configuration file in 'major.minor.micro' format.
backup: bool
A backup will be created on version changes and on initial setup.
remove_obsolete: bool
If `True`, values that were removed from the configuration on version
change, are removed from the saved configuration file.
Notes
-----
The 'get' and 'set' arguments number and type differ from the reimplemented
methods. 'defaults' is an attribute and not a method.
"""
DEFAULT_SECTION_NAME = "main"
    def __init__(
        self,
        path: str,
        defaults: InputDefaultsType = None,
        load: bool = True,
        version: str = "0.0.0",
        backup: bool = False,
        remove_obsolete: bool = False,
    ) -> None:
        """UserConfig class, based on ConfigParser.

        Validates *version*, normalizes *defaults*, optionally backs up
        and loads an existing file, then migrates it when the
        major.minor version changed.
        """
        super(UserConfig, self).__init__(path=path)
        self._load = load
        self._version = self._check_version(version)  # raises ValueError if malformed
        self._backup = backup
        self._remove_obsolete = remove_obsolete
        # Fixed folder / file-name conventions for defaults and backups
        self._defaults_folder = "defaults"
        self._backup_folder = "backups"
        self._backup_suffix = ".bak"
        self._defaults_name_prefix = "defaults"
        self.default_config = self._check_defaults(defaults)
        if backup:
            self._make_backup()
        if load:
            # If config file already exists, it overrides Default options
            previous_fpath = self.get_previous_config_fpath()
            self._load_from_ini(previous_fpath)
            old_version = self.get_version(version)
            self._old_version = old_version
            # Save new defaults
            self._save_new_defaults(self.default_config)
            # Updating defaults only if major/minor version is different
            major_ver = self._get_major_version(version)
            major_old_ver = self._get_major_version(self._old_version)
            minor_ver = self._get_minor_version(version)
            minor_old_ver = self._get_minor_version(self._old_version)
            if major_ver != major_old_ver or minor_ver != minor_old_ver:
                if backup:
                    # Second backup, tagged with the version being migrated from
                    self._make_backup(version=old_version)
                self.apply_configuration_patches(old_version=old_version)
                # Remove deprecated options if major version has changed
                if remove_obsolete and major_ver != major_old_ver:
                    self._remove_deprecated_options(old_version)
                # Set new version number
                self.set_version(version, save=False)
        if defaults is None:
            # If no defaults are defined set file settings as default
            self.set_as_defaults()
# --- Helpers and checkers ---------------------------------------------------------
@staticmethod
def _get_minor_version(version: str) -> str:
"""Return the 'major.minor' components of the version."""
return version[: version.rfind(".")]
@staticmethod
def _get_major_version(version: str) -> str:
"""Return the 'major' component of the version."""
return version[: version.find(".")]
@staticmethod
def _check_version(version: str) -> str:
"""Check version is compliant with format."""
regex_check = re.match(r"^(\d+).(\d+).(\d+)$", version)
if regex_check is None:
raise ValueError(
"Version number {} is incorrect - must be in "
"major.minor.micro format".format(version)
)
return version
    def _check_defaults(self, defaults: InputDefaultsType) -> DefaultsType:
        """Check if defaults are valid and update defaults values.

        Normalizes *defaults* (None or a plain dict) into the canonical
        list-of-(section, options) form, resets the parser to those
        values, and stamps the version into the main section.
        """
        if defaults is None:
            defaults = [(self.DEFAULT_SECTION_NAME, {})]
        elif isinstance(defaults, dict):
            # A bare dict belongs to the default ("main") section
            defaults = [(self.DEFAULT_SECTION_NAME, defaults)]
        elif isinstance(defaults, list):
            # Check is a list of tuples with strings and dictionaries
            for sec, options in defaults:
                assert isinstance(sec, str)
                assert isinstance(options, dict)
                for opt, _ in options.items():
                    assert isinstance(opt, str)
        else:
            raise ValueError("`defaults` must be a dict or a list of tuples!")
        # This attribute is overriding a method from cp.ConfigParser
        self.default_config = defaults
        # NOTE(review): defaults can no longer be None at this point (it was
        # normalized above), so this guard always holds; kept as-is.
        if defaults is not None:
            self.reset_to_defaults(save=False)
        for sec, options in defaults:
            if sec == self.DEFAULT_SECTION_NAME:
                # Record the config version alongside the main options
                options["version"] = self._version
        return defaults
    @classmethod
    def _check_section_option(cls, section: str, option: str) -> str:
        """Check section and option types.

        A None section falls back to the default ("main") section; any
        other non-string section or option raises RuntimeError.  Returns
        the (possibly substituted) section name.
        """
        if section is None:
            section = cls.DEFAULT_SECTION_NAME
        elif not isinstance(section, str):
            raise RuntimeError("Argument 'section' must be a string")
        if not isinstance(option, str):
            raise RuntimeError("Argument 'option' must be a string")
        return section
def _make_backup(
self, version: Optional[str] = None, old_version: Optional[str] = None
) -> None:
"""
Make a backup of the configuration file.
If `old_version` is `None` a normal backup is made. If `old_version`
is provided, then the backup was requested for minor version changes
and appends the version number to the backup file.
"""
fpath = self.get_config_fpath()
fpath_backup = self.get_backup_fpath_from_version(
version=version, old_version=old_version
)
path = os.path.dirname(fpath_backup)
if not osp.isdir(path):
os.makedirs(path)
try:
shutil.copyfile(fpath, fpath_backup)
except IOError:
pass
    def _load_from_ini(self, fpath: str) -> None:
        """Load config from the associated file found at `fpath`.

        A file without section headers is logged and ignored; a missing
        file is silently skipped by ConfigParser.read().
        """
        with self._lock:
            try:
                self.read(fpath, encoding="utf-8")
            except cp.MissingSectionHeaderError:
                logger.error("File contains no section headers.")
def _load_old_defaults(self, old_version: str) -> cp.ConfigParser:
"""Read old defaults."""
old_defaults = cp.ConfigParser()
fpath = self.get_defaults_fpath_from_version(old_version)
old_defaults.read(fpath)
return old_defaults
def _save_new_defaults(self, defaults: DefaultsType) -> None:
    """Persist `defaults` for the current version, never overwriting."""
    defaults_config = DefaultsConfig(
        path=self.get_defaults_fpath_from_version()
    )
    # Write only the first time; an existing defaults file is kept as-is.
    if osp.isfile(defaults_config.get_config_fpath()):
        return
    defaults_config.set_defaults(defaults)
    defaults_config.save()
def _remove_deprecated_options(self, old_version: str) -> None:
    """
    Remove options which are present in the file but not in defaults.

    An option whose default lookup yields the `NoDefault` sentinel is
    considered deprecated and deleted; a section left empty afterwards is
    deleted as well.
    """
    # NOTE(review): `old_version` is unused here — presumably kept for
    # signature symmetry with the other migration helpers; confirm.
    for section in self.sections():
        # `items()` returns a fresh list, so removing options while
        # looping over it is safe.
        for option, _ in self.items(section, raw=True):
            if self.get_default(section, option) is NoDefault:
                try:
                    self.remove_option(section, option)
                    # Drop the section once its last option is gone.
                    if len(self.items(section, raw=True)) == 0:
                        self.remove_section(section)
                except cp.NoSectionError:
                    self.remove_section(section)
# --- Compatibility API ------------------------------------------------------------
def get_previous_config_fpath(self) -> str:
    """
    Return the last configuration file used if found.

    This base implementation simply points at the current config file.
    """
    previous_fpath = self.get_config_fpath()
    return previous_fpath
def get_config_fpath_from_version(self, version: Optional[str] = None) -> str:
    """
    Return the configuration path for given version.

    If no version is provided, it returns the current file path.
    """
    # Version-agnostic in this base class: every version maps onto the
    # same configuration file.
    return self.get_config_fpath()
def get_backup_fpath_from_version(
    self, version: Optional[str] = None, old_version: Optional[str] = None
) -> str:
    """
    Get backup location based on version.

    `old_version` can be used for checking compatibility whereas `version`
    relates to adding the version to the file name.
    To be reimplemented if versions changed backup location.
    """
    fpath = self.get_config_fpath()
    backup_dir = osp.join(osp.dirname(fpath), self._backup_folder)
    base = osp.join(backup_dir, osp.basename(fpath))
    # A versioned backup embeds the version between name and suffix.
    if version is None:
        return f"{base}{self._backup_suffix}"
    return f"{base}-{version}{self._backup_suffix}"
def get_defaults_path_name_from_version(
    self, old_version: Optional[str] = None
) -> Tuple[str, str]:
    """
    Get defaults location based on version.

    To be reimplemented if versions changed defaults location.
    """
    version = old_version or self._version
    defaults_path = osp.join(
        osp.dirname(self.get_config_fpath()), self._defaults_folder
    )
    # Build "<prefix>-<name>[-<version>]" from its parts.
    name_parts = [self._defaults_name_prefix, self._name]
    if version is not None:
        name_parts.append(version)
    name = "-".join(name_parts)
    if not osp.isdir(defaults_path):
        os.makedirs(defaults_path)
    return defaults_path, name
def get_defaults_fpath_from_version(
    self, old_version: Optional[str] = None
) -> str:
    """
    Get defaults location based on version.

    To be reimplemented if versions changed defaults location.
    """
    # Annotation fixed: `old_version` accepts None, so it must be
    # `Optional[str]` (PEP 484 disallows implicit Optional), matching the
    # sibling `get_defaults_path_name_from_version`.
    defaults_path, name = self.get_defaults_path_name_from_version(old_version)
    return osp.join(defaults_path, name + self._suffix)
def apply_configuration_patches(self, old_version: Optional[str] = None) -> None:
    """
    Apply any patch to configuration values on version changes.

    To be reimplemented if patches to configuration values are needed.
    """
    # Annotation fixed: `old_version` accepts None, so it must be
    # `Optional[str]` (PEP 484 disallows implicit Optional).
    pass
# --- Public API -------------------------------------------------------------------
def get_version(self, version: str = "0.0.0") -> str:
    """Return configuration (not application!) version."""
    # `version` acts as the fallback when the option is absent.
    stored_version = self.get(self.DEFAULT_SECTION_NAME, "version", version)
    return stored_version
def set_version(self, version: str = "0.0.0", save: bool = True) -> | |
State',
semantic_manager=semantic_manager)
"""
A Relationship Identifying The Type Of State Of A Device
"""
Has_Typical_Consumption: RelationField = RelationField(
name='Has_Typical_Consumption',
rule='only (Energy or Power)',
semantic_manager=semantic_manager)
"""
A Relationship Identifying The Typical (Energy Or Power) Consumption Of A
Device
"""
Is_Used_For: RelationField = RelationField(
name='Is_Used_For',
rule='only Commodity',
semantic_manager=semantic_manager)
"""
A Relationship Specifying The Purpose For Which A Device Is Used For (E.G.,
Controlling A Commodity)
"""
Makes_Measurement: RelationField = RelationField(
name='Makes_Measurement',
rule='only Measurement',
semantic_manager=semantic_manager)
"""
A Relation Between A Device And The Measurements It Makes. Such Measurement
Will Link Together The Value Of The Measurement, Its Unit Of Measure And The
Property To Which It Relates.
"""
Measures_Property: RelationField = RelationField(
name='Measures_Property',
rule='some Energy, only Property',
semantic_manager=semantic_manager)
"""
A Relationship Specifying The Property That Can Be Measured By A Certain
Device
"""
Offers: RelationField = RelationField(
name='Offers',
rule='only Service',
inverse_of=['Is_Offered_By'],
semantic_manager=semantic_manager)
"""
A Relationship Between A Device And A Service
"""
class Metering_Function(Function):
    """
    A Function That Allows To Get Data From A Meter, Such As Current Meter
    Reading Or Instantaneous Demand

    Source:
    https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # Rules and identifiers are only wired up on first construction;
        # an already-initialised instance (one carrying an 'id') must not
        # have its field state reset.
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.Has_Command._rules = [('only', [[Get_Current_Meter_Value_Command], [Get_Meter_Data_Command], [Get_Meter_History_Command]]), ('min|1', [[Command]])]
            self.Has_Meter_Reading_Type._rules = [('only', [[Commodity], [Property]])]
            self.Has_Meter_Reading._rules = [('only', [[Measurement]])]

            self.Has_Command._instance_identifier = self.get_identifier()
            self.Has_Meter_Reading_Type._instance_identifier = self.get_identifier()
            self.Has_Meter_Reading._instance_identifier = self.get_identifier()

    # Relation fields
    Has_Command: RelationField = RelationField(
        name='Has_Command',
        # FIX: the generated rule text had unbalanced parentheses
        # ("only (A or B) or C), ..."); rewritten to match the parsed
        # `_rules` form above: only-one-of the three command classes.
        rule='only (Get_Current_Meter_Value_Command or Get_Meter_Data_Command or Get_Meter_History_Command), min 1 Command',
        inverse_of=['Is_Command_Of'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between An Entity (Such As A Function) And A Command
    """

    Has_Meter_Reading_Type: RelationField = RelationField(
        name='Has_Meter_Reading_Type',
        rule='only (Commodity or Property)',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Reading Type Of A Measurement (E.G., Water,
    Gas, Pressure , Energy , Power, Etc.)
    """

    Has_Meter_Reading: RelationField = RelationField(
        name='Has_Meter_Reading',
        rule='only Measurement',
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Metering Function And The Measurement Of The
    Reading
    """
class Micro_Renewable(Function_Related):
    """
    A Device That Generates Renewable Energy From Natural Resources Such As The
    Sun, Wind And Water

    Source:
    https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # Rules and identifiers are only wired up on first construction;
        # an already-initialised instance (one carrying an 'id') must not
        # have its field state reset.
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.Has_Description._rules = [('max|1', [['string']])]
            self.Has_Manufacturer._rules = [('max|1', [['string']])]
            self.Has_Model._rules = [('max|1', [['string']])]
            self.Accomplishes._rules = [('value', [[Energyefficiency]]), ('min|1', [[Task]])]
            self.Consists_Of._rules = [('only', [[Device]])]
            self.Controls_Property._rules = [('only', [[Property]])]
            self.Has_Function._rules = [('min|1', [[Function]])]
            self.Has_Profile._rules = [('only', [[Profile]])]
            self.Has_State._rules = [('only', [[State]])]
            self.Has_Typical_Consumption._rules = [('only', [[Energy], [Power]])]
            self.Is_Used_For._rules = [('only', [[Commodity]])]
            self.Makes_Measurement._rules = [('only', [[Measurement]])]
            self.Measures_Property._rules = [('only', [[Property]])]
            self.Offers._rules = [('only', [[Service]])]

            self.Accomplishes._instance_identifier = self.get_identifier()
            self.Consists_Of._instance_identifier = self.get_identifier()
            self.Controls_Property._instance_identifier = self.get_identifier()
            self.Has_Function._instance_identifier = self.get_identifier()
            self.Has_Profile._instance_identifier = self.get_identifier()
            self.Has_State._instance_identifier = self.get_identifier()
            self.Has_Typical_Consumption._instance_identifier = self.get_identifier()
            self.Is_Used_For._instance_identifier = self.get_identifier()
            self.Makes_Measurement._instance_identifier = self.get_identifier()
            self.Measures_Property._instance_identifier = self.get_identifier()
            self.Offers._instance_identifier = self.get_identifier()
            self.Has_Description._instance_identifier = self.get_identifier()
            self.Has_Manufacturer._instance_identifier = self.get_identifier()
            self.Has_Model._instance_identifier = self.get_identifier()

            # Ontology axiom ('value Energyefficiency'): every
            # Micro_Renewable accomplishes the Energyefficiency task.
            self.Accomplishes.add(Energyefficiency())

    # Data fields
    Has_Description: DataField = DataField(
        name='Has_Description',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Providing A Description Of An Entity (E.G., Device)
    """

    Has_Manufacturer: DataField = DataField(
        name='Has_Manufacturer',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Manufacturer Of An Entity (E.G., Device)
    """

    Has_Model: DataField = DataField(
        name='Has_Model',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Model Of An Entity (E.G., Device)
    """

    # Relation fields
    Accomplishes: RelationField = RelationField(
        name='Accomplishes',
        rule='value Energyefficiency, min 1 Task',
        inverse_of=['Is_Accomplished_By'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Certain Entity (E.G., A Device) And The Task It
    Accomplishes
    """

    Consists_Of: RelationField = RelationField(
        name='Consists_Of',
        rule='only Device',
        semantic_manager=semantic_manager)
    """
    A Relationship Indicating A Composite Entity That Consists Of Other Entities
    (E.G., A Temperature/Humidity Sensor That Consists Of A Temperature Sensor
    And A Humidity Sensor)
    """

    Controls_Property: RelationField = RelationField(
        name='Controls_Property',
        rule='only Property',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Property That Can Be Controlled By A Certain
    Device
    """

    Has_Function: RelationField = RelationField(
        name='Has_Function',
        rule='min 1 Function',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Type Of Function Of A Device
    """

    Has_Profile: RelationField = RelationField(
        name='Has_Profile',
        rule='only Profile',
        semantic_manager=semantic_manager)
    """
    A Relationship Associating A Profile To A Certain Entity (E.G., A Device)
    """

    Has_State: RelationField = RelationField(
        name='Has_State',
        rule='only State',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Type Of State Of A Device
    """

    Has_Typical_Consumption: RelationField = RelationField(
        name='Has_Typical_Consumption',
        rule='only (Energy or Power)',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Typical (Energy Or Power) Consumption Of A
    Device
    """

    Is_Used_For: RelationField = RelationField(
        name='Is_Used_For',
        rule='only Commodity',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Purpose For Which A Device Is Used For (E.G.,
    Controlling A Commodity)
    """

    Makes_Measurement: RelationField = RelationField(
        name='Makes_Measurement',
        rule='only Measurement',
        semantic_manager=semantic_manager)
    """
    A Relation Between A Device And The Measurements It Makes. Such Measurement
    Will Link Together The Value Of The Measurement, Its Unit Of Measure And The
    Property To Which It Relates.
    """

    Measures_Property: RelationField = RelationField(
        name='Measures_Property',
        rule='only Property',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Property That Can Be Measured By A Certain
    Device
    """

    Offers: RelationField = RelationField(
        name='Offers',
        rule='only Service',
        inverse_of=['Is_Offered_By'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Device And A Service
    """
class Multimedia(Function_Related):
    """
    A Device Designed To Display, Store, Record Or Play Multimedia Content Such
    As Audio, Images, Animation, Video

    Source:
    https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # Rules and identifiers are only wired up on first construction;
        # an already-initialised instance (one carrying an 'id') must not
        # have its field state reset.
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.Has_Description._rules = [('max|1', [['string']])]
            self.Has_Manufacturer._rules = [('max|1', [['string']])]
            self.Has_Model._rules = [('max|1', [['string']])]
            self.Accomplishes._rules = [('value', [[Entertainment]]), ('min|1', [[Task]])]
            self.Consists_Of._rules = [('only', [[Device]])]
            self.Controls_Property._rules = [('only', [[Property]])]
            self.Has_Function._rules = [('min|1', [[Function]])]
            self.Has_Profile._rules = [('only', [[Profile]])]
            self.Has_State._rules = [('only', [[State]])]
            self.Has_Typical_Consumption._rules = [('only', [[Energy], [Power]])]
            self.Is_Used_For._rules = [('only', [[Commodity]])]
            self.Makes_Measurement._rules = [('only', [[Measurement]])]
            self.Measures_Property._rules = [('only', [[Property]])]
            self.Offers._rules = [('only', [[Service]])]

            self.Accomplishes._instance_identifier = self.get_identifier()
            self.Consists_Of._instance_identifier = self.get_identifier()
            self.Controls_Property._instance_identifier = self.get_identifier()
            self.Has_Function._instance_identifier = self.get_identifier()
            self.Has_Profile._instance_identifier = self.get_identifier()
            self.Has_State._instance_identifier = self.get_identifier()
            self.Has_Typical_Consumption._instance_identifier = self.get_identifier()
            self.Is_Used_For._instance_identifier = self.get_identifier()
            self.Makes_Measurement._instance_identifier = self.get_identifier()
            self.Measures_Property._instance_identifier = self.get_identifier()
            self.Offers._instance_identifier = self.get_identifier()
            self.Has_Description._instance_identifier = self.get_identifier()
            self.Has_Manufacturer._instance_identifier = self.get_identifier()
            self.Has_Model._instance_identifier = self.get_identifier()

            # Ontology axiom ('value Entertainment'): every Multimedia
            # device accomplishes the Entertainment task.
            self.Accomplishes.add(Entertainment())

    # Data fields
    Has_Description: DataField = DataField(
        name='Has_Description',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Providing A Description Of An Entity (E.G., Device)
    """

    Has_Manufacturer: DataField = DataField(
        name='Has_Manufacturer',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Manufacturer Of An Entity (E.G., Device)
    """

    Has_Model: DataField = DataField(
        name='Has_Model',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Model Of An Entity (E.G., Device)
    """

    # Relation fields
    Accomplishes: RelationField = RelationField(
        name='Accomplishes',
        rule='value Entertainment, min 1 Task',
        inverse_of=['Is_Accomplished_By'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Certain Entity (E.G., A Device) And The Task It
    Accomplishes
    """

    Consists_Of: RelationField = RelationField(
        name='Consists_Of',
        rule='only Device',
        semantic_manager=semantic_manager)
    """
    A Relationship Indicating A Composite Entity That Consists Of Other Entities
    (E.G., A Temperature/Humidity Sensor That Consists Of A Temperature Sensor
    And A Humidity Sensor)
    """

    Controls_Property: RelationField = RelationField(
        name='Controls_Property',
        rule='only Property',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Property That Can Be Controlled By A Certain
    Device
    """

    Has_Function: RelationField = RelationField(
        name='Has_Function',
        rule='min 1 Function',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Type Of Function Of A Device
    """

    Has_Profile: RelationField = RelationField(
        name='Has_Profile',
        rule='only Profile',
        semantic_manager=semantic_manager)
    """
    A Relationship Associating A Profile To A Certain Entity (E.G., A Device)
    """

    Has_State: RelationField = RelationField(
        name='Has_State',
        rule='only State',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Type Of State Of A Device
    """

    Has_Typical_Consumption: RelationField = RelationField(
        name='Has_Typical_Consumption',
        rule='only (Energy or Power)',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Typical (Energy Or Power) Consumption Of A
    Device
    """

    Is_Used_For: RelationField = RelationField(
        name='Is_Used_For',
        rule='only Commodity',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Purpose For Which A Device Is Used For (E.G.,
    Controlling A Commodity)
    """

    Makes_Measurement: RelationField = RelationField(
        name='Makes_Measurement',
        rule='only Measurement',
        semantic_manager=semantic_manager)
    """
    A Relation Between A Device And The Measurements It Makes. Such Measurement
    Will Link Together The Value Of The Measurement, Its Unit Of Measure And The
    Property To Which It Relates.
    """

    Measures_Property: RelationField = RelationField(
        name='Measures_Property',
        rule='only Property',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Property That Can Be Measured By A Certain
    Device
    """

    Offers: RelationField = RelationField(
        name='Offers',
        rule='only Service',
        inverse_of=['Is_Offered_By'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Device And A Service
    """
class Network(Function_Related):
    """
    A Device Used To Connect Other Devices In A Network, Such As Hub, Switch Or
    Router In A Local Area Network (Lan).

    Source:
    https://w3id.org/saref (saref.ttl)
    """

    def __init__(self, *args, **kwargs):
        # Rules and identifiers are only wired up on first construction;
        # an already-initialised instance (one carrying an 'id') must not
        # have its field state reset. Unlike Multimedia/Micro_Renewable,
        # Network has no 'value' axiom, so nothing is added to
        # Accomplishes here.
        is_initialised = 'id' in self.__dict__
        super().__init__(*args, **kwargs)
        if not is_initialised:
            self.Has_Description._rules = [('max|1', [['string']])]
            self.Has_Manufacturer._rules = [('max|1', [['string']])]
            self.Has_Model._rules = [('max|1', [['string']])]
            self.Accomplishes._rules = [('min|1', [[Task]])]
            self.Consists_Of._rules = [('only', [[Device]])]
            self.Controls_Property._rules = [('only', [[Property]])]
            self.Has_Function._rules = [('min|1', [[Function]])]
            self.Has_Profile._rules = [('only', [[Profile]])]
            self.Has_State._rules = [('only', [[State]])]
            self.Has_Typical_Consumption._rules = [('only', [[Energy], [Power]])]
            self.Is_Used_For._rules = [('only', [[Commodity]])]
            self.Makes_Measurement._rules = [('only', [[Measurement]])]
            self.Measures_Property._rules = [('only', [[Property]])]
            self.Offers._rules = [('only', [[Service]])]

            self.Accomplishes._instance_identifier = self.get_identifier()
            self.Consists_Of._instance_identifier = self.get_identifier()
            self.Controls_Property._instance_identifier = self.get_identifier()
            self.Has_Function._instance_identifier = self.get_identifier()
            self.Has_Profile._instance_identifier = self.get_identifier()
            self.Has_State._instance_identifier = self.get_identifier()
            self.Has_Typical_Consumption._instance_identifier = self.get_identifier()
            self.Is_Used_For._instance_identifier = self.get_identifier()
            self.Makes_Measurement._instance_identifier = self.get_identifier()
            self.Measures_Property._instance_identifier = self.get_identifier()
            self.Offers._instance_identifier = self.get_identifier()
            self.Has_Description._instance_identifier = self.get_identifier()
            self.Has_Manufacturer._instance_identifier = self.get_identifier()
            self.Has_Model._instance_identifier = self.get_identifier()

    # Data fields
    Has_Description: DataField = DataField(
        name='Has_Description',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Providing A Description Of An Entity (E.G., Device)
    """

    Has_Manufacturer: DataField = DataField(
        name='Has_Manufacturer',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Manufacturer Of An Entity (E.G., Device)
    """

    Has_Model: DataField = DataField(
        name='Has_Model',
        rule='max 1 string',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Model Of An Entity (E.G., Device)
    """

    # Relation fields
    Accomplishes: RelationField = RelationField(
        name='Accomplishes',
        rule='min 1 Task',
        inverse_of=['Is_Accomplished_By'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Certain Entity (E.G., A Device) And The Task It
    Accomplishes
    """

    Consists_Of: RelationField = RelationField(
        name='Consists_Of',
        rule='only Device',
        semantic_manager=semantic_manager)
    """
    A Relationship Indicating A Composite Entity That Consists Of Other Entities
    (E.G., A Temperature/Humidity Sensor That Consists Of A Temperature Sensor
    And A Humidity Sensor)
    """

    Controls_Property: RelationField = RelationField(
        name='Controls_Property',
        rule='only Property',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Property That Can Be Controlled By A Certain
    Device
    """

    Has_Function: RelationField = RelationField(
        name='Has_Function',
        rule='min 1 Function',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Type Of Function Of A Device
    """

    Has_Profile: RelationField = RelationField(
        name='Has_Profile',
        rule='only Profile',
        semantic_manager=semantic_manager)
    """
    A Relationship Associating A Profile To A Certain Entity (E.G., A Device)
    """

    Has_State: RelationField = RelationField(
        name='Has_State',
        rule='only State',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Type Of State Of A Device
    """

    Has_Typical_Consumption: RelationField = RelationField(
        name='Has_Typical_Consumption',
        rule='only (Energy or Power)',
        semantic_manager=semantic_manager)
    """
    A Relationship Identifying The Typical (Energy Or Power) Consumption Of A
    Device
    """

    Is_Used_For: RelationField = RelationField(
        name='Is_Used_For',
        rule='only Commodity',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Purpose For Which A Device Is Used For (E.G.,
    Controlling A Commodity)
    """

    Makes_Measurement: RelationField = RelationField(
        name='Makes_Measurement',
        rule='only Measurement',
        semantic_manager=semantic_manager)
    """
    A Relation Between A Device And The Measurements It Makes. Such Measurement
    Will Link Together The Value Of The Measurement, Its Unit Of Measure And The
    Property To Which It Relates.
    """

    Measures_Property: RelationField = RelationField(
        name='Measures_Property',
        rule='only Property',
        semantic_manager=semantic_manager)
    """
    A Relationship Specifying The Property That Can Be Measured By A Certain
    Device
    """

    Offers: RelationField = RelationField(
        name='Offers',
        rule='only Service',
        inverse_of=['Is_Offered_By'],
        semantic_manager=semantic_manager)
    """
    A Relationship Between A Device And A Service
    """
class Notify_Command(Command):
"""
A Type Of Command
Source:
https://w3id.org/saref (saref.ttl)
"""
def __init__(self, *args, **kwargs):
is_initialised = 'id' in self.__dict__
super().__init__(*args, **kwargs)
if not is_initialised:
self.Has_Description._rules = [('max|1', [['string']])]
self.Acts_Upon._rules = [('only', [[State]])]
self.Is_Command_Of._rules = [('min|1', [[Function]])]
self.Acts_Upon._instance_identifier = self.get_identifier()
self.Is_Command_Of._instance_identifier = self.get_identifier()
self.Has_Description._instance_identifier = self.get_identifier()
# Data fields
Has_Description: DataField = DataField(
name='Has_Description',
rule='max 1 string',
semantic_manager=semantic_manager)
"""
A Relationship Providing A Description | |
import re
import warnings
import json
import os
import nltk
import itertools
import chardet
import pickle
from chardet.universaldetector import UniversalDetector
from nltk.stem.wordnet import WordNetLemmatizer
from coordinate_map import CoordinateMap
from nltk.tag.stanford import StanfordNERTagger
import subprocess
import numpy
import random
import string
class Philter:
"""
General text filtering class,
can filter using whitelists, blacklists, regex's and POS
"""
def __init__(self, config):
    """Build a Philter instance from a `config` dict.

    Recognized keys (all optional): verbose, run_eval, freq_table,
    initials, finpath/foutpath/anno_folder (must be existing paths),
    coords, eval_out, outformat (defaults to "asterisk"), ucsfformat,
    filters (JSON list of pattern configs, consumed by init_patterns),
    xml, stanford_ner_tagger (classifier/jar/download), and cachepos
    (directory for on-disk POS-tag caching).
    """
    if "verbose" in config:
        self.verbose = config["verbose"]
    if "run_eval" in config:
        self.run_eval = config["run_eval"]
    if "freq_table" in config:
        self.freq_table = config["freq_table"]
    if "initials" in config:
        self.initials = config["initials"]
    # NOTE(review): the attributes above are only set when their key is
    # present, so later reads may raise AttributeError for sparse
    # configs — confirm callers always pass complete configs.
    if "finpath" in config:
        if not os.path.exists(config["finpath"]):
            raise Exception("Filepath does not exist", config["finpath"])
        self.finpath = config["finpath"]
    if "foutpath" in config:
        if not os.path.exists(config["foutpath"]):
            raise Exception("Filepath does not exist", config["foutpath"])
        self.foutpath = config["foutpath"]
    if "anno_folder" in config:
        if not os.path.exists(config["anno_folder"]):
            raise Exception("Filepath does not exist", config["anno_folder"])
        self.anno_folder = config["anno_folder"]
    if "coords" in config:
        self.coords = config["coords"]
    if "eval_out" in config:
        self.eval_outpath = config["eval_out"]
    if "outformat" in config:
        self.outformat = config["outformat"]
    else:
        # Default transformation style for de-identified output.
        self.outformat = "asterisk"
    if "ucsfformat" in config:
        self.ucsf_format = config["ucsfformat"]
    if "filters" in config:
        if not os.path.exists(config["filters"]):
            raise Exception("Filepath does not exist", config["filters"])
        # NOTE(review): the file handle from open() is never closed here
        # (same below for "xml") — consider a `with` block.
        self.patterns = json.loads(open(config["filters"], "r").read())
    if "xml" in config:
        if not os.path.exists(config["xml"]):
            raise Exception("Filepath does not exist", config["xml"])
        self.xml = json.loads(open(config["xml"], "r", encoding='utf-8').read())
    if "stanford_ner_tagger" in config:
        if not os.path.exists(config["stanford_ner_tagger"]["classifier"]) and config["stanford_ner_tagger"]["download"] == False:
            raise Exception("Filepath does not exist", config["stanford_ner_tagger"]["classifier"])
        else:
            #download the ner data
            # NOTE(review): Popen receives a .split() argument list with
            # no shell, so "cd" and "&&" will not act as shell operators
            # here — this download step likely fails; confirm whether
            # shell=True (or cwd=) was intended.
            process = subprocess.Popen("cd generate_dataset && ./download_ner.sh".split(), stdout=subprocess.PIPE)
            output, error = process.communicate()
        self.stanford_ner_tagger_classifier = config["stanford_ner_tagger"]["classifier"]
        if not os.path.exists(config["stanford_ner_tagger"]["jar"]):
            raise Exception("Filepath does not exist", config["stanford_ner_tagger"]["jar"])
        self.stanford_ner_tagger_jar = config["stanford_ner_tagger"]["jar"]
        #we lazy load our tagger only if there's a corresponding pattern
        self.stanford_ner_tagger = None
    if "cachepos" in config and config["cachepos"]:
        self.cache_to_disk = True
        self.pos_path = config["cachepos"]
        if not os.path.isdir(self.pos_path):
            os.makedirs(self.pos_path)
    else:
        self.cache_to_disk = False
        self.pos_path = None
    #All coordinate maps stored here
    self.coordinate_maps = []
    #create a memory for pos tags
    self.pos_tags = {}
    #create a memory for tokenized text
    self.cleaned = {}
    #create a memory for include coordinate map
    self.include_map = CoordinateMap()
    #create a memory for exclude coordinate map
    self.exclude_map = CoordinateMap()
    #create a memory for FULL exclude coordinate map (including non-whitelisted words)
    self.full_exclude_map = {}
    #create a memory for the list of known PHI types
    self.phi_type_list = ['DATE','Patient_Social_Security_Number','Email','Provider_Address_or_Location','Age','Name','OTHER','ID','NAME','LOCATION','CONTACT','AGE']
    #create a memory for the coordinate maps of known PHI types
    self.phi_type_dict = {}
    for phi_type in self.phi_type_list:
        self.phi_type_dict[phi_type] = [CoordinateMap()]
    #create a memory for stored coordinate data
    self.data_all_files = {}
    #create a memory for pattern index, with titles
    self.pattern_indexes = {}
    #create a memory for clean words
    #self.clean_words = {}
    #create directory for pos data if it doesn't exist
    #pos_path = "./data/pos_data/"
    #self.pos_path = "./data/pos_data/" + self.random_string(10) + "/"
    #initialize our patterns
    self.init_patterns()
def get_pos(self, filename, cleaned):
    """Return POS tags for the tokens in `cleaned`, memoized per file.

    With `cache_to_disk` enabled, tags are pickled under
    `self.pos_path` using the file's basename and reloaded on later
    runs; otherwise they are held only in `self.pos_tags`.
    NOTE(review): the in-memory cache is reset whenever an unseen
    filename arrives, so at most one file's tags are kept at a time —
    presumably a deliberate memory cap; confirm.
    """
    if self.cache_to_disk:
        pos_path = self.pos_path
        # Basename serves as both cache key and on-disk file name.
        filename = filename.split("/")[-1]
        file_ = pos_path + filename
        if filename not in self.pos_tags:
            self.pos_tags = {}
            if not os.path.isfile(file_):
                # First sighting: tag now and persist for later runs.
                with open(file_, 'wb') as f:
                    tags = nltk.pos_tag(cleaned)
                    pickle.dump(tags, f)
                return tags
            else:
                with open(file_, 'rb') as f:
                    self.pos_tags[filename] = pickle.load(f)
    else:
        if filename not in self.pos_tags:
            self.pos_tags = {}
            self.pos_tags[filename] = nltk.pos_tag(cleaned)
        return self.pos_tags[filename]
    #self.pos_tags[filename] = nltk.pos_tag(cleaned)
    return self.pos_tags[filename]
#def get_pos_original(self, filename, cleaned):
# if filename not in self.pos_tags:
# self.pos_tags = {}
# self.pos_tags[filename] = nltk.pos_tag(cleaned)
# return self.pos_tags[filename]
def get_clean(self, filename, text, pre_process=r"[^a-zA-Z0-9]"):
    """Tokenize `text` into word and whitespace tokens, memoized per file.

    Whitespace runs are preserved as their own tokens. Every character
    matching `pre_process` inside a non-space chunk is rewritten to a
    space and the chunk re-split, so punctuation never survives into the
    token list. Only one file's tokens are cached at a time: the cache
    dict is reset whenever a new filename arrives.
    """
    if filename not in self.cleaned:
        # New file: drop any previously cached tokenization.
        self.cleaned = {}
        tokens = []
        for chunk in re.split(r"(\s+)", text):
            if not chunk:
                continue
            if chunk.isspace():
                tokens.append(chunk)
            else:
                # Replace punctuation with spaces, then split again so
                # words and the injected spaces become separate tokens.
                sanitized = re.sub(pre_process, " ", chunk)
                for piece in re.split(r"(\s+)", sanitized):
                    if piece:
                        tokens.append(piece)
        self.cleaned[filename] = tokens
    return self.cleaned[filename]
#def get_clean_word(self, filename, word):
# if filename not in self.cleaned:
# self.clean_words = {}
# self.clean_words[filename] = {}
# if word not in self.clean_words[filename]:
# self.clean_words[filename][word] = re.sub(r"[^a-zA-Z0-9]+", "", word.lower().strip())
# return self.clean_words[filename][word]
#def get_clean_word2(self, filename, word):
# return re.sub(r"[^a-zA-Z0-9]+", "", word.lower().strip())
# if word not in self.clean_words:
# self.clean_words[word] = re.sub(r"[^a-zA-Z0-9]+", "", word.lower().strip())
# return self.clean_words[word]
def init_patterns(self):
    """Validate `self.patterns` and pre-load their data.

    For each configured pattern: records its index by title, checks the
    type is known and any required file exists, rejects reserved keys,
    then loads "set" data and pre-compiles "regex"/"regex_context"
    files into the pattern entry's "data" slot.
    """
    known_pattern_types = set(["regex", "set", "regex_context","stanford_ner", "pos_matcher", "match_all"])
    require_files = set(["regex", "set"])
    # NOTE(review): `require_pos` is never used below — dead local? confirm.
    require_pos = set(["pos_matcher"])
    set_filetypes = set(["pkl", "json"])
    regex_filetypes = set(["txt"])
    # Keys this method writes itself must not collide with user config.
    reserved_list = set(["data", "coordinate_map"])
    #first check that data is formatted, can be loaded etc.
    for i,pattern in enumerate(self.patterns):
        self.pattern_indexes[pattern['title']] = i
        if pattern["type"] in require_files and not os.path.exists(pattern["filepath"]):
            raise Exception("Config filepath does not exist", pattern["filepath"])
        for k in reserved_list:
            if k in pattern:
                raise Exception("Error, Keyword is reserved", k, pattern)
        if pattern["type"] not in known_pattern_types:
            raise Exception("Pattern type is unknown", pattern["type"])
        # NOTE(review): "filteype" in the messages below is a typo for
        # "filetype"; left untouched here because it is runtime text.
        if pattern["type"] == "set":
            if pattern["filepath"].split(".")[-1] not in set_filetypes:
                raise Exception("Invalid filteype", pattern["filepath"], "must be of", set_filetypes)
            self.patterns[i]["data"] = self.init_set(pattern["filepath"])
        if pattern["type"] == "regex":
            if pattern["filepath"].split(".")[-1] not in regex_filetypes:
                raise Exception("Invalid filteype", pattern["filepath"], "must be of", regex_filetypes)
            self.patterns[i]["data"] = self.precompile(pattern["filepath"])
        elif pattern["type"] == "regex_context":
            if pattern["filepath"].split(".")[-1] not in regex_filetypes:
                raise Exception("Invalid filteype", pattern["filepath"], "must be of", regex_filetypes)
            self.patterns[i]["data"] = self.precompile(pattern["filepath"])
        #print(self.precompile(pattern["filepath"]))
def precompile(self, filepath):
    """Pre-compile the regex stored in `filepath` and return it.

    A FutureWarning raised during compilation (e.g. a nested-set
    pattern) is printed with the offending file name, and the regex is
    compiled anyway with the warning suppressed.
    """
    # Read via a context manager: the original leaked the file handle
    # from `open(filepath).read()`.
    with open(filepath, "r") as regex_file:
        regex = regex_file.read().strip()
    re_compiled = None
    #NOTE: this is not thread safe! but we want to print a more detailed warning message
    with warnings.catch_warnings():
        warnings.simplefilter(action="error", category=FutureWarning)  # in order to print a detailed message
        try:
            re_compiled = re.compile(regex)
        except FutureWarning as warn:
            print("FutureWarning: {0} in file ".format(warn) + filepath)
            warnings.simplefilter(action="ignore", category=FutureWarning)
            re_compiled = re.compile(regex)  # assign nevertheless
    return re_compiled
def init_set(self, filepath):
    """Load a word collection (dict or set shape) from `filepath`.

    Supports pickle (.pkl) and JSON (.json) files; any other extension
    raises. Returns the loaded mapping/set.
    """
    map_set = {}
    if filepath.endswith(".pkl"):
        try:
            with open(filepath, "rb") as pickle_file:
                map_set = pickle.load(pickle_file)
        except UnicodeDecodeError:
            # Legacy pickles written by Python 2 need latin1 decoding.
            with open(filepath, "rb") as pickle_file:
                map_set = pickle.load(pickle_file, encoding = 'latin1')
    elif filepath.endswith(".json"):
        # Use a context manager: the original leaked the handle from
        # `open(filepath).read()`.
        with open(filepath, "r") as json_file:
            map_set = json.loads(json_file.read())
    else:
        raise Exception("Invalid filteype",filepath)
    return map_set
    def map_coordinates(self, allowed_filetypes=set(["txt", "ano"])):
        """ Runs the set, or regex on the input data
            generating a coordinate map of hits given
            (this performs a dry run on the data and doesn't transform)

            Walks every file under self.finpath whose extension is in
            allowed_filetypes, applies every configured pattern to its text,
            and populates self.include_map, self.exclude_map,
            self.phi_type_dict and self.data_all_files.

            Returns:
                self.full_exclude_map: dict of filename -> complement of the
                include map for that file (coordinates slated for removal)
        """
        in_path = self.finpath
        if not os.path.exists(in_path):
            raise Exception("Filepath does not exist", in_path)
        #create coordinate maps for each pattern
        for i,pat in enumerate(self.patterns):
            self.patterns[i]["coordinate_map"] = CoordinateMap()
        for root, dirs, files in os.walk(in_path):
            for f in files:
                filename = os.path.join(root, f)
                # skip files whose extension is not in the allowed set
                if filename.split(".")[-1] not in allowed_filetypes:
                    if self.verbose:
                        print("Skipping: ", filename)
                    continue
                #self.patterns[i]["coordinate_map"].add_file(filename)
                # read with the detected encoding; surrogateescape preserves
                # undecodable bytes instead of raising
                encoding = self.detect_encoding(filename)
                if __debug__: print("reading text from " + filename)
                txt = open(filename,"r", encoding=encoding['encoding'], errors='surrogateescape').read()
                # Get full self.include/exclude map before transform
                self.data_all_files[filename] = {"text":txt, "phi":[],"non-phi":[]}
                #create an intersection map of all coordinates we'll be removing
                self.exclude_map.add_file(filename)
                #create an interestion map of all coordinates we'll be keeping
                self.include_map.add_file(filename)
                # add file to phi_type_dict
                for phi_type in self.phi_type_list:
                    self.phi_type_dict[phi_type][0].add_file(filename)
                # initialize phi type
                phi_type = "OTHER"
                #### Create inital self.exclude/include for file
                # dispatch each pattern to its matcher; each matcher fills the
                # pattern's coordinate_map, then the include/exclude maps are
                # updated from it
                for i,pat in enumerate(self.patterns):
                    if pat["type"] == "regex":
                        self.map_regex(filename=filename, text=txt, pattern_index=i)
                    elif pat["type"] == "set":
                        self.map_set(filename=filename, text=txt, pattern_index=i)
                    elif pat["type"] == "regex_context":
                        self.map_regex_context(filename=filename, text=txt, pattern_index=i)
                    elif pat["type"] == "stanford_ner":
                        self.map_ner(filename=filename, text=txt, pattern_index=i)
                    elif pat["type"] == "pos_matcher":
                        self.map_pos(filename=filename, text=txt, pattern_index=i)
                    elif pat["type"] == "match_all":
                        self.match_all(filename=filename, text=txt, pattern_index=i)
                    else:
                        raise Exception("Error, pattern type not supported: ", pat["type"])
                    self.get_exclude_include_maps(filename, pat, txt)
                #create intersection maps for all phi types and add them to a dictionary containing all maps
                # get full exclude map (only updated either on-command by map_regex_context or at the very end of map_coordinates)
                self.full_exclude_map[filename] = self.include_map.get_complement(filename, txt)
                # record every PHI hit (per type) with its span and matched text
                for phi_type in self.phi_type_list:
                    for start,stop in self.phi_type_dict[phi_type][0].filecoords(filename):
                        self.data_all_files[filename]["phi"].append({"start":start, "stop":stop, "word":txt[start:stop],"phi_type":phi_type, "filepath":""})
        #clear out any data to save ram
        # NOTE(review): "self .patterns" (space before the dot) is valid Python
        # but looks like a typo; left byte-identical here
        for i,pat in enumerate(self .patterns):
            if "data" in pat:
                del self.patterns[i]["data"]
        return self.full_exclude_map
def map_regex(self, filename="", text="", pattern_index=-1, pre_process= r"[^a-zA-Z0-9]"):
""" Creates a coordinate map from the pattern on this data
generating a coordinate map of hits given (dry run doesn't transform)
"""
if not os.path.exists(filename):
raise Exception("Filepath does not exist", filename)
if pattern_index < 0 or pattern_index >= len(self.patterns):
raise Exception("Invalid pattern index: | |
messages_on_stage = get_messages(request)
organization_id = convert_to_int(organization_id)
all_is_well = True
organization_on_stage_found = False
organization_on_stage = Organization()
try:
organization_on_stage = Organization.objects.get(id=organization_id)
organization_on_stage_found = True
except Organization.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except Organization.DoesNotExist:
# This is fine, create new
pass
if not organization_on_stage_found:
messages.add_message(request, messages.INFO,
'Could not find organization when trying to create a new position.')
return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))
# Prepare a drop down of candidates competing in this election
candidate_campaign_list = CandidateCampaignListManager()
candidate_campaigns_for_this_election_list = []
results = candidate_campaign_list.retrieve_all_candidates_for_upcoming_election(google_civic_election_id, True)
if results['candidate_list_found']:
candidate_campaigns_for_this_election_list = results['candidate_list_objects']
# Prepare a drop down of measures in this election
contest_measure_list = ContestMeasureList()
contest_measures_for_this_election_list = []
results = contest_measure_list.retrieve_all_measures_for_upcoming_election(google_civic_election_id, True)
if results['measure_list_found']:
contest_measures_for_this_election_list = results['measure_list_objects']
try:
organization_position_list = PositionEntered.objects.order_by('stance')
organization_position_list = organization_position_list.filter(organization_id=organization_id)
if positive_value_exists(google_civic_election_id):
organization_position_list = organization_position_list.filter(
google_civic_election_id=google_civic_election_id)
organization_position_list = organization_position_list.order_by(
'google_civic_election_id', '-vote_smart_time_span')
if len(organization_position_list):
organization_position_list_found = True
except Exception as e:
organization_position_list = []
if all_is_well:
election_list = Election.objects.order_by('-election_day_text')
template_values = {
'candidate_campaigns_for_this_election_list': candidate_campaigns_for_this_election_list,
'candidate_campaign_id': candidate_campaign_id,
'contest_measures_for_this_election_list': contest_measures_for_this_election_list,
'contest_measure_id': contest_measure_id,
'messages_on_stage': messages_on_stage,
'organization': organization_on_stage,
'organization_position_candidate_campaign_id': 0,
'possible_stances_list': ORGANIZATION_STANCE_CHOICES,
'stance_selected': stance,
'election_list': election_list,
'google_civic_election_id': google_civic_election_id,
'organization_position_list': organization_position_list,
'voter_authority': authority_results,
# Incoming values from error state
'candidate_and_measure_not_found': candidate_and_measure_not_found,
'stance': stance,
'statement_text': statement_text,
'more_info_url': more_info_url,
}
return render(request, 'organization/organization_position_edit.html', template_values)
@login_required
def organization_delete_existing_position_process_form_view(request, organization_id, position_we_vote_id):
    """
    Delete one organization position identified by its we_vote_id.
    :param request: HTTP request; voter must have 'admin' authority
    :param organization_id: id of the organization that owns the position
    :param position_we_vote_id: we_vote_id of the position to delete
    :return: redirect to the organization's position list
    """
    authority_required = {'admin'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    organization_id = convert_to_int(organization_id)

    # Get the existing position
    organization_position_on_stage_found = False
    if positive_value_exists(position_we_vote_id):
        organization_position_on_stage = PositionEntered()
        position_entered_manager = PositionEnteredManager()
        results = position_entered_manager.retrieve_position_from_we_vote_id(position_we_vote_id)
        if results['position_found']:
            organization_position_on_stage_found = True
            organization_position_on_stage = results['position']

    if not organization_position_on_stage_found:
        messages.add_message(request, messages.INFO,
                             "Could not find this organization's position when trying to delete.")
        return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))

    try:
        organization_position_on_stage.delete()
    except Exception as e:
        handle_record_not_deleted_exception(e, logger=logger)
        messages.add_message(request, messages.ERROR,
                             'Could not delete position.')
        return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))

    messages.add_message(request, messages.INFO,
                         'Position deleted.')
    # Bug fix: redirect to the position *list*. The previous code reversed
    # 'organization_position_edit', which requires a position_we_vote_id as a
    # second URL argument (and the position no longer exists), so reverse()
    # would raise NoReverseMatch.
    return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))
@login_required
def organization_position_edit_view(request, organization_id, position_we_vote_id):
    """
    In edit, you can only change your stance and comments, not who or what the position is about
    :param request:
    :param organization_id:
    :param position_we_vote_id:
    :return:
    """
    authority_required = {'verified_volunteer'}  # admin, verified_volunteer
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    google_civic_election_id = request.GET.get('google_civic_election_id', 0)
    messages_on_stage = get_messages(request)
    organization_id = convert_to_int(organization_id)

    # Look up the organization that owns this position
    organization = None
    organization_found = False
    try:
        organization = Organization.objects.get(id=organization_id)
        organization_found = True
    except Organization.MultipleObjectsReturned as e:
        handle_record_found_more_than_one_exception(e, logger=logger)
    except Organization.DoesNotExist:
        # This is fine, create new
        pass

    if not organization_found:
        messages.add_message(request, messages.INFO,
                             'Could not find organization when trying to edit a position.')
        return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))

    # Fetch the position being edited via its we_vote_id
    position = PositionEntered()
    position_found = False
    results = PositionEnteredManager().retrieve_position_from_we_vote_id(position_we_vote_id)
    if results['position_found']:
        position_found = True
        position = results['position']

    if not position_found:
        messages.add_message(request, messages.INFO,
                             'Could not find organization position when trying to edit.')
        return HttpResponseRedirect(reverse('organization:organization_position_list', args=([organization_id])))

    # Note: We have access to the candidate campaign through position.candidate_campaign
    election_list = Election.objects.all()

    template_values = {
        'is_in_edit_mode': True,
        'messages_on_stage': messages_on_stage,
        'organization': organization,
        'organization_position': position,
        'possible_stances_list': ORGANIZATION_STANCE_CHOICES,
        'stance_selected': position.stance,
        'election_list': election_list,
        'google_civic_election_id': google_civic_election_id,
    }
    return render(request, 'organization/organization_position_edit.html', template_values)
@login_required
def organization_position_edit_process_view(request):
"""
:param request:
:return:
"""
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.POST.get('google_civic_election_id', 0))
organization_id = convert_to_int(request.POST.get('organization_id', 0))
position_we_vote_id = request.POST.get('position_we_vote_id', '')
candidate_campaign_id = convert_to_int(request.POST.get('candidate_campaign_id', 0))
contest_measure_id = convert_to_int(request.POST.get('contest_measure_id', 0))
stance = request.POST.get('stance', SUPPORT) # Set a default if stance comes in empty
statement_text = request.POST.get('statement_text', '') # Set a default if stance comes in empty
more_info_url = request.POST.get('more_info_url', '')
go_back_to_add_new = False
candidate_campaign_we_vote_id = ""
google_civic_candidate_name = ""
contest_measure_we_vote_id = ""
google_civic_measure_title = ""
candidate_campaign_on_stage_found = False
contest_measure_on_stage_found = False
organization_position_on_stage = PositionEntered()
organization_on_stage = Organization()
candidate_campaign_on_stage = CandidateCampaign()
contest_measure_on_stage = ContestMeasure()
state_code = ""
position_entered_manager = PositionEnteredManager()
# Make sure this is a valid organization before we try to save a position
organization_on_stage_found = False
organization_we_vote_id = ""
try:
organization_query = Organization.objects.filter(id=organization_id)
if organization_query.count():
organization_on_stage = organization_query[0]
organization_we_vote_id = organization_on_stage.we_vote_id
organization_on_stage_found = True
except Exception as e:
# If we can't retrieve the organization, we cannot proceed
handle_record_not_found_exception(e, logger=logger)
if not organization_on_stage_found:
messages.add_message(
request, messages.ERROR,
"Could not find the organization when trying to create or edit a new position.")
return HttpResponseRedirect(reverse('organization:organization_list', args=()))
# Now retrieve the CandidateCampaign or the ContestMeasure so we can save it with the Position
# We need either candidate_campaign_id or contest_measure_id
if candidate_campaign_id:
try:
candidate_campaign_on_stage = CandidateCampaign.objects.get(id=candidate_campaign_id)
candidate_campaign_on_stage_found = True
candidate_campaign_we_vote_id = candidate_campaign_on_stage.we_vote_id
google_civic_candidate_name = candidate_campaign_on_stage.google_civic_candidate_name
state_code = candidate_campaign_on_stage.state_code
except CandidateCampaign.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except CandidateCampaign.DoesNotExist as e:
handle_record_not_found_exception(e, logger=logger)
if not candidate_campaign_on_stage_found:
messages.add_message(
request, messages.ERROR,
"Could not find Candidate's campaign when trying to create or edit a new position.")
if positive_value_exists(position_we_vote_id):
return HttpResponseRedirect(
reverse('organization:organization_position_edit', args=([organization_id], [position_we_vote_id])) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&stance=" + stance +
"&statement_text=" + statement_text +
"&more_info_url=" + more_info_url +
"&candidate_and_measure_not_found=1"
)
else:
return HttpResponseRedirect(
reverse('organization:organization_position_new', args=([organization_id])) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&stance=" + stance +
"&statement_text=" + statement_text +
"&more_info_url=" + more_info_url +
"&candidate_and_measure_not_found=1"
)
contest_measure_id = 0
elif contest_measure_id:
try:
contest_measure_on_stage = ContestMeasure.objects.get(id=contest_measure_id)
contest_measure_on_stage_found = True
contest_measure_we_vote_id = contest_measure_on_stage.we_vote_id
google_civic_measure_title = contest_measure_on_stage.google_civic_measure_title
state_code = contest_measure_on_stage.state_code
except CandidateCampaign.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except CandidateCampaign.DoesNotExist as e:
handle_record_not_found_exception(e, logger=logger)
if not contest_measure_on_stage_found:
messages.add_message(
request, messages.ERROR,
"Could not find measure when trying to create or edit a new position.")
if positive_value_exists(position_we_vote_id):
return HttpResponseRedirect(
reverse('organization:organization_position_edit', args=([organization_id], [position_we_vote_id])) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&stance=" + stance +
"&statement_text=" + statement_text +
"&more_info_url=" + more_info_url +
"&candidate_and_measure_not_found=1"
)
else:
return HttpResponseRedirect(
reverse('organization:organization_position_new', args=([organization_id])) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&stance=" + stance +
"&statement_text=" + statement_text +
"&more_info_url=" + more_info_url +
"&candidate_and_measure_not_found=1"
)
candidate_campaign_id = 0
else:
messages.add_message(
request, messages.ERROR,
"Unable to find either Candidate or Measure.")
return HttpResponseRedirect(
reverse('organization:organization_position_new', args=([organization_id])) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&stance=" + stance +
"&statement_text=" + statement_text +
"&more_info_url=" + more_info_url +
"&candidate_and_measure_not_found=1"
)
organization_position_on_stage_found = False
# Retrieve position from position_we_vote_id if it exists already
if positive_value_exists(position_we_vote_id):
results = position_entered_manager.retrieve_position_from_we_vote_id(position_we_vote_id)
if results['position_found']:
organization_position_on_stage_found = True
organization_position_on_stage = results['position']
organization_position_found_from_new_form = False
if not organization_position_on_stage_found: # Position not found from position_we_vote_id
# If a position_we_vote_id hasn't been passed in, then we are trying to create a new position.
# Check to make sure a position for this org, candidate and election doesn't already exist
if candidate_campaign_id:
results = position_entered_manager.retrieve_organization_candidate_campaign_position(
organization_id, candidate_campaign_id, google_civic_election_id)
elif contest_measure_id:
results = position_entered_manager.retrieve_organization_contest_measure_position(
organization_id, contest_measure_id, google_civic_election_id)
else:
messages.add_message(
request, messages.ERROR,
"Missing both candidate_campaign_id and contest_measure_id.")
return HttpResponseRedirect(
reverse('organization:organization_position_list', args=([organization_id]))
)
if results['MultipleObjectsReturned']:
messages.add_message(
request, messages.ERROR,
"We found more than one existing positions for this candidate. Please delete all but one position.")
return HttpResponseRedirect(
reverse('organization:organization_position_list', args=([organization_id]))
)
elif results['position_found']:
organization_position_on_stage_found = True
organization_position_on_stage = results['position']
organization_position_found_from_new_form = True
# Now save existing, or create new
success = False
try:
if organization_position_on_stage_found:
# Update the position
organization_position_on_stage.stance = stance
organization_position_on_stage.google_civic_election_id = google_civic_election_id
if not organization_position_found_from_new_form or positive_value_exists(more_info_url):
# Only update this if we came from update form, or there is a value in the incoming variable
organization_position_on_stage.more_info_url = more_info_url
if not organization_position_found_from_new_form or positive_value_exists(statement_text):
# Only update this if we came from update form, or there is a value in the incoming variable
organization_position_on_stage.statement_text = statement_text
if not positive_value_exists(organization_position_on_stage.organization_we_vote_id):
organization_position_on_stage.organization_we_vote_id = organization_on_stage.we_vote_id
organization_position_on_stage.candidate_campaign_id = candidate_campaign_id
organization_position_on_stage.candidate_campaign_we_vote_id = candidate_campaign_we_vote_id
organization_position_on_stage.google_civic_candidate_name = google_civic_candidate_name
organization_position_on_stage.contest_measure_id = contest_measure_id
organization_position_on_stage.contest_measure_we_vote_id = contest_measure_we_vote_id
organization_position_on_stage.google_civic_measure_title = google_civic_measure_title
organization_position_on_stage.state_code = state_code
organization_position_on_stage.save()
organization_position_on_stage = position_entered_manager.refresh_cached_position_info(
organization_position_on_stage)
success = True
if positive_value_exists(candidate_campaign_we_vote_id):
messages.add_message(
request, messages.INFO,
"Position on {candidate_name} updated.".format(
candidate_name=candidate_campaign_on_stage.display_candidate_name()))
elif positive_value_exists(contest_measure_we_vote_id):
messages.add_message(
request, messages.INFO,
"Position on {measure_title} updated.".format(
measure_title=contest_measure_on_stage.measure_title))
else:
# Create new
# Note that since we are processing a volunteer/admin entry tool, we can always save to the PositionEntered
# table, and don't need to worry about PositionForFriends
organization_position_on_stage = PositionEntered(
organization_id=organization_id,
organization_we_vote_id=organization_we_vote_id,
candidate_campaign_id=candidate_campaign_id,
candidate_campaign_we_vote_id=candidate_campaign_we_vote_id,
google_civic_candidate_name=google_civic_candidate_name,
contest_measure_id=contest_measure_id,
contest_measure_we_vote_id=contest_measure_we_vote_id,
google_civic_measure_title=google_civic_measure_title,
google_civic_election_id=google_civic_election_id,
stance=stance,
statement_text=statement_text,
more_info_url=more_info_url,
state_code=state_code,
)
organization_position_on_stage.save()
organization_position_on_stage = position_entered_manager.refresh_cached_position_info(
organization_position_on_stage)
success = True
if positive_value_exists(candidate_campaign_we_vote_id):
messages.add_message(
request, messages.INFO,
"New position on {candidate_name} saved.".format(
candidate_name=candidate_campaign_on_stage.display_candidate_name()))
elif positive_value_exists(contest_measure_we_vote_id):
messages.add_message(
request, messages.INFO,
"New position on {measure_title} saved.".format(
measure_title=contest_measure_on_stage.measure_title))
go_back_to_add_new = True
except Exception as e:
pass
# If the position was saved, then update the voter_guide entry
if | |
"""
if annotation not in self.sub_embedded_mapping:
return None, False
grouping = self.sub_embedded_mapping[annotation]
if grouping in self.variant_schema['properties']:
return grouping, True
elif grouping in self.variant_sample_schema['properties']:
return grouping, False
else:
raise VCFParserException('Sub_embedding_group for %s from the vcf does not match the schema' % annotation)
@staticmethod
def parse_annotation_field_value(s):
""" Helper - parses a raw annotation field value. Returns a list of the
field values for this annotation. They should all be pipe separated as per specs
Args:
s: string annotation field value (ie: raw value in the VCF)
Returns:
List of field values in expected order
"""
if len(s) > 1:
res = []
for entry in s:
res.append(entry.split('|'))
return res
else:
return [s[0].split('|')]
def fix_encoding(self, val):
""" Decodes restricted characters from val, returning the result"""
# uncomment below to enable: tolerate using '.' in vcf spec for single valued fields
# if isinstance(val, list) and len(val) == 1 and isinstance(val[0], str):
# val = val[0]
for encoded, decoded in self.RESTRICTED_CHARACTER_ENCODING.items():
val = val.replace(encoded, decoded)
return val
def cast_field_value(self, t, value, sub_type=None):
""" Casts the given value to the type given by 'type'
Args:
t: type to cast value to
value: value for the field we processing
sub_type: should be present if we're processing an array, otherwise error
Returns:
casted value
Raises:
VCFParserException if there is a type we did not expect
"""
if t == 'string':
return self.fix_encoding(value)
elif t == 'integer':
try:
return int(value)
except ValueError: # required if casting string->float->int, such as '0.000'
return int(float(value)) # throw exception here if need be
elif t == 'number':
try:
return float(value)
except Exception:
try:
return float(value[0])
except Exception: # XXX: This shouldn't happen but does in case of malformed entries, see uk10k_esp_maf
return 0.0
elif t == 'boolean':
if value in self.BOOLEAN_FALSE:
return False
elif value in self.BOOLEAN_TRUE:
return True
else:
raise VCFParserException(
"Received an unexpected value for a boolean: %s." % value
)
elif t == 'array':
if sub_type:
if not isinstance(value, list):
items = self.fix_encoding(value).split('&')
else:
items = value
return list(map(lambda v: self.cast_field_value(sub_type, v, sub_type=None), items))
else:
raise VCFParserException('Got array with no sub-type')
else:
raise VCFParserException('Type was %s and not one of: string, integer, number, boolean, array' % t)
    def validate_variant_value(self, field, value, key='', exit_on_validation=False):
        """ Given a field, check the variant schema for the type of that field and cast
            the given value to that type. This constitutes our 'validation' step

        Args:
            field: name of the field we are looking to process. This should exist somewhere
                in the schema properties either at the top level or as a sub-embedded object
            value: value of the field to be cast
            key: annotation field (sub-embedded) that this field is part of
            exit_on_validation: boolean flag to determine whether or not we bail if
                we fail validation in this step. Default to False

        Returns:
            casted value, or None when the field is unknown, disabled, or the
            key's sub-embedded group does not carry this field

        Raises:
            VCFParserException if the given field does not exist and
            exit_on_validation is True
        """
        props = self.variant_schema['properties']
        sub_type = None  # element type, filled in only when the resolved type is 'array'
        sub_embedded_group = self.sub_embedded_mapping.get(key)
        if field not in props:  # check if sub-embedded field
            if sub_embedded_group and sub_embedded_group in props:
                # resolve the type from the sub-embedded object's item schema
                item_props = props[sub_embedded_group]['items']['properties']
                if field in item_props:
                    t = item_props[field]['type']
                    if t == 'array':
                        sub_type = item_props[field]['items']['type']
                else:
                    return None  # maybe log as well? Special case where key has sub-embedding group but is not in props
            else:
                if exit_on_validation:
                    raise VCFParserException('Tried to check a variant field that does not exist on the schema: %s'
                                             % field)
                else:
                    # enable later maybe
                    # logger.error('Tried to check a variant field that does not exist on the schema: %s' % field)
                    return None
        else:
            # top-level field: take the type straight from the variant schema
            t = props[field]['type']
            if t == 'array':
                sub_type = props[field]['items']['type']
        # if this field is specifically disabled (due to formatting error), drop it here
        if field in self.DISABLED_FIELDS:
            return None
        return self.cast_field_value(t, value, sub_type)
@staticmethod
def get_record_attribute(record, field):
return getattr(record, field, None)
@staticmethod
def remove_prefix(prefix, text):
if not text.startswith(prefix):
raise ValueError('Prefix %s is not the initial substring of %s' % (prefix, text))
return text[len(prefix):]
    def create_variant_from_record(self, record):
        """ Produces a dictionary containing all the annotation fields for this record
            Each MUTANNO tag in the annotated VCF corresponds to an annotation field
            entry. Each one will be parsed as an annotation field, the rest will be
            directly cast based on the interpeted type from the INFO field. A MUTANNO
            tag can also designate a sub-embedded object. Record format is validated
            against the variant schema.

        Args:
            record: a single row in the VCF to parse, grabbed from 'vcf'

        Raises:
            VCFParserException from helpers

        Returns:
            dictionary of parsed VCF entry
        """
        result = {}
        # 1) Copy the standard VCF columns (CHROM, ALT, POS, ...)
        for vcf_key in self.VCF_FIELDS:
            if vcf_key == 'ALT':  # requires special care
                val = getattr(record, vcf_key)[0].sequence
                if val == '*':
                    val = '-'  # replace with '-' as '*' is a path character and ALT is part of the pkey
                result[vcf_key] = val
            elif vcf_key == 'CHROM':
                result[vcf_key] = self.remove_prefix('chr', getattr(record, vcf_key))  # splice chr off
            else:
                attr = self.get_record_attribute(record, vcf_key)
                if attr is not None:
                    result[vcf_key] = attr
        # 2) Walk every INFO key we know the format of
        for key in self.format.keys():
            # handle non-annotation fields
            if key not in self.annotation_keys:
                if record.INFO.get(key, None):
                    val = self.validate_variant_value(key, record.INFO.get(key), exit_on_validation=False)
                    if val is not None:
                        result[key] = val
                continue
            # drop if variant_sample sub-embedded field
            sub_embedded_group = self.sub_embedded_mapping.get(key, None)
            if sub_embedded_group in self.variant_sample_sub_embedded_fields:
                continue
            # handle annotation fields
            raw = record.INFO.get(key, None)
            if raw:
                annotations = self.parse_annotation_field_value(raw)
            else:
                continue
            # annotation could be multi-valued split into groups
            for g_idx, group in enumerate(annotations):
                # in nearly all cases there are multiple fields. match them
                # up with format
                for f_idx, field in enumerate(group):
                    if field:
                        fn = self.format[key][f_idx]
                        if fn == self.DROPPED_FIELD:
                            continue
                        # if the field we are processing is an overwrite field, apply the overwrite
                        if fn in self.OVERWRITE_FIELDS:
                            fn = self.OVERWRITE_FIELDS[fn]
                        # handle sub-embedded
                        if key in self.sub_embedded_mapping:
                            if sub_embedded_group not in result:  # create sub-embedded group if not there
                                result[sub_embedded_group] = {}
                            if g_idx not in result[sub_embedded_group]:
                                result[sub_embedded_group][g_idx] = {}
                            # XXX: Special Behavior here in light of VEP annotations
                            # VEP duplicates annotations in the same CSQ INFO field, so while some fields
                            # vary by VEP transcript, a large set of others (that are in our data set)
                            # do not and are duplicated in every transcript entry. Detect when this occurs
                            # and place the field value at top level instead of in the transcript object.
                            # NOTE(review): validate_variant_value is evaluated again in each branch
                            # below; the already-computed possible_value could be reused
                            possible_value = self.validate_variant_value(fn, field, key)
                            if possible_value is not None:
                                if fn in self.variant_props:
                                    result[fn] = self.validate_variant_value(fn, field, key)
                                else:
                                    result[sub_embedded_group][g_idx][fn] = self.validate_variant_value(fn, field, key)
                        else:
                            possible_value = self.validate_variant_value(fn, field, key)
                            if possible_value is not None:
                                result[fn] = possible_value
        return dict(self.variant_defaults, **result)  # copy defaults, merge in result
@staticmethod
def format_variant(result, seo='transcript'):
""" Does some extra formatting to the seo's on the variant so they fit the schema.
When we build the item above we index the seo's into a dictionary on
for processing speed/convenience. This function removes that and puts
them instead into a list as expected by the schema
Args:
result: the item to reformat
seo: sub-embedded-object to re-format, default='transcript' since that
is the only seo we currently have on the schema
"""
acc = []
if not result.get(seo, None):
return
for _, vals in result[seo].items():
acc.append(vals)
result[seo] = acc
def format_variant_sub_embedded_objects(self, result):
""" Applies 'format_variant' for all sub_embedded_object fields (detected) """
for key in self.sub_embedded_mapping.values():
if key in self.variant_props:
self.format_variant(result, seo=key)
def parse_samples(self, result, sample):
""" Parses the samples on the record, adding them to result
Args:
result: dict to populate
sample: sample to parse
"""
result['CALL_INFO'] = sample.sample
data = sample.data
for field in sample.data._fields: # noQA must peek at structure to know which fields to pass
if hasattr(data, field) and field in self.variant_sample_schema['properties']:
field_value = data.__getattribute__(field)
if isinstance(field_value, list): # could be a list - in this case, force cast to string
field_value = ','.join(map(str, field_value))
if field_value is not None:
result[field] = field_value
def | |
' + str(num)
# as long as name+num is allready taken, count up num
while name in namelist:
num = num + 1
name = 'Drawing ' + str(num)
return name
def gpencil_paint_mode():
    """Activate grease-pencil DRAW (PAINT_GPENCIL) mode unless it is already
    active. A grease-pencil object has to be selected!
    """
    already_painting = bpy.context.mode == 'PAINT_GPENCIL'
    if not already_painting:
        bpy.ops.object.mode_set(mode='PAINT_GPENCIL')
    return {'FINISHED'}
def laststroke():
    """Return the last stroke of the active grease-pencil object.

    Sentinel set returns on failed preconditions:
        {'GP obj inactive'}  - no GP object is active
        {'Names not equal'}  - the active object's name has no matching
                               bpy.data.grease_pencil entry
        {'No Strokes'}       - the active GP object has no layer/strokes
    """
    if bpy.context.view_layer.objects.active.type != 'GPENCIL':
        print('No GP object active')
        return {'GP obj inactive'}
    # name of the active object (Type Gpencil Object)
    name_active = bpy.context.view_layer.objects.active.name
    if name_active not in (gp_pen.name for gp_pen in bpy.data.grease_pencil):
        print('laststroke: Names of active GP object and its bpy.data.grease_pencil equivalent must be equal')
        return {'Names not equal'}
    gp_pen = bpy.data.grease_pencil[name_active]
    # short-circuit: only touch active_frame when a layer is active
    if not gp_pen.layers.active or not gp_pen.layers.active.active_frame.strokes:
        print('laststroke: active GP Obj has no strokes')
        return {'No Strokes'}
    return gp_pen.layers.active.active_frame.strokes[-1]
# def offset_plane():
#
# cube = bpy.data.objects["Cube"]
# # one blender unit in x-direction
# vec = mathutils.Vector((1.0, 0.0, 0.0))
# inv = cube.rotation_euler.to_matrix()
# # vec aligned to local axis
# vec_rot = vec * inv
# cube.location = cube.location + vec_rot
def plane_array(p1, p2, rotation):
    """adds an array of 1m by 1m planes at given location, parameter rotation defines way to calculate angle

    p1, p2: Vector endpoints the plane is fitted to (for '1p'/'3p' only p1
        is used for the location: calc_location_2p(p1, p1) is p1 itself).
    rotation: one of '1p', '3p', 'v', 'h', 'bp', '3d' - selects the helper
        used to compute the plane rotation.
        NOTE(review): any other value leaves p_rot unassigned and raises
        UnboundLocalError at primitive_plane_add - confirm callers only
        pass the values above.

    Replaces any previous 'workplane_TEMPORARY' object (saving its grid
    settings first), creates a new unit plane, arrays it via add4arrays()
    and re-activates the grease pencil afterwards.
    """
    # define standard scale / count
    save_active_gp()
    # delete last workplane
    if bpy.data.objects:
        deselect_all()
        # select last temporary workplane
        for o in bpy.data.objects:
            if o.name == 'workplane_TEMPORARY':
                o.select_set(state=True)
                # save settings of last workplane
                save_grid_settings()
                break
        # delete last workplane
        bpy.ops.object.delete()
        bpy.context.scene.plane_offset = 0.0
    # location: '1p' and '3p' centre on p1, everything else on the p1-p2 midpoint
    if rotation == '1p':
        p_loc = calc_location_2p(p1, p1)
        p_rot = ((0, 0, 0))
    elif rotation == '3p':
        p_loc = calc_location_2p(p1, p1)
    else:
        p_loc = calc_location_2p(p1, p2)
    # rotation: delegated to the matching calc_rotation_* helper
    if rotation == 'v':
        p_rot = calc_rotation_2p_zv(p1, p2)
    elif rotation in ('h', 'bp'):
        p_rot = calc_rotation_2p_zh(p1, p2)
    elif rotation == '3d':
        p_rot = calc_rotation_2p_3d(p1, p2)
    elif rotation == '3p':
        p_rot = calc_rotation_3p(p1, p2)
    bpy.context.scene.plane_location = p_loc
    bpy.ops.mesh.primitive_plane_add(size=1, location=p_loc, rotation=p_rot)
    baseplane = bpy.context.active_object
    baseplane.name = 'workplane_TEMPORARY'
    add4arrays()
    baseplane.scale = bpy.context.scene.grid_scale
    # set material of plane
    # mat = bpy.data.materials['Mat_Transparent_White']
    # baseplane.active_material = mat
    baseplane.show_wire = True
    deselect_all()
    activate_gp()
    # optionally remove the stroke that defined the plane ('3p'/'bp' keep it)
    if rotation not in ('3p', 'bp'):
        if bpy.context.scene.del_stroke:
            bpy.ops.dt.delete_last_stroke()
    return {'FINISHED'}
def save_active_gp():
    """Remember the name of the active grease-pencil object on the scene.

    Writes the name to bpy.context.scene.gp_active; stores 'empty' when
    there is no active object, the active object is not a GP object, or
    no bpy.data.grease_pencil datablock matches its name.
    """
    active = bpy.context.view_layer.objects.active
    name = 'empty'
    if active and active.type == 'GPENCIL':
        # only keep the name when a grease_pencil datablock matches it
        if any(pen.name == active.name for pen in bpy.data.grease_pencil):
            name = active.name
    bpy.context.scene.gp_active = name
def save_grid_settings():
    """Store scale and array-modifier counts of 'workplane_TEMPORARY' on the scene."""
    workplane = bpy.data.objects['workplane_TEMPORARY']
    bpy.context.scene.grid_scale = workplane.scale
    bpy.context.scene.grid_count = (
        workplane.modifiers[0].count,
        workplane.modifiers[1].count,
        0,
    )
def unit_vector(vector):
    """Return *vector* scaled to unit length (vector / ||vector||_2)."""
    length = np.linalg.norm(vector)
    return vector / length
class SetupDrawchitecture(bpy.types.Operator): # standard plane
    """initializes the setup: colors & viewsettings
    """
    bl_idname = 'dt.setup'
    bl_label = 'SetupDrawchitecture View'
    def execute(self, context):
        """Configure viewport shading, overlays, theme colors and stroke placement."""
        # NOTE(review): find_3dview_space() is defined elsewhere in this file;
        # presumably it returns the SpaceView3D of the active 3D viewport
        # Viewport shader mode set to 'WIREFRAME' for transparent objects
        find_3dview_space().shading.type = 'WIREFRAME'
        find_3dview_space().shading.show_xray_wireframe = True
        # Disable Floor Grid + Cursor in active View3D, make Vertices in editmode visible
        find_3dview_space().overlay.show_floor = False
        find_3dview_space().overlay.show_cursor = False
        find_3dview_space().overlay.show_object_origins = False
        find_3dview_space().overlay.vertex_opacity = 1
        # Set 3d View Background color to white and Wire color to grey
        bpy.context.preferences.themes[0].view_3d.space.gradients.high_gradient = (0.8, 0.8, 0.8)
        bpy.context.preferences.themes[0].view_3d.wire = (0.5, 0.5, 0.5)
        # Set Stroke Placement in active Scene to 'Surface'
        bpy.context.window.scene.tool_settings.gpencil_stroke_placement_view3d = 'SURFACE'
        # plane_array(Vector((0, 0.5, 0)), Vector((1, 0.5, 0)), 'h') # default workplane at 0,0,0
        # create GP object or activate last GP object
        activate_gp()
        # switch to DRAW mode
        gpencil_paint_mode()
        return {'FINISHED'}
class InitializeDrawchitecture(bpy.types.Operator): # standard plane
    """initializes the setup: default workplane at start, activates GP mode
    """
    bl_idname = 'dt.initialize'
    bl_label = 'Create Baseplane (+ GP Object if there is none)'
    def execute(self, context):
        """Create the default baseplane ('bp') and enter grease-pencil draw mode."""
        # default workplane at 0,0,0
        plane_array(Vector((0, 0.5, 0)), Vector((1, 0.5, 0)), 'bp')
        # create GP object if there is none
        # if not [obj for obj in bpy.data.objects if obj.type == 'GPENCIL']:
        # add_GP()
        activate_gp()
        # switch to DRAW mode
        gpencil_paint_mode()
        return {'FINISHED'}
class AddGPObject(bpy.types.Operator):
    """Adds new GP Object to Scene, locked at 0.0.0
    """
    bl_idname = 'dt.add_gp_object'
    bl_label = 'adds gp object, locked at 0.0.0'
    def execute(self, context):
        """Create a new GP object (helper add_GP) and enter draw mode."""
        add_GP()
        gpencil_paint_mode()
        return {'FINISHED'}
class AddRotation(bpy.types.Operator):
    """Adds given rotation to the rotation vector of workplane_TEMPORARY, property sets +/- and Achsis
    only shown when workplane_Temporary exists
    """
    bl_idname = 'dt.add_rotation'
    bl_label = 'add rotation'
    axis: bpy.props.StringProperty()
    rotation: bpy.props.FloatProperty()

    # maps the axis property to its index in the euler rotation vector
    _AXIS_INDEX = {'x': 0, 'y': 1, 'z': 2}

    def execute(self, context):
        """Rotate 'workplane_TEMPORARY' by self.rotation degrees around self.axis.

        Returns {'CANCELLED'} when self.axis is not one of 'x'/'y'/'z'.
        """
        wp = bpy.data.objects['workplane_TEMPORARY']
        axis_index = self._AXIS_INDEX.get(self.axis)
        if axis_index is None:
            print('error: axis must be x / y / z')
            return {'CANCELLED'}
        # degrees -> radians, added onto the existing euler component
        # (uses the wp reference instead of re-looking the object up)
        wp.rotation_euler[axis_index] += math.radians(self.rotation)
        return {'FINISHED'}
class ClearPlaneAndGP(bpy.types.Operator):
    """ Deletes the Temporary Workplane and all GP Objects
    """
    bl_idname = 'dt.clear_all_objects'
    bl_label = 'clears all Temporary Workplane + gp objects in project'
    def execute(self, context):
        """Delete the workplane and every GP object, then re-initialize the scene."""
        # deleting objects requires OBJECT mode
        if not bpy.context.mode == 'OBJECT':
            bpy.ops.object.mode_set(mode='OBJECT')
        # delete all objects
        if bpy.data.objects:
            for o in bpy.data.objects:
                if o.name == 'workplane_TEMPORARY':
                    o.select_set(state=True)
                if o.type == 'GPENCIL':
                    o.select_set(state=True)
            bpy.ops.object.delete()
            # also remove the now-orphaned grease-pencil datablocks
            if bpy.data.grease_pencil:
                for gp in bpy.data.grease_pencil:
                    bpy.data.grease_pencil.remove(gp)
            bpy.context.scene.gp_active = 'empty'
            bpy.context.scene.plane_offset = 0.0
            bpy.ops.dt.initialize()
            return {'FINISHED'}
        else:
            # NOTE(review): this branch does not reset plane_offset, unlike
            # the branch above - confirm whether that is intentional
            bpy.context.scene.gp_active = 'empty'
            bpy.ops.dt.initialize()
            return {'FINISHED'}
class DeleteLastStroke(bpy.types.Operator):
    """For V/H/3D: deletes last drawn stroke of active GP Object
    """
    bl_idname = 'dt.delete_last_stroke'
    bl_label = 'deletes last stroke of active GP object'

    def execute(self, context):
        """Remove the most recently drawn stroke of the active grease pencil."""
        save_active_gp()
        activate_gp()
        gpencil_paint_mode()
        pen = bpy.data.grease_pencil[bpy.context.scene.gp_active]
        layer = pen.layers.active
        if layer and layer.active_frame.strokes:
            # deselect everything first so only the newest stroke is removed
            deselect_all_gp()
            layer.active_frame.strokes[-1].select = True
            bpy.ops.gpencil.delete(type='STROKES')
        else:
            # no active layer, or no strokes on its active frame
            print('DeleteLastStroke: Active Grease Pencil has no strokes to be deleted')
        gpencil_paint_mode()
        return {'FINISHED'}
class RemoveGPObject(bpy.types.Operator):
    """Removes the active GP Object
    """
    bl_idname = 'dt.remove_gp_object'
    bl_label = 'removes active GP Object'

    def execute(self, context):
        """Delete the active grease-pencil object, then activate (or create) another."""
        # make sure no other object is selected
        deselect_all()
        # stores the active GP name in the scene property
        activate_gp()
        # deleting an object requires OBJECT mode
        if bpy.context.mode != 'OBJECT':
            bpy.ops.object.mode_set(mode='OBJECT')
        target = bpy.context.scene.gp_active
        # delete the object carrying the saved GP name, if it exists
        if any(obj.name == target for obj in bpy.data.objects):
            bpy.data.objects[target].select_set(state=True)
            bpy.ops.object.delete()
        # reset the saved name so another GP gets activated or a new one created
        bpy.context.scene.gp_active = 'empty'
        activate_gp()
        gpencil_paint_mode()
        return {'FINISHED'}
class ResetScale(bpy.types.Operator):
    """Reset X and Y scale + count of workplane
    """
    bl_idname = 'dt.reset_scale'
    bl_label = 'reset scale + count'

    def execute(self, context):
        """Restore the default scale (1, 1, 0) and 100x100 array counts."""
        workplane = bpy.data.objects['workplane_TEMPORARY']
        workplane.scale = (1.0, 1.0, 0)
        # both array modifiers back to their default count
        workplane.modifiers[0].count = 100
        workplane.modifiers[1].count = 100
        save_grid_settings()
        return {'FINISHED'}
class SelectGPobject(bpy.types.Operator):
    """Shows buttons with all GP Objects and selects them
    (Problems with hidden GP Objects)
    """
    bl_idname = 'dt.select_gp_object'
    bl_label = 'Activates Greasepencil Object by Name on Button'
    # name of the GP object to activate; set by the UI button invoking this operator
    gp: bpy.props.StringProperty(default='', options={'SKIP_SAVE'})
    @classmethod
    def poll(cls, context):
        # operator only available while some object is active
        return context.active_object is not None
    def execute(self, context):
        """Make the object named by self.gp the active grease pencil and enter draw mode."""
        deselect_all()
        gp = context.scene.objects.get(self.gp)
        bpy.context.view_layer.objects.active = gp
        # NOTE(review): assumes the name was found - an unknown name makes
        # gp None and this line raises AttributeError; confirm callers only
        # pass existing object names
        context.scene.grease_pencil = gp.grease_pencil
        save_active_gp()
        gpencil_paint_mode()
        return {'FINISHED'}
class SwitchScaleAndCount(bpy.types.Operator):
    """Switches X and Y scale + count of workplane
    """
    bl_idname = 'dt.switch_scale_and_count'
    bl_label = 'switch x/y'

    def execute(self, context):
        """Swap the workplane's X/Y scale components and its two array counts."""
        workplane = bpy.data.objects['workplane_TEMPORARY']
        x_scale, y_scale, z_scale = workplane.scale
        workplane.scale = (y_scale, x_scale, z_scale)
        # swap the counts of the two array modifiers
        workplane.modifiers[0].count, workplane.modifiers[1].count = (
            workplane.modifiers[1].count, workplane.modifiers[0].count)
        save_grid_settings()
        return {'FINISHED'}
class WPstrokeV(bpy.types.Operator): # First+ Last Point of last Stroke create vertical plane
"""adds VERTICAL workplane at last grease pencil stroke by start + endpoint of stroke
! GP Object must be selected first
! GP Object and Grease_Pencil object need equal Names
"""
bl_idname = 'dt.work_plane_on_stroke_2p'
bl_label = 'add vertical workplane by stroke start end'
def execute(self, context):
# last greasepencil stroke
# gp_laststroke = bpy.data.grease_pencil[-1].layers.active.active_frame.strokes[-1]
ls = | |
"""
cancel out the basic system load and replace it with a convex combination of bids
note: the demand_module (or some subsidiary module) may store calibration data
at the module level (not in the model), so this module should only be used with one
model at a time. An alternative approach would be to receive a calibration_data
object back from demand_module.calibrate(), then add that to the model and pass
it back to the bid function when needed.
note: we also take advantage of this assumption and store a reference to the
current demand_module in this module (rather than storing it in the model itself)
"""
import os, sys
from pprint import pprint
from pyomo.environ import *
import switch_mod.utilities as utilities
demand_module = None # will be set via command-line options
import util
from util import get
def define_arguments(argparser):
    """Register the demand-response command-line options on *argparser*."""
    argparser.add_argument(
        "--dr_flat_pricing", action='store_true', default=False,
        help="Charge a constant (average) price for electricity, rather than varying hour by hour")
    argparser.add_argument(
        "--dr_total_cost_pricing", action='store_true', default=False,
        help="Include both marginal and non-marginal(fixed) costs when setting prices")
    argparser.add_argument(
        "--dr_elasticity_scenario", type=int, default=3,
        help="Choose a scenario of customer elasticity (1-3), defined in the demand_module")
    argparser.add_argument(
        "--dr_demand_module", default=None,
        help="Name of module to use for demand-response bids. This should also be "
             "specified in the modules list, and should provide calibrate() and bid() functions. "
             "Pre-written options include constant_elasticity_demand_system or r_demand_system. "
             "Specify one of these in the modules list and use --help again to see module-specific options.")
def define_components(m):
    """Add demand-response components to the Pyomo model *m*.

    Selects the demand module named by --dr_demand_module (stored in the
    module-global `demand_module`, so only one model at a time is
    supported), adds a penalized unserved-load term, and builds the
    bid-list machinery that serves demand as a convex combination of
    previously collected bids (see the module docstring).
    """
    ###################
    # Choose the right demand module.
    # NOTE: we assume only one model will be run at a time, so it's safe to store
    # the setting in this module instead of in the model.
    ##################
    global demand_module
    if m.options.dr_demand_module is None:
        raise RuntimeError(
            "No demand module was specified for the demand_response system; unable to continue. "
            "Please use --dr_demand_module <module_name> in options.txt, scenarios.txt or on "
            "the command line. "
            "You should also add this module to the list of modules to load "
            " via modules.txt or --include_module <module_name>."
        )
    if m.options.dr_demand_module not in sys.modules:
        raise RuntimeError(
            "Demand module {mod} cannot be used because it has not been loaded. "
            "Please add this module to the the modules list (usually modules.txt) "
            "or specify --include_module {mod} in options.txt or on the command line."
            "".format(mod=m.options.dr_demand_module)
        )
    demand_module = sys.modules[m.options.dr_demand_module]
    # Make sure the model has a dual suffix
    if not hasattr(m, "dual"):
        m.dual = Suffix(direction=Suffix.IMPORT)
    ###################
    # Unserved load, with a penalty.
    # to ensure the model is always feasible, no matter what demand bids we get
    ##################
    # cost per MWh for unserved load (high)
    m.dr_unserved_load_penalty_per_mwh = Param(initialize=10000)
    # amount of unserved load during each timepoint
    m.DRUnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals)
    # total cost for unserved load
    m.DR_Unserved_Load_Penalty = Expression(m.TIMEPOINTS, rule=lambda m, tp:
        sum(m.DRUnservedLoad[lz, tp] * m.dr_unserved_load_penalty_per_mwh for lz in m.LOAD_ZONES)
    )
    # add the unserved load to the model's energy balance
    m.LZ_Energy_Components_Produce.append('DRUnservedLoad')
    # add the unserved load penalty to the model's objective function
    m.cost_components_tp.append('DR_Unserved_Load_Penalty')
    ###################
    # Price Responsive Demand bids
    ##################
    # list of all bids that have been received from the demand system
    m.DR_BID_LIST = Set(initialize = [], ordered=True)
    # we need an explicit indexing set for everything that depends on DR_BID_LIST
    # so we can reconstruct it (and them) each time we add an element to DR_BID_LIST
    # (not needed, and actually doesn't work -- reconstruct() fails for sets)
    # m.DR_BIDS_LZ_TP = Set(initialize = lambda m: m.DR_BID_LIST * m.LOAD_ZONES * m.TIMEPOINTS)
    # m.DR_BIDS_LZ_TS = Set(initialize = lambda m: m.DR_BID_LIST * m.LOAD_ZONES * m.TIMESERIES)
    # data for the individual bids; each load_zone gets one bid for each timeseries,
    # and each bid covers all the timepoints in that timeseries. So we just record
    # the bid for each timepoint for each load_zone.
    m.dr_bid = Param(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMEPOINTS, mutable=True)
    # price used to get this bid (only kept for reference)
    m.dr_price = Param(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMEPOINTS, mutable=True)
    # the private benefit of serving each bid
    m.dr_bid_benefit = Param(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, mutable=True)
    # weights to assign to the bids for each timeseries when constructing an optimal demand profile
    m.DRBidWeight = Var(m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, within=NonNegativeReals)
    # def DR_Convex_Bid_Weight_rule(m, lz, ts):
    #     if len(m.DR_BID_LIST) == 0:
    #         print "no items in m.DR_BID_LIST, skipping DR_Convex_Bid_Weight constraint"
    #         return Constraint.Skip
    #     else:
    #         print "constructing DR_Convex_Bid_Weight constraint"
    #         return (sum(m.DRBidWeight[b, lz, ts] for b in m.DR_BID_LIST) == 1)
    #
    # choose a convex combination of bids for each zone and timeseries
    m.DR_Convex_Bid_Weight = Constraint(m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, lz, ts:
        Constraint.Skip if len(m.DR_BID_LIST) == 0
            else (sum(m.DRBidWeight[b, lz, ts] for b in m.DR_BID_LIST) == 1)
    )
    # Since we don't have differentiated prices for each zone, we have to use the same
    # weights for all zones. (Otherwise the model will try to micromanage load in each
    # zone, but that won't be reflected in the prices we report.)
    # Note: LOAD_ZONES is not an ordered set, so we have to use a trick to get a single
    # arbitrary one to refer to (next(iter(m.LOAD_ZONES)) would also work).
    m.DR_Load_Zone_Shared_Bid_Weight = Constraint(
        m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, b, lz, ts:
            m.DRBidWeight[b, lz, ts] == m.DRBidWeight[b, list(m.LOAD_ZONES)[0], ts]
    )
    # For flat-price models, we have to use the same weight for all timeseries within the
    # same year (period), because there is only one price for the whole period, so it can't
    # induce different adjustments in individual timeseries.
    if m.options.dr_flat_pricing:
        m.DR_Flat_Bid_Weight = Constraint(
            m.DR_BID_LIST, m.LOAD_ZONES, m.TIMESERIES, rule=lambda m, b, lz, ts:
                m.DRBidWeight[b, lz, ts]
                    == m.DRBidWeight[b, lz, m.tp_ts[m.PERIOD_TPS[m.ts_period[ts]].first()]]
        )
    # Optimal level of demand, calculated from available bids (negative, indicating consumption)
    m.FlexibleDemand = Expression(m.LOAD_ZONES, m.TIMEPOINTS,
        rule=lambda m, lz, tp:
            sum(m.DRBidWeight[b, lz, m.tp_ts[tp]] * m.dr_bid[b, lz, tp] for b in m.DR_BID_LIST)
    )
    # # FlexibleDemand reported as an adjustment (negative equals more demand)
    # # We have to do it this way because there's no way to remove the lz_demand_mw from the model
    # # without changing the core code.
    # m.DemandPriceResponse = Expression(m.LOAD_ZONES, m.TIMEPOINTS,
    #     rule=lambda m, lz, tp: m.lz_demand_mw[lz, tp] - m.FlexibleDemand[lz, tp]
    # )
    # private benefit of the electricity consumption
    # (i.e., willingness to pay for the current electricity supply)
    # reported as negative cost, i.e., positive benefit
    # also divide by number of timepoints in the timeseries
    # to convert from a cost per timeseries to a cost per timepoint.
    m.DR_Welfare_Cost = Expression(m.TIMEPOINTS, rule=lambda m, tp:
        (-1.0)
        * sum(m.DRBidWeight[b, lz, m.tp_ts[tp]] * m.dr_bid_benefit[b, lz, m.tp_ts[tp]]
            for b in m.DR_BID_LIST for lz in m.LOAD_ZONES)
        * m.tp_duration_hrs[tp] / m.ts_num_tps[m.tp_ts[tp]]
    )
    # add the private benefit to the model's objective function
    m.cost_components_tp.append('DR_Welfare_Cost')
    # annual costs, recovered via baseline prices
    # but not included in switch's calculation of costs
    m.other_costs = Param(m.PERIODS, mutable=True, default=0.0)
    m.cost_components_annual.append('other_costs')
    # variable to store the baseline data
    m.base_data = None
def post_iterate(m):
print "\n\n======================================================="
print "Solved model"
print "======================================================="
print "Total cost: ${v:,.0f}".format(v=value(m.SystemCost))
print "marginal costs (first day):"
print [
electricity_marginal_cost(m, lz, tp)
for lz in m.LOAD_ZONES
for tp in m.TS_TPS[m.TIMESERIES[1]]
]
print "marginal costs (second day):"
print [
electricity_marginal_cost(m, lz, tp)
for lz in m.LOAD_ZONES
for tp in m.TS_TPS[m.TIMESERIES[2]]
]
# if m.iteration_number % 5 == 0:
# # save time by only writing results every 5 iterations
# write_results(m)
# Retrieve SystemCost before calling update_demand()
# because that breaks SystemCost until the next solve
old_SystemCost = getattr(m, "last_SystemCost", None)
new_SystemCost = value(m.SystemCost)
m.last_SystemCost = new_SystemCost
if m.iteration_number > 0:
# store cost of current solution before it gets altered by update_demand()
current_cost = value(sum(
(
sum(
electricity_marginal_cost(m, lz, tp) * electricity_demand(m, lz, tp)
for lz in m.LOAD_ZONES
) + m.DR_Welfare_Cost[tp]
) * m.bring_timepoint_costs_to_base_year[tp]
for ts in m.TIMESERIES
for tp in m.TS_TPS[ts]
))
update_demand(m)
if m.iteration_number > 0:
# get an estimate of best possible net cost of serving load
# (if we could completely | |
# rec.py
from typing import Dict, List, Optional, Tuple, Union
import re
# Error labels; reported to the caller as (line_number, error_name) tuples.
SYNTAX_ERROR = "SyntaxError"
RUNTIME_ERROR = "RuntimeError"
class Number:
    """Literal integer value in the interpreted program."""

    def __init__(self, value: int) -> None:
        self.value = value

    def run(self, variables: Dict[str, int]) -> int:
        """Evaluate to the stored constant; *variables* is ignored."""
        return self.value
class Operator:
    """Base class for binary operators; operands are variable *names*."""

    def __init__(self, left, right) -> None:
        self.left = left
        self.right = right

    def add_num(self, variables: Dict[str, int]) -> None:
        """Ensure both operand variables exist, defaulting missing ones to 0."""
        variables.setdefault(self.left, 0)
        variables.setdefault(self.right, 0)
class And(Operator):
    """Logical AND: 1 when both operands are non-zero, else 0.

    Inherits Operator.__init__(left, right); the removed constructor
    only forwarded its arguments to super().
    """

    def run(self, variables: Dict[str, int]) -> int:
        self.add_num(variables)
        return 1 if int(variables[self.left]) != 0 and int(variables[self.right]) != 0 else 0
class Or(Operator):
    """Logical OR: 1 when either operand is non-zero, else 0.

    Inherits Operator.__init__(left, right); the removed constructor
    only forwarded its arguments to super().
    """

    def run(self, variables: Dict[str, int]) -> int:
        self.add_num(variables)
        return 1 if int(variables[self.left]) != 0 or int(variables[self.right]) != 0 else 0
class Nand(Operator):
    """Logical NAND: 0 when both operands are non-zero, else 1.

    Inherits Operator.__init__(left, right); the removed constructor
    only forwarded its arguments to super().
    """

    def run(self, variables: Dict[str, int]) -> int:
        self.add_num(variables)
        return 1 if not (int(variables[self.left]) != 0 and int(variables[self.right]) != 0) else 0
class Add(Operator):
    """Integer addition: left + right.

    Inherits Operator.__init__(left, right); the removed constructor
    only forwarded its arguments to super().
    """

    def run(self, variables: Dict[str, int]) -> int:
        self.add_num(variables)
        return int(variables[self.left]) + int(variables[self.right])
class Sub(Operator):
    """Integer subtraction: left - right.

    Inherits Operator.__init__(left, right); the removed constructor
    only forwarded its arguments to super().
    """

    def run(self, variables: Dict[str, int]) -> int:
        self.add_num(variables)
        return int(variables[self.left]) - int(variables[self.right])
class Mul(Operator):
    """Integer multiplication: left * right.

    Inherits Operator.__init__(left, right); the removed constructor
    only forwarded its arguments to super().
    """

    def run(self, variables: Dict[str, int]) -> int:
        self.add_num(variables)
        return int(variables[self.left]) * int(variables[self.right])
class Div(Operator):
    """Integer division truncating toward zero (C-style).

    Inherits Operator.__init__(left, right). ZeroDivisionError is left to
    propagate (Expression.run reports it as a RuntimeError).
    """

    def run(self, variables: Dict[str, int]) -> int:
        self.add_num(variables)
        dividend = int(variables[self.left])
        divisor = int(variables[self.right])
        # floor-divide, then correct toward zero for mixed-sign operands.
        # This is exact for arbitrarily large ints, unlike the previous
        # int(a / b), which round-tripped through a 53-bit float.
        quotient = dividend // divisor
        if quotient < 0 and quotient * divisor != dividend:
            quotient += 1
        return quotient
class Lt(Operator):
    """Less-than: 1 when left < right, else 0.

    Inherits Operator.__init__(left, right); the removed constructor
    only forwarded its arguments to super().
    """

    def run(self, variables: Dict[str, int]) -> int:
        self.add_num(variables)
        return 1 if int(variables[self.left]) < int(variables[self.right]) else 0
class Gt(Operator):
    """Greater-than: 1 when left > right, else 0.

    Inherits Operator.__init__(left, right). Bugfix: casts both operands
    to int before comparing, consistent with Lt/Leq/Geq/Eq - the raw
    values may still be strings when arguments arrive from the top-level
    Interpreter.run call, and string comparison gives wrong results.
    """

    def run(self, variables: Dict[str, int]) -> int:
        self.add_num(variables)
        return 1 if int(variables[self.left]) > int(variables[self.right]) else 0
class Eq(Operator):
    """Equality: 1 when left == right, else 0.

    Inherits Operator.__init__(left, right); the removed constructor
    only forwarded its arguments to super().
    """

    def run(self, variables: Dict[str, int]) -> int:
        self.add_num(variables)
        return 1 if int(variables[self.left]) == int(variables[self.right]) else 0
class Leq(Operator):
    """Less-or-equal: 1 when left <= right, else 0.

    Inherits Operator.__init__(left, right); the removed constructor
    only forwarded its arguments to super().
    """

    def run(self, variables: Dict[str, int]) -> int:
        self.add_num(variables)
        return 1 if int(variables[self.left]) <= int(variables[self.right]) else 0
class Geq(Operator):
    """Greater-or-equal: 1 when left >= right, else 0.

    Inherits Operator.__init__(left, right); the removed constructor
    only forwarded its arguments to super().
    """

    def run(self, variables: Dict[str, int]) -> int:
        self.add_num(variables)
        return 1 if int(variables[self.left]) >= int(variables[self.right]) else 0
class Call(Operator):
    """Function call: *left* is the function name, *right* the argument names."""

    def __init__(self, left, right) -> None:
        super().__init__(left, right)

    def run(self, variables: Dict[str, int], functions: List['Function']) -> Union[int, str]:
        """Look up the function, gather argument values and run it.

        Returns "NameError" for an unknown function, "TypeError" when the
        argument count does not match, otherwise the function's result.
        """
        matches = [fn for fn in functions if fn.function_name == self.left]
        if not matches:
            return "NameError"
        target = matches[0]
        if len(target.arguments_names) != len(self.right):
            return "TypeError"
        # missing argument variables default to 0, like everywhere else
        arguments = [variables.setdefault(name, 0) for name in self.right]
        return target.run(arguments, functions)
# Dispatch table: operator keyword of the toy language -> AST node class.
# These names are reserved; Function.check_function_name rejects them as
# user-defined function names.
built_in_func = {
    "add": Add,
    "sub": Sub,
    "mul": Mul,
    "div": Div,
    "lt" : Lt,
    "gt" : Gt,
    "eq" : Eq,
    "leq": Leq,
    "geq": Geq,
    "and": And,
    "or" : Or,
    "nand": Nand,
}
class Expression:
    """One assignment line: 'name = <number> | <op> a b | <func> args...'."""

    def __init__(self) -> None:
        # target variable name; was declared as the unused `self.variable`,
        # but build()/run() always use `self.left`
        self.left: Optional[str] = None
        self.right: Optional[Operator] = None
        self.code_line_num: Optional[int] = None

    def check(self, functions: List['Function']) -> Optional[int]:
        """Return this line's number when it calls an unresolvable function."""
        if isinstance(self.right, Call):
            fun = [fun for fun in functions
                   if fun.function_name == self.right.left
                   and len(fun.arguments_names) == len(self.right.right)]
            if len(fun) != 1:
                return self.code_line_num
        return None

    def build(self, index: int, line: str, functions: List['Function']) -> Union[int, Tuple[int, str]]:
        """Parse *line* (0-based *index*); return the next index or (line, error)."""
        self.code_line_num = index + 1
        # split on the first '=' only, so a stray '=' on the right-hand side
        # is reported as a SyntaxError instead of crashing with ValueError
        left, right = line.split("=", 1)
        self.left = left.split()[0]
        if not check_variable(self.left):
            return (self.code_line_num, SYNTAX_ERROR)
        right_parts = right.split()
        if len(right_parts) == 0:
            return (self.code_line_num, SYNTAX_ERROR)
        elif len(right_parts) == 1 and right_parts[0].lstrip("-").isdigit():
            self.right = Number(int(right_parts[0]))
        elif len(right_parts) == 3 and right_parts[0] in built_in_func:
            if not (check_variable(right_parts[1]) and check_variable(right_parts[2])):
                return (self.code_line_num, SYNTAX_ERROR)
            self.right = built_in_func[right_parts[0]](right_parts[1], right_parts[2])
        else:
            # a built-in keyword with the wrong arity is a syntax error,
            # not a user-function call
            if right_parts[0] in built_in_func:
                return (self.code_line_num, SYNTAX_ERROR)
            for var in right_parts[1:]:
                if not check_variable(var):
                    return (self.code_line_num, SYNTAX_ERROR)
            self.right = Call(right_parts[0], right_parts[1:])
        return index + 1

    def run(self, variables: Dict[str, int], functions: List['Function']) -> Optional[Tuple[int, str]]:
        """Evaluate the right-hand side and assign it; (line, error) on failure."""
        try:
            if isinstance(self.right, Call):
                rt_val = self.right.run(variables, functions)
            else:
                rt_val = self.right.run(variables)
        except Exception:
            # e.g. ZeroDivisionError from Div or RecursionError from deep
            # calls; was a bare except, which also swallowed KeyboardInterrupt
            return (self.code_line_num, RUNTIME_ERROR)
        if not isinstance(rt_val, int):
            # Call.run signals "NameError"/"TypeError" as strings
            return (self.code_line_num, rt_val)
        variables[self.left] = rt_val
class Scope:
    """A block of statements sharing one indentation level."""

    def __init__(self) -> None:
        self.objects_to_run: List[Union["Scope", "Operator", "Number"]] = []

    def check_valid_line(self, line: str, pattern: str) -> bool:
        """True for blank lines and lines matching this scope's indent pattern."""
        return re.search(pattern, line) is not None or line.isspace() or line == ""

    def check(self, functions: List['Function']) -> Optional[int]:
        """Return the first line number with an unresolved call, or None."""
        for object_to_run in self.objects_to_run:
            rt_val = object_to_run.check(functions)
            if rt_val is not None:
                return rt_val
        return None

    def build_scope(self, index: int, code_lines: List[str],
                    functions: List['Function'], indent: str) \
            -> Union[int, Tuple[int, str]]:
        """Parse lines belonging to this scope, starting at *index*.

        Returns the index of the first line past the scope, or a
        (line_number, error) tuple on a malformed line.
        """
        pattern = "^" + indent + "[a-zA-Z].*$"
        while index < len(code_lines) and self.check_valid_line(code_lines[index], pattern):
            line = code_lines[index]
            if line.isspace() or line == "":
                index += 1
                continue
            line_parts = line.split()
            if len(line_parts) < 2:
                return (index + 1, SYNTAX_ERROR)
            elif len(line_parts) == 2:
                # two tokens can only be an 'if <var>' or 'while <var>' header.
                # Bugfix: the keyword is the FIRST token; the old test checked
                # line_parts[1] != "while", which rejected every valid
                # 'while x' line as a SyntaxError.
                if line_parts[0] not in ("if", "while"):
                    return (index + 1, SYNTAX_ERROR)
                new_object = PredicateScope()
                rt_val = new_object.build(index, code_lines, functions, indent + " ")
                if isinstance(rt_val, int):
                    index = rt_val
                else:
                    return rt_val
                self.objects_to_run.append(new_object)
            elif line_parts[1] == "=":
                new_object = Expression()
                rt_val = new_object.build(index, line, functions)
                if isinstance(rt_val, int):
                    index = rt_val
                else:
                    return rt_val
                self.objects_to_run.append(new_object)
            else:
                return (index + 1, SYNTAX_ERROR)
        return index
class PredicateScope(Scope):
    """A guarded scope: the body of an 'if <var>' or 'while <var>' block."""
    def __init__(self) -> None:
        super().__init__()
        # name of the variable whose non-zero value enables the body
        self.predicate_var: Optional[str] = None
        # 1-based source line of the header, for error reports
        self.code_line_num: Optional[int] = None
        # either "if" or "while"
        self.type_scp: Optional[str] = None
    def build(self, index: int, code_lines: List[str], functions: List['Function'], indent: str) -> Union[int, Tuple[int, str]]:
        """Parse the header at *index* and the deeper-indented body after it.

        Returns the index just past the body, or a (line_number, error) tuple.
        """
        self.code_line_num = index + 1
        line = code_lines[index]
        line_parts = line.split()
        self.type_scp = "if" if line_parts[0] == "if" else "while"
        if not check_variable(line_parts[1]):
            return (index + 1, SYNTAX_ERROR)
        self.predicate_var = line_parts[1]
        return self.build_scope(index + 1, code_lines, functions, indent)
    def run(self, variables: Dict[str, int], functions: List['Function']) -> Optional[Tuple[int, str]]:
        """Execute the body while/if the predicate is non-zero.

        Returns the first (line_number, error) tuple raised by the body, or None.
        """
        if self.predicate_var not in variables:
            # an undefined predicate defaults to 0, which disables the body
            # for both 'if' and 'while', so returning early is equivalent
            variables[self.predicate_var] = 0
            return None
        if self.type_scp == "if" and variables[self.predicate_var] != 0:
            for line in self.objects_to_run:
                rt_val = line.run(variables, functions)
                if rt_val is not None:
                    return rt_val
        while self.type_scp == "while" and variables[self.predicate_var] != 0:
            for line in self.objects_to_run:
                rt_val = line.run(variables, functions)
                if rt_val is not None:
                    return rt_val
class Function(Scope):
    """A top-level 'def <name> <args...>' definition and its body."""
    def __init__(self) -> None:
        super().__init__()
        self.function_name: Optional[str] = None
        self.arguments_names: List[str] = []
        # 1-based source line of the 'def' header, for error reports
        self.line_num: Optional[int] = None
    def check_function_name(self, functions: List['Function']) -> bool:
        """True when the name is a valid identifier, not a built-in, and unique."""
        if re.search("^[a-zA-Z][a-zA-Z0-9_]*$", self.function_name) is None:
            return False
        if self.function_name in built_in_func:
            return False
        if any(map(lambda func: func.function_name == self.function_name, functions)):
            return False
        return True
    def build(self, index: int, code_lines: List[str], functions: List['Function']) -> Union[int, Tuple[int, str]]:
        """Parse the 'def' header at *index* and the indented body after it.

        Registers self in *functions* before parsing the body, so recursive
        calls resolve. Returns the next top-level index or (line, error).
        """
        self.line_num = index + 1
        line = code_lines[index]
        header = line.split()
        if len(header) < 2:
            return (self.line_num, SYNTAX_ERROR)
        _, self.function_name, *arguments = header
        self.arguments_names = arguments
        for var in self.arguments_names:
            if not check_variable(var):
                return (self.line_num, SYNTAX_ERROR)
        if not self.check_function_name(functions):
            return (self.line_num, SYNTAX_ERROR)
        functions.append(self)
        return self.build_scope(index + 1, code_lines, functions, " ")
    def run(self, variables: List[str], functions: List['Function']) -> Union[int, Tuple[int, str]]:
        """Execute the body with *variables* as positional argument values.

        The function's own name doubles as the return-value variable,
        initialized to 0; its final value is the result.
        NOTE(review): *variables* holds argument VALUES, not names - the
        List[str] hint looks wrong (values are ints, or raw strings when
        passed straight from Interpreter.run); confirm intended type.
        """
        if len(self.arguments_names) != len(variables):
            return (self.line_num, RUNTIME_ERROR)
        variables_dict = {}
        variables_dict[self.function_name] = 0
        for var, val in zip(self.arguments_names, variables):
            variables_dict[var] = val
        for line in self.objects_to_run:
            rt_val = line.run(variables_dict, functions)
            if rt_val is not None:
                return rt_val
        return variables_dict[self.function_name]
class Interpreter:
    """Parses a toy-language program and runs its functions."""

    def __init__(self, code_str: str) -> None:
        self.code_lines: List[str] = code_str.split("\n")
        self.functions: List["Function"] = []

    def func_check(self) -> Optional[int]:
        """Return the first line number calling an undefined function, else None."""
        for fun in self.functions:
            bad_line = fun.check(self.functions)
            if bad_line is not None:
                return bad_line
        return None

    def build(self) -> Optional[Tuple[int, str]]:
        """Parse every top-level definition; (line, "SyntaxError") on failure."""
        index = 0
        while index < len(self.code_lines):
            line = self.code_lines[index]
            if line.isspace() or line == "":
                index += 1
                continue
            # only blank lines and 'def' headers may appear at top level
            if not line.startswith("def"):
                return (index + 1, SYNTAX_ERROR)
            new_function = Function()
            result = new_function.build(index, self.code_lines, self.functions)
            if not isinstance(result, int):
                return result
            index = result
        bad_line = self.func_check()
        if bad_line is not None:
            return (bad_line, SYNTAX_ERROR)

    def run(self, func_name: str, variables: List[str]) -> Union[int, Tuple[int, str]]:
        """Run *func_name* with the given argument values."""
        candidates = [fun for fun in self.functions if fun.function_name == func_name]
        if len(candidates) != 1:
            return (0, RUNTIME_ERROR)
        return candidates[0].run(variables, self.functions)
def check_variable(name: str) -> bool:
    """Return True when *name* is a valid identifier: an ASCII letter
    followed by letters, digits or underscores.

    Uses re.fullmatch so the whole string must match; the previous
    search("^...$") form also accepted a trailing newline, because '$'
    matches just before a final '\\n'.
    """
    return re.fullmatch(r"[a-zA-Z][a-zA-Z0-9_]*", name) is not None
def do_rec(code: str, func_name: str, *args) -> | |
'''
kwargs = self._setup_fitter_kwargs(
{'A': 1, 'mu':0, 'sigma': 1, 'c': 0,},
kwargs
)
Fitter.__init__(self, *args, **kwargs)
self._box_str_format = '{:5}: {:7.3g} $\\pm$ {:6.1g}\n'
def fit_func(self, x, A, mu, sigma, c):
"""Guassian function
A: amplitude
mu: position
sigma: std deviation
c : offset
"""
# Minuit passes negative values for sigma
# and these values lead to failures of the fitting
if sigma < 0:
return 0
return A * norm.pdf(x, mu, sigma) + c
class GaussianModelN(Fitter):
    """Sum-of-N-Gaussians fit model configured by a parameter_dict."""
    def __init__(self, *args, parameter_dict=None, **kwargs):
        ''' Fit Gaussian model using Minuit.
        **args**/**kwargs:**
        Get passed to `sfg2d.models.Fitter`. Options are:
        - **parameter_dict**: Dict of parameters for gaussian fit.
        - **xdata**: array of x data points
        - **ydata**: array of y data points
        - **sigma**: Array of y data errors
        - **fitarg**: Dictionary with fit conditions.
            Each parameter has an entry with its name `'parameter'`
            `'error_parameter'` `'fix_parameter'` and `'limit_parameter'`
        - **box_coords**: Coordinates of the fit result box in data coordinates.
        - **roi**: Slice. Region of interest of the data.
            This subregion will be used for fitting.
        - **name**: Str, Name to describe the Model.
        '''
        # lazily built caches for the numbered names / fitarg dict
        self._parameter_names = None
        self._parameter_dict_fitarg = None
        # base parameter names; each occurs once per gaussian in parameter_dict
        self._pmn = ['A', 'mu', 'sigma', 'c']
        #Numberfy params
        if not parameter_dict:
            raise NotImplementedError('Must have parameter dict currently')
        self.parameter_dict = parameter_dict
        if not kwargs:
            kwargs = {}
        # iminuit cannot auto-discover a *params signature, so the numbered
        # parameter names are forced explicitly
        kwargs['forced_parameters'] = self.parameter_names
        kwargs['fitarg'] = self.parameter_dict_fitarg
        Fitter.__init__(self, *args, **kwargs)
        self._box_str_format = '{:5}: {:7.3g} $\\pm$ {:6.1g}\n'
    @property
    def parameter_names(self):
        """Numbered parameter names, e.g. ['A0', 'A1', 'mu0', 'mu1', ...]."""
        if isinstance(self._parameter_names, type(None)):
            ret = []
            for name in self._pmn:
                pos = 0
                # one numbered entry per configured value of this parameter
                for value in self.parameter_dict[name]:
                    ret.append('%s%d'%(name, pos))
                    pos += 1
            self._parameter_names = ret
        return self._parameter_names
    @property
    def parameter_dict_fitarg(self):
        """Creates a numbered dictionary that can be used as fitargs
        dict to create the fit function."""
        if isinstance(self._parameter_dict_fitarg, type(None)):
            ret = {}
            for pm in self._pmn:
                values = self.parameter_dict[pm]
                pos = 0
                for value in values:
                    ret['%s%d'%(pm,pos)] = value
                    pos += 1
            self._parameter_dict_fitarg = ret
        return self._parameter_dict_fitarg
    def _params_from_parameter_dict(self):
        # NOTE(review): iterates the NUMBERED names ('A0', ...) but indexes
        # parameter_dict, which is keyed by the base names ('A', ...) - this
        # would raise KeyError if called; confirm whether it is still used.
        ret = []
        for name in self._parameter_names:
            [ret.append(value) for value in self.parameter_dict[name]]
        return np.array(ret)
    def fit_func(self, x, *params):
        """
        Gaussian functions.
        Pass parameters as list. Sorting of parameters is:
        A0, A1,.. mu0, mu1,... sigma0, sigma1,....c0,c1,....
        """
        # Minuit passes negative values for sigma
        # and these values lead to failures of the fitting
        i = len(params)//4
        # reshape the flat (A..., mu..., sigma..., c...) tuple into
        # one (A, mu, sigma, c) row per gaussian
        pparams = np.reshape(params, (4, i)).T
        ret = np.zeros_like(x)
        for _p in pparams:
            ret += self._gaussian(x, *_p)
        return ret
    def _gaussian(self, x, A, mu, sigma, c):
        """Gaussian function"""
        # negative sigma (probed by Minuit) is rejected with 0
        if sigma < 0:
            return 0
        return A * norm.pdf(x, mu, sigma) + c
class LorenzianModel(Fitter):
    """
    N-Lorenzian Peaks and Non Resonant background to fit SFG
    Spectra with.
    """
    def __init__(self, *args, n_lorenzians=1, **kwargs):
        # iminuit's parameter auto-discovery fails for `sfgn` as fit
        # function, so the parameter names must be forced explicitly.
        self.n_lorenzians = n_lorenzians
        _fitarg = {'nr': 0, 'phase': 0}
        for name in flatten([('amp_%i' % i, 'pos_%i' % i, 'width_%i' % i) for i in range(n_lorenzians)]):
            _fitarg[name] = 0
        self.parameter_names = list(_fitarg)
        kwargs['forced_parameters'] = self.parameter_names
        # Without an explicit fitarg fall back to sane defaults.
        # Caveat (unchanged): the defaults silently assume the caller's
        # n_lorenzians is correct.
        if not kwargs.get('fitarg'):
            kwargs['fitarg'] = _fitarg
        Fitter.__init__(self, *args, **kwargs)

    def fit_func(self, x, *args, **kwargs):
        """Delegate to the `sfgn` model function."""
        return sfgn(x, *args, **kwargs)

    @property
    def kwargs(self):
        """Model kwargs; n_lorenzians is required for the model to work."""
        base = super().kwargs
        base['n_lorenzians'] = self.n_lorenzians
        return base
class SkewedNormal(Fitter):
    """Skew-normal peak model: ``A * skewnorm.pdf(x, kurt, mu, sigma) + c``."""

    def __init__(self, *args, **kwargs):
        Fitter.__init__(self, *args, **kwargs)
        self._box_str_format = '{:5}: {:7.3g} $\\pm$ {:6.1g}\n'

    def fit_func(self, x, A, mu, sigma, kurt, c):
        """Skewed normal distribution.

        A: amplitude
        mu: position
        sigma: width
        kurt: skewness parameter
        c: offset
        """
        return A * skewnorm.pdf(x, kurt, mu, sigma) + c

    # Backward-compatible alias: the method was historically misspelled
    # `fit_funct`, while every sibling model spells it `fit_func`.
    fit_funct = fit_func
class FourLevelMolKinM(Fitter):
def __init__(
self,
*args,
gSigma=150,
N0=[1, 0, 0, 0],
rtol=1.09012e-9,
atol=1.49012e-9,
full_output=True,
**kwargs
):
"""4 Level Model Fitter.
To use set following `kwargs`
`xdata`, `ydata` and `fitarg`. Optinal pass `sigma` for y errors.
**Arguments:**
- **N0**: Boundary condition of the DGL
- **rtol**: Precision parameter of the DGL
- **atol**: Precision parameter of the DGL
- **full_output**: Weather to get full_output of the DGL Solver.
Usefull for debugging. atol and rtol
**args**/**kwargs:**
Get passed to `sfg2d.models.Fitter`. Options are:
- **xdata**: array of x data points
- **ydata**: array of y data points
- **sigma**: Array of y data errors
- **fitarg**: Dictionary with fit conditions.
Each parameter has an entry with its name `'parameter'`
`'error_parameter'` `'fix_parameter'` and `'limit_parameter'`
- **box_coords**: Coordinates of the fit result box in data coordinates.
- **roi**: Slice. Region of interest of the data.
This subregion will be used for fitting.
- **name**: Str, Name to describe the Model.
"""
self.gSigma = gSigma # width of the excitation
self.rtol = rtol # Precition of the numerical integrator.
self.atol = atol
# Starting conditions of the Populations, not to be confuesed with starting conditions of the plot
self.N0 = N0
self.full_output = full_output
self.infodict = None # Infodict return of the Odeint.
kwargs = self._setup_fitter_kwargs(
{'s': 1, 't1': 1, 't2': 0.7, 'c': 1, 'mu': 0,},
kwargs
)
Fitter.__init__(self, *args, **kwargs)
def ext_gaus(self, t, mu, sigma):
"""Gausian excitation function.
Due to historic reasons its not a strict gausian, but something
very cloe to it. The Igor Code is:
1/sqrt(pi)/coeff1*exp(-(coeff0-x)^2/coeff1^2)
The here wanted sigma is sqrt(2)*sigma of a normal gaussian
and then its also normalized. If you have FWHM, then sigma
is sigma = FWHM/(2*sqrt(log(2)))
"""
return 1 / np.sqrt(np.pi) / sigma * np.exp(-((mu-t)/sigma)**2)
# The Physical Water model
def dgl(self, N, t, ext_func, s, t1, t2):
"""Dgl of the 4 Level DGL system.
**Arguments:**
- **N**: deg 4 array
Population of the 4 levels respectively
- **t**: float
time
- **ext_func**: exictation function in time.
Time profile of the pump laser.
Function of t. Usaully a gaussian function.
- **s**: scaling factor of the pump laser.
- **t1**: Time constant of first level
- **t2**: Time constant of second level.
**Returns:**
Derivatives of the system. As 4 dim array.
"""
# This is the DGL written as a Matrix multiplication.
# dNdt = A x N
# A is the constructing matrix of the DGL
# and N is a 4-level vector with (N0, N1, N2, N3)
# as the population of the states at time t.
# dNdt is the state wise derivative of N
# See https://en.wikipedia.org/wiki/Matrix_differential_equation
A = np.array([
[-s * ext_func(t), s * ext_func(t), 0, 0],
[s * ext_func(t), -s * ext_func(t) - 1/t1, 0, 0],
[0, 1 / t1, -1 / t2, 0],
[0, 0, 1 / t2, 0],
], dtype=np.float64)
dNdt = A.dot(N)
return dNdt
def fit_func(self, t, s, t1, t2, c, mu):
"""
Function we use to fit.
**Arguments:**
- **t**: time
- **s**: Gaussian Amplitude
- **t1**: Livetime of first state
- **t2**: livetime of second(intermediate) state
- **c**: Coefficient of third(Heat) state
- **mu**: Position of pump pulse, the zero.
**Returns**
The bleach of the water model
and the Matrix with the populations"""
N = self.population(
t,
lambda t: self.ext_gaus(t, mu, self.gSigma),
s,
t1,
t2
).T
return ((N[0] - N[1]+ N[2] + c * N[3])**2) / (self.N0[0]**2)
def population(self, t, *args, **kwargs):
"""Numerical solution to the 4 Level DGL-Water system.
**Arguments:**
- **t**: array of time values
**Args**:
Arguments of the dgl function
- **ext_func**: Function of excitation.
- **s**: scalar factor for the pump
- **t1**: Live time of the first exited state
- **t2**: livetime of the intermediate state.
**kwargs**:
Get passe to differential equation solver odeing
**Returns**
(len(t), 4) shaped array with the 4 entires beeing the population
of the N0 t0 N3 levels of the system
"""
ret = odeint(
func=self.dgl, # the DGL of the 4 level water system
y0=self.N0, # Starting conditions of the DGL
t=t, # Time as parameter
args=args, # Aguments of the dgl
# Dfun=self.jac, # The Jacobean of the DGL. Its optional.
# The precisioin parameter for the nummerical DGL solver.
rtol=self.rtol,
atol=self.atol,
full_output=self.full_output,
**kwargs,
)
if self.full_output:
ret, self.infodict = ret
return | |
text: %s', source_text)
except UnicodeDecodeError:
is_binary = True
logging.debug('Template file is binary. Templating disabled.')
compiled = None
context = None
if is_binary or not self.custom_response.use_templating:
compiled = source_text
else:
compiled, context = self.common_template_renderer(template_engine, source_text)
self.populate_counters(context)
if not is_binary:
logging.debug('Render output: %s', compiled)
return compiled
    def build_replica_request(self) -> Request:
        """Method that builds the replica `Request` object to be injected into the response templating.

        Copies connection details, method, path, headers, query string, body
        (form-urlencoded, multipart or raw) and files from the Tornado
        request. Values that cannot be decoded as text are base64-encoded
        and flagged through `bodyType`.
        """
        request = Request()
        # Details
        request.version = self.request.version
        request.remoteIp = self.request.remote_ip
        request.protocol = self.request.protocol
        request.host = self.request.host
        request.hostName = self.request.host_name
        # Local port of the accepted connection (the service's listening port).
        request.port = self.request.server_connection.stream.socket.getsockname()[1]
        request.uri = self.request.uri
        # Method
        request.method = self.request.method
        # Path
        request.set_path(self.request.path)
        # Headers: expose each header both in original casing and lowercased,
        # so templates can look it up case-insensitively.
        for key, value in self.request.headers._dict.items():
            request.headers[key] = value
            request.headers[key.lower()] = value
        # Query String
        for key, value in self.request.query_arguments.items():
            request.queryString[key] = [x.decode() for x in value]
            # Single-valued parameters are unwrapped from their list.
            if len(request.queryString[key]) == 1:
                request.queryString[key] = request.queryString[key][0]
        # Body
        if self.request.body_arguments:
            request.mimeType = 'application/x-www-form-urlencoded'
            for key, value in self.request.body_arguments.items():
                try:
                    request.bodyType[key] = 'str'
                    request.body[key] = [x.decode() for x in value]
                except (AttributeError, UnicodeDecodeError):
                    # Binary values are carried as base64 strings.
                    request.bodyType[key] = BASE64
                    request.body[key] = [_b64encode(x) for x in value]
                if len(request.body[key]) == 1:
                    request.body[key] = request.body[key][0]
        elif self.request.files:
            request.mimeType = 'multipart/form-data'
            for key, value in self.request.files.items():
                try:
                    request.bodyType[key] = 'str'
                    request.body[key] = [x.body.decode() for x in value]
                except (AttributeError, UnicodeDecodeError):
                    request.bodyType[key] = BASE64
                    request.body[key] = [_b64encode(x.body) for x in value]
                if len(request.body[key]) == 1:
                    request.body[key] = request.body[key][0]
        else:
            # Raw body: plain text if it decodes, otherwise base64.
            request.mimeType = 'text/plain'
            try:
                request.bodyType = 'str'
                request.body = self.request.body.decode()
            except (AttributeError, UnicodeDecodeError):
                request.bodyType = BASE64
                request.body = _b64encode(self.request.body)
        request.bodySize = len(self.request.body)
        # Files
        request.files = self.request.files
        return request
def build_replica_response(self) -> Response:
"""Method that prepares replica `Response` object to be modified by the interceptors."""
response = Response()
response.status = self._status_code
response.headers = self._headers
if not hasattr(self, 'rendered_body'):
self.rendered_body = None
response.body = self.rendered_body
return response
def update_response(self) -> None:
"""Updates the response according to modifications made in interceptors."""
self._status_code = self.replica_response.status
self._headers = self.replica_response.headers
self.rendered_body = self.replica_response.body
self._write_buffer = []
if self.rendered_body is None:
self.rendered_body = ''
if self.should_write():
self.write(self.rendered_body)
def determine_status_code(self) -> None:
"""Method to determine the status code of the response."""
status_code = None
if self.custom_response.status is not None:
if isinstance(self.custom_response.status, str):
compiled, context = self.common_template_renderer(
self.definition_engine,
self.custom_response.status
)
self.populate_counters(context)
try:
status_code = int(compiled)
except ValueError:
status_code = compiled
else:
status_code = self.custom_response.status
else:
status_code = 200
if self.performance_profile is not None:
status_code = self.performance_profile.trigger(status_code)
if isinstance(status_code, str) and status_code.lower() == 'rst':
self.request.server_connection.stream.socket.setsockopt(
socket.SOL_SOCKET,
socket.SO_LINGER,
struct.pack('ii', 1, 0)
)
self.request.server_connection.stream.close()
self.set_elapsed_time(self.request.request_time())
self.stats.services[self.service_id].endpoints[self.internal_endpoint_id].add_status_code('RST')
if isinstance(status_code, str) and status_code.lower() == 'fin':
self.request.server_connection.stream.close()
self.stats.services[self.service_id].endpoints[self.internal_endpoint_id].add_status_code('FIN')
self.dont_add_status_code = True
else:
self.set_status(status_code)
def analyze_component(self, component: str) -> None:
"""Method that analyzes various HTTP components."""
if SPECIAL_CONTEXT not in self.initial_context or component not in self.initial_context[SPECIAL_CONTEXT]:
return
payload = None
if component == 'headers':
payload = self.request.headers._dict
elif component == 'queryString':
payload = self.request.query_arguments
elif component == 'bodyText':
payload = self.request.body.decode()
elif component == 'bodyUrlencoded':
payload = self.request.body_arguments
elif component == 'bodyMultipart':
payload = self.request.files
for key, value in self.initial_context[SPECIAL_CONTEXT][component].items():
self.analyze_component_inject_to_context(key, value, component, payload)
    def analyze_component_inject_to_context(self, key: str, value: dict, component: str, payload: Union[dict, str]):
        """Inject regex capture groups from a matched component value into the custom context.

        `value` is the special-context rule for `key`; only rules of type
        'regex' are handled. Captured groups are bound, in order, to the
        names listed in `value['args']`.
        """
        _key = key
        if component == 'headers':
            # Tornado stores header keys title-cased.
            _key = key.title()
        if _key in payload or component == 'bodyText':
            if value['type'] == 'regex':
                match_string = None
                regex = value['regex']
                if component == 'headers':
                    match_string = self.request.headers.get(key)
                elif component == 'queryString':
                    match_string = self.get_query_argument(key)
                elif component == 'bodyText':
                    match_string = payload
                    if self.alternative is not None and self.alternative.body is not None:
                        regex = self.alternative.body.text
                        if self.alternative.body.is_graphql_query:
                            # Normalize the GraphQL query by parsing and
                            # re-printing it, so the regex matches
                            # independently of the request's formatting.
                            json_data = json.loads(payload)
                            logging.debug('[inject] GraphQL original request:\n%s', json_data['query'])
                            try:
                                graphql_ast = graphql_parse(json_data['query'])
                                match_string = graphql_print_ast(graphql_ast).strip()
                                logging.debug('[inject] GraphQL parsed/unparsed request:\n%s', match_string)
                            except GraphQLSyntaxError as e:
                                logging.error('[inject] GraphQL: %s', str(e))
                                return
                elif component == 'bodyUrlencoded':
                    match_string = self.get_body_argument(key)
                elif component == 'bodyMultipart':
                    match_string = self.request.files[key][0].body.decode()
                match = re.search(regex, match_string)
                if match is not None:
                    # NOTE(review): `key` is rebound here, shadowing the
                    # parameter — looks intentional but worth keeping in mind.
                    for i, key in enumerate(value['args']):
                        self.custom_context[key] = match.group(i + 1)
def determine_headers(self) -> None:
"""Method to determine the headers of the response."""
if self.custom_endpoint_id is not None:
self.set_header('x-%s-endpoint-id' % PROGRAM.lower(), self.custom_endpoint_id)
if 'headers' in self.globals:
for key, value in self.globals['headers'].items():
self.set_header(key, value)
if self.custom_response.headers is None:
return
for key, value in self.custom_response.headers.payload.items():
value_list = None
if isinstance(value, list):
value_list = value
if isinstance(value, str):
value_list = [value]
new_value_list = []
for value in value_list:
new_value, context = self.common_template_renderer(self.definition_engine, value)
self.populate_counters(context)
new_value_list.append(new_value)
for value in new_value_list:
if key.title() == 'Set-Cookie':
value_splitted = value.split('=')
value_splitted[1] = quote_plus(value_splitted[1])
self.set_cookie(value_splitted[0], value_splitted[1])
else:
self.set_header(key, value)
    async def match_alternative(self) -> tuple:
        """Method to handles all the request matching logic.
        If the request does not match to any alternatives defined in the config, it returns `400`.
        It also handles the automatic CORS.

        Returns a tuple ``(_id, response, params, context, dataset,
        internal_endpoint_id, performance_profile)`` for the first matching
        alternative, or an empty tuple when the request was answered
        directly (CORS, exhausted multi-response/dataset loops).
        """
        if await self.should_cors():
            self.respond_cors()
            return ()
        self.alternatives = self.methods[self.request.method.lower()]
        response = None
        params = None
        context = None
        reason = None
        # First alternative that passes every criterion wins.
        for alternative in self.alternatives:
            fail = False
            # Headers
            fail, reason = self.match_alternative_headers(alternative)
            if fail:
                continue
            # Query String
            fail, reason = self.match_alternative_query_string(alternative)
            if fail:
                continue
            # Body
            if alternative.body is not None:
                body = self.request.body.decode()
                # Schema
                fail, reason, error = self.match_alternative_body_schema(body, alternative)
                if error:
                    # NOTE(review): returns None here while the other exits
                    # return a tuple — confirm callers handle both. The error
                    # response was already sent by match_alternative_body_schema.
                    return
                elif fail:
                    continue
                # Text
                fail, reason = self.match_alternative_body_text(body, alternative)
                if fail:
                    continue
                # Urlencoded
                fail, reason = self.match_alternative_body_urlencoded(body, alternative)
                if fail:
                    continue
                # Multipart
                fail, reason = self.match_alternative_body_multipart(body, alternative)
                if fail:
                    continue
                # GraphQL Variables
                fail, reason = self.match_alternative_body_graphql_variables(body, alternative)
                if fail:
                    continue
            # Multiple responses
            if alternative.response is not None:
                response = alternative.response
                if isinstance(response, ConfigMultiResponse):
                    if not len(response.payload) > 0:
                        # Empty multi-response list: answer with an empty body.
                        response = ConfigResponse(body=None)
                    else:
                        response = self.loop_alternative(alternative, 'response', 'multi_responses')
                        if not response:
                            return ()
                        response = response if isinstance(response, ConfigResponse) else ConfigResponse(body=response)
            else: # pragma: no cover
                response = ConfigResponse(body=None)
            # Dataset
            dataset = {}
            if alternative.dataset is not None:
                alternative.dataset = self.load_dataset(alternative.dataset)
                dataset = self.loop_alternative(alternative, 'dataset', 'dataset')
                if not dataset:
                    return ()
            _id = alternative.id
            params = alternative.params
            context = alternative.context
            internal_endpoint_id = alternative.internal_endpoint_id
            performance_profile = alternative.performance_profile
            self.alternative = alternative
            return (
                _id,
                response,
                params,
                context,
                dataset,
                internal_endpoint_id,
                performance_profile
            )
        # No alternative matched: report the last failure reason with a 400.
        self.write(reason)
        await self.raise_http_error(400)
def match_alternative_headers(self, alternative: HttpAlternative) -> Tuple[bool, Union[str, None]]:
reason = None
fail = False
if alternative.headers is not None:
for key, value in alternative.headers.items():
request_header_val = self.request.headers.get(key.title())
if key.title() not in self.request.headers._dict:
self.internal_endpoint_id = alternative.internal_endpoint_id
fail = True
reason = '%r not in the request headers!' % key.title()
break
if value == request_header_val:
continue
value = '^%s$' % value
match = re.search(value, request_header_val)
if match is None:
self.internal_endpoint_id = alternative.internal_endpoint_id
fail = True
reason = 'Request header value %r on key %r does not match to regex: %s' % (
request_header_val,
key.title(),
value
)
break
return fail, reason
    def match_alternative_query_string(self, alternative: HttpAlternative) -> Tuple[bool, Union[str, None]]:
        """Match the request's query string against the alternative's criteria.

        Returns ``(fail, reason)``: ``fail`` is True when a required key is
        absent (keys may themselves be regexes) or a present value does not
        match the anchored value regex.
        """
        reason = None
        fail = False
        if alternative.query_string is not None:
            for key, value in alternative.query_string.items():
                # To prevent 400, default=None
                default = None
                request_query_val = self.get_query_argument(key, default=default)
                if request_query_val is default:
                    # The literal key is absent; if the key contains regex
                    # metacharacters, try it as a pattern against all keys.
                    is_matched = False
                    if re.escape(key) != key:
                        for _key in self.request.query_arguments:
                            match = re.search(key, _key)
                            if match is not None:
                                is_matched = True
                                break
                    if not is_matched:
                        self.internal_endpoint_id = alternative.internal_endpoint_id
                        fail = True
                        reason = 'Key %r couldn\'t found in the query string!' % key
                        break
                if value == request_query_val:
                    continue
                if request_query_val is default:
                    # Key matched via regex above; no single value to compare.
                    continue
                # Treat the configured value as a fully-anchored regex.
                value = '^%s$' % value
                match = re.search(value, request_query_val)
                if match is None:
                    self.internal_endpoint_id = alternative.internal_endpoint_id
                    fail = True
                    reason = 'Request query parameter value %r on key %r does not match to regex: %s' % (
                        request_query_val,
                        key,
                        value
                    )
                    break
        return fail, reason
def match_alternative_body_schema(self, body: str, alternative: HttpAlternative) -> Tuple[bool, Union[str, None], bool]:
reason = None
fail = False
if alternative.body.schema is not None:
json_schema = None
if isinstance(alternative.body.schema.payload, ConfigExternalFilePath):
json_schema_path, _ = self.resolve_relative_path(alternative.body.schema.payload.path)
with open(json_schema_path, 'r') as file:
logging.info('Reading JSON schema file from path: %s', json_schema_path)
try:
json_schema = json.load(file)
except json.decoder.JSONDecodeError:
self.send_error(
500,
message='JSON decode error of the JSON schema file: %s' % alternative.body.schema.payload.path
)
return fail, reason, True
logging.debug('JSON schema: %s', json_schema)
else:
json_schema = alternative.body.schema.payload
json_data = None
if body and json_schema:
try:
json_data = json.loads(body)
| |
Basic.__new__(cls, name, tensortype, is_up)
obj._name = name
obj._tensortype = tensortype
obj._is_up = is_up
return obj
    @property
    def name(self):
        """Name of the index."""
        return self._name
    @property
    def tensortype(self):
        """The ``TensorIndexType`` this index belongs to."""
        return self._tensortype
    @property
    def is_up(self):
        """True for a contravariant (upper) index."""
        return self._is_up
def _pretty(self):
s = self._name
if not self._is_up:
s = '-%s' % s
return s
    def __lt__(self, other):
        """Order indices by (tensortype, name)."""
        return (self._tensortype, self._name) < (other._tensortype, other._name)
def __neg__(self):
t1 = TensorIndex(self._name, self._tensortype,
(not self._is_up))
return t1
def tensor_indices(s, typ):
    """
    Returns list of tensor indices given their names and their types
    Parameters
    ==========
    s : string of comma separated names of indices
    typ : list of ``TensorIndexType`` of the indices
    Examples
    ========
    >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices
    >>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    >>> a, b, c, d = tensor_indices('a,b,c,d', Lorentz)
    """
    if not isinstance(s, str):
        raise ValueError('expecting a string')
    names = [symbol.name for symbol in symbols(s, seq=True)]
    return [TensorIndex(name, typ) for name in names]
class TensorSymmetry(Basic):
    """
    Monoterm symmetry of a tensor
    Parameters
    ==========
    bsgs : tuple ``(base, sgs)`` BSGS of the symmetry of the tensor
    Attributes
    ==========
    ``base`` : base of the BSGS
    ``generators`` : generators of the BSGS
    ``rank`` : rank of the tensor
    Notes
    =====
    A tensor can have an arbitrary monoterm symmetry provided by its BSGS.
    Multiterm symmetries, like the cyclic symmetry of the Riemann tensor,
    are not covered.
    See Also
    ========
    sympy.combinatorics.tensor_can.get_symmetric_group_sgs
    Examples
    ========
    Define a symmetric tensor
    >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, TensorSymmetry, TensorType, get_symmetric_group_sgs
    >>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    >>> sym2 = TensorSymmetry(get_symmetric_group_sgs(2))
    >>> S2 = TensorType([Lorentz]*2, sym2)
    >>> V = S2('V')
    """
    def __new__(cls, bsgs, **kw_args):
        base, sgs = bsgs
        return Basic.__new__(cls, base, sgs, **kw_args)

    @property
    def base(self):
        """Base of the BSGS."""
        return self.args[0]

    @property
    def generators(self):
        """Generators of the BSGS."""
        return self.args[1]

    @property
    def rank(self):
        """Rank of the tensor."""
        return self.args[1][0].size - 2

    def _hashable_content(self):
        return (tuple(self.base), tuple(self.generators))
def tensorsymmetry(*args):
    """
    Return a ``TensorSymmetry`` object.
    One can represent a tensor with any monoterm slot symmetry group
    using a BSGS.
    ``args`` can be a BSGS
    ``args[0]`` base
    ``args[1]`` sgs
    Usually tensors are in (direct products of) representations
    of the symmetric group;
    ``args`` can be a list of lists representing the shapes of Young tableaux
    Notes
    =====
    For instance:
    ``[[1]]`` vector
    ``[[1]*n]`` symmetric tensor of rank ``n``
    ``[[n]]`` antisymmetric tensor of rank ``n``
    ``[[2, 2]]`` monoterm slot symmetry of the Riemann tensor
    ``[[1],[1]]`` vector*vector
    ``[[2],[1],[1]]`` (antisymmetric tensor)*vector*vector
    Notice that with the shape ``[2, 2]`` we associate only the monoterm
    symmetries of the Riemann tensor; this is an abuse of notation,
    since the shape ``[2, 2]`` corresponds usually to the irreducible
    representation characterized by the monoterm symmetries and by the
    cyclic symmetry.
    Examples
    ========
    Symmetric tensor using a Young tableau
    >>> from sympy.tensor.tensor import TensorIndexType, TensorType, tensorsymmetry
    >>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    >>> sym2 = tensorsymmetry([1, 1])
    >>> S2 = TensorType([Lorentz]*2, sym2)
    >>> V = S2('V')
    Symmetric tensor using a BSGS
    >>> from sympy.tensor.tensor import TensorSymmetry, get_symmetric_group_sgs
    >>> sym2 = tensorsymmetry(*get_symmetric_group_sgs(2))
    >>> S2 = TensorType([Lorentz]*2, sym2)
    >>> V = S2('V')
    """
    from sympy.combinatorics import Permutation
    # Convert one Young-tableau shape into a BSGS.
    def tableau2bsgs(a):
        if len(a) == 1:
            # antisymmetric vector
            n = a[0]
            bsgs = get_symmetric_group_sgs(n, 1)
        else:
            if all(x == 1 for x in a):
                # symmetric vector
                n = len(a)
                bsgs = get_symmetric_group_sgs(n)
            elif a == [2, 2]:
                bsgs = riemann_bsgs
            else:
                raise NotImplementedError
        return bsgs
    # No arguments: trivial symmetry.
    if not args:
        return TensorSymmetry([[], [Permutation(1)]])
    # Arguments already form a (base, sgs) BSGS pair.
    if len(args) == 2 and isinstance(args[1][0], Permutation):
        return TensorSymmetry(args)
    # Otherwise: direct product of the BSGS of each tableau shape.
    base, sgs = tableau2bsgs(args[0])
    for a in args[1:]:
        basex, sgsx = tableau2bsgs(a)
        base, sgs = bsgs_direct_product(base, sgs, basex, sgsx)
    return TensorSymmetry((base, sgs))
class TensorType(Basic):
    """
    Class of tensor types.
    Parameters
    ==========
    index_types : list of ``TensorIndexType`` of the tensor indices
    symmetry : ``TensorSymmetry`` of the tensor
    Attributes
    ==========
    ``index_types``
    ``symmetry``
    ``types`` : list of ``TensorIndexType`` without repetitions
    Examples
    ========
    Define a symmetric tensor
    >>> from sympy.tensor.tensor import TensorIndexType, tensorsymmetry, TensorType
    >>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    >>> sym2 = tensorsymmetry([1, 1])
    >>> S2 = TensorType([Lorentz]*2, sym2)
    >>> V = S2('V')
    """
    is_commutative = False

    def __new__(cls, index_types, symmetry, **kw_args):
        # The symmetry's rank must agree with the number of index slots.
        assert symmetry.rank == len(index_types)
        return Basic.__new__(cls, index_types, symmetry, **kw_args)

    @property
    def index_types(self):
        """Index types of the tensor slots."""
        return self.args[0]

    @property
    def symmetry(self):
        """The ``TensorSymmetry`` of this type."""
        return self.args[1]

    @property
    def types(self):
        """Sorted list of the distinct index types."""
        return sorted(set(self.index_types), key=lambda t: t.name)

    def __str__(self):
        return 'TensorType(%s)' % ([str(t) for t in self.index_types])

    def __call__(self, s, comm=0):
        """
        Return a TensorHead object or a list of TensorHead objects.
        ``s`` name or string of names
        ``comm``: commutation group number
        see ``_TensorManager.set_comm``
        Examples
        ========
        Define symmetric tensors ``V``, ``W`` and ``G``, respectively
        commuting, anticommuting and with no commutation symmetry
        >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorsymmetry, TensorType, canon_bp
        >>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
        >>> a, b = tensor_indices('a,b', Lorentz)
        >>> sym2 = tensorsymmetry([1]*2)
        >>> S2 = TensorType([Lorentz]*2, sym2)
        >>> V = S2('V')
        >>> W = S2('W', 1)
        >>> G = S2('G', 2)
        >>> canon_bp(V(a, b)*V(-b, -a))
        V(L_0, L_1)*V(-L_0, -L_1)
        >>> canon_bp(W(a, b)*W(-b, -a))
        0
        """
        if not isinstance(s, str):
            raise ValueError('expecting a string')
        names = [symbol.name for symbol in symbols(s, seq=True)]
        if len(names) == 1:
            return TensorHead(names[0], self, comm)
        return [TensorHead(name, self, comm) for name in names]
def tensorhead(name, typ, sym, comm=0):
    """
    Function generating tensorhead(s).
    Parameters
    ==========
    name : name or sequence of names (as in ``symbol``)
    typ : index types
    sym : same as ``*args`` in ``tensorsymmetry``
    comm : commutation group number
    see ``_TensorManager.set_comm``
    Examples
    ========
    >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
    >>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    >>> a, b = tensor_indices('a,b', Lorentz)
    >>> A = tensorhead('A', [Lorentz]*2, [[1]*2])
    >>> A(a, -b)
    A(a, -b)
    """
    # Build the TensorType from the index types and symmetry, then
    # instantiate the head(s) through its __call__.
    return TensorType(typ, tensorsymmetry(*sym))(name, comm)
class TensorHead(Basic):
    """
    Tensor head of the tensor
    Parameters
    ==========
    name : name of the tensor
    typ : list of TensorIndexType
    comm : commutation group number
    Attributes
    ==========
    ``name``
    ``index_types``
    ``rank``
    ``types`` : equal to ``typ.types``
    ``symmetry`` : equal to ``typ.symmetry``
    ``comm`` : commutation group
    Notes
    =====
    A ``TensorHead`` belongs to a commutation group, defined by a
    symbol on number ``comm`` (see ``_TensorManager.set_comm``);
    tensors in a commutation group have the same commutation properties;
    by default ``comm`` is ``0``, the group of the commuting tensors.
    Examples
    ========
    >>> from sympy.tensor.tensor import TensorIndexType, tensorsymmetry, TensorType
    >>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
    >>> sym2 = tensorsymmetry([1]*2)
    >>> S2 = TensorType([Lorentz]*2, sym2)
    >>> A = S2('A')
    """
    is_commutative = False
    def __new__(cls, name, typ, comm, **kw_args):
        # BUGFIX: `basestring` is Python 2 only and raises NameError on
        # Python 3; tensor names are plain `str` objects.
        assert isinstance(name, str)
        obj = Basic.__new__(cls, name, typ, **kw_args)
        obj._name = obj.args[0]
        obj._rank = len(obj.index_types)
        obj._types = typ.types
        obj._symmetry = typ.symmetry
        obj._comm = TensorManager.comm_symbols2i(comm)
        return obj
    @property
    def name(self):
        """Name of the tensor head."""
        return self._name
    @property
    def rank(self):
        """Number of index slots."""
        return self._rank
    @property
    def types(self):
        """Copy of the distinct index types."""
        return self._types[:]
    @property
    def symmetry(self):
        """The ``TensorSymmetry`` of this head."""
        return self._symmetry
    @property
    def typ(self):
        """The ``TensorType`` this head was created from."""
        return self.args[1]
    @property
    def comm(self):
        """Commutation group number."""
        return self._comm
    @property
    def index_types(self):
        """Copy of the slot index types."""
        return self.args[1].index_types[:]
    def __lt__(self, other):
        """Order heads by (name, index_types)."""
        return (self.name, self.index_types) < (other.name, other.index_types)
    def _hashable_content(self):
        return (self._name, tuple(self._types), self._symmetry, self._comm)
    def commutes_with(self, other):
        """
        Returns 0 (1) if self and other (anti)commute.
        Returns None if self and other do not (anti)commute.
        """
        return TensorManager.get_comm(self._comm, other._comm)
    def _pretty(self):
        return '%s(%s)' %(self.name, ','.join([str(x) for x in self.index_types]))
    def __call__(self, *indices):
        """
        Returns a tensor with indices.
        Examples
        ========
        >>> from sympy.tensor.tensor import TensorIndexType, tensor_indices, tensorhead
        >>> Lorentz = TensorIndexType('Lorentz', dummy_fmt='L')
        >>> a, b = tensor_indices('a,b', Lorentz)
        >>> A = tensorhead('A', [Lorentz]*2, [[1]*2])
        >>> t = A(a, -b)
        """
        # Every supplied index must have the type of its slot.
        if not [indices[i]._tensortype for i in range(len(indices))] == self.index_types:
            raise ValueError('wrong index type')
        components = [self]
        free, dum = TensMul.from_indices(*indices)
        free.sort(key=lambda x: x[0].name)
        dum.sort()
        return TensMul(S.One, components, free, dum)
class TensExpr(Basic):
"""
Abstract base class for tensor expressions
Notes
=====
A tensor expression is an expression formed by tensors;
currently the sums of tensors are distributed.
A ``TensExpr`` can be a ``TensAdd`` or a ``TensMul``.
``TensAdd`` objects are put in canonical form using the Butler-Portugal
algorithm for canonicalization under monoterm symmetries.
``TensMul`` objects are formed by products of component tensors,
and include a coefficient, which is a SymPy expression.
In the internal representation contracted indices are represented
by ``(ipos1, | |
the empirical precision matrix given the data.
Args:
X (array[N,D]): data matrix of shape NxD where N is the number of samples, and D is the dimensionality
of a data point
axis (int): axis along which the precision is computed
bessels_correction (bool): if True, it will compute the precision using `1/N-1` instead of `N`.
Returns:
float[D,D]: 2D precision matrix
"""
prec = torch.inverse(Gaussian.compute_covariance(X, axis=axis, bessels_correction=bessels_correction))
return prec
@staticmethod
def compute_normalization_constant(covariance):
r"""
Compute the normalization constant based on the covariance, which is given by:
.. math:: c = \frac{1}{(2\pi)^{\frac{d}{2}} |\Sigma|^{\frac{1}{2}}}
Args:
covariance (array_like: float[d,d]): covariance matrix
Returns:
float: normalization constant such that the distribution sums to 1 when integrated.
"""
size = covariance.shape[0]
normalization_constant = 1. / ((2 * np.pi) ** (size / 2.) * torch.det(covariance) ** 0.5)
return normalization_constant
@staticmethod
def is_symmetric(X, tol=1e-8):
"""Check if given matrix X is symmetric.
If a matrix is symmetric, it has real eigenvalues, orthogonal eigenvectors and is always diagonalizable.
"""
return torch.allclose(X, X.t(), atol=tol)
# TODO: check if X belongs to the SPD space S^n_{++}
@staticmethod
def is_psd(X, tol=1e-12):
"""Check if given matrix is PSD"""
evals, evecs = torch.eig(X, eigenvectors=False)
return torch.all(evals >= 0 - tol)
###########
# Methods #
###########
def parameters(self):
"""Returns an iterator over the model parameters."""
yield self.mean
yield self.covariance
def named_parameters(self):
"""Returns an iterator over the model parameters, yielding both the name and the parameter itself"""
yield "mean", self.mean
yield "covariance", self.covariance
def pdf(self, value): # likelihood
r"""
Probability density function evaluated at the given `x`.
This is given by the following formula:
.. math:: p(x) = \frac{1}{(2\pi)^\frac{d}{2} |\Sigma|^\frac{1}{2}}
\exp\left( - \frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right)
where :math:`\Sigma` is the covariance matrix, :math:`\mu` is the mean, and :math:`d` is the dimensionality
of the Gaussian distribution.
Args:
value (torch.Tensor): vector to evaluate the probability density function.
Returns:
float: probability density evaluated at `x`.
"""
return torch.exp(self.log_prob(value))
def logcdf(self, value):
r"""
Log of the Cumulative Distribution Function.
Note that this requires at least scipy v1.1.0
Args:
value (torch.Tensor): vector to evaluate the log of the cumulative distribution function.
Returns:
float: log of the cumulative distribution function evaluated at `x`.
"""
return torch.log(self.cdf(value))
def distance(self, x):
r"""
Compute the distance of the given data from the mean by also taking into account the covariance. In the
'Euclidean' space, this method returns the Mahalanobis distance which is defined as
:math:`D_M(x) = \sqrt{(x - \mu)^T \Sigma^{-1} (x - \mu)}`.
Args:
x (torch.Tensor[D]): data vector
Returns:
float: distance
"""
if self.manifold == 'euclidean':
diff = x - self.mean
return torch.sqrt(diff.matmul(self.precision).matmul(diff))
def condition(self, input_value, output_idx, input_idx=None):
r"""
Compute the conditional distribution.
Assume the joint distribution :math:`p(x_1, x2)` is modeled as a normal distribution, then
the conditional distribution of :math:`x_1` given :math:`x_2` is given by
:math:`p(x_1|x_2) = \mathcal{N}(\mu, \Sigma)` (which is also Gaussian), where the mean :math:`\mu` and
covariance :math:`\Sigma` are given by:
.. math::
\mu &= \mu_1 + \Sigma_{12} \Sigma_{22}^{-1} (x_2 - \mu_2) \\
\Sigma &= \Sigma_{11} - \Sigma_{12} \Sigma_{22}^{-1} \Sigma_{21}
Args:
input_value (float[d2]): array of values :math:`x_2` such that we have :math:`p(x_1|x_2)`
output_idx (int[d1], int): indices that we are interested in, given (i.e. conditioned on) the other ones.
That is, the indices for :math:`x_1`
input_idx (int[d2], int, None): indices that we conditioned on, i.e. corresponding to the values. If None,
it will be inferred. That is, the indices for :math:`x_2`
Returns:
Gaussian: Conditional Normal distribution
"""
# aliases
value, o, i = input_value, output_idx, input_idx
value = torch.Tensor([value]) if isinstance(value, (int, float)) else torch.tensor(value)
# if i is None:
# o = torch.Tensor([o]) if isinstance(o, int) else torch.Tensor(o)
# # from all the indices remove the output indices
# i = torch.Tensor(list(set(range(self.dim)) - set(o)))
# i.sort()
#
# # make sure that the input indices have the same length as the value ones
# i = i[:len(value)]
# else:
# i = torch.Tensor([i]) if isinstance(i, int) else torch.Tensor(i)
# assert len(i) == len(value), "The value array and the idx2 array have different lengths"
if i is None:
o = np.array([o]) if isinstance(o, int) else np.array(o)
# from all the indices remove the output indices
i = np.array(list(set(range(self.dim)) - set(o)))
i.sort()
# make sure that the input indices have the same length as the value ones
i = i[:len(value)]
else:
i = np.array([i]) if isinstance(i, int) else np.array(i)
assert len(i) == len(value), "The value array and the idx2 array have different lengths"
# compute conditional
c = self.covariance[np.ix_(o, i)].matmul(torch.inverse(self.covariance[np.ix_(i, i)]))
mu = self.mean[o] + c.matmul(value - self.mean[i])
cov = self.covariance[i, o]
if len(cov.size()):
cov = cov.view(-1, 1)
cov = self.covariance[np.ix_(o, o)] - c.matmul(cov)
return Gaussian(mean=mu, covariance=cov)
def marginalize(self, idx):
r"""
Compute and return the marginal distribution (which is also Gaussian) of the specified indices.
Let's assume that the joint distribution :math:`p(x_1, x_2)` is modeled as a Gaussian distribution, that is:
.. math:: x \sim \mathcal{N}(\mu, \Sigma)
where :math:`x = [x_1, x_2]`, :math:`\mu = [\mu_1, \mu_2]` and
:math:`\Sigma=\left[\begin{array}{cc} \Sigma_{11} & \Sigma_{12} \\ \Sigma_{21} & \Sigma_{22} \end{array}\right]`
then the marginal distribution :math:`p(x_1) = \int_{x_2} p(x_1, x_2) dx_2` is also Gaussian and is given by:
.. math:: p(x_1) = \mathcal{N}(\mu_1, \Sigma_{11})
Args:
idx (int, slice): indices of :math:`x_1` (this value should be between 0 and D-1, where D is
the dimensionality of the data)
Returns:
Gaussian: marginal distribution (which is also Gaussian)
"""
if isinstance(idx, (int, float)):
idx = [idx]
return Gaussian(mean=self.mean[idx], covariance=self.covariance[np.ix_(idx, idx)])
def multiply(self, other):
r"""
Multiply a Gaussian by another Gaussian, by a square matrix (under an affine transformation), or a float
number.
The product of two Gaussian PDFs is given by:
.. math:: \mathcal{N}(\mu_1, \Sigma_1) \mathcal{N}(\mu_2, \Sigma_2) = C \mathcal{N}(\mu, \Sigma)
where :math:`C = \mathcal{N}(\mu_1; \mu_2, \Sigma_1 + \Sigma_2)` is a constant (scalar),
:math:`\Sigma = (\Sigma_1^{-1} + \Sigma_2^{-1})^-1`, and
:math:`\mu = \Sigma (\Sigma_1^{-1} \mu_1 + \Sigma_2^{-1} \mu_2)`.
Note that the product of two Gaussians is a Gaussian, but it is usually no more a valid probability density
function. In order to make it a proper probability distribution, we have to normalize it, which results to
remove the constant :math:`C`.
The product of a Gaussian distribution :math:`\mathcal{N}(\mu, \Sigma)` with a square matrix :math:`A` gives:
.. math:: Ax \sim \mathcal{N}(A \mu, A \Sigma A^T)
The product of a Gaussian by a float does nothing as we have to re-normalize it to be a proper distribution.
Args:
other (Gaussian, array_like of float[D,D], float): Gaussian, square matrix (to rotate or scale), or float
Returns:
Gaussian: resulting Gaussian distribution
"""
# if other == Gaussian
if isinstance(other, Gaussian):
# coefficient = Gaussian(other.mean, self.covariance + other.covariance)(self.mean) * self.coefficient
prec1, prec2 = self.precision, other.precision
cov = torch.inverse(prec1 + prec2)
mu = cov.matmul(prec1.matmul(self.mean) + prec2.matmul(other.mean))
return Gaussian(mean=mu, covariance=cov) # , coefficient=coefficient)
# if other == square matrix
elif isinstance(other, torch.Tensor):
return Gaussian(mean=other.matmul(self.mean), covariance=other.matmul(self.covariance).matmul(other.t()))
# if other == number
elif isinstance(other, (int, float)):
return self
else:
raise TypeError("Trying to multiply a Gaussian with {}, which has not be defined".format(type(other)))
def get_multiplication_coefficient(self, other):
r"""
Return the coefficient :math:`C` that appears when multiplying two Gaussians.
As a reminder, the product of two Gaussian PDFs is given by:
.. math:: \mathcal{N}(\mu_1, \Sigma_1) \mathcal{N}(\mu_2, \Sigma_2) = C \mathcal{N}(\mu, \Sigma)
where :math:`C = \mathcal{N}(\mu_1; \mu_2, \Sigma_1 + \Sigma_2)` is a constant (scalar),
:math:`\Sigma = (\Sigma_1^{-1} + \Sigma_2^{-1})^-1`, and
:math:`\mu = \Sigma (\Sigma_1^{-1} \mu_1 + \Sigma_2^{-1} \mu_2)`.
Args:
other (Gaussian): other Gaussian
Returns:
float: resulting coefficient
"""
return Gaussian(mean=other.mean, covariance=self.covariance + other.covariance)(self.mean)
# def integrate_conjugate_prior(self, other): # TODO: call it marginal_likelihood
def marginal_distribution(self, x, prior):
r"""
Integrate the given Gaussian conjugate prior on the parameters with the current Gaussian PDF.
.. math::
p(y; \theta) &= \int \mathcal{N}(y | \Phi(x) w, \Sigma_y) \mathcal{N}(w | \mu_w, \Sigma_w) dw \\
&= \mathcal{N}(y | \Phi(x) \mu_w, \Phi(x) \Sigma_w \Phi(x)^T + \Sigma_y)
Args:
prior (Gaussian): the other Gaussian conjugate prior
Returns:
Gaussian: resulting Gaussian
"""
if isinstance(prior, Gaussian):
if callable(self.mean):
# | |
import pprint
import time
from .constant import (
AIR_QUALITY_KEY,
AUTOMATION_PATH,
CONNECTION_KEY,
DEFAULT_MODES,
DEFINITIONS_PATH,
HUMIDITY_KEY,
MODE_ID_TO_NAME_KEY,
MODE_IS_SCHEDULE_KEY,
MODE_KEY,
MODE_NAME_TO_ID_KEY,
MODE_UPDATE_INTERVAL,
MODEL_BABY,
MODEL_ESSENTIAL,
MODEL_GO,
MODEL_PRO_3_FLOODLIGHT,
MODEL_PRO_4,
MODEL_WIREFREE_VIDEO_DOORBELL,
PING_CAPABILITY,
RESOURCE_CAPABILITY,
RESTART_PATH,
SCHEDULE_KEY,
SIREN_STATE_KEY,
TEMPERATURE_KEY,
TIMEZONE_KEY,
)
from .device import ArloDevice
from .util import time_to_arlotime
# time.localtime().tm_wday is 0 (Monday) .. 6 (Sunday); the trailing extra
# "Mo" makes the lookup safe even for an out-of-range index of 7.
day_of_week = ["Mo", "Tu", "We", "Th", "Fr", "Sa", "Su", "Mo"]
class ArloBase(ArloDevice):
    def __init__(self, name, arlo, attrs):
        """Create a base-station device.

        Args:
            name: device name, passed through to ArloDevice.
            arlo: owning Arlo instance, passed through to ArloDevice.
            attrs: raw device attributes, passed through to ArloDevice.
        """
        super().__init__(name, arlo, attrs)
        # Exposed via the refresh_rate property; defaults to 15.
        self._refresh_rate = 15
        # Raw schedule list cached by _parse_schedules(); None until fetched.
        self._schedules = None
        # time.monotonic() of the last mode update; used with
        # MODE_UPDATE_INTERVAL to rate-limit "states" event handling.
        self._last_update = 0
    def _id_to_name(self, mode_id):
        """Map a mode/schedule id to its human-readable name (None if unknown)."""
        return self._load([MODE_ID_TO_NAME_KEY, mode_id], None)
    def _id_is_schedule(self, mode_id):
        """Return True if the given id was recorded as a schedule rather than a mode."""
        return self._load([MODE_IS_SCHEDULE_KEY, mode_id.lower()], False)
    def _name_to_id(self, mode_name):
        """Map a mode/schedule name (case-insensitive) to its Arlo id (None if unknown)."""
        return self._load([MODE_NAME_TO_ID_KEY, mode_name.lower()], None)
def _parse_modes(self, modes):
for mode in modes:
mode_id = mode.get("id", None)
mode_name = mode.get("name", "")
if mode_name == "":
mode_name = mode.get("type", "")
if mode_name == "":
mode_name = mode_id
if mode_id and mode_name != "":
self._arlo.debug(mode_id + "<=M=>" + mode_name)
self._save([MODE_ID_TO_NAME_KEY, mode_id], mode_name)
self._save([MODE_NAME_TO_ID_KEY, mode_name.lower()], mode_id)
self._save([MODE_IS_SCHEDULE_KEY, mode_id.lower()], False)
self._save([MODE_IS_SCHEDULE_KEY, mode_name.lower()], False)
def schedule_to_modes(self):
if self._schedules is None:
return []
now = time.localtime()
day = day_of_week[now.tm_wday]
minute = (now.tm_hour * 60) + now.tm_min
for schedule in self._schedules:
if not schedule.get("enabled", False):
continue
for action in schedule.get("schedule", []):
if day in action.get("days", []):
start = action.get("startTime", 65535)
duration = action.get("duration", 65536)
if start <= minute < (start + duration):
modes = action.get("startActions", {}).get("enableModes", None)
if modes:
self._arlo.debug("schdule={}".format(modes[0]))
return modes
# If nothing in schedule we are disarmed.
return ["mode0"]
def _parse_schedules(self, schedules):
self._schedules = schedules
for schedule in schedules:
schedule_id = schedule.get("id", None)
schedule_name = schedule.get("name", "")
if schedule_name == "":
schedule_name = schedule_id
if schedule_id and schedule_name != "":
self._arlo.debug(schedule_id + "<=S=>" + schedule_name)
self._save([MODE_ID_TO_NAME_KEY, schedule_id], schedule_name)
self._save([MODE_NAME_TO_ID_KEY, schedule_name.lower()], schedule_id)
self._save([MODE_IS_SCHEDULE_KEY, schedule_id.lower()], True)
self._save([MODE_IS_SCHEDULE_KEY, schedule_name.lower()], True)
    def _set_mode_or_schedule(self, event):
        """Update saved schedule and mode from an automation event.

        First fires the SCHEDULE_KEY callback (with the schedule name, or None
        when no schedule is active), then fires the MODE_KEY callback with the
        active mode. When the event only carries a schedule, the mode is
        derived from the cached schedule data via schedule_to_modes().
        """
        # schedule on or off?
        schedule_ids = event.get("activeSchedules", [])
        if schedule_ids:
            self._arlo.debug(self.name + " schedule change " + schedule_ids[0])
            schedule_name = self._id_to_name(schedule_ids[0])
            self._save_and_do_callbacks(SCHEDULE_KEY, schedule_name)
        else:
            self._arlo.debug(self.name + " schedule cleared ")
            self._save_and_do_callbacks(SCHEDULE_KEY, None)
        # mode present? we just set to that one... If no mode but schedule then
        # try to parse that out
        mode_ids = event.get("activeModes", [])
        if not mode_ids and schedule_ids:
            self._arlo.debug(self.name + " mode change (via schedule) ")
            self._arlo.vdebug(
                self.name + " schedules: " + pprint.pformat(self._schedules)
            )
            mode_ids = self.schedule_to_modes()
        if mode_ids:
            self._arlo.debug(self.name + " mode change " + mode_ids[0])
            mode_name = self._id_to_name(mode_ids[0])
            self._save_and_do_callbacks(MODE_KEY, mode_name)
    def _event_handler(self, resource, event):
        """Dispatch base-station events by resource type.

        Handles "modes", "states", "activeAutomations" and
        "automationRevisionUpdate"; anything else is passed to the
        ArloDevice handler.
        """
        self._arlo.debug(self.name + " BASE got " + resource)
        # modes on base station
        if resource == "modes":
            props = event.get("properties", {})
            # list of modes - recheck?
            self._parse_modes(props.get("modes", []))
            # mode change? ("activeMode" and "active" are alternative keys)
            if "activeMode" in props:
                self._save_and_do_callbacks(
                    MODE_KEY, self._id_to_name(props["activeMode"])
                )
            elif "active" in props:
                self._save_and_do_callbacks(MODE_KEY, self._id_to_name(props["active"]))
        # Base station mode change.
        # These come in per device and can arrive multiple times per state
        # change. We limit the updates to once per MODE_UPDATE_INTERVAL
        # seconds. Arlo doesn't send a "schedule changed" notification so we
        # re-fetch that information before testing the mode.
        elif resource == "states":
            now = time.monotonic()
            with self._lock:
                # rate limit: drop events arriving inside the interval
                if now < self._last_update + MODE_UPDATE_INTERVAL:
                    return
                self._last_update = now
            self._arlo.debug("state change")
            self.update_modes()
            self.update_mode()
        # mode change?
        elif resource == "activeAutomations":
            self._set_mode_or_schedule(event)
        # schedule has changed, so reload
        elif resource == "automationRevisionUpdate":
            self.update_modes()
        # pass on to lower layer
        else:
            super()._event_handler(resource, event)
@property
def _v1_modes(self):
if self._arlo.cfg.mode_api.lower() == "v1":
self._arlo.vdebug("forced v1 api")
return True
if self._arlo.cfg.mode_api.lower() == "v2":
self._arlo.vdebug("forced v2 api")
return False
if (
self.model_id == MODEL_BABY
or self.model_id == MODEL_GO
or self.device_type == "arloq"
or self.device_type == "arloqs"
):
self._arlo.vdebug("deduced v1 api")
return True
else:
self._arlo.vdebug("deduced v2 api")
return False
@property
def available_modes(self):
"""Returns string list of available modes.
For example:: ``['disarmed', 'armed', 'home']``
"""
return list(self.available_modes_with_ids.keys())
@property
def available_modes_with_ids(self):
"""Returns dictionary of available modes mapped to Arlo ids.
For example:: ``{'armed': 'mode1','disarmed': 'mode0','home': 'mode2'}``
"""
modes = {}
for key, mode_id in self._load_matching([MODE_NAME_TO_ID_KEY, "*"]):
modes[key.split("/")[-1]] = mode_id
if not modes:
modes = DEFAULT_MODES
return modes
    @property
    def mode(self):
        """Returns the current mode name, or ``"unknown"`` if not yet known."""
        return self._load(MODE_KEY, "unknown")
@mode.setter
def mode(self, mode_name):
"""Set the base station mode.
**Note:** Setting mode has been known to hang, method includes code to keep retrying.
:param mode_name: mode to use, as returned by available_modes:
"""
# Actually passed a mode?
mode_id = None
real_mode_name = self._id_to_name(mode_name)
if real_mode_name:
self._arlo.debug(f"passed an ID({mode_name}), converting it")
mode_id = mode_name
mode_name = real_mode_name
# Need to change?
if self.mode == mode_name:
self._arlo.debug("no mode change needed")
return
if mode_id is None:
mode_id = self._name_to_id(mode_name)
if mode_id:
# Need to change?
if self.mode == mode_id:
self._arlo.debug("no mode change needed (id)")
return
# Schedule or mode? Manually set schedule key.
if self._id_is_schedule(mode_id):
active = "activeSchedules"
inactive = "activeModes"
self._save_and_do_callbacks(SCHEDULE_KEY, mode_name)
else:
active = "activeModes"
inactive = "activeSchedules"
self._save_and_do_callbacks(SCHEDULE_KEY, None)
# Post change.
self._arlo.debug(self.name + ":new-mode=" + mode_name + ",id=" + mode_id)
if self._v1_modes:
self._arlo.be.notify(
base=self,
body={
"action": "set",
"resource": "modes",
"publishResponse": True,
"properties": {"active": mode_id},
},
)
else:
# This is complicated... Setting a mode can fail and setting a mode can be sync or async.
# This code tried 3 times to set the mode with attempts to reload the devices between
# attempts to try and kick Arlo. In async mode the first set works in the current thread,
# subsequent ones run in the background. In sync mode it the same. Sorry.
def _set_mode_v2_cb(attempt):
self._arlo.debug("v2 arming")
params = {
"activeAutomations": [
{
"deviceId": self.device_id,
"timestamp": time_to_arlotime(),
active: [mode_id],
inactive: [],
}
]
}
if attempt < 4:
tid = "(modes:{}|activeAutomations)".format(self.device_id)
body = self._arlo.be.post(
AUTOMATION_PATH,
params=params,
raw=True,
tid=tid,
wait_for=None,
)
if body is not None:
if (
body.get("success", False) is True
or body.get("resource", "") == "modes"
or body.get("resource", "") == "activeAutomations"
):
return
self._arlo.warning(
"attempt {0}: error in response when setting mode=\n{1}".format(
attempt, pprint.pformat(body)
)
)
self._arlo.debug(
"Fetching device list (hoping this will fix arming/disarming)"
)
self._arlo.be.devices()
if self._arlo.cfg.synchronous_mode:
self._arlo.debug("trying again, but synchronous")
_set_mode_v2_cb(attempt=attempt + 1)
else:
self._arlo.bg.run(_set_mode_v2_cb, attempt=attempt + 1)
return
self._arlo.error("Failed to set mode.")
self._arlo.debug(
"Giving up on setting mode! Session headers=\n{}".format(
pprint.pformat(self._arlo.be.session.headers)
)
)
self._arlo.debug(
"Giving up on setting mode! Session cookies=\n{}".format(
pprint.pprint(self._arlo.be.session.cookies)
)
)
_set_mode_v2_cb(1)
else:
self._arlo.warning(
"{0}: mode {1} is unrecognised".format(self.name, mode_name)
)
def update_mode(self):
"""Check and update the base's current mode."""
now = time.monotonic()
with self._lock:
# if now < self._last_update + MODE_UPDATE_INTERVAL:
# self._arlo.debug('skipping an update')
# return
self._last_update = now
data = self._arlo.be.get(AUTOMATION_PATH)
for mode in data:
if mode.get("uniqueId", "") == self.unique_id:
self._set_mode_or_schedule(mode)
    def update_modes(self, initial=False):
        """Get and update the available modes for the base.

        Uses the v1 notify API or the v2 definitions endpoint depending on
        the _v1_modes property. The v2 path also refreshes schedules and the
        timezone.

        Args:
            initial: True on the first fetch; in synchronous mode this adds a
                delay to work around slow Arlo connections.
        """
        if self._v1_modes:
            # Work around slow arlo connections.
            if initial and self._arlo.cfg.synchronous_mode:
                time.sleep(5)
            resp = self._arlo.be.notify(
                base=self,
                body={"action": "get", "resource": "modes", "publishResponse": False},
                wait_for="event",
            )
            if resp is not None:
                props = resp.get("properties", {})
                self._parse_modes(props.get("modes", []))
            else:
                self._arlo.error("unable to read mode, try forcing v2")
        else:
            modes = self._arlo.be.get(
                DEFINITIONS_PATH + "?uniqueIds={}".format(self.unique_id)
            )
            if modes is not None:
                # the response maps unique ids to mode/schedule definitions
                modes = modes.get(self.unique_id, {})
                self._parse_modes(modes.get("modes", []))
                self._parse_schedules(modes.get("schedules", []))
                self._save(TIMEZONE_KEY, modes.get("olsonTimeZone", None))
            else:
                self._arlo.error("failed to read modes (v2)")
    @property
    def schedule(self):
        """Returns current schedule name or `None` if no schedule active."""
        # SCHEDULE_KEY is maintained by _set_mode_or_schedule and the mode setter.
        return self._load(SCHEDULE_KEY, None)
    @property
    def on_schedule(self):
        """Returns `True` if the base station is running a schedule."""
        return self.schedule is not None
    @property
    def refresh_rate(self):
        """Return the base's refresh rate (defaults to 15)."""
        return self._refresh_rate
    @refresh_rate.setter
    def refresh_rate(self, value):
        """Set the refresh rate; non-numeric values are silently ignored."""
        if isinstance(value, (int, float)):
            self._refresh_rate = value
    @property
    def siren_state(self):
        """Returns the current siren state (`on` or `off`)."""
        # Defaults to "off" when no state has been recorded yet.
        return self._load(SIREN_STATE_KEY, "off")
    def siren_on(self, duration=300, volume=8):
        """Turn base siren on.

        Does nothing if base doesn't support sirens.

        :param duration: how long, in seconds, to sound for
        :param volume: how loud, from 1 to 8, to sound
        """
        body = {
            "action": "set",
            "resource": "siren",
            "publishResponse": True,
            "properties": {
                "sirenState": "on",
                "duration": int(duration),
                "volume": int(volume),
                "pattern": "alarm",
            },
        }
        self._arlo.debug(str(body))
        self._arlo.be.notify(base=self, body=body)
    def siren_off(self):
        """Turn base siren off.

        Does nothing if base doesn't support sirens.
        """
        body = {
            "action": "set",
            "resource": "siren",
            "publishResponse": True,
            "properties": {"sirenState": "off"},
        }
        self._arlo.debug(str(body))
        self._arlo.be.notify(base=self, body=body)
def restart(self):
params = {"deviceId": self.device_id}
tid = "diagnostics:{}".format(self.device_id)
if (
self._arlo.be.post(RESTART_PATH, params=params, tid=tid, wait_for=None)
is None
):
self._arlo.debug("RESTART didnt send")
def | |
self.json_data["active"] = self.active
self.json_data["users"] = self.users
self.json_data["old"] = self.old
# Same practice as constructor
self.json_handler.pack_data(self.json_data)
self.json_data = self.json_handler.unpack_data()
self.users = (self.json_data["users"]
if "users" in self.json_data else dict())
self.rooms = (self.json_data["rooms"]
if "rooms" in self.json_data else list())
self.active = (self.json_data["active"]
if "active" in self.json_data else dict())
self.old = self.json_data["old"] if "old" in self.json_data else dict()
class HotelInterface(metaclass=ABCMeta):
    """Abstract base class for every front-end that connects to the hotel.

    Concrete interfaces (console, web, GUI, ...) derive from this class and
    must implement both abstract methods below.
    """

    @abstractmethod
    def __init__(self):
        """Store the hotel object this interface will connect to.

        Expected Args:
            hotel (HotelManager): HotelManager object
        """

    @abstractmethod
    def run(self):
        """Start the interface (interact with the ``self.hotel`` object)."""
        ...
class WebHotel(HotelInterface):
    # Django or Flask implementation
    # (abstract methods are not implemented, so instantiating this
    # placeholder raises TypeError)
    #! NOT IMPLEMENTED
    ...
class GuiHotel(HotelInterface):
    # PyImgui implementation
    # Specific branch (context manager integration by mcoding):
    # https://github.com/pyimgui/pyimgui/pull/264
    # (abstract methods are not implemented, so instantiating this
    # placeholder raises TypeError)
    #! NOT IMPLEMENTED
    ...
class ConsoleHotel(HotelInterface):
"""
ConsoleHotel is a console based interface for the hotel.
"""
    def __init__(self, hotel: HotelManager):
        """
        Constructor for the ConsoleHotel object. Initializes the hotel object that it will connect to.

        Args:
            hotel (HotelManager): hotel object
        """
        # Object instance of HotelManager class
        self.hotel = hotel
        # Console menu definition. The exit token is a non-digit ("#") so it
        # cannot collide with the numbered menu options.
        self._menu_option = {
            "header": "Nimbus Hotel",
            "description":
            "Welcome to Nimbus Hotel's Navigation Menu.\nPlease select an option.",
            "options": {
                "Hotel Info": self._print_hotel_info,
                "User Menu": self._menu_user,
                "Booking Menu": self._menu_booking,
                "Room Menu": self._menu_room,
                "Check in": self._check_in,
                "Check out": self._check_out,
            },
            "exit": "#",
        }
def run(self):
"""
Runs the interface. Prints the menu and waits for user input
and call respective function, until opted to exit
"""
# Main loop
while True:
# Updates the hotels internal information
self.hotel._update_json()
# Prints the menu and gets input
user_input = self._print_menu(self._menu_option)
if user_input.isdigit():
# Checks if the user input is within the range of allowed options
if int(user_input) in range(
1,
len(self._menu_option["options"]) + 1):
# Calls the corresponding method to call.
# For example user_input = 1 will call self._print_all_vacant()
self._menu_option["options"][list(
self._menu_option["options"].keys())[int(user_input) -
1]]()
elif user_input == self._menu_option["exit"]:
# Update json_data before exiting
self.hotel._update_json()
# Exits the loop & program
break
    @staticmethod
    def _userPrint(*args, **kwargs):
        """
        Print a ">> "-prefixed message directed to the user (for visibility),
        then forward all arguments to print(). Used exactly like print().
        """
        print(">> ", end="")
        print(*args, **kwargs)
    @staticmethod
    def _userInput(*args, **kwargs):
        """
        Prompt the user with a ">> "-prefixed message (for visibility) and
        return the entered string. Used exactly like input().
        """
        print(">> ", end="")
        return input(*args, **kwargs)
@staticmethod
def _clear_console():
"""
Clears the console.
"""
os.system("cls" if os.name == "nt" else "clear")
def _menu_user(self):
"""Menu for user related actions"""
# Menu options
self._menu_user_option = {
"header": "User Menu",
"description": "User correlated actions",
"options": {
"View all users": self._print_all_users,
"Register new user": self._register_user,
"Edit user": self._edit_user,
"Unregister user": self._unregister_user,
},
"exit": self._menu_option["exit"],
}
while True:
self.hotel._update_json()
# Print menu and get input
user_input = self._print_menu(self._menu_user_option)
# Check if user wants to exit
if user_input == self._menu_option["exit"]:
break
# Check if user input is valid
if user_input.isdigit() and int(user_input) in range(
1,
len(self._menu_user_option) + 1):
# Call the function associated with the option
self._menu_user_option["options"][list(
self._menu_user_option["options"].keys())[int(user_input) -
1]]()
else:
print("Invalid input, try again.")
def _menu_booking(self):
"""Menu for booking related actions"""
# Menu options
self._menu_booking_option = {
"header": "Booking Menu",
"description": "Booking correlated actions",
"options": {
"Add booking": self._add_booking,
"Edit booking": self._edit_booking,
"Remove booking": self._remove_booking,
"View all bookings": self._print_all_bookings,
},
"exit": self._menu_option["exit"],
}
while True:
self.hotel._update_json()
# Print menu and get input
user_input = self._print_menu(self._menu_booking_option)
# Check if user wants to exit
if user_input == self._menu_option["exit"]:
break
# Check if user input is valid
if user_input.isdigit() and int(user_input) in range(
1,
len(self._menu_booking_option) + 1):
# Call the function associated with the option
self._menu_booking_option["options"][list(
self._menu_booking_option["options"].keys())[
int(user_input) - 1]]()
else:
print("Invalid input, try again.")
def _menu_room(self):
"""Menu for room related actions"""
# Menu options
self._menu_room_option = {
"header": "Room Menu",
"description": "Room correlated actions",
"options": {
"Add room": self._add_room,
"Edit room": self._edit_room,
"Remove room": self._remove_room,
"View all rooms": self._print_all_rooms,
},
"exit": self._menu_option["exit"],
}
while True:
self.hotel._update_json()
# Print menu and get input
user_input = self._print_menu(self._menu_room_option)
# Check if user wants to exit
if user_input == self._menu_option["exit"]:
break
# Check if user input is valid
if user_input.isdigit() and int(user_input) in range(
1,
len(self._menu_room_option) + 1):
# Call the function associated with the option
self._menu_room_option["options"][list(
self._menu_room_option["options"].keys())[int(user_input) -
1]]()
else:
print("Invalid input, try again.")
def _print_menu(self,
menu: dict[str, Any],
noInput=False,
noClear=False) -> str:
"""Prints the menu and returns the user input"""
# Print menu
if not noClear:
self._clear_console()
print(menu["header"])
print("=" * len(menu["header"]))
print(menu["description"])
print("-" * 15)
for index, option in enumerate(menu["options"]):
print(f"[{index+1}]: {option}")
print(f"[{menu['exit']}]: Exit or return to top level menu")
print()
# Get user input
if noInput:
return ""
return self._userInput("Please select an option: ")
    def _print_hotel_info(self):
        """
        Prints the hotel information and waits for the user to press enter.
        """
        self._clear_console()
        # Print menu header; hotel name
        print(self._menu_option["header"])
        print("=" * len(self._menu_option["header"]))
        # Prints the instance of the hotel, returns hotel information...
        print(self.hotel)
        self._userInput("Press enter to continue...")
    def _print_all_vacant(self):
        """
        Prints all vacant rooms.

        Side effect: stores the filtered room list on ``self.vacant_rooms``.
        """
        # Gets all rooms that are vacant.
        self.vacant_rooms = self.hotel.filter_dict(self.hotel.rooms,
                                                   {"state": "vacant"})
        self._clear_console()
        print(self._menu_option["header"])
        print("=" * len(self._menu_option["header"]))
        print(f"There are {len(self.vacant_rooms)} vacant rooms")
        print("-" * 15)
        # Print out all room information here
        for room in self.vacant_rooms:
            # room numbers shown to the user are 1-based positions in the list
            print(f"Room Number: {self.hotel.rooms.index(room)+1}")
            print(f"Type: {room['name']}")
            print(f"State: {room['state']}")
            print(f"Price: {room['price']}c")
            print(f"Capacity: {room['capacity']}")
            print(f"Description: {room['description']}")
            print(f"Features:", ", ".join(room["misc"]))
            print("-" * 15)
        self._userInput("Press enter to continue...")
    def _register_user(self):
        """
        Registers a new user interactively.

        Prompts for SSN, name and age (each loop accepts the exit token to
        abort), offers to autofill from previously-registered data, and calls
        ``self.hotel.register_user``. A bool result from register_user means
        success; anything else is shown as an error message.
        """
        self._clear_console()
        print(self._menu_option["header"])
        print("=" * len(self._menu_option["header"]))
        print("Register a new user")
        print("-" * 15)
        # Prompt user for input
        while True:
            # keep asking until the SSN validates or the user enters the exit token
            while (userSSN :=
                   self._userInput("Enter your SSN (12 characters): ")
                   ) != self._menu_option["exit"]:
                if self.hotel.is_ssn_valid(userSSN):
                    break
                else:
                    self._userPrint(
                        "SSN is invalid, make sure its following format: YYYYMMDDXXXX"
                    )
            if userSSN == self._menu_option["exit"]:
                return
            if self.hotel.is_registered(userSSN):
                self._userPrint("User already registered")
                break
            elif self.hotel.been_registered(userSSN):
                # previously-known user: offer to reuse the stored name/age
                self._userPrint(
                    "You have been registered before! Do you want to autofill the following information?"
                )
                name = self.hotel.old[userSSN]["name"]
                age = self.hotel.old[userSSN]["age"]
                autofill_msg = str("-" * 7 + "AUTOFILL INFORMATION" + "-" * 7)
                print(autofill_msg)
                self._userPrint(f"Name: {name}")
                self._userPrint(f"Age: {age}")
                print("-" * len(autofill_msg))
                while True:
                    userInput = self._userInput("(y/n): ")
                    if userInput == "y":
                        # a bool result signals success; anything else is an error
                        if type(result := self.hotel.register_user(
                                userSSN, name, age)) == bool:
                            return
                        else:
                            self._userPrint("Something went wrong:", result)
                            self._userInput(
                                "Press enter to enter name and age manually..."
                            )
                            break
                    elif userInput == "n":
                        break
                    else:
                        self._userPrint("Invalid input")
            # manual entry: name (any non-empty string) ...
            while (userName := self._userInput("Enter your name: ")
                   ) != self._menu_option["exit"]:
                if userName:
                    break
                else:
                    self._userPrint(
                        "Name is invalid, make sure its following format: Firstname Lastname"
                    )
            if userName == self._menu_option["exit"]:
                return
            # ... and age (digits only)
            while (userAge := self._userInput("Enter your age: ")
                   ) != self._menu_option["exit"]:
                if userAge.isdigit():
                    break
                else:
                    self._userPrint(
                        "Age is invalid, make sure its a number only")
            if userAge == self._menu_option["exit"]:
                return
            if type(result := self.hotel.register_user(userSSN, userName,
                                                       userAge)) == bool:
                # Registered user if the result is a bool
                self._userPrint("User registered")
                self._userInput("Press enter to continue")
                return
            else:
                # Prints the error message
                self._userPrint(result)
                self._userInput("Press enter to continue...")
                return
        self._userInput("Press enter to continue...")
def _edit_user(self):
self._clear_console()
print(self._menu_option["header"])
print("=" * len(self._menu_option["header"]))
print("Edit user")
print("-" * 15)
# Prompt user for input
while not self.hotel.is_registered(
(userSsn := self._userInput("Please enter your SSN: "))):
if userSsn == self._menu_option["exit"]:
return
self._userInput(
f"Invalid SSN (Make sure its 12 numbers and registered). Press enter to try again or {self._menu_option['exit']} to exit"
)
while True:
name = self.hotel.users[userSsn]["name"]
age = self.hotel.users[userSsn]["age"]
self._clear_console()
print("What to edit?")
print("-" * 15)
self._userPrint(f"SSN: {userSsn}")
self._userPrint(f"Name: {name}")
self._userPrint(f"Age: {age}")
self._userPrint("-" * 15)
self._userPrint("[1]: Change SSN")
self._userPrint("[2]: Change name")
self._userPrint("[3]: Change age")
self._userPrint(f"[{self._menu_option['exit']}]: Exit")
print()
userInput = self._userInput("Please select an option: ")
if userInput == self._menu_option["exit"]:
return
elif userInput == "1":
while True:
newSsn = self._userInput("Enter new SSN: ")
if self.hotel.is_ssn_valid(newSsn):
break
else:
self._userPrint(
"SSN is invalid, make sure its following format: YYYYMMDDXXXX"
)
if type(result := self.hotel.edit_user(
userSsn, new_ssn=newSsn)) == bool:
self._userPrint("SSN changed")
self._userInput("Press enter to continue...")
return
else:
self._userPrint(result)
self._userInput("Press enter to continue...")
return
elif userInput == | |
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import pandas as pd
from .bilateral import Bilateral
from .utils import HDFMixin, fast_binomial_pmf
class MidlineBilateral(HDFMixin):
"""Model a bilateral lymphatic system where an additional risk factor can
be provided in the data: Whether or not the primary tumor extended over the
mid-sagittal line.
It is reasonable to assume (and supported by data) that such an extension
significantly increases the risk for metastatic spread to the contralateral
side of the neck. This class attempts to capture this using a simple
assumption: We assume that the probability of spread to the contralateral
side for patients *with* midline extension is larger than for patients
*without* it, but smaller than the probability of spread to the ipsilateral
side. Formally:
.. math::
b_c^{\\in} = \\alpha \\cdot b_i + (1 - \\alpha) \\cdot b_c^{\\not\\in}
where :math:`b_c^{\\in}` is the probability of spread from the primary tumor
to the contralateral side for patients with midline extension, and
:math:`b_c^{\\not\\in}` for patients without. :math:`\\alpha` is the linear
mixing parameter.
"""
def __init__(
    self,
    graph: Optional[Dict[Tuple[str], List[str]]] = None,
    alpha_mix: float = 0.,
    trans_symmetric: bool = True,
    **kwargs
):
    """The class is constructed in a similar fashion to the
    :class:`Bilateral`: That class contains one :class:`Unilateral` for
    each side of the neck, while this class will contain two instances of
    :class:`Bilateral`, one for the case of a midline extension and one for
    the case of no midline extension.

    Args:
        graph: Dictionary of the same kind as for initialization of
            :class:`System`. This graph will be passed to the constructors of
            two :class:`System` attributes of this class. Defaults to an
            empty graph.
        alpha_mix: Initial mixing parameter between ipsi- & contralateral
            base probabilities that determines the contralateral base
            probabilities for the patients with mid-sagittal extension.
        trans_symmetric: If ``True``, the spread probabilities among the
            LNLs will be set symmetrically.

    See Also:
        :class:`Bilateral`: Two of these are held as attributes by this
        class. One for the case of a mid-sagittal extension of the primary
        tumor and one for the case of no such extension.
    """
    # Use a None sentinel instead of a mutable ``{}`` default: a literal
    # dict default would be shared across all instances of this class.
    if graph is None:
        graph = {}
    # One bilateral model for tumors with midline extension ...
    self.ext = Bilateral(
        graph=graph, base_symmetric=False, trans_symmetric=trans_symmetric
    )
    # ... and one for tumors without.
    self.noext = Bilateral(
        graph=graph, base_symmetric=False, trans_symmetric=trans_symmetric
    )
    # NOTE(review): **kwargs is accepted but ignored here — presumably for
    # signature compatibility with sibling classes; confirm.
    self.alpha_mix = alpha_mix
@property
def graph(self) -> Dict[Tuple[str], List[str]]:
    """The (unilateral) graph this network was created from.

    Both contained :class:`Bilateral` models were built from the same
    graph, so the one stored in the no-extension model is returned.
    """
    noext_model = self.noext
    return noext_model.graph
@property
def base_probs(self) -> np.ndarray:
    """Base probabilities of metastatic lymphatic spread from the tumor(s)
    to the lymph node levels.

    The returned array is laid out as

    +-----------------+-------------------+--------------+
    | base probs ipsi | base probs contra | mixing param |
    +-----------------+-------------------+--------------+

    i.e. the ipsi- and contralateral base probabilities of the
    no-extension model followed by the mixing parameter ``alpha_mix``.
    When setting this property, the mixing parameter must likewise be the
    last entry of the provided array.
    """
    noext_probs = self.noext.base_probs
    return np.concatenate([noext_probs, [self.alpha_mix]])
@base_probs.setter
def base_probs(self, new_params: np.ndarray):
    """Set the base probabilities from the tumor(s) to the LNLs, where the
    last entry of ``new_params`` is the mixing parameter :math:`\\alpha`.
    """
    self.alpha_mix = new_params[-1]
    # all but the last entry are the base probabilities for lateralized
    # (no midline extension) cases
    self.noext.base_probs = new_params[:-1]
    # midline-extension model: ipsilateral side is unchanged, while the
    # contralateral side is a linear mix of ipsi- & contralateral values
    ipsi_probs = self.noext.ipsi.base_probs
    contra_probs = self.noext.contra.base_probs
    self.ext.ipsi.base_probs = ipsi_probs
    self.ext.contra.base_probs = (
        self.alpha_mix * ipsi_probs
        + (1 - self.alpha_mix) * contra_probs
    )
    # share the ipsilateral transition matrix instead of computing it twice
    self.noext.ipsi._transition_matrix = self.ext.ipsi.transition_matrix
@property
def trans_probs(self) -> np.ndarray:
    """Probabilities of lymphatic spread among the lymph node levels,
    assumed symmetric ipsi- & contralaterally by default. Stored
    identically in both sub-models, so the no-extension model's values
    are returned.
    """
    return self.noext.trans_probs
@trans_probs.setter
def trans_probs(self, new_params: np.ndarray):
    """Assign the spread probabilities among the LNLs to both sub-models."""
    for bilateral_model in (self.noext, self.ext):
        bilateral_model.trans_probs = new_params
    # reuse the ipsilateral transition matrix computed by the ext model
    # rather than recomputing it for the noext model
    self.noext.ipsi._transition_matrix = self.ext.ipsi.transition_matrix
@property
def spread_probs(self) -> np.ndarray:
    """Probabilities representing the spread of cancer along lymphatic
    drainage pathways per timestep.

    The array is composed as

    +-----------------+-------------------+-------------+--------------+
    | base probs ipsi | base probs contra | trans probs | mixing param |
    +-----------------+-------------------+-------------+--------------+

    i.e. the no-extension model's spread probabilities followed by the
    mixing parameter :math:`\\alpha`.
    """
    return np.concatenate([self.noext.spread_probs, [self.alpha_mix]])
@spread_probs.setter
def spread_probs(self, new_params: np.ndarray):
    """Split ``new_params`` into base probabilities, transition
    probabilities and the mixing parameter :math:`\\alpha`, then delegate
    to the respective setters.
    """
    # the first 2 * (number of base edges) entries are the ipsi- and
    # contralateral base probabilities
    split_idx = 2 * len(self.noext.ipsi.base_edges)
    mixing_param = new_params[-1]
    self.base_probs = np.concatenate(
        [new_params[:split_idx], [mixing_param]]
    )
    self.trans_probs = new_params[split_idx:-1]
@property
def modalities(self):
    """A dictionary containing the specificity :math:`s_P` and sensitivity
    :math:`s_N` values for each diagnostic modality. Both sub-models share
    the same modalities, so the no-extension model's are returned.

    Assigning to this property propagates the values to all contained
    systems and recomputes their observation matrices.

    See Also:
        :meth:`Bilateral.modalities`: Getting and setting this property in
        the normal bilateral model.
        :meth:`Unilateral.modalities`: Getting and setting :math:`s_P` and
        :math:`s_N` for a unilateral model.
    """
    return self.noext.modalities
@modalities.setter
def modalities(self, modality_spsn: Dict[str, List[float]]):
    """Propagate the modality definitions to the bilateral components with
    and without midline extension.
    """
    for bilateral_model in (self.noext, self.ext):
        bilateral_model.modalities = modality_spsn
@property
def patient_data(self):
    """A pandas :class:`DataFrame` with rows of patients and columns of
    patient and involvement details. The table's header should have three
    levels that categorize the individual lymph node level's involvement to
    the corresponding diagnostic modality (first level), the side of the
    LNL (second level) and finally the name of the LNL (third level).
    Additionally, the patient's T-category must be stored under ('info',
    'tumor', 't_stage') and whether the tumor extends over the mid-sagittal
    line should be noted under ('info', 'tumor', 'midline_extension'). So,
    part of this table could look like this:

    +-----------------------------+------------------+------------------+
    |            info             |       MRI        |       PET        |
    +-----------------------------+--------+---------+--------+---------+
    |            tumor            |  ipsi  | contra  |  ipsi  | contra  |
    +---------+-------------------+--------+---------+--------+---------+
    | t_stage | midline_extension |   II   |   II    |   II   |   II    |
    +=========+===================+========+=========+========+=========+
    | early   | ``True``          |``True``|``None`` |``True``|``False``|
    +---------+-------------------+--------+---------+--------+---------+
    | late    | ``True``          |``None``|``None`` |``None``|``None`` |
    +---------+-------------------+--------+---------+--------+---------+
    | early   | ``False``         |``True``|``False``|``True``|``True`` |
    +---------+-------------------+--------+---------+--------+---------+

    Raises:
        AttributeError: If no data has been loaded yet.
    """
    # ``_patient_data`` only exists after the setter has been called once
    if not hasattr(self, "_patient_data"):
        raise AttributeError(
            "No patient data has been loaded yet"
        )
    return self._patient_data
@patient_data.setter
def patient_data(self, patient_data: pd.DataFrame):
    """Store a copy of the given table and load it into the model.

    For now this simply delegates to :meth:`load_data`; at a later point
    the :class:`DataFrame` could instead be generated from the internal
    matrix representation of the data.
    """
    self._patient_data = patient_data.copy()
    self.load_data(patient_data)
def load_data(
self,
data: pd.DataFrame,
t_stages: Optional[List[int]] = None,
modality_spsn: Optional[Dict[str, List[float]]] = None,
mode = "HMM"
):
"""Load data as table of patients with involvement details and convert
it into internal representation of a matrix.
Args:
data: The table with rows of patients and columns of patient and
involvement details. The table's header must have three levels
that categorize the individual lymph node level's involvement
to the corresponding diagnostic modality (first level), the
side of the LNL (second level) and finaly the name of the LNL
(third level). Additionally, the patient's T-category must be
stored under ('info', 'tumor', 't_stage') and whether the tumor
extends over the mid-sagittal line should be noted under
('info', 'tumor', 'midline_extension'). So, part of this table
could look like this:
+-----------------------------+---------------------+
| info | MRI |
+-----------------------------+----------+----------+
| tumor | ipsi | contra |
+---------+-------------------+----------+----------+
| t_stage | midline_extension | II | II |
+=========+===================+==========+==========+
| early | ``True`` | ``True`` | ``None`` |
+---------+-------------------+----------+----------+
| late | ``True`` | ``None`` | ``None`` |
+---------+-------------------+----------+----------+
| early | ``False`` | ``True`` | ``True`` |
+---------+-------------------+----------+----------+
t_stages: List of T-stages that should be included in the learning
process. If ommitted, the list of T-stages is extracted from
the :class:`DataFrame`
modality_spsn: If no diagnostic modalities have been defined yet,
this must be provided to build the observation matrix.
See Also:
:attr:`patient_data`: The attribute for loading and exporting data.
:meth:`Bilateral.load_data`: Loads data into a bilateral network by
splitting it into ipsi- & contralateral | |
# Copyright (c) 2016-2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from cloudify.state import current_ctx
from cloudify.mocks import MockCloudifyContext
from cloudify.exceptions import NonRecoverableError
from cloudify_common_sdk._compat import builtins_open
from cloudify_libvirt.tests.test_common_base import LibVirtCommonTest
import cloudify_libvirt.volume_tasks as volume_tasks
class TestVolumeTasks(LibVirtCommonTest):
def _create_ctx(self):
    """Build, activate and return a mocked cloudify context for the tests."""
    node_properties = {
        'libvirt_auth': {'a': 'c'},
        'params': {'pool': 'pool_name'},
    }
    instance_runtime_properties = {
        'libvirt_auth': {'a': 'd'}
    }
    mocked_ctx = MockCloudifyContext(
        'node_name',
        properties=node_properties,
        runtime_properties=instance_runtime_properties,
    )
    # make the mocked context the globally current one
    current_ctx.set(mocked_ctx)
    return mocked_ctx
def _test_empty_connection_backup(self, func):
    """Check that ``func`` correctly handles a failed libvirt connection
    when called with a snapshot name."""
    ctx = self._create_ctx()
    ctx.instance.runtime_properties['resource_id'] = 'resource'
    call_kwargs = {'ctx': ctx, "snapshot_name": "backup"}
    self._check_correct_connect(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        func, [], call_kwargs)
def _test_empty_volume_backup(self, func):
    """Check that ``func`` correctly handles a missing volume when called
    with a snapshot name."""
    ctx = self._create_ctx()
    ctx.instance.runtime_properties['resource_id'] = 'resource'
    ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
    call_kwargs = {'ctx': ctx, "snapshot_name": "backup"}
    self._check_no_such_object_volume(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        func, [], call_kwargs, 'resource')
def _test_empty_volume(self, func):
    """Check that ``func`` correctly handles a missing volume."""
    ctx = self._create_ctx()
    ctx.instance.runtime_properties['resource_id'] = 'resource'
    ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
    self._check_no_such_object_volume(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        func, [], {'ctx': ctx}, 'resource')
def _create_fake_volume_backup(self):
    """Return ``(ctx, connect, pool, volume)`` mocks where the context
    already carries one stored backup named ``node_name-backup``."""
    fake_volume = mock.Mock()
    fake_volume.XMLDesc = mock.Mock(return_value="<volume/>")
    fake_volume.isActive = mock.Mock(return_value=1)
    fake_volume.name = mock.Mock(return_value="volume_name")

    fake_pool = mock.Mock()
    fake_pool.XMLDesc = mock.Mock(return_value="<pool/>")
    fake_pool.isActive = mock.Mock(return_value=1)
    fake_pool.name = mock.Mock(return_value="pool_name")
    fake_pool.storageVolLookupByName = mock.Mock(return_value=fake_volume)

    connect = self._create_fake_connection()
    connect.storagePoolLookupByName = mock.Mock(return_value=fake_pool)

    ctx = self._create_ctx()
    ctx.instance.runtime_properties['resource_id'] = 'resource'
    ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
    ctx.node.properties['params'] = {}
    # pre-seed one existing incremental backup
    ctx.instance.runtime_properties["backups"] = {
        "node_name-backup": "<xml/>"}
    return ctx, connect, fake_pool, fake_volume
def test_snapshot_apply(self):
    """Exercise snapshot_apply: generic failure paths, then incremental
    (runtime-property) and full (file-based) restore paths."""
    # generic failure paths shared with the other snapshot tasks
    self._test_no_resource_id(volume_tasks.snapshot_apply,
                              "No volume for restore")
    self._test_no_snapshot_name(
        self._create_ctx(),
        "cloudify_libvirt.volume_tasks.libvirt.open",
        volume_tasks.snapshot_apply)
    self._test_empty_connection_backup(volume_tasks.snapshot_apply)
    self._test_empty_volume_backup(volume_tasks.snapshot_apply)
    # no such snapshot: name "backup!" is not in the stored backups
    # NOTE(review): the regex is matched via assertRaisesRegexp (deprecated
    # alias of assertRaisesRegex); '.' in the pattern matches any char.
    _ctx, connect, pool, volume = self._create_fake_volume_backup()
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        with self.assertRaisesRegexp(
            NonRecoverableError,
            "No snapshots found with name: node_name-backup!."
        ):
            volume_tasks.snapshot_apply(
                ctx=_ctx, snapshot_name="backup!",
                snapshot_incremental=True)
    # we have such snapshot: "backup" exists, so apply succeeds
    _ctx, connect, pool, volume = self._create_fake_volume_backup()
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        volume_tasks.snapshot_apply(
            ctx=_ctx, snapshot_name="backup",
            snapshot_incremental=True)
    # no such backup: full (non-incremental) restore with no backup file
    _ctx, connect, pool, volume = self._create_fake_volume_backup()
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        with mock.patch(
            "os.path.isfile",
            mock.Mock(return_value=False)
        ):
            with self.assertRaisesRegexp(
                NonRecoverableError,
                "No backups found with name: node_name-backup!."
            ):
                volume_tasks.snapshot_apply(
                    ctx=_ctx, snapshot_name="backup!",
                    snapshot_incremental=False)
    # have backup: the backup file exists and is read from disk
    _ctx, connect, pool, volume = self._create_fake_volume_backup()
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        with mock.patch(
            "os.path.isfile",
            mock.Mock(return_value=True)
        ):
            # fake_file() is called once up-front so the read() return
            # value is configured on the handle mock before patching
            fake_file = mock.mock_open()
            fake_file().read.return_value = "<volume/>"
            with mock.patch(
                builtins_open, fake_file
            ):
                volume_tasks.snapshot_apply(
                    ctx=_ctx, snapshot_name="backup!",
                    snapshot_incremental=False)
                # backup file path is derived from snapshot name + resource
                fake_file.assert_called_with('./backup!/resource.xml', 'r')
def test_snapshot_create(self):
    """Exercise snapshot_create: generic failure paths, incremental
    snapshot creation, and file-based (full) backup creation.

    NOTE(review): the sub-cases after the first deliberately reuse the
    same ``_ctx``/``connect`` mocks, so their order matters.
    """
    # generic failure paths shared with the other snapshot tasks
    self._test_no_resource_id(volume_tasks.snapshot_create,
                              "No volume for backup")
    self._test_no_snapshot_name(
        self._create_ctx(),
        "cloudify_libvirt.volume_tasks.libvirt.open",
        volume_tasks.snapshot_create)
    self._test_empty_connection_backup(volume_tasks.snapshot_create)
    self._test_empty_volume_backup(volume_tasks.snapshot_create)
    # check create snapshot with error, already exists
    # (the fixture pre-seeds a backup named "node_name-backup")
    _ctx, connect, pool, volume = self._create_fake_volume_backup()
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        with self.assertRaisesRegexp(
            NonRecoverableError,
            "Snapshot node_name-backup already exists."
        ):
            volume_tasks.snapshot_create(ctx=_ctx, snapshot_name="backup",
                                         snapshot_incremental=True)
    # the task looked up the pool and the volume before failing
    connect.storagePoolLookupByName.assert_called_with('pool_name')
    pool.storageVolLookupByName.assert_called_with('resource')
    # no such snapshots: clear the stored backups so creation succeeds
    _ctx.instance.runtime_properties["backups"] = {}
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        volume_tasks.snapshot_create(ctx=_ctx, snapshot_name="backup",
                                     snapshot_incremental=True)
        # the volume XML was stored as the new incremental snapshot
        self.assertEqual(
            _ctx.instance.runtime_properties["backups"],
            {"node_name-backup": "<volume/>"})
    # check create snapshot (full, file-based backup)
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        with mock.patch(
            "os.path.isdir",
            mock.Mock(return_value=True)
        ):
            # fake_file() is called once up-front to configure read()
            # on the handle mock before patching builtins.open
            fake_file = mock.mock_open()
            fake_file().read.return_value = "!!!!"
            with mock.patch(
                builtins_open, fake_file
            ):
                # with error, already exists (backup file on disk)
                with mock.patch(
                    "os.path.isfile",
                    mock.Mock(return_value=True)
                ):
                    with self.assertRaisesRegexp(
                        NonRecoverableError,
                        "Backup node_name-backup already exists."
                    ):
                        volume_tasks.snapshot_create(
                            ctx=_ctx, snapshot_name="backup",
                            snapshot_incremental=False)
                # without error (no pre-existing backup file)
                with mock.patch(
                    "os.path.isfile",
                    mock.Mock(return_value=False)
                ):
                    volume_tasks.snapshot_create(
                        ctx=_ctx, snapshot_name="backup",
                        snapshot_incremental=False)
                # the volume XML was written to the backup file
                fake_file().write.assert_called_with("<volume/>")
def test_snapshot_delete(self):
    """Exercise snapshot_delete: generic failure paths, incremental
    snapshot removal, and file-based (full) backup removal."""
    # generic failure paths shared with the other snapshot tasks
    self._test_no_resource_id(volume_tasks.snapshot_delete,
                              "No volume for backup delete")
    self._test_no_snapshot_name(
        self._create_ctx(),
        "cloudify_libvirt.volume_tasks.libvirt.open",
        volume_tasks.snapshot_delete)
    # no such snapshots: "backup!" is not among the stored backups
    _ctx, connect, pool, volume = self._create_fake_volume_backup()
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        with self.assertRaisesRegexp(
            NonRecoverableError,
            "No snapshots found with name: node_name-backup!."
        ):
            volume_tasks.snapshot_delete(
                ctx=_ctx, snapshot_name="backup!",
                snapshot_incremental=True)
        # the stored backups are left untouched on failure
        self.assertEqual(
            _ctx.instance.runtime_properties["backups"],
            {'node_name-backup': "<xml/>"})
    # remove snapshot: deleting the existing "backup" empties the store
    _ctx, connect, pool, volume = self._create_fake_volume_backup()
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        volume_tasks.snapshot_delete(ctx=_ctx, snapshot_name="backup",
                                     snapshot_incremental=True)
        self.assertEqual(_ctx.instance.runtime_properties["backups"], {})
    # no such backup: non-incremental delete with no backup file on disk
    _ctx, connect, pool, volume = self._create_fake_volume_backup()
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        with mock.patch(
            "os.path.isfile",
            mock.Mock(return_value=False)
        ):
            with self.assertRaisesRegexp(
                NonRecoverableError,
                "No backups found with name: node_name-backup!."
            ):
                volume_tasks.snapshot_delete(
                    ctx=_ctx, snapshot_name="backup!",
                    snapshot_incremental=False)
    # remove backup: the backup file exists, is read and then removed
    _ctx, connect, pool, volume = self._create_fake_volume_backup()
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        with mock.patch(
            "os.path.isfile",
            mock.Mock(return_value=True)
        ):
            # fake_file() is called up-front to configure read()
            fake_file = mock.mock_open()
            fake_file().read.return_value = "!!!!"
            remove_mock = mock.Mock()
            with mock.patch(
                "os.remove",
                remove_mock
            ):
                with mock.patch(
                    builtins_open, fake_file
                ):
                    volume_tasks.snapshot_delete(
                        ctx=_ctx, snapshot_name="backup!",
                        snapshot_incremental=False)
                # the file was opened for reading, then deleted
                fake_file.assert_called_with('./backup!/resource.xml', 'r')
                remove_mock.assert_called_with('./backup!/resource.xml')
def test_create(self):
    """Exercise volume_tasks.create: connection failures, missing pool,
    successful creation, download size checks and createXML failure."""
    # check correct handle exception with empty connection
    self._check_correct_connect(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        volume_tasks.create, [], {'ctx': self._create_ctx()})
    # check error with create volume image (pool lookup fails)
    self._check_create_object(
        'Failed to find the pool',
        "cloudify_libvirt.volume_tasks.libvirt.open",
        volume_tasks.create, [], {'ctx': self._create_ctx(),
                                  'params': {'pool': 'empty'}})
    # successful create: pool.createXML returns a named volume mock
    _ctx = self._create_ctx()
    _ctx.get_resource = mock.Mock(return_value='<somexml/>')
    volume = mock.Mock()
    volume.name = mock.Mock(return_value="volume_name")
    pool = mock.Mock()
    pool.createXML = mock.Mock(return_value=volume)
    connect = self._create_fake_connection()
    connect.storagePoolLookupByName = mock.Mock(return_value=pool)
    # without params in node/instance properties
    _ctx.instance.runtime_properties['params'] = {}
    _ctx.node.properties['params'] = {}
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        volume_tasks.create(ctx=_ctx,
                            template_resource="template_resource",
                            params={'pool': 'empty'})
        # the rendered template XML was passed to the pool
        pool.createXML.assert_called_with('<somexml/>')
        # and the created volume's name was recorded as resource_id
        self.assertEqual(
            _ctx.instance.runtime_properties['resource_id'], "volume_name"
        )
    # failed check size of download (Content-Length of 0 is rejected)
    _ctx.instance.runtime_properties['resource_id'] = None
    _ctx.instance.runtime_properties['params'] = {}
    _ctx.node.properties['params'] = {}
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        # empty download
        head_response = mock.Mock()
        head_response.headers = {'Content-Length': 0}
        with mock.patch(
            "cloudify_libvirt.volume_tasks.requests.head",
            mock.Mock(return_value=head_response)
        ):
            with self.assertRaisesRegexp(
                NonRecoverableError,
                "Failed to download volume."
            ):
                volume_tasks.create(
                    ctx=_ctx,
                    template_resource="template_resource",
                    params={
                        'pool': 'empty',
                        'url': "https://fake.org/centos.iso"})
    # successful check size of download (512 bytes, range support)
    _ctx.instance.runtime_properties['resource_id'] = None
    _ctx.instance.runtime_properties['params'] = {}
    _ctx.node.properties['params'] = {}
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        head_response = mock.Mock()
        head_response.headers = {'Content-Length': 512,
                                 'Accept-Ranges': 'bytes'}
        with mock.patch(
            "cloudify_libvirt.volume_tasks.requests.head",
            mock.Mock(return_value=head_response)
        ):
            volume_tasks.create(
                ctx=_ctx,
                template_resource="template_resource",
                params={
                    'pool': 'empty',
                    'url': "https://fake.org/centos.iso"})
    # failed on create: createXML returning None must raise
    _ctx.instance.runtime_properties['resource_id'] = None
    _ctx.instance.runtime_properties['params'] = {}
    _ctx.node.properties['params'] = {}
    pool.createXML = mock.Mock(return_value=None)
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        with self.assertRaisesRegexp(
            NonRecoverableError,
            'Failed to create a virtual volume'
        ):
            volume_tasks.create(ctx=_ctx,
                                template_resource="template_resource",
                                params={'pool': 'empty'})
def test_reuse_volume_create_not_exist(self):
    """create() must fail when asked to reuse a volume that is absent."""
    ctx = self._create_ctx()
    ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
    create_kwargs = {
        'ctx': ctx,
        "resource_id": 'resource',
        "use_external_resource": True,
    }
    self._check_no_such_object_volume(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        volume_tasks.create, [], create_kwargs, 'resource')
def test_reuse_volume_create_exist(self):
    """create() should adopt an existing volume as an external resource."""
    ctx = self._create_ctx()
    ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}

    fake_volume = mock.Mock()
    fake_volume.name = mock.Mock(return_value="volume")
    fake_pool = mock.Mock()
    fake_pool.name = mock.Mock(return_value="pool")
    fake_pool.storageVolLookupByName = mock.Mock(return_value=fake_volume)
    connect = self._create_fake_connection()
    connect.storagePoolLookupByName = mock.Mock(return_value=fake_pool)

    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        volume_tasks.create(ctx=ctx,
                            resource_id='resource',
                            use_external_resource=True)
        # the requested pool and volume were looked up ...
        connect.storagePoolLookupByName.assert_called_with('pool_name')
        fake_pool.storageVolLookupByName.assert_called_with('resource')
        # ... and the existing volume was registered as external
        self.assertEqual(
            ctx.instance.runtime_properties['resource_id'], 'volume'
        )
        self.assertTrue(
            ctx.instance.runtime_properties['use_external_resource']
        )
def test_start(self):
    """Cover the generic failure paths of volume_tasks.start."""
    libvirt_open_path = "cloudify_libvirt.volume_tasks.libvirt.open"
    task = volume_tasks.start
    # check correct handle exception with empty connection
    self._test_check_correct_connect_action(libvirt_open_path, task)
    self._test_empty_volume(task)
    self._test_reused_object(libvirt_open_path, task)
    self._test_no_resource_id(task)
def test_start_wipe(self):
    """start() should succeed when params request a zero wipe."""
    ctx = self._create_ctx()
    ctx.instance.runtime_properties['resource_id'] = 'volume'
    ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}

    fake_volume = mock.Mock()
    fake_volume.name = mock.Mock(return_value="volume")
    fake_volume.upload = mock.Mock()
    fake_pool = mock.Mock()
    fake_pool.name = mock.Mock(return_value="pool")
    fake_pool.storageVolLookupByName = mock.Mock(return_value=fake_volume)
    connect = self._create_fake_connection()
    connect.storagePoolLookupByName = mock.Mock(return_value=fake_pool)

    wipe_params = {
        'zero_wipe': True,
        'allocation': 1
    }
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        volume_tasks.start(ctx=ctx, params=wipe_params)
def test_start_download(self):
    """start() download path: reject an empty download, then succeed with
    a 512-byte range-capable response."""
    # download setup: context pointing at an existing volume
    _ctx = self._create_ctx()
    _ctx.instance.runtime_properties['resource_id'] = 'volume'
    _ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
    volume = mock.Mock()
    volume.name = mock.Mock(return_value="volume")
    volume.upload = mock.Mock()
    pool = mock.Mock()
    pool.name = mock.Mock(return_value="pool")
    pool.storageVolLookupByName = mock.Mock(return_value=volume)
    connect = self._create_fake_connection()
    connect.storagePoolLookupByName = mock.Mock(return_value=pool)
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        # empty download: Content-Length of 0 must be rejected
        head_response = mock.Mock()
        head_response.headers = {'Content-Length': 0}
        with mock.patch(
            "cloudify_libvirt.volume_tasks.requests.head",
            mock.Mock(return_value=head_response)
        ):
            with self.assertRaisesRegexp(
                NonRecoverableError,
                "Failed to download volume."
            ):
                volume_tasks.start(
                    ctx=_ctx,
                    params={
                        'url': "https://fake.org/centos.iso"})
        # 512 bytes for download; the same mock serves head and get,
        # streaming one 256-byte chunk of zeros
        head_response = mock.Mock()
        head_response.headers = {'Content-Length': 512,
                                 'Accept-Ranges': 'bytes'}
        head_response.iter_content = mock.Mock(return_value=["\0" * 256])
        with mock.patch(
            "cloudify_libvirt.volume_tasks.requests.head",
            mock.Mock(return_value=head_response)
        ):
            with mock.patch(
                "cloudify_libvirt.volume_tasks.requests.get",
                mock.Mock(return_value=head_response)
            ):
                volume_tasks.start(
                    ctx=_ctx,
                    params={
                        'url': "https://fake.org/centos.iso"})
def test_stop(self):
    """Cover the generic failure paths of volume_tasks.stop."""
    libvirt_open_path = "cloudify_libvirt.volume_tasks.libvirt.open"
    task = volume_tasks.stop
    # check correct handle exception with empty connection
    self._test_check_correct_connect_action(libvirt_open_path, task)
    self._test_empty_volume(task)
    self._test_reused_object(libvirt_open_path, task)
    self._test_no_resource_id(task)
def test_stop_wipe(self):
    """stop() wipe handling: libvirt errors and bad return codes are
    tolerated, and a clean wipe succeeds.

    The same ``volume``/``connect`` mocks are reused across all three
    sub-cases; only ``volume.wipe`` is swapped out.
    """
    # failed to wipe / error ignored: a libvirtError from wipe() must not
    # propagate out of stop()
    _ctx = self._create_ctx()
    _ctx.instance.runtime_properties['resource_id'] = 'volume'
    _ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
    volume = mock.Mock()
    volume.name = mock.Mock(return_value="volume")
    volume.wipe = mock.Mock(
        side_effect=volume_tasks.libvirt.libvirtError("e"))
    pool = mock.Mock()
    pool.name = mock.Mock(return_value="pool")
    pool.storageVolLookupByName = mock.Mock(return_value=volume)
    connect = self._create_fake_connection()
    connect.storagePoolLookupByName = mock.Mock(return_value=pool)
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        volume_tasks.stop(ctx=_ctx)
    # failed to wipe / wrong response: wipe() returning -1 triggers a
    # retry path, so time.sleep is patched to keep the test fast
    volume.wipe = mock.Mock(return_value=-1)
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        with mock.patch(
            "cloudify_libvirt.volume_tasks.time.sleep",
            mock.Mock(return_value=mock.Mock())
        ):
            volume_tasks.stop(ctx=_ctx)
    # correctly wiped: wipe() returning 0 completes without retries
    volume.wipe = mock.Mock(return_value=0)
    with mock.patch(
        "cloudify_libvirt.volume_tasks.libvirt.open",
        mock.Mock(return_value=connect)
    ):
        volume_tasks.stop(ctx=_ctx)
def test_delete(self):
# check correct handle exception with empty connection
self._test_check_correct_connect_action(
"cloudify_libvirt.volume_tasks.libvirt.open",
volume_tasks.delete)
self._test_empty_volume(volume_tasks.delete)
self._test_reused_object(
"cloudify_libvirt.volume_tasks.libvirt.open",
volume_tasks.delete)
self._test_no_resource_id(volume_tasks.delete)
# failed to remove
_ctx = self._create_ctx()
_ctx.instance.runtime_properties['resource_id'] = 'volume'
_ctx.instance.runtime_properties['params'] = {'pool': 'pool_name'}
volume = mock.Mock()
volume.name = mock.Mock(return_value="volume")
volume.delete = mock.Mock(return_value=-1)
| |
flight_ID=flight_ID)
df['flight_ID'] = flight_ID
dfs_obs[flight_ID] = df
# Model
# dfs_mod = {}
# for flight_ID in flight_IDs:
# df = get_GEOSCF4flightnum(flight_ID=flight_ID)
# df = add_derived_FAAM_flags2df4flight(df=df, flight_ID=flight_ID)
# df['flight_ID'] = flight_ID
# dfs_mod[flight_ID] = df
# Model - GEOS-CF (online)
dfs_mod_CF = {}
for flight_ID in flight_IDs:
df = get_GEOSCF4flightnum(flight_ID=flight_ID)
df = add_derived_FAAM_flags2df4flight(df=df, flight_ID=flight_ID)
df['flight_ID'] = flight_ID
dfs_mod_CF[flight_ID] = df
# Model - GEOS-Chem (offline)
if inc_GEOSChem:
dfs_mod_GC = {}
for flight_ID in flight_IDs:
dfs = get_GEOSChem4flightnum(flight_ID=flight_ID,
res=res,
RunSet=RunSet,)
for key in dfs.keys():
df = dfs[key]
df = add_derived_FAAM_flags2df4flight(df=df,
flight_ID=flight_ID)
df['flight_ID'] = flight_ID
dfs[key] = df
dfs_mod_GC[flight_ID] = dfs
del dfs
# TODO: Combine to a single DataFrame to plot GEOS-CF and GEOS-Chem
# Kludge - for now just plot GEOS-Chem
if just_plot_GEOS_Chem:
dfs_mod = dfs_mod_GC
else:
print('TODO: setup plotting of GEOS-CF and GEOS-Chem')
dfs_mod = dfs_mod_CF
if debug:
print(dfs_mod.keys())
print(list(dfs_mod.keys())[0])
print(dfs_mod[list(dfs_mod.keys())[0]])
print(dfs_mod[list(dfs_mod.keys())[0]][RunSet])
print(dfs_mod[list(dfs_mod.keys())[0]][RunSet].head())
# Combine to a single dataframe
# df_mod = pd.concat([dfs_mod[i] for i in dfs_mod.keys()], axis=0)
df_mod = pd.concat([dfs_mod[i][RunSet] for i in dfs_mod.keys()], axis=0)
df_obs = pd.concat([dfs_obs[i] for i in dfs_obs.keys()], axis=0)
# Only consider data during SLRs?
if just_SLR:
df_obs = df_obs.loc[df_obs['IS_SLR'] == True, :]
df_mod = df_mod.loc[df_mod['IS_SLR'] == True, :]
extr_str = '_JUST_SLR'
else:
extr_str = ''
# Setup PDF to save PDF plots to
if isinstance(savetitle, type(None)):
savetitle = 'ARNA_altitude_binned_{}{}'.format('ALL', extr_str)
if isinstance(pdff, type(None)):
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# - Plot up location of flights
if just_SLR:
title = "Flight tracks for 'Straight and Level' Runs during ARNA"
else:
title = 'Flight tracks for all flights during ARNA'
plt_flightpath_spatially_over_CVAO(df=df_obs, flight_ID=flight_ID,
title=title)
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi, tight=True)
plt.close()
# - Put observations and vars to plot into a dictionary
sns.set(color_codes=True)
sns.set_context(context, font_scale=font_scale)
# Force alt to be in units of km
ALT_var = 'Altitude (km)'
Y_unit = ALT_var
key4GEOSCF = 'GEOS-CF'
# if key4GEOSCF in df_mod.keys():
print('WARNING: Kludged below to only plot GEOS-Chem alt var')
if just_plot_GEOS_Chem:
df_mod[ALT_var] = AC.hPa_to_Km(df_mod['PRESS'].values)
else:
df_mod[ALT_var] = AC.hPa_to_Km(df_mod['model-lev'].values)
df_obs[ALT_var] = df_obs['ALT_GIN'].values / 1E3
#
if just_plot_GEOS_Chem:
data_d = {RunSet: df_mod, 'Obs.': df_obs}
else:
data_d = {'GEOS-CF': df_mod, 'Obs.': df_obs}
# - Now plot up flight time series plots by variable
if just_SLR:
title_str = "Altitude binned '{}' ({}) for all 'Straight and Level Runs'"
else:
title_str = "Altitude binned '{}' ({}) for all flights"
# Setup color dictionary for plotting...
color_dict = {'GEOS-CF': 'red', 'Obs.': 'k', RunSet: 'Orange'}
colors2use = AC.get_CB_color_cycle()
runs2color = [i for i in data_d.keys() if i not in color_dict.keys()]
for n_run, run in enumerate(runs2color):
color_dict[run] = colors2use[n_run]
# And conversion scales and units for variables
unit_d = {}
mod2obs_varnames = {
'CO': 'CO_AERO', 'O3': 'O3_TECO', 'NO2': 'no2_mr', 'NO': 'no_mr',
'HNO2': 'hono_mr',
'NOx': 'NOx'
}
units_d = {
'CO': 'ppbv', 'O3': 'ppbv', 'NO2': 'pptv', 'NO': 'pptv', 'NOx': 'pptv',
'HNO2': 'pptv', 'HONO': 'pptv',
}
range_d = {
'CO': (50, 400), 'O3': (-10, 100), 'NO2': (-50, 500), 'NO': (-50, 500),
'NOx': (-50, 500),
'HNO2': (-60, 60), 'HONO': (-60, 60),
}
NOx_specs = ['HNO2', 'NOx', 'NO', 'NO2', 'HONO']
# - by variable
runs = list(sorted(data_d.keys()))
print(runs)
# Which variables to use?
vars2plot = list(sorted(mod2obs_varnames.keys()))[::-1]
vars2plot = ['CO', 'O3', 'NOx', 'NO2', 'NO', 'HNO2']
print(vars2plot)
print(df_obs.columns)
vars2plot = [
i for i in vars2plot if mod2obs_varnames[i] in df_obs.columns
]
# What bins should be used?
bins = [0.5*i for i in np.arange(15)]
for var2plot in vars2plot:
fig = plt.figure()
ax = plt.gca()
# Now loop data
for n_key, key_ in enumerate(runs):
print(n_key, key_, var2plot)
#
if key_ == 'Obs.':
varname = mod2obs_varnames[var2plot]
else:
varname = var2plot
# Setup an axis label
units = units_d[var2plot]
xlabel = '{} ({})'.format(var2plot, units)
# Add alt to DataFrame
df = pd.DataFrame({
var2plot: data_d[key_][varname], ALT_var: data_d[key_][ALT_var]
})
#
if key_ != 'Obs.':
scaleby = AC.get_unit_scaling(units)
df[var2plot] = df[var2plot].values * scaleby
# drop any NaNs from the DataFrame
s_shape = df.shape
df.dropna(axis=0, how='any', inplace=True)
if s_shape != df.shape:
pcent = (float(df.shape[0]) - s_shape[0])/s_shape[0] * 100.
pstr_dtr = 'WANRING dropped values - shape {}=>{} ({:.2f})'
print(pstr_dtr.format(s_shape, df.shape, pcent))
# Plot up as binned boxplots using existing function
print(df.head())
try:
AC.binned_boxplots_by_altitude(df=df, fig=fig, ax=ax,
var2bin_by=ALT_var,
label=key_, xlabel=xlabel,
binned_var=var2plot,
num_of_datasets=len(runs),
bins=bins,
widths=0.15,
dataset_num=n_key,
color=color_dict[key_])
except:
pass
# Make NOx species be on a log scale
xscale = 'linear'
if (var2plot in NOx_specs):
xscale = 'linear'
# xscale = 'log'
ax.set_xscale(xscale)
if xscale == 'log':
xlim = xlim(0.3, 400)
ax.set_xlim(xlim)
# Beautify plot
plt.legend()
plt.title(title_str.format(var2plot, units, flight_ID))
plt.xlim(range_d[var2plot])
# Save to PDF
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi, tight=True)
if show_plot:
plt.show()
plt.close()
# - Save entire pdf
if close_pdf:
AC.plot2pdfmulti(pdff, savetitle, close=True, dpi=dpi)
plt.close('all')
def plt_comp_by_alt_4ARNA_all_DUST(dpi=320, just_SLR=True, flight_nums=[],
plt_model=False, show_plot=False,
context="paper", font_scale=0.75):
"""
Plot up altitude binned comparisons between core obs. and model data
"""
import seaborn as sns
# Which flights to plot?
# Just use non-transit ARNA flights
if len(flight_nums) == 0:
flight_nums = [
# Just use non-transit ARNA flights
# 216,
# 217,
218, 219, 220, 221, 222, 223, 224, 225,
]
flight_IDs = ['C{}'.format(i) for i in flight_nums]
# - Loop by flight and retrieve the files as dataframes (mod + obs)
# Model
dfs_obs = {}
for flight_ID in flight_IDs:
df = get_FAAM_core4flightnum(flight_ID=flight_ID)
# df = add_derived_FAAM_flags2df4flight(df=df, flight_ID=flight_ID)
df['flight_ID'] = flight_ID
dfs_obs[flight_ID] = df
# Observations
dfs_mod = {}
for flight_ID in flight_IDs:
df = get_GEOSCF4flightnum(flight_ID=flight_ID)
df = add_derived_FAAM_flags2df4flight(df=df, flight_ID=flight_ID)
df['flight_ID'] = flight_ID
dfs_mod[flight_ID] = df
# Combine to a single dataframe
df_mod = pd.concat([dfs_mod[i] for i in dfs_mod.keys()], axis=0)
df_obs = pd.concat([dfs_obs[i] for i in dfs_obs.keys()], axis=0)
# Only consider data during SLRs?
if just_SLR:
df_obs = df_obs.loc[df_obs['IS_SLR'] == True, :]
df_mod = df_mod.loc[df_mod['IS_SLR'] == True, :]
extr_str = '_JUST_SLR_DUST'
else:
extr_str = '_DUST'
# - Setup data objects or plotting
# Force alt to be in units of km
ALT_var = 'Altitude (km)'
Y_unit = ALT_var
df_mod[ALT_var] = AC.hPa_to_Km(df_mod['model-lev'].values)
df_obs[ALT_var] = df_obs['ALT_GIN'].values / 1E3
# Plot up just observations? Or model too?
data_d = {
'Obs.': df_obs.loc[df_obs['IS_DUST'] == False, :],
'Obs. (Dust)': df_obs.loc[df_obs['IS_DUST'] == True, :],
}
if plt_model:
data_d['GEOS-CF'] = df_mod.loc[df_mod['IS_DUST'] == False, :]
data_d['GEOS-CF (dust)'] = df_mod.loc[df_mod['IS_DUST'] == True, :]
extr_str += '_inc_MODEL'
# Setup PDF to save PDF plots to
savetitle = 'ARNA_altitude_binned_{}{}'.format('ALL', extr_str)
pdff = AC.plot2pdfmulti(title=savetitle, open=True, dpi=dpi)
# - Plot up location of flights
if just_SLR:
title = "Flight tracks for 'Straight and Level Runs' during ARNA"
else:
title = 'Flight tracks for all flights during ARNA'
plt_flightpath_spatially_over_CVAO(df=df_obs, flight_ID=flight_ID,
title=title)
AC.plot2pdfmulti(pdff, savetitle, dpi=dpi, tight=True)
plt.close()
# - Put observations and vars to plot into a dictionary
sns.set(color_codes=True)
sns.set_context(context, font_scale=font_scale)
# - Now plot up flight time series plots by variable
if just_SLR:
title_str = "Altitude binned '{}' ({}) for all 'Straight+Level Runs'"
else:
title_str = "Altitude binned '{}' ({}) for all flights"
# Setup color dictinoary
color_dict = {'GEOS-CF': 'red', 'Obs.': 'k'}
colors2use = AC.get_CB_color_cycle()
runs2color = [i for i in data_d.keys() if i not in color_dict.keys()]
for n_run, run in enumerate(runs2color):
color_dict[run] = colors2use[n_run]
unit_d = {}
mod2obs_varnames = {
'CO': 'CO_AERO', 'O3': 'O3_TECO', 'NO2': 'no2_mr', 'NO': 'no_mr',
'HNO2': 'hono_mr',
'NOx': 'NOx'
}
units_d = {
'CO': 'ppbv', 'O3': 'ppbv', 'NO2': 'pptv', 'NO': 'pptv', 'NOx': 'pptv',
'HNO2': 'pptv', 'HONO': 'pptv',
}
range_d = {
'CO': (50, 400), 'O3': (-10, 100), 'NO2': (-50, 500), 'NO': (-50, 500),
'NOx': (-50, 500),
'HNO2': (-60, 60), 'HONO': (-60, 60),
}
NOx_specs = ['HNO2', 'NOx', 'NO', 'NO2', 'HONO']
# - by variable
runs = list(sorted(data_d.keys()))
# Which variables to use?
vars2plot = list(sorted(mod2obs_varnames.keys()))[::-1]
vars2plot = ['CO', 'O3', 'NOx', 'NO2', 'NO', 'HNO2']
print(vars2plot)
print(df_obs.columns)
vars2plot = [
i for i in vars2plot if mod2obs_varnames[i] in df_obs.columns
]
# What bins should be used?
bins = [0.5*i for i in np.arange(15)]
for var2plot in vars2plot:
fig = plt.figure()
ax = plt.gca()
# Now loop data
for n_key, key_ in enumerate(runs):
print(n_key, key_, var2plot)
#
if ('Obs.' in key_):
varname = mod2obs_varnames[var2plot]
else:
varname = var2plot
# Setup an axis label
units = units_d[var2plot]
xlabel = '{} ({})'.format(var2plot, units)
| |
text: '噬嗑'
font_size: 60
font_name: './yahei.ttf'
on_release:
root.manager.current = 'menu'
root.manager.transition.direction = 'right'
Button:
background_normal: ''
background_color: 0,0,0,0
text: '运拙如同身受饥\\n幸得送饭又送食\\n适口充腹心欢喜\\n忧愁从此渐消移'
font_name: './yahei.ttf'
BoxLayout:
Image:
id: 21
source: '21.png'
allow_stretch: False
Label:
text: '噬嗑亨利用狱'
font_name: './yahei.ttf'
BoxLayout:
BoxLayout:
orientation: 'vertical'
canvas:
Color:
rgba: 1,1,1,1
Rectangle:
size: 140, 10
pos: 140, 240
Rectangle:
size: 60, 10
pos: 140, 200
Rectangle:
size: 60, 10
pos: 220, 200
Rectangle:
size: 140, 10
pos: 140, 160
Rectangle:
size: 60, 10
pos: 140, 120
Rectangle:
size: 60, 10
pos: 220, 120
Rectangle:
size: 60, 10
pos: 140, 80
Rectangle:
size: 60, 10
pos: 220, 80
Rectangle:
size: 140, 10
pos: 140, 40
BoxLayout:
orientation: 'vertical'
Label:
text: '上九何校灭耳凶'
font_name: './yahei.ttf'
Label:
text: '六五噬干肉得黄金贞厉无咎'
font_name: './yahei.ttf'
Label:
text: '九四噬干胏得金矢利艰贞吉'
font_name: './yahei.ttf'
Label:
text: '六三噬腊肉遇毒小吝无咎'
font_name: './yahei.ttf'
Label:
text: '六二噬肤灭鼻无咎'
font_name: './yahei.ttf'
Label:
text: '初九屦校灭趾无咎'
font_name: './yahei.ttf'
# Screen rule for hexagram 51 (Zhen / "Thunder"): top bar with a back-to-menu
# button and a fortune poem, middle row with the hexagram image and judgement
# text, bottom row with a canvas-drawn hexagram and one Label per line text.
<Screen51>:
    name: '51'
    BoxLayout:
        orientation: 'vertical'
        padding: [10,40,40,30]
        # Top bar: empty placeholder label, hexagram-name button, poem button.
        BoxLayout:
            size_hint_y: .3
            Label:
                text:''
                font_name: './yahei.ttf'
                font_size: 60
                markup: True
            # Releasing the hexagram-name button navigates back to 'menu'.
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '震'
                font_size: 60
                font_name: './yahei.ttf'
                on_release:
                    root.manager.current = 'menu'
                    root.manager.transition.direction = 'right'
            # Transparent button used purely to display the four-line poem.
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '一口金钟在淤泥\\n人人拿着当玩石\\n忽然一日钟悬起\\n响亮一声天下知'
                font_name: './yahei.ttf'
        # Middle row: hexagram picture plus the judgement text.
        BoxLayout:
            Image:
                # NOTE(review): numeric id (not a Python identifier) - confirm
                # the targeted Kivy version accepts non-identifier kv ids.
                id: 51
                source: '51.png'
                allow_stretch: False
            Label:
                text: '震亨震来虩虩笑言哑哑震惊百里不丧匕鬯'
                font_name: './yahei.ttf'
        # Bottom row: drawn hexagram (left) and the six line texts (right).
        BoxLayout:
            BoxLayout:
                orientation: 'vertical'
                # Fixed-position bars: one 140px-wide bar per unbroken (yang)
                # line; two 60px bars at the same y per broken (yin) line.
                canvas:
                    Color:
                        rgba: 1,1,1,1
                    Rectangle:
                        size: 60, 10
                        pos: 140, 240
                    Rectangle:
                        size: 60, 10
                        pos: 220, 240
                    Rectangle:
                        size: 60, 10
                        pos: 140, 200
                    Rectangle:
                        size: 60, 10
                        pos: 220, 200
                    Rectangle:
                        size: 140, 10
                        pos: 140, 160
                    Rectangle:
                        size: 60, 10
                        pos: 140, 120
                    Rectangle:
                        size: 60, 10
                        pos: 220, 120
                    Rectangle:
                        size: 60, 10
                        pos: 140, 80
                    Rectangle:
                        size: 60, 10
                        pos: 220, 80
                    Rectangle:
                        size: 140, 10
                        pos: 140, 40
            # Line texts listed top line (上) first down to bottom line (初).
            BoxLayout:
                orientation: 'vertical'
                Label:
                    text: '上六震索索视矍矍征凶震不于其躬于其邻无咎婚媾有言'
                    font_name: './yahei.ttf'
                Label:
                    text: '六五震往来厉意无丧有事'
                    font_name: './yahei.ttf'
                Label:
                    text: '九四震遂泥'
                    font_name: './yahei.ttf'
                Label:
                    text: '六三震苏苏震行无眚'
                    font_name: './yahei.ttf'
                Label:
                    text: '六二震来厉亿丧贝跻于九陵勿逐七日得'
                    font_name: './yahei.ttf'
                Label:
                    text: '初九震来虩虩后笑言哑哑吉'
                    font_name: './yahei.ttf'
# Screen rule for hexagram 42 (Yi / "Increase"): top bar with a back-to-menu
# button and a fortune poem, middle row with the hexagram image and judgement
# text, bottom row with a canvas-drawn hexagram and one Label per line text.
<Screen42>:
    name: '42'
    BoxLayout:
        orientation: 'vertical'
        padding: [10,40,40,30]
        # Top bar: empty placeholder label, hexagram-name button, poem button.
        BoxLayout:
            size_hint_y: .3
            Label:
                text:''
                font_name: './yahei.ttf'
                font_size: 60
                markup: True
            # Releasing the hexagram-name button navigates back to 'menu'.
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '益'
                font_size: 60
                font_name: './yahei.ttf'
                on_release:
                    root.manager.current = 'menu'
                    root.manager.transition.direction = 'right'
            # Transparent button used purely to display the four-line poem.
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '时来运转吉气发\\n多年枯木又开花\\n枝叶重生多茂盛\\n几人见了几人夸'
                font_name: './yahei.ttf'
        # Middle row: hexagram picture plus the judgement text.
        BoxLayout:
            Image:
                # NOTE(review): numeric id (not a Python identifier) - confirm
                # the targeted Kivy version accepts non-identifier kv ids.
                id: 42
                source: '42.png'
                allow_stretch: False
            Label:
                text: '益利有攸往利涉大川'
                font_name: './yahei.ttf'
        # Bottom row: drawn hexagram (left) and the six line texts (right).
        BoxLayout:
            BoxLayout:
                orientation: 'vertical'
                # Fixed-position bars: one 140px-wide bar per unbroken (yang)
                # line; two 60px bars at the same y per broken (yin) line.
                canvas:
                    Color:
                        rgba: 1,1,1,1
                    Rectangle:
                        size: 140, 10
                        pos: 140, 240
                    Rectangle:
                        size: 140, 10
                        pos: 140, 200
                    Rectangle:
                        size: 60, 10
                        pos: 140, 160
                    Rectangle:
                        size: 60, 10
                        pos: 220, 160
                    Rectangle:
                        size: 60, 10
                        pos: 140, 120
                    Rectangle:
                        size: 60, 10
                        pos: 220, 120
                    Rectangle:
                        size: 60, 10
                        pos: 140, 80
                    Rectangle:
                        size: 60, 10
                        pos: 220, 80
                    Rectangle:
                        size: 140, 10
                        pos: 140, 40
            # Line texts listed top line (上) first down to bottom line (初).
            BoxLayout:
                orientation: 'vertical'
                Label:
                    text: '上九莫益之或击之立心勿恒凶'
                    font_name: './yahei.ttf'
                Label:
                    text: '九五有孚惠心勿问元吉有孚惠我德'
                    font_name: './yahei.ttf'
                Label:
                    text: '六四中行告公従利用为依迁国'
                    font_name: './yahei.ttf'
                Label:
                    text: '六三益之用凶事无咎有孚中行告公用圭'
                    font_name: './yahei.ttf'
                Label:
                    text: '六二或益之十朋之龟弗克违永贞吉王用享于帝吉'
                    font_name: './yahei.ttf'
                Label:
                    text: '初九利用为大作元吉无咎'
                    font_name: './yahei.ttf'
# Screen rule for hexagram 3 (Zhun / "Difficulty at the Beginning"): top bar
# with a back-to-menu button and a fortune poem, middle row with the hexagram
# image and judgement text, bottom row with a canvas-drawn hexagram and one
# Label per line text.
<Screen3>:
    name: '3'
    BoxLayout:
        orientation: 'vertical'
        padding: [10,40,40,30]
        # Top bar: empty placeholder label, hexagram-name button, poem button.
        BoxLayout:
            size_hint_y: .3
            Label:
                text:''
                font_name: './yahei.ttf'
                font_size: 60
                markup: True
            # Releasing the hexagram-name button navigates back to 'menu'.
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '屯'
                font_size: 60
                font_name: './yahei.ttf'
                on_release:
                    root.manager.current = 'menu'
                    root.manager.transition.direction = 'right'
            # Transparent button used purely to display the four-line poem.
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '风刮乱丝不见头\\n颠三倒四犯忧愁\\n慢从款来左顺遂\\n急促反惹不自由'
                font_name: './yahei.ttf'
        # Middle row: hexagram picture plus the judgement text.
        BoxLayout:
            Image:
                # NOTE(review): numeric id (not a Python identifier) - confirm
                # the targeted Kivy version accepts non-identifier kv ids.
                id: 3
                source: '3.png'
                allow_stretch: False
            Label:
                text: '屯元亨利贞勿用有攸往利建侯'
                font_name: './yahei.ttf'
        # Bottom row: drawn hexagram (left) and the six line texts (right).
        BoxLayout:
            BoxLayout:
                orientation: 'vertical'
                # Fixed-position bars: one 140px-wide bar per unbroken (yang)
                # line; two 60px bars at the same y per broken (yin) line.
                canvas:
                    Color:
                        rgba: 1,1,1,1
                    Rectangle:
                        size: 60, 10
                        pos: 140, 240
                    Rectangle:
                        size: 60, 10
                        pos: 220, 240
                    Rectangle:
                        size: 140, 10
                        pos: 140, 200
                    Rectangle:
                        size: 60, 10
                        pos: 140, 160
                    Rectangle:
                        size: 60, 10
                        pos: 220, 160
                    Rectangle:
                        size: 60, 10
                        pos: 140, 120
                    Rectangle:
                        size: 60, 10
                        pos: 220, 120
                    Rectangle:
                        size: 60, 10
                        pos: 140, 80
                    Rectangle:
                        size: 60, 10
                        pos: 220, 80
                    Rectangle:
                        size: 140, 10
                        pos: 140, 40
            # Line texts listed top line (上) first down to bottom line (初).
            BoxLayout:
                orientation: 'vertical'
                Label:
                    text: '上六乘马班如泣血涟如'
                    font_name: './yahei.ttf'
                Label:
                    text: '九五屯其膏小贞吉大贞凶'
                    font_name: './yahei.ttf'
                Label:
                    text: '六四乘马班如求婚媾往吉无不利'
                    font_name: './yahei.ttf'
                Label:
                    text: '六三即鹿无虞惟入于林中君子几不如舍往吝'
                    font_name: './yahei.ttf'
                Label:
                    text: '六二屯如邅如乘马班如匪寇婚媾女子贞不字十年乃字'
                    font_name: './yahei.ttf'
                Label:
                    text: '初九磐桓利居贞利建侯'
                    font_name: './yahei.ttf'
# Screen rule for hexagram 27 (Yi / "Nourishment"): top bar with a
# back-to-menu button and a fortune poem, middle row with the hexagram image
# and judgement text, bottom row with a canvas-drawn hexagram and one Label
# per line text.
<Screen27>:
    name: '27'
    BoxLayout:
        orientation: 'vertical'
        padding: [10,40,40,30]
        # Top bar: empty placeholder label, hexagram-name button, poem button.
        BoxLayout:
            size_hint_y: .3
            Label:
                text:''
                font_name: './yahei.ttf'
                font_size: 60
                markup: True
            # Releasing the hexagram-name button navigates back to 'menu'.
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '颐'
                font_size: 60
                font_name: './yahei.ttf'
                on_release:
                    root.manager.current = 'menu'
                    root.manager.transition.direction = 'right'
            # Transparent button used purely to display the four-line poem.
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '太公独钓渭水河\\n手执丝杆忧愁多\\n时来又遇文王访\\n自此永不受折磨'
                font_name: './yahei.ttf'
        # Middle row: hexagram picture plus the judgement text.
        BoxLayout:
            Image:
                # NOTE(review): numeric id (not a Python identifier) - confirm
                # the targeted Kivy version accepts non-identifier kv ids.
                id: 27
                source: '27.png'
                allow_stretch: False
            Label:
                text: '颐贞吉观颐自求口实'
                font_name: './yahei.ttf'
        # Bottom row: drawn hexagram (left) and the six line texts (right).
        BoxLayout:
            BoxLayout:
                orientation: 'vertical'
                # Fixed-position bars: one 140px-wide bar per unbroken (yang)
                # line; two 60px bars at the same y per broken (yin) line.
                canvas:
                    Color:
                        rgba: 1,1,1,1
                    Rectangle:
                        size: 140, 10
                        pos: 140, 240
                    Rectangle:
                        size: 60, 10
                        pos: 140, 200
                    Rectangle:
                        size: 60, 10
                        pos: 220, 200
                    Rectangle:
                        size: 60, 10
                        pos: 140, 160
                    Rectangle:
                        size: 60, 10
                        pos: 220, 160
                    Rectangle:
                        size: 60, 10
                        pos: 140, 120
                    Rectangle:
                        size: 60, 10
                        pos: 220, 120
                    Rectangle:
                        size: 60, 10
                        pos: 140, 80
                    Rectangle:
                        size: 60, 10
                        pos: 220, 80
                    Rectangle:
                        size: 140, 10
                        pos: 140, 40
            # Line texts listed top line (上) first down to bottom line (初).
            BoxLayout:
                orientation: 'vertical'
                Label:
                    text: '上九由颐厉吉利涉大川'
                    font_name: './yahei.ttf'
                Label:
                    text: '六五拂经居贞吉不可涉大川'
                    font_name: './yahei.ttf'
                Label:
                    text: '六四颠颐吉虎视眈眈,其欲逐逐,无咎'
                    font_name: './yahei.ttf'
                Label:
                    text: '六三拂颐贞凶十年勿用无攸利'
                    font_name: './yahei.ttf'
                Label:
                    text: '六二颠颐拂经于丘颐征凶'
                    font_name: './yahei.ttf'
                Label:
                    text: '初九舍尔灵龟观我朵颐凶'
                    font_name: './yahei.ttf'
# Screen rule for hexagram 24 (Fu / "Return"): top bar with a season label,
# a back-to-menu button and a fortune poem, middle row with the hexagram
# image and judgement text, bottom row with a canvas-drawn hexagram and one
# Label per line text.
<Screen24>:
    name: '24'
    BoxLayout:
        orientation: 'vertical'
        padding: [10,40,40,30]
        # Top bar: month/solar-term/zodiac label, hexagram button, poem.
        BoxLayout:
            size_hint_y: .3
            Label:
                text:'十一月\\n大雪\\n鼠'
                font_name: './yahei.ttf'
                markup: True
            # Releasing the hexagram-name button navigates back to 'menu'.
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '复'
                font_size: 60
                font_name: './yahei.ttf'
                on_release:
                    root.manager.current = 'menu'
                    root.manager.transition.direction = 'right'
            # Transparent button used purely to display the four-line poem.
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '马氏太公不相合\\n世人占之忧疑多\\n恩人无义反为怨\\n是非平地起风波'
                font_name: './yahei.ttf'
        # Middle row: hexagram picture plus the judgement text.
        BoxLayout:
            Image:
                # NOTE(review): numeric id (not a Python identifier) - confirm
                # the targeted Kivy version accepts non-identifier kv ids.
                id: 24
                source: '24.png'
                allow_stretch: False
            Label:
                text: '复亨出入无疾朋来无咎反复其道七日来复利有攸往'
                font_name: './yahei.ttf'
        # Bottom row: drawn hexagram (left) and the six line texts (right).
        BoxLayout:
            BoxLayout:
                orientation: 'vertical'
                # Fixed-position bars: one 140px-wide bar per unbroken (yang)
                # line; two 60px bars at the same y per broken (yin) line.
                canvas:
                    Color:
                        rgba: 1,1,1,1
                    Rectangle:
                        size: 60, 10
                        pos: 140, 240
                    Rectangle:
                        size: 60, 10
                        pos: 220, 240
                    Rectangle:
                        size: 60, 10
                        pos: 140, 200
                    Rectangle:
                        size: 60, 10
                        pos: 220, 200
                    Rectangle:
                        size: 60, 10
                        pos: 140, 160
                    Rectangle:
                        size: 60, 10
                        pos: 220, 160
                    Rectangle:
                        size: 60, 10
                        pos: 140, 120
                    Rectangle:
                        size: 60, 10
                        pos: 220, 120
                    Rectangle:
                        size: 60, 10
                        pos: 140, 80
                    Rectangle:
                        size: 60, 10
                        pos: 220, 80
                    Rectangle:
                        size: 140, 10
                        pos: 140, 40
            # Line texts listed top line (上) first down to bottom line (初).
            BoxLayout:
                orientation: 'vertical'
                Label:
                    text: '上六迷复凶有灾眚用行师终有大败以其国君凶至于十年不克征'
                    font_name: './yahei.ttf'
                Label:
                    text: '六五敦复无悔'
                    font_name: './yahei.ttf'
                Label:
                    text: '六四中行独复'
                    font_name: './yahei.ttf'
                Label:
                    text: '六三频复厉无咎'
                    font_name: './yahei.ttf'
                Label:
                    text: '六二休复吉'
                    font_name: './yahei.ttf'
                Label:
                    text: '初九不远复无祗悔元吉'
                    font_name: './yahei.ttf'
# Screen rule for hexagram 44 (Gou / "Coming to Meet"): top bar with a season
# label, a back-to-menu button and a fortune poem, middle row with the
# hexagram image and judgement text, bottom row with a canvas-drawn hexagram
# and one Label per line text.
<Screen44>:
    name: '44'
    BoxLayout:
        orientation: 'vertical'
        padding: [10,40,40,30]
        # Top bar: month/solar-term/zodiac label, hexagram button, poem.
        BoxLayout:
            size_hint_y: .3
            Label:
                text:'五月\\n芒种\\n马'
                font_name: './yahei.ttf'
                markup: True
            # Releasing the hexagram-name button navigates back to 'menu'.
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '姤'
                font_size: 60
                font_name: './yahei.ttf'
                on_release:
                    root.manager.current = 'menu'
                    root.manager.transition.direction = 'right'
            # Transparent button used purely to display the four-line poem.
            Button:
                background_normal: ''
                background_color: 0,0,0,0
                text: '他乡遇友喜气欢\\n须知运气福重添\\n自今交了顺当运\\n向后管保不相干'
                font_name: './yahei.ttf'
        # Middle row: hexagram picture plus the judgement text.
        BoxLayout:
            Image:
                # NOTE(review): numeric id (not a Python identifier) - confirm
                # the targeted Kivy version accepts non-identifier kv ids.
                id: 44
                source: '44.png'
                allow_stretch: False
            Label:
                text: '姤女壮勿用取女'
                font_name: './yahei.ttf'
        # Bottom row: drawn hexagram (left) and the six line texts (right).
        BoxLayout:
            BoxLayout:
                orientation: 'vertical'
                # Fixed-position bars: one 140px-wide bar per unbroken (yang)
                # line; two 60px bars at the same y per broken (yin) line.
                canvas:
                    Color:
                        rgba: 1,1,1,1
                    Rectangle:
                        size: 140, 10
                        pos: 140, 240
                    Rectangle:
                        size: 140, 10
                        pos: 140, 200
                    Rectangle:
                        size: 140, 10
                        pos: 140, 160
                    Rectangle:
                        size: 140, 10
                        pos: 140, 120
                    Rectangle:
                        size: 140, 10
                        pos: 140, 80
                    Rectangle:
                        size: 60, 10
                        pos: 140, 40
                    Rectangle:
                        size: 60, 10
                        pos: 220, 40
            # Line texts listed top line (上) first down to bottom line (初).
            BoxLayout:
                orientation: 'vertical'
                Label:
                    text: '上九姤其角吝无咎'
                    font_name: './yahei.ttf'
                Label:
                    text: '九五以杞包瓜含章有陨自天'
                    font_name: './yahei.ttf'
                Label:
                    text: '九四包无鱼起凶'
                    font_name: './yahei.ttf'
                Label:
                    text: '九三臀无肤其行次且厉无大咎'
                    font_name: './yahei.ttf'
                Label:
                    text: '九二包有鱼无咎不利宾'
                    font_name: './yahei.ttf'
                Label:
                    text: '初六系于金柅贞吉有攸往见凶羸豕孚蹢躅'
                    font_name: './yahei.ttf'
<Screen28>:
name: '28'
BoxLayout:
orientation: 'vertical'
padding: [10,40,40,30]
BoxLayout:
size_hint_y: .3
Label:
text:''
font_name: './yahei.ttf'
font_size: 60
markup: True
Button:
background_normal: ''
background_color: 0,0,0,0
text: '大过'
font_size: 60
font_name: './yahei.ttf'
on_release:
root.manager.current = 'menu'
root.manager.transition.direction = 'right'
Button:
background_normal: ''
background_color: 0,0,0,0
text: '夜晚梦里梦金银\\n醒来仍不见一文\\n目下只宜求本分\\n思想络是空劳神'
font_name: './yahei.ttf'
BoxLayout:
Image:
id: 28
source: '28.png'
allow_stretch: False
Label:
text: '大过栋挠利有攸往亨'
font_name: './yahei.ttf'
BoxLayout:
BoxLayout:
orientation: 'vertical'
canvas:
Color:
rgba: 1,1,1,1
Rectangle:
size: 60, 10
pos: 140, 240
Rectangle:
size: 60, 10
pos: 220, 240
Rectangle:
size: 140, 10
pos: 140, 200
Rectangle:
size: 140, 10
pos: 140, 160
Rectangle:
size: 140, 10
pos: 140, 120
Rectangle:
size: 140, 10
pos: 140, 80
Rectangle:
size: 60, 10
pos: | |
+ cj.trailing_spaces(16, IRTE, 0) + " | IRTE: water routing method 0=variable travel-time 1=Muskingum" + \
"\n" + cj.trailing_spaces(16, MSK_CO1, 3) + " | MSK_CO1 : Calibration coefficient used to control impact of the storage time constant (Km) for normal flow" + \
"\n" + cj.trailing_spaces(16, MSK_CO2, 3) + " | MSK_CO2 : Calibration coefficient used to control impact of the storage time constant (Km) for low flow " + \
"\n" + cj.trailing_spaces(16, MSK_X, 3) + " | MSK_X : Weighting factor controlling relative importance of inflow rate and outflow rate in determining water storage in reach segment" + \
"\n" + cj.trailing_spaces(16, IDEG, 0) + " | IDEG: channel degradation code" + \
"\n" + cj.trailing_spaces(16, IWQ, 0) + " | IWQ: in-stream water quality: 1=model in-stream water quality" + \
"\n" + " basins.wwq | WWQFILE: name of watershed water quality file" + \
"\n" + cj.trailing_spaces(16, TRNSRCH, 3) + " | TRNSRCH: reach transmission loss partitioning to deep aquifer" + \
"\n" + cj.trailing_spaces(16, EVRCH, 3) + " | EVRCH : Reach evaporation adjustment factor" + \
"\n" + cj.trailing_spaces(16, IRTPEST, 0) + " | IRTPEST : Number of pesticide to be routed through the watershed channel network" + \
"\n" + cj.trailing_spaces(16, ICN, 0) + " | ICN : Daily curve number calculation method" + \
"\n" + cj.trailing_spaces(16, CNCOEF, 3) + " | CNCOEF : Plant ET curve number coefficient" + \
"\n" + cj.trailing_spaces(16, CDN, 3) + " | CDN : Denitrification exponential rate coefficient" + \
"\n" + cj.trailing_spaces(16, SDNCO, 3) + " | SDNCO : Denitrification threshold water content" + \
"\n" + cj.trailing_spaces(16, BACT_SWF, 3) + " | BACT_SWF : Fraction of manure applied to land areas that has active colony forming units" + \
"\n" + cj.trailing_spaces(16, BACTMX, 3) + " | BACTMX : Bacteria percolation coefficient [10 m3/Mg]." + \
"\n" + cj.trailing_spaces(16, BACTMINLP, 3) + " | BACTMINLP : Minimum daily bacteria loss for less persistent bacteria [# cfu/m2]" + \
"\n" + cj.trailing_spaces(16, BACTMINP, 3) + " | BACTMINP : Minimum daily bacteria loss for persistent bacteria [# cfu/m2]" + \
"\n" + cj.trailing_spaces(16, WDLPRCH, 3) + " | WDLPRCH: Die-off factor for less persistent bacteria in streams (moving water) at 20 C [1/day]" + \
"\n" + cj.trailing_spaces(16, WDPRCH, 3) + " | WDPRCH : Die-off factor for persistent bacteria in streams (moving water) at 20 C [1/day]" + \
"\n" + cj.trailing_spaces(16, WDLPRES, 3) + " | WDLPRES : Die-off factor for less persistent bacteria in water bodies (still water) at 20 C [1/day]" + \
"\n" + cj.trailing_spaces(16, WDPRES, 3) + " | WDPRES : Die-off factor for persistent bacteria in water bodies (still water) at 20 C [1/day]" + \
"\n" + cj.trailing_spaces(16, TB_ADJ, 3) + " | TB_ADJ : New variable in testing ...Adjustment factor for subdaily unit hydrograph basetime" + \
"\n" + cj.trailing_spaces(16, DEP_IMP, 3) + " | DEPIMP_BSN : Depth to impervious layer for modeling perched water tables [mm]" + \
"\n" + cj.trailing_spaces(16, DDRAIN_BSN, 3) + " | DDRAIN_BSN : Depth to the sub-surface drain [mm]" + \
"\n" + cj.trailing_spaces(16, TDRAIN_BSN, 3) + " | TDRAIN_BSN : Time to drain soil to field capacity [hours]" + \
"\n" + cj.trailing_spaces(16, GDRAIN_BSN, 3) + " | GDRAIN_BSN : Drain tile lag time [hours]" + \
"\n" + cj.trailing_spaces(16, CN_FROZ, 6) + " | CN_FROZ : Parameter for frozen soil adjustment on infiltration/runoff" + \
"\n" + cj.trailing_spaces(16, DORM_HR, 3) + " | DORM_HR : Time threshold used to define dormancy [hours]" + \
"\n" + cj.trailing_spaces(16, SMXCO, 3) + " | SMXCO : Adjustment factor for maximum curve number S factor" + \
"\n" + cj.trailing_spaces(16, FIXCO, 3) + " | FIXCO : Nitrogen fixation coefficient" + \
"\n" + cj.trailing_spaces(16, NFIXMX, 3) + " | NFIXMX : Maximum daily-n fixation [kg/ha]" + \
"\n" + cj.trailing_spaces(16, ANION_EXCL_BSN, 3) + " | ANION_EXCL_BSN : Fraction of porosity from which anions are excluded" + \
"\n" + cj.trailing_spaces(16, CH_ONCO_BSN, 3) + " | CH_ONCO_BSN : Channel organic nitrogen concentration in basin [ppm]" + \
"\n" + cj.trailing_spaces(16, CH_OPCO_BSN, 3) + " | CH_OPCO_BSN : Channel organic phosphorus concentration in basin [ppm]" + \
"\n" + cj.trailing_spaces(16, HLIFE_NGW_BSN, 3) + " | HLIFE_NGW_BSN : Half-life of nitrogen in groundwater [days]" + \
"\n" + cj.trailing_spaces(16, RCN_SUB_BSN, 3) + " | RCN_SUB_BSN : Concentration of nitrate in precipitation [ppm]" + \
"\n" + cj.trailing_spaces(16, BC1_BSN, 3) + " | BC1_BSN : Rate constant for biological oxidation of NH3 [1/day]" + \
"\n" + cj.trailing_spaces(16, BC2_BSN, 3) + " | BC2_BSN : Rate constant for biological oxidation NO2 to NO3 [1/day]" + \
"\n" + cj.trailing_spaces(16, BC3_BSN, 3) + " | BC3_BSN : Rate constant for hydrolosis of organic nitrogen to ammonia [1/day]" + \
"\n" + cj.trailing_spaces(16, BC4_BSN, 3) + " | BC4_BSN : Rate constant for decay of organic phosphorus to dissolved phosphorus [1/day]" + \
"\n" + cj.trailing_spaces(16, DECR_MIN, 3) + " | DECR_MIN: Minimum daily residue decay" + \
"\n" + cj.trailing_spaces(16, ICFAC, 3) + " | ICFAC : C-factor calculation method" + \
"\n" + cj.trailing_spaces(16, RSD_COVCO, 3) + " | RSD_COVCO : Residue cover factor for computing fraction of cover" + \
"\n" + cj.trailing_spaces(16, VCRIT, 3) + " | VCRIT : Critical velocity" + \
"\n" + cj.trailing_spaces(16, CSWAT, 0) + " | CSWAT : Code for new carbon routines" + \
"\n" + cj.trailing_spaces(16, RES_STLR_CO, 3) + " | RES_STLR_CO : Reservoir sediment settling coefficient" + \
"\n" + cj.trailing_spaces(16, BFLO_DIST, 3) + " | BFLO_DIST 0-1 (1:profile of baseflow in a day follows rainfall pattern, 0:baseflow evenly distributed to each time step during a day" + \
"\n" + cj.trailing_spaces(16, IUH, 0) + " | IUH : Unit hydrograph method: 1=triangular UH, 2=gamma function UH" + \
"\n" + cj.trailing_spaces(16, UHALPHA, 3) + " | UHALPHA : alpha coefficient for gamma function unit hydrograph. Required if iuh=2 is selected" + \
"\n" + "Land Use types in urban.dat that do not make runoff to urban BMPs:" + \
"\n" + \
"\n" + "Subdaily Erosion:" + \
"\n" + cj.trailing_spaces(16, EROS_SPL, 3) + " | EROS_SPL: The splash erosion coefficient ranges 0.9 - 3.1" + \
"\n" + cj.trailing_spaces(16, RILL_MULT, 3) + " | RILL_MULT: Multiplier to USLE_K for soil susceptible to rill erosion, ranges 0.5 - 2.0" + \
"\n" + cj.trailing_spaces(16, EROS_EXPO, 3) + " | EROS_EXPO: an exponent in the overland flow erosion equation, ranges 1.5 - 3.0" + \
"\n" + cj.trailing_spaces(16, SUBD_CHSED, 3) + " | SUBD_CHSED: 1=Brownlie(1981) model, 2=Yang(1973,1984) model" + \
"\n" + cj.trailing_spaces(16, C_FACTOR, 3) + " | C_FACTOR: Scaling parameter for Cover and management factor in ANSWERS erosion model" + \
"\n" + cj.trailing_spaces(16, CH_D50, 1) + " | CH_D50 : median particle diameter of channel bed [mm]" + \
"\n" + cj.trailing_spaces(16, SIG_G, 3) + " | SIG_G : geometric standard deviation of particle sizes" + \
"\n" + cj.trailing_spaces(16, RE_BSN, 2) + " | RE_BSN: Effective radius of drains" + \
"\n" + cj.trailing_spaces(16, SDRAIN_BSN, 2) + " | SDRAIN_BSN: Distance between two drain or tile tubes" + \
"\n" + cj.trailing_spaces(16, DRAIN_CO_BSN, 2) + " | DRAIN_CO_BSN: Drainage coefficient" + \
"\n" + cj.trailing_spaces(16, PC_BSN, 3) + " | PC_BSN: Pump capacity" + \
"\n" + cj.trailing_spaces(16, LATKSATF_BSN, 2) + " | LATKSATF_BSN: Multiplication factor to determine lateral ksat from SWAT ksat input value for HRU" + \
"\n" + cj.trailing_spaces(16, ITDRN, 0) + " | ITDRN: Tile drainage equations flag" + \
"\n" + cj.trailing_spaces(16, IWTDN, 0) + " | IWTDN: Water table depth algorithms flag" + \
"\n" | |
Rey"),
("Dewey University-Juana D�az","Dewey University-Juana D�az"),
("Dewey University-Manati","Dewey University-Manati"),
("Dewey University-Yabucoa","Dewey University-Yabucoa"),
("DiGrigoli School of Cosmetology","DiGrigoli School of Cosmetology"),
("Diablo Valley College","Diablo Valley College"),
("Diamond Beauty College","Diamond Beauty College"),
("Diamonds Cosmetology College","Diamonds Cosmetology College"),
("Dickinson College","Dickinson College"),
("Dickinson State University","Dickinson State University"),
("Diesel Driving Academy-Baton Rouge","Diesel Driving Academy-Baton Rouge"),
("Diesel Driving Academy-Shreveport","Diesel Driving Academy-Shreveport"),
("DigiPen Institute of Technology","DigiPen Institute of Technology"),
("Digital Film Academy","Digital Film Academy"),
("Digital Media Arts College","Digital Media Arts College"),
("Dillard University","Dillard University"),
("Diman Regional Technical Institute","Diman Regional Technical Institute"),
("Dine College","Dine College"),
("Divers Academy International","Divers Academy International"),
("Divers Institute of Technology","Divers Institute of Technology"),
("Diversified Vocational College","Diversified Vocational College"),
("Divine Word College","Divine Word College"),
("Dixie Applied Technology College","Dixie Applied Technology College"),
("Dixie State University","Dixie State University"),
("Doane College-Crete","Doane College-Crete"),
("Doane College-Lincoln Grand Island and Master","Doane College-Lincoln Grand Island and Master"),
("Dodge City Community College","Dodge City Community College"),
("Dominican College of Blauvelt","Dominican College of Blauvelt"),
("Dominican School of Philosophy & Theology","Dominican School of Philosophy & Theology"),
("Dominican University of California","Dominican University of California"),
("Dominican University","Dominican University"),
("Dominion School of Hair Design","Dominion School of Hair Design"),
("<NAME> Beauty School","<NAME> Beauty School"),
("<NAME> School of Hair Design","<NAME> School of Hair Design"),
("Dongguk University-Los Angeles","Dongguk University-Los Angeles"),
("Donna's Academy of Hair Design","Donna's Academy of Hair Design"),
("Donnelly College","Donnelly College"),
("Dordt College","Dordt College"),
("Dorothea Hopfer School of Nursing-Mt Vernon Hospital","Dorothea Hopfer School of Nursing-Mt Vernon Hospital"),
("Dorsey Business Schools-Farmington Hills","Dorsey Business Schools-Farmington Hills"),
("Dorsey Business Schools-Lansing","Dorsey Business Schools-Lansing"),
("Dorsey Business Schools-Madison Heights","Dorsey Business Schools-Madison Heights"),
("Dorsey Business Schools-Roseville Culinary Academy","Dorsey Business Schools-Roseville Culinary Academy"),
("Dorsey Business Schools-Roseville","Dorsey Business Schools-Roseville"),
("Dorsey Business Schools-Saginaw","Dorsey Business Schools-Saginaw"),
("Dorsey Business Schools-Southgate","Dorsey Business Schools-Southgate"),
("Dorsey Business Schools-Waterford Pontiac","Dorsey Business Schools-Waterford Pontiac"),
("Dorsey Business Schools-Wayne","Dorsey Business Schools-Wayne"),
("Douglas Education Center","Douglas Education Center"),
("Douglas J Aveda Institute","Douglas J Aveda Institute"),
("Dover Business College","Dover Business College"),
("Dowling College","Dowling College"),
("Downey Adult School","Downey Adult School"),
("Dragon Rises College of Oriental Medicine","Dragon Rises College of Oriental Medicine"),
("Drake College of Business-Elizabeth","Drake College of Business-Elizabeth"),
("Drake College of Business-Newark","Drake College of Business-Newark"),
("Drake University","Drake University"),
("Drew University","Drew University"),
("Drexel University","Drexel University"),
("Drury University","Drury University"),
("Du Bois Business College-Du Bois","Du Bois Business College-Du Bois"),
("Du Bois Business College-Huntingdon","Du Bois Business College-Huntingdon"),
("Du Bois Business College-Oil City","Du Bois Business College-Oil City"),
("DuVall's School of Cosmetology","DuVall's School of Cosmetology"),
("Duke University","Duke University"),
("Duluth Business University","Duluth Business University"),
("Dunwoody College of Technology","Dunwoody College of Technology"),
("Duquesne University","Duquesne University"),
("Durham Beauty Academy","Durham Beauty Academy"),
("Durham Technical Community College","Durham Technical Community College"),
("Dutchess BOCES-Practical Nursing Program","Dutchess BOCES-Practical Nursing Program"),
("Dutchess Community College","Dutchess Community College"),
("Dyersburg State Community College","Dyersburg State Community College"),
("E Q School of Hair Design","E Q School of Hair Design"),
("ECPI University","ECPI University"),
("EDIC College","EDIC College"),
("EDMC Central Administrative Office","EDMC Central Administrative Office"),
("EDP School of Computer Programming","EDP School of Computer Programming"),
("EDP Univeristy of Puerto Rico Inc-San Juan","EDP Univeristy of Puerto Rico Inc-San Juan"),
("EDP University of Puerto Rico Inc-San Sebastian","EDP University of Puerto Rico Inc-San Sebastian"),
("EHOVE Career Center","EHOVE Career Center"),
("EINE Inc","EINE Inc"),
("EMS Training Institute","EMS Training Institute"),
("ETI School of Skilled Trades","ETI School of Skilled Trades"),
("ETI School of Skilled Trades","ETI School of Skilled Trades"),
("ETI Technical College","ETI Technical College"),
("Eagle Gate College-Layton","Eagle Gate College-Layton"),
("Eagle Gate College-Murray","Eagle Gate College-Murray"),
("Eagle Gate College-Salt Lake City","Eagle Gate College-Salt Lake City"),
("Earlham College","Earlham College"),
("East Arkansas Community College","East Arkansas Community College"),
("East Carolina University","East Carolina University"),
("East Central College","East Central College"),
("East Central Community College","East Central Community College"),
("East Central University","East Central University"),
("East Georgia State College","East Georgia State College"),
("East Los Angeles College","East Los Angeles College"),
("East Mississippi Community College","East Mississippi Community College"),
("East San Gabriel Valley Regional Occupational Program","East San Gabriel Valley Regional Occupational Program"),
("East Stroudsburg University of Pennsylvania","East Stroudsburg University of Pennsylvania"),
("East Tennessee State University","East Tennessee State University"),
("East Texas Baptist University","East Texas Baptist University"),
("East Valley Institute of Technology","East Valley Institute of Technology"),
("East West College of Natural Medicine","East West College of Natural Medicine"),
("East West College of the Healing Arts","East West College of the Healing Arts"),
("East-West University","East-West University"),
("Eastern Arizona College","Eastern Arizona College"),
("Eastern Center for Arts and Technology","Eastern Center for Arts and Technology"),
("Eastern College of Health Vocations-Little Rock","Eastern College of Health Vocations-Little Rock"),
("Eastern College of Health Vocations-New Orleans","Eastern College of Health Vocations-New Orleans"),
("Eastern Connecticut State University","Eastern Connecticut State University"),
("Eastern Florida State College","Eastern Florida State College"),
("Eastern Gateway Community College","Eastern Gateway Community College"),
("Eastern Hills Academy of Hair Design","Eastern Hills Academy of Hair Design"),
("Eastern Idaho Technical College","Eastern Idaho Technical College"),
("Eastern Illinois University","Eastern Illinois University"),
("Eastern International College-Belleville","Eastern International College-Belleville"),
("Eastern International College-Jersey City","Eastern International College-Jersey City"),
("Eastern Iowa Community College District","Eastern Iowa Community College District"),
("Eastern Kentucky University","Eastern Kentucky University"),
("Eastern Maine Community College","Eastern Maine Community College"),
("Eastern Mennonite University","Eastern Mennonite University"),
("Eastern Michigan University","Eastern Michigan University"),
("Eastern Nazarene College","Eastern Nazarene College"),
("Eastern New Mexico University-Main Campus","Eastern New Mexico University-Main Campus"),
("Eastern New Mexico University-Roswell Campus","Eastern New Mexico University-Roswell Campus"),
("Eastern New Mexico University-Ruidoso Campus","Eastern New Mexico University-Ruidoso Campus"),
("Eastern Oklahoma County Technology Center","Eastern Oklahoma County Technology Center"),
("Eastern Oklahoma State College","Eastern Oklahoma State College"),
("Eastern Oregon University","Eastern Oregon University"),
("Eastern School of Acupuncture and Traditional Medicine","Eastern School of Acupuncture and Traditional Medicine"),
("Eastern Shore Community College","Eastern Shore Community College"),
("Eastern Suffolk BOCES-Practical Nursing Program","Eastern Suffolk BOCES-Practical Nursing Program"),
("Eastern University","Eastern University"),
("Eastern Virginia Medical School","Eastern Virginia Medical School"),
("Eastern Washington University","Eastern Washington University"),
("Eastern West Virginia Community and Technical College","Eastern West Virginia Community and Technical College"),
("Eastern Wyoming College","Eastern Wyoming College"),
("Eastfield College","Eastfield College"),
("Eastland-Fairfield Career and Technical Schools","Eastland-Fairfield Career and Technical Schools"),
("Eastwick College-Hackensack","Eastwick College-Hackensack"),
("Eastwick College-Ramsey","Eastwick College-Ramsey"),
("Ecclesia College","Ecclesia College"),
("Eckerd College","Eckerd College"),
("Ecotech Institute","Ecotech Institute"),
("Ecumenical Theological Seminary","Ecumenical Theological Seminary"),
("Eden Theological Seminary","Eden Theological Seminary"),
("Edgecombe Community College","Edgecombe Community College"),
("Edgewood College","Edgewood College"),
("Edinboro University of Pennsylvania","Edinboro University of Pennsylvania"),
("Edison State College","Edison State College"),
("Edison State Community College","Edison State Community College"),
("Edmonds Community College","Edmonds Community College"),
("Education and Technology Institute","Education and Technology Institute"),
("Educational Technical College-Recinto de Bayamon","Educational Technical College-Recinto de Bayamon"),
("Educational Technical College-Recinto de Coamo","Educational Technical College-Recinto de Coamo"),
("Educational Technical College-Recinto de san Sebastian","Educational Technical College-Recinto de san Sebastian"),
("Educators of Beauty College of Cosmetology-La Salle","Educators of Beauty College of Cosmetology-La Salle"),
("Educators of Beauty College of Cosmetology-Rockford","Educators of Beauty College of Cosmetology-Rockford"),
("Educators of Beauty College of Cosmetology-Sterling","Educators of Beauty College of Cosmetology-Sterling"),
("Edward Via College of Osteopathic Medicine","Edward Via College of Osteopathic Medicine"),
("Edward Waters College","Edward Waters College"),
("El Camino College-Compton Center","El Camino College-Compton Center"),
("El Camino Community College District","El Camino Community College District"),
("El Centro College","El Centro College"),
("El Paso Community College","El Paso Community College"),
("Elaine Sterling Institute","Elaine Sterling Institute"),
("Elaine Steven Beauty College","Elaine Steven Beauty College"),
("Eldon Career Center","Eldon Career Center"),
("Elegance International","Elegance International"),
("Elgin Community College","Elgin Community College"),
("Eli Whitney Technical High School","Eli Whitney Technical High School"),
("Elite Academy of Beauty Arts","Elite Academy of Beauty Arts"),
("Elite College of Cosmetology","Elite College of Cosmetology"),
("Elite Cosmetology School","Elite Cosmetology School"),
("Elite School of Cosmetology","Elite School of Cosmetology"),
("Elizabeth City State University","Elizabeth City State University"),
("Elizabeth Grady School of Esthetics and Massage Therapy","Elizabeth Grady School of Esthetics and Massage Therapy"),
("Elizabethtown College School of Continuing and Professional Studies","Elizabethtown College School of Continuing and Professional Studies"),
("Elizabethtown College","Elizabethtown College"),
("Elizabethtown Community and Technical College","Elizabethtown Community and Technical College"),
("Ellsworth Community College","Ellsworth Community College"),
("Elmhurst College","Elmhurst College"),
("Elmira Business Institute","Elmira Business Institute"),
("Elmira College","Elmira College"),
("Elon University","Elon University"),
("Embry-Riddle Aeronautical University-Daytona Beach","Embry-Riddle Aeronautical University-Daytona Beach"),
("Embry-Riddle Aeronautical University-Prescott","Embry-Riddle Aeronautical University-Prescott"),
("Embry-Riddle Aeronautical University-Worldwide","Embry-Riddle Aeronautical University-Worldwide"),
("Emerson College","Emerson College"),
("<NAME> Piano Hospital and Training Center","Emil Fries Piano Hospital and Training Center"),
("Emily Griffith Technical College","Emily Griffith Technical College"),
("Emma's Beauty Academy-Juana Diaz","Emma's Beauty Academy-Juana Diaz"),
("Emma's Beauty Academy-Mayaguez","Emma's Beauty Academy-Mayaguez"),
("Emmanuel Christian Seminary","Emmanuel Christian Seminary"),
("Emmanuel College","Emmanuel College"),
("Emmanuel College","Emmanuel College"),
("Emmaus Bible College","Emmaus Bible College"),
("Emory & Henry College","Emory & Henry College"),
("Emory University","Emory University"),
("Emperor's College of Traditional Oriental Medicine","Emperor's College of Traditional Oriental Medicine"),
("Empire Beauty School-Lehigh Valley","Empire Beauty School-Lehigh Valley"),
("Empire Beauty School-Appleton","Empire Beauty School-Appleton"),
("Empire Beauty School-Arlington Heights","Empire Beauty School-Arlington Heights"),
("Empire Beauty School-Arvada","Empire Beauty School-Arvada"),
("Empire Beauty School-Augusta","Empire Beauty School-Augusta"),
("Empire Beauty School-Aurora","Empire Beauty School-Aurora"),
("Empire Beauty School-Avondale","Empire Beauty School-Avondale"),
("Empire Beauty School-Bloomfield","Empire Beauty School-Bloomfield"),
("Empire Beauty School-Bloomington","Empire Beauty School-Bloomington"),
("Empire Beauty School-Bordentown","Empire Beauty School-Bordentown"),
("Empire Beauty School-Boston","Empire Beauty School-Boston"),
("Empire Beauty School-Brooklyn","Empire Beauty School-Brooklyn"),
("Empire Beauty School-Center City Philadelphia","Empire Beauty School-Center City Philadelphia"),
("Empire Beauty School-Chandler","Empire Beauty School-Chandler"),
("Empire Beauty School-Chenoweth","Empire Beauty School-Chenoweth"),
("Empire Beauty School-Cherry Hill","Empire Beauty School-Cherry Hill"),
("Empire Beauty School-Cincinnati","Empire Beauty School-Cincinnati"),
("Empire Beauty School-Concord","Empire Beauty School-Concord"),
("Empire Beauty School-Dixie","Empire Beauty School-Dixie"),
("Empire Beauty School-Dunwoody","Empire Beauty School-Dunwoody"),
("Empire Beauty School-E Memphis","Empire Beauty School-E Memphis"),
("Empire Beauty School-Eden Prairie","Empire Beauty School-Eden Prairie"),
("Empire Beauty School-Elizabethtown","Empire Beauty School-Elizabethtown"),
("Empire Beauty School-Exton","Empire Beauty School-Exton"),
("Empire Beauty School-Flagstaff","Empire Beauty School-Flagstaff"),
("Empire Beauty School-Florence","Empire Beauty School-Florence"),
("Empire Beauty School-Framingham","Empire Beauty School-Framingham"),
("Empire Beauty School-Glen Burnie","Empire Beauty School-Glen Burnie"),
("Empire Beauty School-Green Bay","Empire Beauty School-Green Bay"),
("Empire Beauty School-Gwinnett","Empire Beauty School-Gwinnett"),
("Empire Beauty School-Hanover Park","Empire Beauty School-Hanover Park"),
("Empire Beauty School-Hanover","Empire Beauty School-Hanover"),
("Empire Beauty School-Harrisburg","Empire Beauty School-Harrisburg"),
("Empire Beauty School-Highland","Empire Beauty School-Highland"),
("Empire Beauty School-Hooksett","Empire Beauty School-Hooksett"),
("Empire Beauty School-Hurstborne","Empire Beauty School-Hurstborne"),
("Empire Beauty School-Hyannis","Empire Beauty School-Hyannis"),
("Empire Beauty School-Indianapolis","Empire Beauty School-Indianapolis"),
("Empire Beauty School-Jackson","Empire Beauty School-Jackson"),
("Empire | |
from django.shortcuts import render, get_object_or_404
from joblistings.models import Job
from jobapplications.models import JobApplication, Resume, CoverLetter, Education, Experience, Ranking
from jobapplications.forms import ApplicationForm, resumeUpload, FilterApplicationForm
from django_sendfile import sendfile
import uuid
from django.core.files.storage import FileSystemStorage
from django.http import HttpResponseRedirect
from ace.constants import USER_TYPE_EMPLOYER, USER_TYPE_CANDIDATE
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponse
from io import BytesIO, StringIO
from PyPDF2 import PdfFileWriter, PdfFileReader
import requests
from ace.constants import FILE_TYPE_RESUME, FILE_TYPE_COVER_LETTER, FILE_TYPE_TRANSCRIPT, FILE_TYPE_OTHER, USER_TYPE_SUPER, USER_TYPE_CANDIDATE, USER_TYPE_EMPLOYER
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.utils.encoding import force_bytes, force_text
from django_sendfile import sendfile
from accounts.models import downloadProtectedFile_token, User, Candidate, Employer, Language, PreferredName
import uuid
from django.db import transaction
from django.db.models import Q
import json as simplejson
from datetime import datetime, timedelta
#u = uuid.uuid4()
#u.hex
# Create your views here.
def add_resume(request, pk=None, *args, **kwargs):
    """Render and process the application form for job *pk*.

    Anonymous users are redirected to the login page (with a return
    path stored in the session), non-candidate users are sent home, and
    candidates who already applied are redirected to their existing
    application's detail page.  On a valid POST the application is saved
    and the user is redirected to the home page; on an invalid POST the
    bound form is re-rendered with its errors.
    """
    if not request.user.is_authenticated:
        # Remember where the user was heading so login can bounce back.
        request.session['redirect'] = request.path
        request.session['warning'] = "Warning: Please login before applying to a job"
        return HttpResponseRedirect('/login')

    if request.user.user_type != USER_TYPE_CANDIDATE:
        request.session['info'] = "Only candidates can access this page"
        return HttpResponseRedirect('/')

    candidate = Candidate.objects.get(user=request.user)
    # Single query instead of the previous count()-then-get() pair;
    # first() also tolerates accidental duplicate applications where
    # get() would raise MultipleObjectsReturned.
    existingApplication = JobApplication.objects.filter(job__pk=pk, candidate=candidate).first()
    if existingApplication is not None:
        request.session['info'] = "You already applied to this job"
        return HttpResponseRedirect('/jobApplicationDetails/' + str(existingApplication.pk) + "/")

    instance = get_object_or_404(Job, pk=pk)
    context = {'job': instance}
    if request.method == 'POST':
        form = ApplicationForm(
            request.POST,
            request.FILES,
            extra_edu_count=request.POST.get('extra_edu_count'),
            extra_exp_count=request.POST.get('extra_exp_count'),
            extra_doc_count=request.POST.get('extra_doc_count'),
        )
        if form.is_valid():
            form.clean()
            form.save(instance, request.user)
            return HttpResponseRedirect('/')
        # invalid form falls through and is re-rendered with errors
    else:
        form = ApplicationForm(extra_edu_count=1, extra_exp_count=1, extra_doc_count=0, user=request.user)

    context['form'] = form
    return render(request, "add-resume.html", context)
def download_test(request, pk):
    """Serve the company image attached to job *pk* through sendfile."""
    job = get_object_or_404(Job, pk=pk)
    return sendfile(request, job.company.image.path)
@transaction.atomic
def browse_job_applications(request, searchString = "", jobId= -1):
    """Dashboard view: list and filter job applications for the current user.

    Visibility depends on user type: super users see every application,
    employers see only non-pending / non-rejected applications for jobs
    they were granted access to, and candidates see only their own.  A
    POSTed FilterApplicationForm narrows the result set; a POST
    containing 'pdf' instead streams one merged PDF of each matching
    application's resume, cover letter and transcript, fetched through
    the one-time protected download URLs.

    NOTE(review): *searchString* is accepted but never used here.
    """
    context = {}
    jobApplications = None
    form = FilterApplicationForm()
    query = Q()
    # NOTE(review): filterClasses / filterHTML locals are never used below.
    filterClasses = []
    filterHTML = []
    # newest first unless 'Oldest First' is selected in the filter form
    sortOrder = '-created_at'
    if not request.user.is_authenticated:
        # Remember the target page so the login view can redirect back.
        request.session['redirect'] = request.path
        request.session['warning'] = "Warning: Please login before applying to a job"
        return HttpResponseRedirect('/login')
    if request.user.user_type == USER_TYPE_SUPER:
        kwargs = {}
        # NOTE(review): the default jobId is -1, not None, so this branch
        # runs even when no job was requested and Job.objects.get(pk=-1)
        # would raise DoesNotExist -- presumably the URLconf passes
        # jobId=None for the "all jobs" case; TODO confirm.
        if jobId != None:
            query = Q(job__pk=jobId)
            context["job"] = Job.objects.get(pk=jobId)
    if request.user.user_type == USER_TYPE_EMPLOYER:
        # Employers only see applications for jobs they have access to,
        # never ones still pending admin review or rejected.
        query = Q(job__jobAccessPermission=Employer.objects.get(user=request.user))
        query &= ~Q(status="Pending Review")
        query &= ~Q(status="Not Approved")
        if jobId != None:
            query &= Q(job__pk=jobId)
            context["job"] = Job.objects.get(pk=jobId)
    if request.user.user_type == USER_TYPE_CANDIDATE:
        # Candidates are restricted to their own applications.
        query = Q(candidate= Candidate.objects.get(user=request.user))
    if (request.method == 'POST'):
        form = FilterApplicationForm(request.POST)
        if 'filter' in request.POST:
            print(request.POST)
            # Echo the selected filters back to the template as JSON so the
            # client-side filter widgets can be re-rendered after submit.
            context['filterClasses'] = simplejson.dumps(form.getSelectedFilterClassAsList())
            context['filterHTML'] = simplejson.dumps(form.getSelectedFilterHTMLAsList())
            # Applying filter value here
            filterSet = form.getSelectedFilterAsSet()
            # NOTE(review): the bare except at the bottom of this block
            # silently swallows any error raised while building the filter
            # (including coding errors) -- consider narrowing it.
            try:
                # Date-range filters, relative to "now" (naive datetime).
                if "Last 24 hours" in filterSet:
                    query &= Q(created_at__gte=datetime.now()-timedelta(days=1))
                if "Last 7 days" in filterSet:
                    query &= Q(created_at__gte=datetime.now()-timedelta(days=7))
                if "Last 14 days" in filterSet:
                    query &= Q(created_at__gte=datetime.now()-timedelta(days=14))
                if "Last month" in filterSet:
                    query &= Q(created_at__gte=datetime.now()-timedelta(days=30))
                if "Last 3 months" in filterSet:
                    query &= Q(created_at__gte=datetime.now()-timedelta(days=90))
                # Free-text fields: substring match, skipped when empty.
                if form["firstName"].value() != None and form["firstName"].value() != "":
                    query &= (Q(firstName__contains= form["firstName"].value()) | Q(preferredName__contains=form["firstName"].value()))
                if form["lastName"].value() != None and form["lastName"].value() != "":
                    query &= Q(lastName__contains= form["lastName"].value())
                if form["email"].value() != None and form["email"].value() != "":
                    query &= Q(candidate__user__email__contains=form["email"].value())
                if form["studentId"].value() != None and form["studentId"].value() != "":
                    query &= Q(candidate__studentID__contains=form["studentId"].value())
                # NOTE(review): this condition checks studentId but filters on
                # program; presumably it should test form["program"].value()
                # for None instead -- TODO confirm.
                if form["studentId"].value() != None and form["program"].value() != "ANY":
                    query &= Q(candidate__program= form["program"].value())
                # GPA bounds; "1.7" / "4.3" are the form defaults meaning "no bound".
                if form["gpa_min"].value() != None and form["gpa_min"].value() != "1.7" :
                    query &= Q(candidate__gpa__gte = float(form["gpa_min"].value()))
                if form["gpa_max"].value() != None and form["gpa_max"].value() != "4.3" :
                    query &= Q(candidate__gpa__lte = float(form["gpa_max"].value()))
                if 'Oldest First' in filterSet:
                    sortOrder = 'created_at'
                # Status filters map UI labels onto stored status values.
                if "Pending Review" in filterSet:
                    query &= Q(status="Pending Review")
                if "Approved" in filterSet:
                    query &= (Q(status= "Submitted") | Q(status="Not Selected"))
                if "Not Approved" in filterSet:
                    query &= Q(status="Not Approved")
                if "Interviewing" in filterSet:
                    query &= (Q(status= "Interviewing") | Q(status="Ranked") | Q(status= "1st"))
                if "Matched" in filterSet:
                    query &= Q(status="Matched")
                if "Not Matched/Closed" in filterSet:
                    query &= (Q(status= "Not Matched") | Q(status="Closed"))
            except:
                pass
    jobApplications = JobApplication.objects.filter(query).order_by(sortOrder)
    context["jobApplications"] = jobApplications
    context["form"] = form
    if (request.method == 'POST'):
        if 'pdf' in request.POST:
            # PDF download request
            response = HttpResponse()
            response['Content-Disposition'] = 'attachment; filename=downloadApplications.pdf'
            writer = PdfFileWriter()
            # Change to https in prod (although django should automatically force https if settings.py is configured corretly in prod)
            base_url = "http://" + str(get_current_site(request).domain) + "/getFile"
            # Issue a fresh one-time download key for this user; it is
            # cleared again after the merged PDF has been assembled.
            User.objects.filter(id=request.user.id).update(protect_file_temp_download_key=str(uuid.uuid4().hex))
            token = downloadProtectedFile_token.make_token(request.user)
            for application in jobApplications:
                uid = urlsafe_base64_encode(force_bytes(request.user.pk))
                candidateId = urlsafe_base64_encode(force_bytes(application.candidate.pk))
                # --- resume pages ---
                fileId = Resume.objects.get(JobApplication=application).id
                fileId = urlsafe_base64_encode(force_bytes(fileId))
                fileType = urlsafe_base64_encode(force_bytes(FILE_TYPE_RESUME))
                url = base_url + "/" + str(uid) + "/" + str(candidateId) + "/"+ str(fileType) + "/" + str(fileId) + "/" + str(token) + "/"
                # Each file is fetched over HTTP through the protected-file
                # endpoint (get_protected_file) rather than read from disk.
                getFile = requests.get(url).content
                memoryFile = BytesIO(getFile)
                pdfFile = PdfFileReader(memoryFile)
                for pageNum in range(pdfFile.getNumPages()):
                    currentPage = pdfFile.getPage(pageNum)
                    #currentPage.mergePage(watermark.getPage(0))
                    writer.addPage(currentPage)
                # --- cover letter pages ---
                fileId = CoverLetter.objects.get(JobApplication=application).id
                fileId = urlsafe_base64_encode(force_bytes(fileId))
                fileType = urlsafe_base64_encode(force_bytes(FILE_TYPE_COVER_LETTER))
                url = base_url + "/" + str(uid) + "/" + str(candidateId) + "/"+ str(fileType) + "/" + str(fileId) + "/" + str(token) + "/"
                getFile = requests.get(url).content
                memoryFile = BytesIO(getFile)
                pdfFile = PdfFileReader(memoryFile)
                for pageNum in range(pdfFile.getNumPages()):
                    currentPage = pdfFile.getPage(pageNum)
                    #currentPage.mergePage(watermark.getPage(0))
                    writer.addPage(currentPage)
                # --- transcript pages ---
                # NOTE(review): fileId still holds the cover-letter id here;
                # the transcript branch of get_protected_file looks the file
                # up by candidateId, so the stale fileId appears to be
                # ignored -- TODO confirm.
                fileType = urlsafe_base64_encode(force_bytes(FILE_TYPE_TRANSCRIPT))
                url = base_url + "/" + str(uid) + "/" + str(candidateId) + "/"+ str(fileType) + "/" + str(fileId) + "/" + str(token) + "/"
                getFile = requests.get(url).content
                memoryFile = BytesIO(getFile)
                pdfFile = PdfFileReader(memoryFile)
                for pageNum in range(pdfFile.getNumPages()):
                    currentPage = pdfFile.getPage(pageNum)
                    #currentPage.mergePage(watermark.getPage(0))
                    writer.addPage(currentPage)
            outputStream = BytesIO()
            writer.write(outputStream)
            response.write(outputStream.getvalue())
            # Invalidate the one-time download key now that we are done.
            User.objects.filter(id=request.user.id).update(protect_file_temp_download_key="")
            return response
    return render(request, "dashboard-manage-applications.html", context)
def view_application_details(request, pk):
    """Show job application *pk* and handle status-change actions.

    Super users can approve/reject applications and start interviews;
    employers can start interviews or mark "Not Selected" on
    applications for jobs they were granted access to; candidates can
    only view their own applications.  One-shot flash messages stored in
    the session are moved into the template context.
    """
    context = {}
    if not request.user.is_authenticated:
        request.session['redirect'] = request.path
        request.session['warning'] = "Warning: Please login before applying to a job"
        return HttpResponseRedirect('/login')
    if request.user.user_type == USER_TYPE_SUPER:
        jobApplication = get_object_or_404(JobApplication, id=pk)
        context = {"jobApplication" : jobApplication}
        if request.method == 'POST':
            if request.POST.get('Approved'):
                jobApplication.status= "Submitted"
                jobApplication.save()
            if request.POST.get('Reject'):
                jobApplication.status= "Not Approved"
                jobApplication.save()
            if request.POST.get('Interview'):
                # Starting an interview creates the Ranking row used by the
                # ranking workflow.
                ranking = Ranking()
                ranking.jobApplication = jobApplication
                ranking.job = jobApplication.job
                ranking.candidate = jobApplication.candidate
                ranking.save()
                jobApplication.status= "Interviewing"
                jobApplication.job.status= "Interviewing"
                # Fix: persist the job's status change -- previously only the
                # application was saved, so the job update was silently lost.
                jobApplication.job.save()
                jobApplication.save()
        if jobApplication.status == "Pending Review" or jobApplication.status== "Not Approved":
            context['showButton'] = True
        if jobApplication.status == "Submitted":
            context['showInterview'] = True
    if request.user.user_type == USER_TYPE_EMPLOYER:
        # Employers may only reach applications on jobs they have access to,
        # excluding ones still pending admin review or rejected.
        query = Q(job__jobAccessPermission = Employer.objects.get(user=request.user))
        query &= ~Q(status="Pending Review")
        query &= ~Q(status="Not Approved")
        query &= Q(id=pk)
        jobApplication = get_object_or_404(JobApplication, query)
        context = {"jobApplication" : jobApplication}
        if request.method == 'POST':
            if request.POST.get('Approved'):
                ranking = Ranking()
                ranking.jobApplication = jobApplication
                ranking.job = jobApplication.job
                ranking.candidate = jobApplication.candidate
                ranking.save()
                jobApplication.status= "Interviewing"
                jobApplication.job.status= "Interviewing"
                # Fix: persist the job status change as well (was lost before).
                jobApplication.job.save()
                jobApplication.save()
            if request.POST.get('Reject'):
                jobApplication.status= "Not Selected"
                jobApplication.save()
        if jobApplication.status == "Submitted" or jobApplication.status== "Not Selected":
            context['showButton'] = True
    if request.user.user_type == USER_TYPE_CANDIDATE:
        jobApplication = get_object_or_404(JobApplication,id=pk, candidate=Candidate.objects.get(user=request.user))
        context = {"jobApplication" : jobApplication}
    # NOTE(review): if user_type is none of the three handled above,
    # jobApplication is unbound and the next line raises NameError.
    educations = Education.objects.filter(JobApplication=jobApplication)
    experience = Experience.objects.filter(JobApplication=jobApplication)
    # Fix: use filter().first() instead of get() -- get() raised DoesNotExist
    # for users without a PreferredName row, which made the guard below
    # unreachable for that case.
    preferredName = PreferredName.objects.filter(user=jobApplication.candidate.user).first()
    context['educations'] = educations
    context['experience'] = experience
    if preferredName:
        context['preferredName'] = preferredName.preferredName
    context['user'] = request.user
    # Move one-shot flash messages from the session into the context.
    for level in ('warning', 'success', 'info', 'danger'):
        if level in request.session:
            context[level] = request.session[level]
            del request.session[level]
    return render(request, "application-details.html", context)
def get_protected_file(request, uid, candidateId, filetype, fileid, token):
    """Serve a protected file identified by base64-encoded URL parts.

    The *uid* and *token* must validate against the one-time
    downloadProtectedFile_token for that user; otherwise an error
    response is returned.  *filetype* selects between resume, cover
    letter and transcript (transcripts are looked up by *candidateId*,
    the other two by *fileid*).
    """
    try:
        uid = force_text(urlsafe_base64_decode(uid))
        user = User.objects.get(pk=uid)
    except(TypeError, ValueError, OverflowError, User.DoesNotExist):
        user = None
    if user is not None and downloadProtectedFile_token.check_token(user, token):
        fileType = force_text(urlsafe_base64_decode(filetype))
        fileId = force_text(urlsafe_base64_decode(fileid))
        candidateId = force_text(urlsafe_base64_decode(candidateId))
        # Fix: initialize filePath so an unrecognized fileType no longer
        # raises UnboundLocalError, and FILE_TYPE_OTHER no longer crashes
        # sendfile() with a None path.
        filePath = None
        if fileType == str(FILE_TYPE_RESUME):
            filePath = Resume.objects.get(id=fileId).resume.path
        elif fileType == str(FILE_TYPE_COVER_LETTER):
            filePath = CoverLetter.objects.get(id=fileId).coverLetter.path
        elif fileType == str(FILE_TYPE_TRANSCRIPT):
            # Transcripts live on the Candidate record, not on the application.
            filePath = Candidate.objects.get(id=candidateId).transcript.path
        if filePath is None:
            # FILE_TYPE_OTHER and unknown types have no file to serve.
            return HttpResponse('Unsupported file type')
        return sendfile(request, filePath)
    else:
        return HttpResponse('Invalid permission token')
def get_protected_file_withAuth(request, fileType, applicationId):
if not request.user.is_authenticated:
request.session['redirect'] = request.path
request.session['warning'] = "Warning: Please login before applying to a job"
return HttpResponseRedirect('/login')
if request.user.user_type == USER_TYPE_SUPER:
if fileType == (FILE_TYPE_RESUME):
fileId = Resume.objects.get(JobApplication__id=applicationId).id
resume = Resume.objects.get(id=fileId).resume
filePath = resume.path
if fileType == (FILE_TYPE_COVER_LETTER):
fileId = CoverLetter.objects.get(JobApplication__id=applicationId).id
coverLetter = CoverLetter.objects.get(id=fileId).coverLetter
filePath = coverLetter.path
if fileType == (FILE_TYPE_TRANSCRIPT):
candidateId = JobApplication.objects.get(id=applicationId).candidate.id
transcript = Candidate.objects.get(id=candidateId).transcript
filePath = transcript.path
if fileType == (FILE_TYPE_OTHER):
filePath = None
return sendfile(request, filePath)
if request.user.user_type == USER_TYPE_EMPLOYER:
jobApplications = JobApplication.objects.filter(job__jobAccessPermission=Employer.objects.get(user=request.user), id=applicationId).count()
if jobApplications == 0:
return HttpResponse('Invalid permission token')
if fileType == (FILE_TYPE_RESUME):
fileId = Resume.objects.get(JobApplication__id=applicationId).id
resume = Resume.objects.get(id=fileId).resume
filePath = resume.path
if fileType == (FILE_TYPE_COVER_LETTER):
fileId = CoverLetter.objects.get(JobApplication__id=applicationId).id
coverLetter = CoverLetter.objects.get(id=fileId).coverLetter
filePath = coverLetter.path
if fileType == (FILE_TYPE_TRANSCRIPT):
candidateId = JobApplication.objects.get(id=applicationId).candidate.id
transcript = Candidate.objects.get(id=candidateId).transcript
filePath = transcript.path
if fileType == (FILE_TYPE_OTHER):
filePath = None
return sendfile(request, filePath)
if request.user.user_type == USER_TYPE_CANDIDATE:
jobApplications = JobApplication.objects.filter(candidate=Candidate.objects.get(user=request.user), id=applicationId).count()
if jobApplications == 0:
return HttpResponse('Invalid permission token')
if fileType == (FILE_TYPE_RESUME):
fileId = | |
import re
import sys
import copy
import random
import datetime
from six import iteritems
from pandajedi.jedicore.MsgWrapper import MsgWrapper
from pandajedi.jedicore.SiteCandidate import SiteCandidate
from pandajedi.jedicore import Interaction
from pandajedi.jedicore import JediCoreUtils
from .JobBrokerBase import JobBrokerBase
from . import AtlasBrokerUtils
from pandaserver.dataservice.DataServiceUtils import select_scope
from pandaserver.taskbuffer import JobUtils
# logger
from pandacommon.pandalogger.PandaLogger import PandaLogger
logger = PandaLogger().getLogger(__name__.split('.')[-1])
APP = 'jedi'
COMPONENT = 'jobbroker'
VO = 'atlas'
# brokerage for ATLAS analysis
class AtlasAnalJobBroker(JobBrokerBase):
    # constructor
    def __init__(self, ddmIF, taskBufferIF):
        """Initialize the analysis job broker.

        :param ddmIF: distributed data management interface (replica and
            quota lookups).
        :param taskBufferIF: task buffer interface (DB queries, config
            values, work queues).
        """
        JobBrokerBase.__init__(self, ddmIF, taskBufferIF)
        # cache: dataset name -> sites where the data is available,
        # filled lazily during brokerage
        self.dataSiteMap = {}
        # human-readable summary lines for one brokerage cycle
        # (reset to a list inside doBrokerage)
        self.summaryList = None
    # wrapper for return
    def sendLogMessage(self, tmpLog):
        """Flush brokerage info to the external logger.

        The bulk upload call is currently disabled (commented out), so
        this only emits a debug marker.
        """
        # send info to logger
        #tmpLog.bulkSendMsg('analy_brokerage')
        tmpLog.debug('sent')
# make summary
def add_summary_message(self, old_list, new_list, message):
if len(old_list) != len(new_list):
red = int(((len(old_list) - len(new_list)) * 100) / len(old_list))
self.summaryList.append('{:>5} -> {:>3} candidates, {:>3}% cut : {}'.format(len(old_list),
len(new_list),
red, message))
# dump summary
def dump_summary(self, tmp_log, final_candidates=None):
tmp_log.info('')
for m in self.summaryList:
tmp_log.info(m)
if not final_candidates:
final_candidates = []
tmp_log.info('the number of final candidates: {}'.format(len(final_candidates)))
tmp_log.info('')
# main
def doBrokerage(self, taskSpec, cloudName, inputChunk, taskParamMap):
# make logger
tmpLog = MsgWrapper(logger,'<jediTaskID={0}>'.format(taskSpec.jediTaskID),
monToken='<jediTaskID={0} {1}>'.format(taskSpec.jediTaskID,
datetime.datetime.utcnow().isoformat('/')))
tmpLog.debug('start')
# return for failure
retFatal = self.SC_FATAL,inputChunk
retTmpError = self.SC_FAILED,inputChunk
# new maxwdir
newMaxwdir = {}
# get primary site candidates
sitePreAssigned = False
siteListPreAssigned = False
excludeList = []
includeList = None
scanSiteList = []
# problematic sites
problematic_sites_dict = {}
# get list of site access
siteAccessList = self.taskBufferIF.listSiteAccess(None, taskSpec.userName)
siteAccessMap = {}
for tmpSiteName,tmpAccess in siteAccessList:
siteAccessMap[tmpSiteName] = tmpAccess
# disable VP for merging and forceStaged
if inputChunk.isMerging or taskSpec.avoid_vp():
useVP = False
else:
useVP = True
# get workQueue
workQueue = self.taskBufferIF.getWorkQueueMap().getQueueWithIDGshare(taskSpec.workQueue_ID, taskSpec.gshare)
# site limitation
if taskSpec.useLimitedSites():
if 'excludedSite' in taskParamMap:
excludeList = taskParamMap['excludedSite']
# str to list for task retry
try:
if not isinstance(excludeList, list):
excludeList = excludeList.split(',')
except Exception:
pass
if 'includedSite' in taskParamMap:
includeList = taskParamMap['includedSite']
# str to list for task retry
if includeList == '':
includeList = None
try:
if not isinstance(includeList, list):
includeList = includeList.split(',')
siteListPreAssigned = True
except Exception:
pass
# loop over all sites
for siteName,tmpSiteSpec in iteritems(self.siteMapper.siteSpecList):
if tmpSiteSpec.type == 'analysis' or tmpSiteSpec.is_grandly_unified():
scanSiteList.append(siteName)
# preassigned
preassignedSite = taskSpec.site
if preassignedSite not in ['',None]:
# site is pre-assigned
if not self.siteMapper.checkSite(preassignedSite):
# check ddm for unknown site
includeList = []
for tmpSiteName in self.get_unified_sites(scanSiteList):
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
scope_input, scope_output = select_scope(tmpSiteSpec, JobUtils.ANALY_PS, JobUtils.ANALY_PS)
if scope_input in tmpSiteSpec.ddm_endpoints_input and \
preassignedSite in tmpSiteSpec.ddm_endpoints_input[scope_input].all:
includeList.append(tmpSiteName)
if not includeList:
includeList = None
tmpLog.info('site={0} is ignored since unknown'.format(preassignedSite))
else:
tmpLog.info('site={0} is converted to {1}'.format(preassignedSite,
','.join(includeList)))
preassignedSite = None
else:
tmpLog.info('site={0} is pre-assigned'.format(preassignedSite))
sitePreAssigned = True
if preassignedSite not in scanSiteList:
scanSiteList.append(preassignedSite)
tmpLog.info('initial {0} candidates'.format(len(scanSiteList)))
# allowed remote access protocol
allowedRemoteProtocol = 'fax'
# MP
if taskSpec.coreCount is not None and taskSpec.coreCount > 1:
# use MCORE only
useMP = 'only'
elif taskSpec.coreCount == 0:
# use MCORE and normal
useMP = 'any'
else:
# not use MCORE
useMP = 'unuse'
# get statistics of failures
timeWindowForFC = self.taskBufferIF.getConfigValue('anal_jobbroker', 'TW_DONE_JOB_STAT', 'jedi', taskSpec.vo)
if timeWindowForFC is None:
timeWindowForFC = 6
# get total job stat
totalJobStat = self.get_task_common('totalJobStat')
if totalJobStat is None:
if taskSpec.workingGroup:
totalJobStat = self.taskBufferIF.countJobsPerTarget_JEDI(taskSpec.workingGroup, False)
else:
totalJobStat = self.taskBufferIF.countJobsPerTarget_JEDI(taskSpec.origUserName, True)
self.set_task_common('totalJobStat', totalJobStat)
# check total to cap
if totalJobStat:
if taskSpec.workingGroup:
gdp_token_jobs = 'CAP_RUNNING_GROUP_JOBS'
gdp_token_cores = 'CAP_RUNNING_GROUP_CORES'
else:
gdp_token_jobs = 'CAP_RUNNING_USER_JOBS'
gdp_token_cores = 'CAP_RUNNING_USER_CORES'
maxNumRunJobs = self.taskBufferIF.getConfigValue('prio_mgr', gdp_token_jobs)
maxNumRunCores = self.taskBufferIF.getConfigValue('prio_mgr', gdp_token_cores)
maxFactor = 2
if maxNumRunJobs:
if totalJobStat['nRunJobs'] > maxNumRunJobs:
tmpLog.error(
'throttle to generate jobs due to too many running jobs {} > {}'.format(
totalJobStat['nRunJobs'],
gdp_token_jobs))
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
return retTmpError
elif totalJobStat['nQueuedJobs'] > maxFactor*maxNumRunJobs:
tmpLog.error(
'throttle to generate jobs due to too many queued jobs {} > {}x{}'.format(
totalJobStat['nQueuedJobs'],
maxFactor,
gdp_token_jobs))
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
return retTmpError
if maxNumRunCores:
if totalJobStat['nRunCores'] > maxNumRunCores:
tmpLog.error(
'throttle to generate jobs due to too many running cores {} > {}'.format(
totalJobStat['nRunCores'],
gdp_token_cores))
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
return retTmpError
elif totalJobStat['nQueuedCores'] > maxFactor*maxNumRunCores:
tmpLog.error(
'throttle to generate jobs due to too many queued cores {} > {}x{}'.format(
totalJobStat['nQueuedCores'],
maxFactor,
gdp_token_cores))
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
return retTmpError
# check global disk quota
if taskSpec.workingGroup:
quota_ok, quota_msg = self.ddmIF.check_quota(taskSpec.workingGroup)
else:
quota_ok, quota_msg = self.ddmIF.check_quota(taskSpec.userName)
if not quota_ok:
tmpLog.error('throttle to generate jobs due to {}'.format(quota_msg))
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
self.sendLogMessage(tmpLog)
return retTmpError
# get failure count
failureCounts = self.get_task_common('failureCounts')
if failureCounts is None:
failureCounts = self.taskBufferIF.getFailureCountsForTask_JEDI(taskSpec.jediTaskID, timeWindowForFC)
self.set_task_common('failureCounts', failureCounts)
# two loops with/without data locality check
scanSiteLists = [(copy.copy(scanSiteList), True)]
if len(inputChunk.getDatasets()) > 0:
nRealDS = 0
for datasetSpec in inputChunk.getDatasets():
if not datasetSpec.isPseudo():
nRealDS += 1
if taskSpec.taskPriority >= 2000:
if inputChunk.isMerging:
scanSiteLists.append((copy.copy(scanSiteList), False))
else:
scanSiteLists = [(copy.copy(scanSiteList), False)]
elif taskSpec.taskPriority > 1000 or nRealDS > 1:
scanSiteLists.append((copy.copy(scanSiteList), False))
retVal = None
checkDataLocality = False
scanSiteWoVP = []
avoidVP = False
summaryList = []
for scanSiteList, checkDataLocality in scanSiteLists:
useUnionLocality = False
self.summaryList = []
self.summaryList.append('===== Brokerage summary =====')
self.summaryList.append('data locality check: {}'.format(checkDataLocality))
self.summaryList.append('the number of initial candidates: {}'.format(len(scanSiteList)))
if checkDataLocality:
tmpLog.debug('!!! look for candidates WITH data locality check')
else:
tmpLog.debug('!!! look for candidates WITHOUT data locality check')
######################################
# selection for data availability
hasDDS = False
dataWeight = {}
ddsList = set()
remoteSourceList = {}
for datasetSpec in inputChunk.getDatasets():
datasetSpec.reset_distributed()
if inputChunk.getDatasets() != [] and checkDataLocality:
oldScanSiteList = copy.copy(scanSiteList)
oldScanUnifiedSiteList = self.get_unified_sites(oldScanSiteList)
for datasetSpec in inputChunk.getDatasets():
datasetName = datasetSpec.datasetName
if datasetName not in self.dataSiteMap:
# get the list of sites where data is available
tmpLog.debug('getting the list of sites where {0} is available'.format(datasetName))
tmpSt,tmpRet = AtlasBrokerUtils.getAnalSitesWithData(self.get_unified_sites(scanSiteList),
self.siteMapper,
self.ddmIF,datasetName)
if tmpSt in [Interaction.JEDITemporaryError,Interaction.JEDITimeoutError]:
tmpLog.error('temporary failed to get the list of sites where data is available, since %s' % tmpRet)
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
# send info to logger
self.sendLogMessage(tmpLog)
return retTmpError
if tmpSt == Interaction.JEDIFatalError:
tmpLog.error('fatal error when getting the list of sites where data is available, since %s' % tmpRet)
taskSpec.setErrDiag(tmpLog.uploadLog(taskSpec.jediTaskID))
# send info to logger
self.sendLogMessage(tmpLog)
return retFatal
# append
self.dataSiteMap[datasetName] = tmpRet
if datasetName.startswith('ddo'):
tmpLog.debug(' {0} sites'.format(len(tmpRet)))
else:
tmpLog.debug(' {0} sites : {1}'.format(len(tmpRet),str(tmpRet)))
# check if distributed
if tmpRet != {}:
isDistributed = True
for tmpMap in tmpRet.values():
for tmpVal in tmpMap.values():
if tmpVal['state'] == 'complete':
isDistributed = False
break
if not isDistributed:
break
if isDistributed or datasetName.endswith('/'):
# check if really distributed
isDistributed = self.ddmIF.isDistributedDataset(datasetName)
if isDistributed or datasetName.endswith('/'):
hasDDS = True
datasetSpec.setDistributed()
tmpLog.debug(' {0} is distributed'.format(datasetName))
ddsList.add(datasetName)
# disable VP since distributed datasets triggers transfers
useVP = False
avoidVP = True
# check if the data is available at somewhere
if self.dataSiteMap[datasetName] == {}:
for tmpSiteName in scanSiteList:
#tmpLog.info(' skip site={0} data is unavailable criteria=-input'.format(tmpSiteName))
pass
tmpLog.error('{0} is unavailable at any site'.format(datasetName))
retVal = retFatal
continue
# get the list of sites where data is available
scanSiteList = None
scanSiteListOnDisk = None
scanSiteListUnion = None
scanSiteListOnDiskUnion = None
scanSiteWoVpUnion = None
normFactor = 0
for datasetName,tmpDataSite in iteritems(self.dataSiteMap):
normFactor += 1
useIncomplete = datasetName in ddsList
# get sites where replica is available
tmpSiteList = AtlasBrokerUtils.getAnalSitesWithDataDisk(tmpDataSite, includeTape=True,
use_incomplete=useIncomplete)
tmpDiskSiteList = AtlasBrokerUtils.getAnalSitesWithDataDisk(tmpDataSite,includeTape=False,
use_vp=useVP,
use_incomplete=useIncomplete)
tmpNonVpSiteList = AtlasBrokerUtils.getAnalSitesWithDataDisk(tmpDataSite, includeTape=True,
use_vp=False,
use_incomplete=useIncomplete)
# get sites which can remotely access source sites
if inputChunk.isMerging or taskSpec.useLocalIO():
# disable remote access for merging
tmpSatelliteSites = {}
elif (not sitePreAssigned) or (sitePreAssigned and preassignedSite not in tmpSiteList):
tmpSatelliteSites = AtlasBrokerUtils.getSatelliteSites(tmpDiskSiteList,
self.taskBufferIF,
self.siteMapper,nSites=50,
protocol=allowedRemoteProtocol)
else:
tmpSatelliteSites = {}
# make weight map for local
for tmpSiteName in tmpSiteList:
if tmpSiteName not in dataWeight:
dataWeight[tmpSiteName] = 0
# give more weight to disk
if tmpSiteName in tmpDiskSiteList:
dataWeight[tmpSiteName] += 1
else:
dataWeight[tmpSiteName] += 0.001
# make weight map for remote
for tmpSiteName,tmpWeightSrcMap in iteritems(tmpSatelliteSites):
# skip since local data is available
if tmpSiteName in tmpSiteList:
continue
tmpSiteSpec = self.siteMapper.getSite(tmpSiteName)
# negative weight for remote access
wRemote = 50.0
if tmpSiteSpec.wansinklimit not in [0,None]:
wRemote /= float(tmpSiteSpec.wansinklimit)
# sum weight
if tmpSiteName not in dataWeight:
dataWeight[tmpSiteName] = float(tmpWeightSrcMap['weight'])/wRemote
else:
dataWeight[tmpSiteName] += float(tmpWeightSrcMap['weight'])/wRemote
# make remote source list
if tmpSiteName not in remoteSourceList:
remoteSourceList[tmpSiteName] = {}
remoteSourceList[tmpSiteName][datasetName] = tmpWeightSrcMap['source']
# first list
if scanSiteList is None:
scanSiteList = []
for tmpSiteName in tmpSiteList + list(tmpSatelliteSites.keys()):
if tmpSiteName not in oldScanUnifiedSiteList:
continue
if tmpSiteName not in scanSiteList:
| |
rot, scale, rms, n))
fp.close()
def table_to_radec(table, output='coords.radec'):
    """Write the sky coordinates of a table to an ASCII file.

    (The previous docstring said "DS9 region file", which was a
    copy-paste from `table_to_regions`; this writes a plain two-column
    coordinate list.)

    Parameters
    ----------
    table : table-like
        Catalog with ``colnames`` and column access.  Coordinates are
        taken from ``X_WORLD`` / ``Y_WORLD`` if present (SExtractor
        convention), otherwise from ``ra`` / ``dec``.

    output : str
        Output filename; written in ``ascii.commented_header`` format,
        overwriting any existing file.
    """
    if 'X_WORLD' in table.colnames:
        rc, dc = 'X_WORLD', 'Y_WORLD'
    else:
        rc, dc = 'ra', 'dec'

    table[rc, dc].write(output, format='ascii.commented_header',
                        overwrite=True)
def table_to_regions(table, output='ds9.reg', comment=None):
    """Make a DS9 region file (fk5 circles) from a table object.

    Parameters
    ----------
    table : table-like
        Catalog with ``colnames`` and column access.  Positions come
        from ``X_WORLD`` / ``Y_WORLD`` if present, else ``ra`` / ``dec``.

    output : str
        Output region filename.

    comment : sequence of str, optional
        Per-row labels appended as DS9 ``text={...}`` attributes.

    Notes
    -----
    The file is now opened with a context manager and only after all
    region strings are built, so the handle is always closed and a
    half-written file is not left behind if formatting fails.
    """
    if 'X_WORLD' in table.colnames:
        rc, dc = 'X_WORLD', 'Y_WORLD'
    else:
        rc, dc = 'ra', 'dec'

    ### GAIA
    if 'solution_id' in table.colnames:
        # Circle radius from quoted position errors (mas -> arcsec),
        # floored at 0.1"
        e = np.sqrt(table['ra_error']**2+table['dec_error']**2)/1000.
        e = np.maximum(e, 0.1)
    else:
        # Fixed 0.5" radius when no uncertainties are available
        e = np.ones(len(table))*0.5

    lines = ['circle({0:.7f}, {1:.7f}, {2:.3f}")\n'.format(table[rc][i],
                                                           table[dc][i], e[i])
             for i in range(len(table))]

    if comment is not None:
        for i in range(len(table)):
            lines[i] = '{0} # text={{{1}}}\n'.format(lines[i].strip(), comment[i])

    with open(output, 'w') as fp:
        fp.write('fk5\n')
        fp.writelines(lines)
# Default output-catalog columns requested from SExtractor
# (position, shape, photometry, background and quality flags).
SEXTRACTOR_DEFAULT_PARAMS = ["NUMBER", "X_IMAGE", "Y_IMAGE", "X_WORLD",
                             "Y_WORLD", "A_IMAGE", "B_IMAGE", "THETA_IMAGE",
                             "MAG_AUTO", "MAGERR_AUTO", "FLUX_AUTO", "FLUXERR_AUTO",
                             "FLUX_RADIUS", "BACKGROUND", "FLAGS"]

# Aperture *Diameters*, in pixels
SEXTRACTOR_PHOT_APERTURES = "6, 8.335, 16.337, 20"
# Same apertures converted to angular diameters assuming a
# 0.06 arcsec/pixel scale
SEXTRACTOR_PHOT_APERTURES_ARCSEC = [float(ap)*0.06*u.arcsec for ap in SEXTRACTOR_PHOT_APERTURES.split(',')]

# Detection / deblending configuration -- presumably matching the
# 3D-HST survey SExtractor setup (TODO confirm); note the hard-coded
# local path to the convolution kernel file.
SEXTRACTOR_CONFIG_3DHST = {'DETECT_MINAREA':14, 'DEBLEND_NTHRESH':32, 'DEBLEND_MINCONT':0.005, 'FILTER_NAME':'/usr/local/share/sextractor/gauss_3.0_7x7.conv', 'FILTER':'Y'}

# 7x7 Gaussian detection kernel, transcribed from
# /usr/local/share/sextractor/gauss_3.0_7x7.conv
GAUSS_3_7x7 = np.array(
[[ 0.004963, 0.021388, 0.051328, 0.068707, 0.051328, 0.021388, 0.004963],
[ 0.021388, 0.092163, 0.221178, 0.296069, 0.221178, 0.092163, 0.021388],
[ 0.051328, 0.221178, 0.530797, 0.710525, 0.530797, 0.221178, 0.051328],
[ 0.068707, 0.296069, 0.710525, 0.951108, 0.710525, 0.296069, 0.068707],
[ 0.051328, 0.221178, 0.530797, 0.710525, 0.530797, 0.221178, 0.051328],
[ 0.021388, 0.092163, 0.221178, 0.296069, 0.221178, 0.092163, 0.021388],
[ 0.004963, 0.021388, 0.051328, 0.068707, 0.051328, 0.021388, 0.004963]])

# Default source-detection keyword arguments for sep.extract,
# using the Gaussian kernel above as a convolution filter.
SEP_DETECT_PARAMS = {'minarea':5, 'filter_kernel':GAUSS_3_7x7,
                    'filter_type':'conv', 'clean':True, 'clean_param':1,
                    'deblend_nthresh':32, 'deblend_cont':0.005}
def make_SEP_FLT_catalog(flt_file, ext=1, **kwargs):
    """Run SEP source detection on one extension of an FLT-style FITS file.

    Reads the SCI/ERR/DQ arrays of extension version ``ext``, subtracts
    the ``MDRIZSKY`` value found in the SCI header from the science
    array, masks all pixels with a nonzero DQ flag, and forwards the
    arrays to `make_SEP_catalog_from_arrays`.

    Parameters
    ----------
    flt_file : str
        FITS filename with SCI, ERR and DQ extensions.
    ext : int
        Extension version, i.e. ``('SCI', ext)``.
    kwargs : dict
        Passed through to `make_SEP_catalog_from_arrays`.

    Returns
    -------
    tab, seg
        Catalog table and segmentation map (``seg`` may be None),
        as returned by `make_SEP_catalog_from_arrays`.
    """
    import astropy.io.fits as pyfits
    import astropy.wcs as pywcs

    im = pyfits.open(flt_file)
    # Sky-subtracted science array; assumes MDRIZSKY is present in the
    # SCI header (AstroDrizzle product) -- TODO confirm for raw FLTs
    sci = im['SCI',ext].data - im['SCI',ext].header['MDRIZSKY']
    err = im['ERR',ext].data
    mask = im['DQ',ext].data > 0
    # NOTE(review): `im` is never closed; the data arrays may be
    # memmap-backed by the open file, so closing earlier could
    # invalidate them.  WCS keeps a reference via fobj.
    wcs = pywcs.WCS(im['SCI',ext].header, fobj=im)
    tab, seg = make_SEP_catalog_from_arrays(sci, err, mask, wcs=wcs, **kwargs)
    return tab, seg
def make_SEP_catalog_from_arrays(sci, err, mask, wcs=None, threshold=2., ZP=25, get_background=True, detection_params=SEP_DETECT_PARAMS, segmentation_map=False):
    """Extract a source catalog from science/error arrays with SEP.

    Parameters
    ----------
    sci, err : 2D ndarray
        Science and uncertainty images.
    mask : 2D bool ndarray
        Pixels to exclude from detection.
    wcs : `~astropy.wcs.WCS`, optional
        If given, ``ra``/``dec`` (and ``x_world``/``y_world``) columns
        are added to the catalog (1-indexed pixel origin).
    threshold : float
        Detection threshold for `sep.extract`.
    ZP : float
        AB zeropoint.  Currently unused here; kept for API
        compatibility with `make_SEP_catalog`.
    get_background : bool
        Unused; kept so existing keyword callers keep working.
    detection_params : dict
        Extra keyword arguments forwarded to `sep.extract`.
    segmentation_map : bool
        If True, also compute and return the segmentation image.

    Returns
    -------
    tab : `utils.GTable`
        Detected-source catalog.
    seg : 2D int ndarray or None
        Segmentation map, or None unless ``segmentation_map=True``.
    """
    import astropy.units as u
    import sep

    # Byteswap non-float32 inputs -- presumably to get big-endian FITS
    # data into the native byte order SEP requires (TODO: the dtype
    # check is only a proxy for byte order).
    if sci.dtype != np.float32:
        sci_data = sci.byteswap().newbyteorder()
    else:
        sci_data = sci

    if err.dtype != np.float32:
        err_data = err.byteswap().newbyteorder()
    else:
        err_data = err

    if segmentation_map:
        objects, seg = sep.extract(sci_data, threshold, err=err_data,
                                   mask=mask, segmentation_map=True,
                                   **detection_params)
    else:
        objects = sep.extract(sci_data, threshold, err=err_data,
                              mask=mask, segmentation_map=False,
                              **detection_params)
        seg = None

    tab = utils.GTable(objects)

    if wcs is not None:
        tab['ra'], tab['dec'] = wcs.all_pix2world(tab['x'], tab['y'], 1)
        tab['ra'].unit = u.deg
        tab['dec'].unit = u.deg
        tab['x_world'], tab['y_world'] = tab['ra'], tab['dec']

    return tab, seg
def make_SEP_catalog(root='',threshold=2., get_background=True,
bkg_only=False,
bkg_params={'bw':32, 'bh':32, 'fw':3, 'fh':3},
verbose=True, sci=None, wht=None,
phot_apertures=SEXTRACTOR_PHOT_APERTURES,
rescale_weight=True,
column_case=str.upper, save_to_fits=True,
source_xy=None, autoparams=[2.5, 3.5], mask_kron=False,
max_total_corr=2, err_scale=-np.inf,
detection_params = SEP_DETECT_PARAMS, bkg_mask=None,
pixel_scale=0.06,
**kwargs):
"""Make a catalog from drizzle products using the SEP implementation of SExtractor
phot_apertures are aperture *diameters*, in pixels.
"""
import copy
import astropy.units as u
import sep
if sci is not None:
drz_file = sci
else:
drz_file = glob.glob('{0}_dr[zc]_sci.fits'.format(root))[0]
im = pyfits.open(drz_file)
## Get AB zeropoint
if 'PHOTFNU' in im[0].header:
ZP = -2.5*np.log10(im[0].header['PHOTFNU'])+8.90
elif 'PHOTFLAM' in im[0].header:
ZP = (-2.5*np.log10(im[0].header['PHOTFLAM']) - 21.10 -
5*np.log10(im[0].header['PHOTPLAM']) + 18.6921)
elif 'FILTER' in im[0].header:
fi = im[0].header['FILTER'].upper()
if fi in model.photflam_list:
ZP = (-2.5*np.log10(model.photflam_list[fi]) - 21.10 -
5*np.log10(model.photplam_list[fi]) + 18.6921)
else:
print('Couldn\'t find PHOTFNU or PHOTPLAM/PHOTFLAM keywords, use ZP=25')
ZP = 25
else:
print('Couldn\'t find FILTER, PHOTFNU or PHOTPLAM/PHOTFLAM keywords, use ZP=25')
ZP = 25
if verbose:
print('Image AB zeropoint: {0:.3f}'.format(ZP))
# Scale fluxes to mico-Jy
uJy_to_dn = 1/(3631*1e6*10**(-0.4*ZP))
if wht is not None:
weight_file = wht
else:
weight_file = drz_file.replace('_sci.fits', '_wht.fits').replace('_drz.fits', '_wht.fits')
if (weight_file == drz_file) | (not os.path.exists(weight_file)):
WEIGHT_TYPE = "NONE"
weight_file = None
else:
WEIGHT_TYPE = "MAP_WEIGHT"
drz_im = pyfits.open(drz_file)
data = drz_im[0].data.byteswap().newbyteorder()
try:
wcs = pywcs.WCS(drz_im[0].header)
wcs_header = utils.to_header(wcs)
pixel_scale = utils.get_wcs_pscale(wcs)
except:
wcs = None
wcs_header = drz_im[0].header.copy()
if isinstance(phot_apertures, str):
apertures = np.cast[float](phot_apertures.replace(',','').split())
else:
apertures = []
for ap in phot_apertures:
if hasattr(ap, 'unit'):
apertures.append(ap.to(u.arcsec).value/pixel_scale)
else:
apertures.append(ap)
if weight_file is not None:
wht_im = pyfits.open(weight_file)
wht_data = wht_im[0].data.byteswap().newbyteorder()
err = 1/np.sqrt(wht_data)
del(wht_data)
err[~np.isfinite(err)] = 0
mask = (err == 0)
else:
mask = (data == 0)
err = None
data_mask = np.cast[data.dtype](mask)
if get_background | (err_scale < 0):
if bkg_mask is not None:
bkg = sep.Background(data, mask=mask | bkg_mask, **bkg_params)
else:
bkg = sep.Background(data, mask=mask, **bkg_params)
bkg_data = bkg.back()
if bkg_only:
return bkg_data
pyfits.writeto('{0}_bkg.fits'.format(root), data=bkg_data,
header=wcs_header, overwrite=True)
if err is None:
err = bkg.rms()
ratio = bkg.rms()/err
if err_scale == -np.inf:
err_scale = np.median(ratio[(~mask) & np.isfinite(ratio)])
else:
# Just return the error scale
if err_scale < 0:
xerr_scale = np.median(ratio[(~mask) & np.isfinite(ratio)])
return xerr_scale
else:
if err_scale is None:
err_scale = 1.
if not get_background:
bkg_data = 0.
if verbose:
print('SEP: err_scale={:.3f}'.format(err_scale))
if rescale_weight:
err *= err_scale
#mask = None
if source_xy is None:
### Run the detection
if verbose:
print(' SEP: Extract...')
if get_background:
objects, seg = sep.extract(data - bkg_data, threshold, err=err,
mask=mask, segmentation_map=True,
**detection_params)
else:
objects, seg = sep.extract(data, threshold, err=err,
mask=mask, segmentation_map=True,
**detection_params)
if verbose:
print(' Done.')
tab = utils.GTable(objects)
# make one indexed like SExtractor
tab['x'] += 1
tab['y'] += 1
# ID
tab['number'] = np.arange(len(tab), dtype=np.int32)+1
tab['theta'] = np.clip(tab['theta'], -np.pi/2, np.pi/2)
for c in ['a','b','x','y','theta']:
tab = tab[np.isfinite(tab[c])]
## Segmentation
seg[mask] = 0
pyfits.writeto('{0}_seg.fits'.format(root), data=seg,
header=wcs_header, overwrite=True)
# WCS coordinates
if wcs is not None:
tab['ra'], tab['dec'] = wcs.all_pix2world(tab['x'], tab['y'], 1)
tab['ra'].unit = u.deg
tab['dec'].unit = u.deg
tab['x_world'], tab['y_world'] = tab['ra'], tab['dec']
if 'minarea' in detection_params:
tab.meta['MINAREA'] = (detection_params['minarea'],
'Minimum source area in pixels')
else:
tab.meta['MINAREA'] = (5, 'Minimum source area in pixels')
if 'clean' in detection_params:
tab.meta['CLEAN'] = (detection_params['clean'],
'Detection cleaning')
else:
tab.meta['CLEAN'] = (True, 'Detection cleaning')
if 'deblend_cont' in detection_params:
tab.meta['DEBCONT'] = (detection_params['deblend_cont'],
'Deblending contrast ratio')
else:
tab.meta['DEBCONT'] = (0.005, 'Deblending contrast ratio')
if 'deblend_nthresh' in detection_params:
tab.meta['DEBTHRSH'] = (detection_params['deblend_nthresh'],
'Number of deblending thresholds')
else:
tab.meta['DEBTHRSH'] = (32, 'Number of deblending thresholds')
if 'filter_type' in detection_params:
tab.meta['FILTER_TYPE'] = (detection_params['filter_type'],
'Type of filter applied, conv or weight')
else:
tab.meta['FILTER_TYPE'] = ('conv',
'Type of filter applied, conv or weight')
tab.meta['THRESHOLD'] = (threshold, 'Detection threshold')
## FLUX_AUTO
# https://sep.readthedocs.io/en/v1.0.x/apertures.html#equivalent-of-flux-auto-e-g-mag-auto-in-source-extractor
kronrad, krflag = sep.kron_radius(data - bkg_data,
tab['x']-1, tab['y']-1,
tab['a'], tab['b'], tab['theta'], 6.0)
#kronrad *= 2.5
kronrad *= autoparams[0]
kronrad[~np.isfinite(kronrad)] = autoparams[1]
kronrad = np.maximum(kronrad, autoparams[1])
try:
kron_out = sep.sum_ellipse(data - bkg_data,
tab['x']-1, tab['y']-1,
tab['a'], tab['b'],
tab['theta'],
kronrad, subpix=5, err=err)
except:
kron_out=None
print(tab['theta'].min(), tab['theta'].max())
#except:
# kron_out = sep.sum_circle(data - bkg_data,
# tab['x']-1, tab['y']-1,
# kronrad, subpix=5, err=err)
kron_flux, kron_fluxerr, kron_flag = kron_out
kron_flux_flag = kron_flag
## By object
# kronrad = tab['x']*1.
# krflag = kronrad*1.
if mask_kron:
if mask_kron*1 == 1:
# Only flagged objects
keep = (tab['flag'] & 1) > 0
else:
keep = tab['flag'] > -1
print('Manual mask for Kron radius/flux')
for i in range(len(tab)):
#print(keep[i], tab['flag'][i], mask_kron*1)
if not keep[i]:
continue
id = tab['number'][i]
#print('Kron ',id)
mask = (seg > 0) & (seg != id)
kr, krflag[i] = sep.kron_radius(data - bkg_data,
tab['x'][i]-1, tab['y'][i]-1,
tab['a'][i], tab['b'][i],
tab['theta'][i], 6.0, mask=mask)
kronrad[i] = np.maximum(kr*autoparams[0], autoparams[1])
out = sep.sum_ellipse(data - bkg_data,
tab['x'][i]-1, tab['y'][i]-1,
tab['a'][i], tab['b'][i],
tab['theta'][i],
kronrad[i], subpix=5, mask=mask,
err=err)
kron_flux[i], kron_fluxerr[i], kron_flux_flag[i] = out
# Minimum radius = 3.5, PHOT_AUTOPARAMS 2.5, 3.5
# r_min = autoparams[1] #3.5
# #use_circle = kronrad * np.sqrt(tab['a'] * tab['b']) < r_min
# use_circle = kronrad < r_min
# kron_out = sep.sum_ellipse(data - bkg_data,
# tab['x'][use_circle]-1,
# tab['y'][use_circle]-1,
# tab['a'][use_circle], tab['b'][use_circle],
# tab['theta'][use_circle],
# r_min, subpix=5)
#
# cflux, cfluxerr, cflag = kron_out
# kron_flux_flag[use_circle] = cflag
# cflux, cfluxerr, cflag = sep.sum_circle(data - bkg_data,
# tab['x'][use_circle]-1,
# tab['y'][use_circle]-1,
# autoparams[0]*r_min, subpix=5)
# kron_flux[use_circle] = cflux
# kron_fluxerr[use_circle] = cfluxerr
# kronrad[use_circle] = r_min
tab['flux_auto'] = kron_flux/uJy_to_dn*u.uJy
tab['fluxerr_auto'] = kron_fluxerr/uJy_to_dn*u.uJy
if get_background:
kron_out = sep.sum_ellipse(bkg_data, tab['x']-1, | |
Inventory Adjustments """
if r.representation == "html" and r.name == "adj":
record = r.record
if record:
T = current.T
tabs = [(T("Edit Details"), None),
(T("Items"), "adj_item"),
(T("Photos"), "image"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(
TR(TH("%s: " % table.adjuster_id.label),
table.adjuster_id.represent(record.adjuster_id),
TH("%s: " % table.adjustment_date.label),
table.adjustment_date.represent(record.adjustment_date),
),
TR(TH("%s: " % table.site_id.label),
table.site_id.represent(record.site_id),
TH("%s: " % table.category.label),
table.category.represent(record.category),
),
))
if record.status == 0: # In process
if current.auth.s3_has_permission("update", "inv_adj",
record_id = record.id,
):
# aitable = current.s3db.inv_adj_item
# query = (aitable.adj_id == record.id) & \
# (aitable.new_quantity == None)
# row = current.db(query).select(aitable.id,
# limitby = (0, 1),
# ).first()
# if row == None:
close_btn = A(T("Complete Adjustment"),
_href = URL(c = "inv",
f = "adj",
args = [record.id,
"close",
]
),
_id = "adj-close",
_class = "action-btn",
)
# Handle Confirmation
# Switch to POST
s3 = current.response.s3
if s3.debug:
s3.scripts.append("/%s/static/scripts/S3/s3.inv_adj_rheader.js" % r.application)
else:
s3.scripts.append("/%s/static/scripts/S3/s3.inv_adj_rheader.min.js" % r.application)
s3.js_global.append('''i18n.adj_close_confirm="%s"''' % \
T("Do you want to complete & close this adjustment?"))
rheader.append(close_btn)
rheader.append(rheader_tabs)
# else:
# msg = T("You need to check all the revised quantities before you can close this adjustment")
# rfooter.append(SPAN(msg))
return rheader
return None
# =============================================================================
def inv_commit_all(r, **attr):
    """
        Custom Method to commit to a Request
        - creates a commit with commit_items for each req_item
        - called via POST from inv_send_rheader
        - called via JSON method to reduce request overheads

        Returns a JSON object with a confirmation message and the URL
        ("tree") the client should redirect to.
    """

    if r.http != "POST":
        r.error(405, current.ERROR.BAD_METHOD,
                next = URL(),
                )

    T = current.T

    req_id = r.id
    if not req_id:
        r.error(405, "Can only commit to a single request")

    auth = current.auth
    s3db = current.s3db
    table = s3db.inv_commit
    if not auth.s3_has_permission("create", table):
        r.unauthorised()

    db = current.db

    # Check if there is an existing Commitment
    exists = db(table.req_id == req_id).select(table.id,
                                               limitby = (0, 1),
                                               ).first()
    if exists:
        # Browse existing commitments
        error = T("Some items have already been committed")
        current.session.error = error
        r.error(409, error,
                tree = URL(args = [req_id, "commit"]),
                )

    # Create the commitment
    cid = table.insert(req_id = req_id)

    # Copy each Request Item into the Commitment
    ritable = s3db.inv_req_item
    items = db(ritable.req_id == req_id).select(ritable.id,
                                                ritable.item_pack_id,
                                                ritable.quantity,
                                                ritable.comments,
                                                )
    if items:
        citable = s3db.inv_commit_item
        insert = citable.insert
        for item in items:
            req_item_id = item.id
            quantity = item.quantity
            insert(commit_id = cid,
                   req_item_id = req_item_id,
                   item_pack_id = item.item_pack_id,
                   quantity = quantity,
                   comments = item.comments,
                   )
            # Mark Item in the Request as Committed
            db(ritable.id == req_item_id).update(quantity_commit = quantity)

    # Mark Request as Committed
    db(s3db.inv_req.id == req_id).update(commit_status = REQ_STATUS_COMPLETE)

    # Where to send the client next depends on how we were invoked
    if "send" in r.args:
        message = ""
        url = URL(f = "commit",
                  args = [cid, "send"],
                  )
    elif "assign" in r.args:
        message = ""
        url = URL(f = "commit",
                  args = [cid, "assign"],
                  )
    else:
        message = T("You have committed to all items in this Request. Please check that all details are correct and update as-required.")
        current.session.confirmation = message
        url = URL(c = "inv",
                  f = "commit",
                  args = [cid],
                  )

    current.response.headers["Content-Type"] = "application/json"
    return json.dumps({"message": s3_str(message),
                       "tree": url,
                       }, separators=SEPARATORS)
# =============================================================================
def inv_commit_send(r, **attr):
    """
        Create a Shipment containing all Items in a Commitment
        - called via POST from inv_send_rheader
        - called via JSON method to reduce request overheads

        Returns a JSON object with a confirmation message and the URL
        ("tree") of the new Shipment.

        @ToDo: inv_commit_all
    """

    if r.http != "POST":
        r.error(405, current.ERROR.BAD_METHOD,
                next = URL(),
                )

    T = current.T

    commit_id = r.id
    if not commit_id:
        r.error(405, "Can only create a shipment from a single commit.",
                tree = URL(),
                )

    s3db = current.s3db

    stable = s3db.inv_send
    if not current.auth.s3_has_permission("create", stable):
        r.unauthorised()

    record = r.record
    req_id = record.req_id

    db = current.db
    req_table = db.inv_req
    rim_table = db.inv_req_item
    cim_table = db.inv_commit_item

    req_record = db(req_table.id == req_id).select(req_table.requester_id,
                                                   req_table.site_id,
                                                   #req_table.req_ref, # Only used for External Requests
                                                   limitby = (0, 1),
                                                   ).first()

    # @ToDo: Identify if we have stock items which match the commit items
    # If we have a single match per item then proceed automatically (as-now) & then decrement the stock quantity
    # If we have no match then warn the user & ask if they should proceed anyway
    # If we have mulitple matches then provide a UI to allow the user to select which stock items to use

    # Create an inv_send and link to the commit
    form_vars = Storage(sender_id = record.committer_id,
                        site_id = record.site_id,
                        recipient_id = req_record.requester_id,
                        to_site_id = req_record.site_id,
                        #req_ref = req_record.req_ref,
                        status = 0,
                        )
    send_id = stable.insert(**form_vars)
    form_vars.id = send_id
    inv_send_onaccept(Storage(vars = form_vars))

    s3db.inv_send_req.insert(send_id = send_id,
                             req_id = req_id,
                             )

    # Get all of the committed items
    query = (cim_table.commit_id == commit_id) & \
            (cim_table.req_item_id == rim_table.id)
    rows = db(query).select(rim_table.id,
                            rim_table.item_id,
                            rim_table.item_pack_id,
                            rim_table.currency,
                            rim_table.quantity,
                            rim_table.quantity_transit,
                            rim_table.quantity_fulfil,
                            cim_table.quantity,
                            )

    # Create inv_track_items for each commit item
    track_org_id = record.organisation_id
    insert = s3db.inv_track_item.insert
    for row in rows:
        rim = row.inv_req_item
        # Now done as a VirtualField instead (looks better & updates closer to real-time, so less of a race condition)
        #quantity_shipped = max(rim.quantity_transit, rim.quantity_fulfil)
        #quantity_needed = rim.quantity - quantity_shipped
        insert(req_item_id = rim.id,
               # Bugfix: was the undefined name `organisation_id`,
               # which raised NameError on the first iteration
               track_org_id = track_org_id,
               send_id = send_id,
               status = 1,
               item_id = rim.item_id,
               item_pack_id = rim.item_pack_id,
               currency = rim.currency,
               #req_quantity = quantity_needed,
               quantity = row.inv_commit_item.quantity,
               recv_quantity = row.inv_commit_item.quantity,
               )

    message = T("Shipment created")
    current.session.confirmation = message

    # Redirect to inv_send for the send id just created
    current.response.headers["Content-Type"] = "application/json"
    return json.dumps({"message": s3_str(message),
                       "tree": URL(c = "inv",
                                   f = "send",
                                   args = [send_id],
                                   ),
                       }, separators=SEPARATORS)
# =============================================================================
def inv_gift_certificate(r, **attr):
"""
Generate a Gift Certificate for an Outbound Shipment.
This is part of Humanitarian Logistics when sending goods across borders
so as not to incur import duties.
Gift Certificate should be readable to the Country of Destination
- we default to English, with an option for a 2nd language to be added.
This is exported in XLS format to allow modification before use.
"""
from s3.codecs.xls import S3XLS
try:
import xlwt
except ImportError:
r.error(503, S3XLS.ERROR.XLWT_ERROR)
# Extract the Data
send_id = r.id
record = r.record
send_ref = record.send_ref
site_id = record.site_id
to_site_id = record.to_site_id
db = current.db
s3db = current.s3db
# Items
table = s3db.inv_track_item
itable = s3db.supply_item
#ptable = s3db.supply_item_pack
query = (table.send_id == send_id) & \
(table.item_id == itable.id)# & \
#(table.item_pack_id == ptable.id)
items = db(query).select(table.quantity,
table.pack_value,
table.currency,
#itable.code,
itable.name,
#ptable.name,
)
# Destination
stable = s3db.org_site
gtable = s3db.gis_location
query = (stable.site_id == to_site_id) & \
(stable.location_id == gtable.id)
location = db(query).select(gtable.id,
gtable.L0,
limitby = (0, 1),
).first()
country = location.L0
fr = "fr" in current.deployment_settings.get_L10n_languages_by_country(country)
# Organisations
otable = s3db.org_organisation
query = (stable.site_id.belongs((to_site_id, site_id))) & \
(stable.organisation_id == otable.id)
fields = [stable.site_id,
otable.id,
otable.root_organisation,
otable.name,
otable.logo,
]
if fr:
ontable = s3db.org_organisation_name
fields.append(ontable.name_l10n)
left = ontable.on((ontable.organisation_id == otable.id) & \
(ontable.language == "fr"))
else:
left = None
orgs = db(query).select(*fields,
left = left,
limitby = (0, 2)
)
for row in orgs:
if row["org_site.site_id"] == site_id:
# Sender Org
if fr:
org_name = row["org_organisation_name.name_l10n"]
org = row["org_organisation"]
if not org_name:
org_name = org.name
else:
org = row["org_organisation"]
org_name = org.name
if org.id == org.root_organisation:
branch = None
else:
branch = org.name
# Lookup Root Org
fields = [otable.name,
otable.logo,
]
if fr:
fields.append(ontable.name_l10n)
org = db(otable.id == org.root_organisation).select(*fields,
left = left,
limitby = (0, 1)
).first()
if fr:
org_name = org["org_organisation_name.name_l10n"]
org = org["org_organisation"]
if not org_name:
org_name = org.name
else:
org_name = org.name
else:
# Recipient Org
if fr:
dest_org_name = row["org_organisation_name.name_l10n"]
dest_org = row["org_organisation"]
if not dest_org_name:
dest_org_name = dest_org.name
else:
dest_org = row["org_organisation"]
dest_org_name = dest_org.name
if dest_org.id != dest_org.root_organisation:
# Lookup Root Org
fields = [otable.name,
]
if fr:
fields.append(ontable.name_l10n)
dest_org = db(otable.id == dest_org.root_organisation).select(*fields,
left = left,
limitby = (0, 1)
).first()
if fr:
dest_org_name = dest_org["org_organisation_name.name_l10n"]
dest_org = dest_org["org_organisation"]
if not dest_org_name:
dest_org_name = dest_org.name
else:
dest_org_name = dest_org.name
# Represent the Data
from .org import org_SiteRepresent
destination = org_SiteRepresent(show_type = False)(to_site_id)
from .gis import gis_LocationRepresent
address = gis_LocationRepresent(show_level = False)(location.id)
recipient_id = record.recipient_id
if recipient_id:
from .pr import pr_PersonRepresent
recipient = pr_PersonRepresent(truncate = False)(recipient_id)
else:
recipient = None
T = current.T
if | |
#! /usr/bin/python
#
# Copyright (c) 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
import logging
import os
#FIXME there is a circular import here because compile.py imports runtime.py
import compile
import unify
class Tracer(object):
    """Tracks which table names have tracing enabled and emits debug
    log lines for them ('*' traces everything).
    """
    def __init__(self):
        # tables (or the wildcard '*') for which tracing was requested
        self.expressions = []

    def trace(self, table):
        """Enable tracing for TABLE."""
        self.expressions.append(table)

    def is_traced(self, table):
        """Return True if TABLE (or the wildcard) is being traced."""
        if table in self.expressions:
            return True
        return '*' in self.expressions

    def log(self, table, msg, depth=0):
        """Log MSG at debug level, indented by DEPTH, if TABLE is traced."""
        if not self.is_traced(table):
            return
        indent = "| " * depth
        logging.debug("{}{}".format(indent, msg))
class CongressRuntime (Exception):
    """Exception class for runtime errors in this module.

    NOTE(review): not raised anywhere in the visible code -- presumably
    used by other parts of the engine.
    """
    pass
class ExecutionLogger(object):
    """Logger look-alike that buffers messages in memory.

    All severity methods behave identically: each appends its message
    to an internal list, which can be dumped with content() or reset
    with empty().
    """
    def __init__(self):
        self.messages = []

    def _record(self, msg):
        # all severities funnel through here
        self.messages.append(msg)

    def debug(self, msg):
        self._record(msg)

    def info(self, msg):
        self._record(msg)

    def warn(self, msg):
        self._record(msg)

    def error(self, msg):
        self._record(msg)

    def critical(self, msg):
        self._record(msg)

    def content(self):
        """Return all buffered messages joined by newlines."""
        return '\n'.join(self.messages)

    def empty(self):
        """Discard all buffered messages."""
        self.messages = []
##############################################################################
## Events
##############################################################################
class EventQueue(object):
    """FIFO queue of events, backed by a deque."""
    def __init__(self):
        self.queue = collections.deque()

    def enqueue(self, event):
        """Append EVENT at the tail of the queue."""
        self.queue.append(event)

    def dequeue(self):
        """Remove and return the event at the head of the queue."""
        return self.queue.popleft()

    def __len__(self):
        return len(self.queue)

    def __str__(self):
        contents = ",".join(str(item) for item in self.queue)
        return "[" + contents + "]"
class Event(object):
    """A single insert or delete request for a formula, optionally
    carrying the proofs that justify it and the theory it targets.
    """
    def __init__(self, formula=None, insert=True, proofs=None, target=None):
        if proofs is None:
            proofs = []
        self.formula = formula
        self.proofs = proofs
        self.insert = insert
        self.target = target

    def is_insert(self):
        """Return True if this event inserts FORMULA, False if it deletes."""
        return self.insert

    def tablename(self):
        """Return the table name of the underlying formula."""
        return self.formula.tablename()

    def __str__(self):
        if self.insert:
            text = "insert"
        else:
            text = "delete"
        if self.target is None:
            target = ""
        elif isinstance(self.target, Theory):
            target = " for {}".format(self.target.name)
        else:
            target = " for {}".format(str(self.target))
        return "{}[{}]{} with proofs {}".format(
            text, str(self.formula), target, iterstr(self.proofs))

    def __hash__(self):
        # NOTE: target is deliberately excluded, matching __eq__.
        return hash("Event(formula={}, proofs={}, insert={}".format(
            str(self.formula), str(self.proofs), str(self.insert)))

    def __eq__(self, other):
        return (self.formula == other.formula and
                self.proofs == other.proofs and
                self.insert == other.insert)

    def __ne__(self, other):
        # Required under Python 2, where != is not derived from __eq__.
        return not self.__eq__(other)
def iterstr(iterable):
    """Return '[a;b;...]': the str() of each element, semicolon-joined.

    (Parameter renamed from `iter`, which shadowed the builtin; all
    in-file callers pass it positionally.)
    """
    return "[" + ";".join(str(x) for x in iterable) + "]"
def list_to_database(atoms):
    """Return a Database populated with every atom in ATOMS.

    Formulas that are not atoms are silently skipped.
    """
    database = Database()
    for formula in atoms:
        if not formula.is_atom():
            continue
        database.insert(formula)
    return database
def string_to_database(string):
    """Parse STRING with compile.parse and load the resulting atoms
    into a fresh Database (via list_to_database)."""
    return list_to_database(compile.parse(string))
##############################################################################
## Logical Building Blocks
##############################################################################
class Proof(object):
    """A proof tree spanning multiple rules.

    Differs semantically from Database's Proof in that this version
    represents a proof that spans rules, instead of just a proof for a
    single rule: ROOT is the conclusion and CHILDREN are the sub-proofs
    of its premises.
    """
    def __init__(self, root, children):
        self.root = root
        self.children = children

    def __str__(self):
        return self.str_tree(0)

    def str_tree(self, depth):
        """Render this proof as an indented tree, one node per line."""
        line = " " * depth + str(self.root) + "\n"
        return line + "".join(child.str_tree(depth + 1)
                              for child in self.children)

    def leaves(self):
        """Return the roots of all leaf sub-proofs, left to right."""
        if not self.children:
            return [self.root]
        return [leaf for child in self.children
                     for leaf in child.leaves()]
class DeltaRule(object):
    """Rule describing how updates to data sources change table."""
    def __init__(self, trigger, head, body, original):
        self.trigger = trigger    # atom whose change fires this rule
        self.head = head          # atom
        self.body = body          # list of literals
        self.original = original  # Rule from which SELF was derived

    def __str__(self):
        return "<trigger: {}, head: {}, body: {}>".format(
            str(self.trigger), str(self.head), [str(lit) for lit in self.body])

    def __eq__(self, other):
        # List equality already performs the length check plus
        # element-wise comparison (and avoids the py2-only xrange).
        # ORIGINAL is deliberately not compared.
        return (self.trigger == other.trigger and
                self.head == other.head and
                self.body == other.body)

    def __ne__(self, other):
        # Required under Python 2, where != is not derived from __eq__.
        return not self.__eq__(other)

    def variables(self):
        """Return the set of variables occurring in this delta rule."""
        # NOTE(review): assumes trigger.variables() returns a fresh set,
        # since |= mutates it in place -- confirm in compile.py
        vs = self.trigger.variables()
        vs |= self.head.variables()
        for atom in self.body:
            vs |= atom.variables()
        return vs

    def tablenames(self):
        """Return the set of tablenames occurring in this delta rule."""
        tables = set()
        tables.add(self.head.table)
        tables.add(self.trigger.table)
        for atom in self.body:
            tables.add(atom.table)
        return tables
##############################################################################
## Abstract Theories
##############################################################################
class Theory(object):
    """Abstract base class for policy theories (containers of rules
    and/or data), with shared naming and tracing support.
    """
    def __init__(self, name=None, abbr=None):
        self.tracer = Tracer()
        self.name = repr(self) if name is None else name
        self.abbr = "th" if abbr is None else abbr
        # Fixed-width (6 char) prefix used to tag trace output
        maxlength = 6
        if len(self.abbr) > maxlength:
            self.trace_prefix = self.abbr[:maxlength]
        else:
            self.trace_prefix = self.abbr.ljust(maxlength)

    def set_tracer(self, tracer):
        self.tracer = tracer

    def log(self, table, msg, depth=0):
        self.tracer.log(table, self.trace_prefix + ": " + msg, depth)

    def policy(self):
        """Return a list of the policy statements in this theory."""
        raise NotImplementedError()

    def content(self):
        """Return a list of the contents of this theory: may be rules
        and/or data. Note: do not change name to CONTENTS, as this
        is reserved for a dictionary of stuff used by TopDownTheory.
        """
        raise NotImplementedError()

    def tablenames(self):
        """Return the set of table names referenced by any policy rule."""
        names = set()
        for statement in self.policy():
            names |= statement.tablenames()
        return names

    def __str__(self):
        lines = [str(statement) + '\n' for statement in self.policy()]
        return "".join(lines) + '\n'

    def get_rule(self, ident):
        """Return the first policy statement whose id is IDENT, or None."""
        for statement in self.policy():
            if hasattr(statement, 'id') and statement.id == ident:
                return statement
        return None

    def get_arity_self(self, tablename):
        """Returns the number of arguments for the given TABLENAME
        or None if the table is not defined by SELF.
        A table is defined-by SELF if this theory believes it is
        the source of truth for that table, i.e. this is a Database
        theory and we store the contents of that table or this is
        a rule theory, and that tablename is in the head of a rule.
        """
        raise NotImplementedError

    def get_arity(self, tablename):
        """Return TABLENAME's arity from SELF or any included theory."""
        arity = self.get_arity_self(tablename)
        if arity is not None:
            return arity
        for included in getattr(self, "includes", []):
            arity = included.get_arity(tablename)
            if arity is not None:
                return arity
        return None
class TopDownTheory(Theory):
"""Class that holds the Top-Down evaluation routines. Classes
will inherit from this class if they want to import and specialize
those routines.
"""
    class TopDownContext(object):
        """Struct for storing the search state of top-down evaluation.

        Holds the list of goal literals being proved, the index of the
        literal currently in focus, the variable binding built so far,
        a link to the enclosing (previous) context, and the recursion
        depth.
        """
        def __init__(self, literals, literal_index, binding, context, depth):
            self.literals = literals
            # index into LITERALS of the literal currently being proved
            self.literal_index = literal_index
            self.binding = binding
            # the enclosing context this one was spawned from
            self.previous = context
            self.depth = depth
        def __str__(self):
            return (
                "TopDownContext<literals={}, literal_index={}, binding={}, "
                "previous={}, depth={}>").format(
                    "[" + ",".join([str(x) for x in self.literals]) + "]",
                    str(self.literal_index), str(self.binding),
                    str(self.previous), str(self.depth))
class TopDownResult(object):
    """Stores a single result for top-down-evaluation."""

    def __init__(self, binding, support):
        self.binding = binding
        # 'support' carries the proof fragments used for abduction.
        self.support = support

    def __str__(self):
        binding_repr = unify.binding_str(self.binding)
        support_repr = iterstr(self.support)
        return "TopDownResult(binding={}, support={})".format(
            binding_repr, support_repr)
class TopDownCaller(object):
    """Struct for storing info about the original caller of top-down
    evaluation.

    VARIABLES is the list of variables (from the initial query)
    that we want bindings for.
    BINDING is the initially empty BiUnifier.
    FIND_ALL controls whether just the first or all answers are found.
    ANSWERS is populated by top-down evaluation: it is the list of
    VARIABLES instances that the search process proved true.
    """

    def __init__(self, variables, binding, theory,
                 find_all=True, save=None):
        self.variables = variables    # iterable of variable objects
        self.binding = binding        # a bi-unifier
        self.theory = theory          # top-level theory (for included theories)
        self.find_all = find_all      # boolean: stop at first answer or not
        # Populated during evaluation with TopDownResult objects.
        self.results = []
        # Predicate(literal, unifier) -> bool: should the literal be saved
        # as part of an abductive explanation?
        self.save = save
        # Scratch space for explanations while they are being constructed.
        self.support = []

    def __str__(self):
        template = ("TopDownCaller<variables={}, binding={}, find_all={}, "
                    "results={}, save={}, support={}>")
        return template.format(
            iterstr(self.variables), str(self.binding), str(self.find_all),
            iterstr(self.results), repr(self.save), iterstr(self.support))
#########################################
## External interface
def __init__(self, name=None, abbr=None):
    """Initialize base Theory state and the list of included sub-theories."""
    super(TopDownTheory, self).__init__(name=name, abbr=abbr)
    self.includes = []  # theories consulted recursively by get_arity/eval
def select(self, query, find_all=True):
    """Return list of instances of QUERY that are true.
    If FIND_ALL is False, the return list has at most 1 element.
    """
    assert compile.is_datalog(query), "Query must be atom/rule"
    literals = [query] if compile.is_atom(query) else query.body
    # Our output is instances of QUERY, so we need bindings for every
    # variable appearing in QUERY.
    bindings = self.top_down_evaluation(
        query.variables(), literals, find_all=find_all)
    if bindings:
        answers = ",".join(str(query.plug(b)) for b in bindings)
        self.log(query.tablename(),
                 "Found answer {}".format("[" + answers + "]"))
    return [query.plug(b) for b in bindings]
def explain(self, query, tablenames, find_all=True):
"""Same as select except stores instances of TABLENAMES
that participated in each proof. If QUERY is an atom,
returns list of rules with QUERY in the head and
the stored instances of TABLENAMES in the body; if QUERY is
a rule, the rules | |
record.
dns_name: Name for an NSEC record in punycode format.
dns_next_owner_name: Name of the next owner in punycode format.
last_queried: The time of the last DNS query in Epoch seconds
format.
name: The name of the NSEC record in FQDN format.
next_owner_name: Name of the next owner that has authoritative data
or that contains a delegation point NS record.
rrset_types: The RRSet types that exist at the original owner name
of the NSEC RR.
ttl: The Time To Live (TTL) value for the record. A 32-bit unsigned
integer that represents the duration, in seconds, for which the
record is valid (cached). Zero indicates that the record should
not be cached.
use_ttl: Use flag for: ttl
view: The name of the DNS View in which the record resides. Example:
"external".
zone: The name of the zone in which the record resides. Example:
"zone.com". If a view is not specified when searching by zone,
the default view is used.
"""
_infoblox_type = 'record:nsec'
_fields = ['cloud_info', 'creation_time', 'creator', 'dns_name',
'dns_next_owner_name', 'last_queried', 'name',
'next_owner_name', 'rrset_types', 'ttl', 'use_ttl', 'view',
'zone']
_search_for_update_fields = ['name', 'view']
_updateable_search_fields = []
_all_searchable_fields = ['creator', 'name', 'next_owner_name', 'view',
'zone']
_return_fields = ['name', 'view']
_remap = {}
_shadow_fields = ['_ref']
class Nsec3Record(InfobloxObject):
    """ Nsec3Record: DNS NSEC3 record object.
    Corresponds to WAPI object 'record:nsec3'
    When a name server receives a request for a domain name that does
    not exist in a zone, the name server sends an authenticated negative
    response in the form of an NSEC or NSEC3 RR. NSEC and NSEC3 records
    contain the next secure domain name in a zone and list the RR types
    present at the NSEC or NSEC3 RR's owner name. The difference between
    an NSEC and NSEC3 RRs is that the owner name in an NSEC3 RR is a
    cryptographic hash of the original owner name prepended to the name
    of the zone. NSEC3 RRs protect against zone enumeration.
    NSEC3 resource record is described in RFC 5155.
    NSEC3 records are automatically generated during signing of the
    corresponding zone.
    The
    name
    part of a DNS NSEC3 object reference has the following components:
    Example:
    record:nsec3/ZG5zLmJpsaG9zdA:us.example.com/default.external
    Fields:
    algorithm: The hash algorithm that was used.
    cloud_info: Structure containing all cloud API related information
    for this object.
    creation_time: The creation time of the record.
    creator: Creator of the record.
    dns_name: Name for an NSEC3 record in punycode format.
    flags: The set of 8 one-bit flags, of which only one flag, the Opt-
    Out flag, is defined by RFC 5155. The Opt-Out flag indicates
    whether the NSEC3 record covers unsigned delegations.
    iterations: The number of times the hash function was performed.
    last_queried: The time of the last DNS query in Epoch seconds
    format.
    name: The name of the NSEC3 record in FQDN format.
    next_owner_name: The hashed next owner name that has authoritative
    data or that contains a delegation point NS record.
    rrset_types: The RRSet types that exist at the original owner name
    of the NSEC3 RR.
    salt: A series of case-insensitive hexadecimal digits. It is
    appended to the original owner name as protection against pre-
    calculated dictionary attacks. A new salt value is generated
    when ZSK rolls over. You can control the period of the rollover.
    For random salt values, the selected length is between one and
    15 octets.
    ttl: The Time To Live (TTL) value for the record. A 32-bit unsigned
    integer that represents the duration, in seconds, for which the
    record is valid (cached). Zero indicates that the record should
    not be cached.
    use_ttl: Use flag for: ttl
    view: The name of the DNS View in which the record resides. Example:
    "external".
    zone: The name of the zone in which the record resides. Example:
    "zone.com". If a view is not specified when searching by zone,
    the default view is used.
    """
    # WAPI object type this class maps to.
    _infoblox_type = 'record:nsec3'
    # Every field exposed by the WAPI object.
    _fields = ['algorithm', 'cloud_info', 'creation_time', 'creator',
               'dns_name', 'flags', 'iterations', 'last_queried', 'name',
               'next_owner_name', 'rrset_types', 'salt', 'ttl', 'use_ttl',
               'view', 'zone']
    # Fields used to locate an existing record before an update.
    _search_for_update_fields = ['name', 'view']
    _updateable_search_fields = []
    _all_searchable_fields = ['algorithm', 'creator', 'flags', 'iterations',
                              'name', 'view', 'zone']
    # Fields returned by default from WAPI queries.
    _return_fields = ['name', 'view']
    _remap = {}
    _shadow_fields = ['_ref']
class Nsec3ParamRecord(InfobloxObject):
    """ Nsec3ParamRecord: DNS NSEC3 record object.
    Corresponds to WAPI object 'record:nsec3param'
    An authoritative DNS server uses NSEC3PARAM RRs to determine which
    NSEC3 records it includes in its negative responses. An NSEC3PARAM
    RR contains the parameters that an authoritative server needs to
    calculate hashed owner names. As stated in RFC 5155, the presence of
    an NSEC3PARAM RR at a zone apex indicates that the specified
    parameters may be used by authoritative servers to choose an
    appropriate set of NSEC3 RRs for negative responses.
    The NSEC3PARAM resource record is described in RFC 5155.
    The NSEC3PARAM record is generated automatically upon the signing of
    the corresponding zone.
    The
    name
    part of a DNS NSEC3PARAM object reference has the following
    components:
    Example:
    record:nsec3param/ZG5zLmJpsaG9zdA:us.example.com/default.external
    Fields:
    algorithm: The hash algorithm that was used.
    cloud_info: Structure containing all cloud API related information
    for this object.
    creation_time: The creation time of the record.
    creator: Creator of the record.
    dns_name: Name for an NSEC3PARAM record in punycode format.
    flags: The set of 8 one-bit flags, of which only one flag, the Opt-
    Out flag, is defined by RFC 5155. The Opt-Out flag indicates
    whether the NSEC3 record covers unsigned delegations.
    iterations: The number of times the hash function was performed.
    last_queried: The time of the last DNS query in Epoch seconds
    format.
    name: The name of the NSEC3PARAM record in FQDN format. It has to be
    the same as the zone, where the record resides.
    salt: A series of case-insensitive hexadecimal digits. It is
    appended to the original owner name as protection against pre-
    calculated dictionary attacks. A new salt value is generated
    when the ZSK rolls over, for which the user can control the
    period. For a random salt value, the selected length is between
    one and 15 octets.
    ttl: The Time To Live (TTL) value for the record. A 32-bit unsigned
    integer that represents the duration, in seconds, for which the
    record is valid (cached). Zero indicates that the record should
    not be cached.
    use_ttl: Use flag for: ttl
    view: The name of the DNS View in which the record resides. Example:
    "external".
    zone: The name of the zone in which the record resides. Example:
    "zone.com". If a view is not specified when searching by zone,
    the default view is used.
    """
    # WAPI object type this class maps to.
    _infoblox_type = 'record:nsec3param'
    _fields = ['algorithm', 'cloud_info', 'creation_time', 'creator',
               'dns_name', 'flags', 'iterations', 'last_queried', 'name',
               'salt', 'ttl', 'use_ttl', 'view', 'zone']
    # Fields used to locate an existing record before an update.
    _search_for_update_fields = ['name', 'view']
    _updateable_search_fields = []
    _all_searchable_fields = ['algorithm', 'creator', 'flags', 'iterations',
                              'name', 'view', 'zone']
    _return_fields = ['name', 'view']
    _remap = {}
    _shadow_fields = ['_ref']
class PtrRecord(InfobloxObject):
    """Version-neutral facade for DNS PTR records.

    The framework asks this class for the concrete IPv4 or IPv6
    implementation via get_v4_class / get_v6_class.
    """

    @classmethod
    def get_v4_class(cls):
        # Concrete class handling IPv4 PTR records.
        return PtrRecordV4

    @classmethod
    def get_v6_class(cls):
        # Concrete class handling IPv6 PTR records.
        return PtrRecordV6
class PtrRecordV4(PtrRecord):
""" PtrRecordV4: DNS PTR record object.
Corresponds to WAPI object 'record:ptr'
In a forward-mapping zone, a PTR (pointer) record maps a domain name
to another domain name. In a reverse-mapping zone, a PTR (pointer)
record maps an address to a domain name. To define a specific
address-to-name mapping, add a PTR record to a previously defined
authoritative reverse-mapping zone.
Fields:
aws_rte53_record_info: Aws Route 53 record information.
cloud_info: Structure containing all cloud API related information
for this object.
comment: Comment for the record; maximum 256 characters.
creation_time: The time of the record creation in Epoch seconds
format.
creator: The record creator.Note that changing creator from or to
'SYSTEM' value is not allowed.
ddns_principal: The GSS-TSIG principal that owns this record.
ddns_protected: Determines if the DDNS updates for this record are
allowed or not.
disable: Determines if the record is disabled or not. False means
that the record is enabled.
discovered_data: The discovered data for this PTR record.
dns_name: The name for a DNS PTR record in punycode format.
dns_ptrdname: The domain name of the DNS PTR record in punycode
format.
| |
# kolibri/utils/options.py
"""
This module is intended to allow customization of Kolibri settings with the
options.ini file.
The settings can be changed through environment variables or sections and keys
in the options.ini file.
The following options are supported:
[Cache]
CACHE_BACKEND
CACHE_TIMEOUT
CACHE_MAX_ENTRIES
CACHE_PASSWORD
CACHE_LOCATION
CACHE_LOCK_TTL
CACHE_REDIS_MIN_DB
CACHE_REDIS_MAX_POOL_SIZE
CACHE_REDIS_POOL_TIMEOUT
CACHE_REDIS_MAXMEMORY
CACHE_REDIS_MAXMEMORY_POLICY
[Database]
DATABASE_ENGINE
DATABASE_NAME
DATABASE_PASSWORD
DATABASE_USER
DATABASE_HOST
DATABASE_PORT
[Server]
CHERRYPY_START
CHERRYPY_THREAD_POOL
CHERRYPY_SOCKET_TIMEOUT
CHERRYPY_QUEUE_SIZE
CHERRYPY_QUEUE_TIMEOUT
PROFILE
[Paths]
CONTENT_DIR
[Urls]
CENTRAL_CONTENT_BASE_URL
DATA_PORTAL_SYNCING_BASE_URL
[Deployment]
HTTP_PORT
RUN_MODE
URL_PATH_PREFIX
LANGUAGES
STATIC_USE_SYMLINKS
We currently create symbolic links when collecting static files. The option
can be set to False to disable the feature to instead copy files to STATIC_ROOT.
This is useful for the cloud infrastructure where Nginx and Kolibri are set
up in separate network mounted volumes such that Nginx cannot access symlinked
static files in the other volume.
ZIP_CONTENT_HOST
ZIP_CONTENT_PORT
[Python]
PICKLE_PROTOCOL
"""
import logging.config
import os
import sys
from configobj import ConfigObj
from configobj import flatten_errors
from configobj import get_extra_values
from django.utils.functional import SimpleLazyObject
from django.utils.six import string_types
from validate import Validator
from validate import VdtValueError
try:
import kolibri.utils.pskolibri as psutil
except NotImplementedError:
# This module can't work on this OS
psutil = None
from kolibri.utils.i18n import KOLIBRI_LANGUAGE_INFO
from kolibri.utils.i18n import KOLIBRI_SUPPORTED_LANGUAGES
from kolibri.plugins.utils.options import extend_config_spec
def calculate_thread_pool():
    """Return the default value for the CherryPy thread_pool option.

    The value is derived from measurements on partner installations:
    servers with more memory can serve more threads, scaled linearly
    between the pool bounds for machines with 2 to 6 GB of RAM.
    """
    pool_floor = 50
    pool_ceiling = 150
    if psutil:
        mem_floor = 2  # GB
        mem_ceiling = 6  # GB
        total_gb = psutil.virtual_memory().total / pow(2, 30)  # in Gb
        if mem_floor < total_gb < mem_ceiling:
            # Inside the supported range: scale thread count linearly
            # with the available memory.
            fraction = float(total_gb - mem_floor) / (mem_ceiling - mem_floor)
            return pool_floor + int((pool_ceiling - pool_floor) * fraction)
        # Outside the range: clamp to the nearest bound.
        return pool_ceiling if total_gb >= mem_ceiling else pool_floor
    if sys.platform.startswith("darwin"):
        # Assume MacOS machines have at least 4 GB of RAM.
        return pool_ceiling
    return pool_floor
# Shortcut values accepted by the LANGUAGES option: expand to every language
# Kolibri knows about, or to the officially supported subset only.
ALL_LANGUAGES = "kolibri-all"
SUPPORTED_LANGUAGES = "kolibri-supported"
def _process_language_string(value):
    """Validate one language entry and expand shortcut names.

    The entry is coerced to a string, then checked against the special
    shortcut values and finally against the known language codes.

    Returns a list of language codes; raises ValueError when the entry is
    neither a shortcut nor a recognized language code.
    """
    text = str(value)
    shortcut_expansions = {
        ALL_LANGUAGES: lambda: list(KOLIBRI_LANGUAGE_INFO.keys()),
        SUPPORTED_LANGUAGES: lambda: list(KOLIBRI_SUPPORTED_LANGUAGES),
    }
    if text in shortcut_expansions:
        return shortcut_expansions[text]()
    if text in KOLIBRI_LANGUAGE_INFO:
        return [text]
    raise ValueError
def language_list(value):
    """
    Check that the supplied value is a list of languages,
    or a single language, or a special shortcut parameter.
    In the case that it is a special shortcut name, we return the full list
    of relevant languages for that parameter, or throw a validation error
    if that parameter would return an empty list.
    If a single language code is the parameter, this function will return a list
    with that language code as the only member.
    :param Union[str, list[str]] value: Either a string or a list of strings
    String can be any value that is a key of KOLIBRI_LANGUAGE_INFO
    or one of the special strings represented by ALL_LANGUAGES or SUPPORTED_LANGUAGES
    A list must be a list of these strings.
    :raises VdtValueError: if any entry is invalid, or the result is empty.
    :returns: sorted list of unique language codes.
    """
    # Normalize a bare string to a single-element list.
    if not isinstance(value, list):
        value = [value]
    out = set()
    errors = []
    for entry in value:
        try:
            out.update(_process_language_string(entry))
        except ValueError:
            errors.append(entry)
    if errors:
        raise VdtValueError(errors)
    if not out:
        raise VdtValueError(value)
    # sorted() already produces a list; the previous list() wrapper was redundant.
    return sorted(out)
def path_list(value):
    """
    Check that the supplied value is a semicolon-delimited list of paths.
    Note: we do not guarantee that these paths all currently exist.
    """
    if isinstance(value, string_types):
        value = value.split(";")
    if not isinstance(value, list):
        # NOTE(review): non-string, non-list input falls through to None,
        # mirroring the original implicit behaviour — confirm this is intended.
        return None
    bad_items = [repr(item) for item in value
                 if not isinstance(item, string_types)]
    if bad_items:
        raise VdtValueError(bad_items)
    return value
# Specification of every option Kolibri supports, keyed by INI section and
# option name. Each entry declares the configobj validator "type", an
# optional "default", the environment variable name(s) that may override the
# value, and optionally a "clean" callable applied to the parsed value.
base_option_spec = {
    # Cache backend selection and tuning (memory or redis).
    "Cache": {
        "CACHE_BACKEND": {
            "type": "option",
            "options": ("memory", "redis"),
            "default": "memory",
            "envvars": ("KOLIBRI_CACHE_BACKEND",),
        },
        "CACHE_TIMEOUT": {
            "type": "integer",
            "default": 300,
            "envvars": ("KOLIBRI_CACHE_TIMEOUT",),
        },
        "CACHE_MAX_ENTRIES": {
            "type": "integer",
            "default": 1000,
            "envvars": ("KOLIBRI_CACHE_MAX_ENTRIES",),
        },
        "CACHE_PASSWORD": {
            "type": "string",
            "default": "",
            "envvars": ("KOLIBRI_CACHE_PASSWORD",),
        },
        "CACHE_LOCATION": {
            "type": "string",
            "default": "localhost:6379",
            "envvars": ("KOLIBRI_CACHE_LOCATION",),
        },
        "CACHE_LOCK_TTL": {
            "type": "integer",
            "default": 30,
            "envvars": ("KOLIBRI_CACHE_LOCK_TTL",),
        },
        "CACHE_REDIS_MIN_DB": {
            "type": "integer",
            "default": 0,
            "envvars": ("KOLIBRI_CACHE_REDIS_MIN_DB",),
        },
        "CACHE_REDIS_MAX_POOL_SIZE": {
            "type": "integer",
            "default": 50,  # use redis-benchmark to determine better value
            "envvars": ("KOLIBRI_CACHE_REDIS_MAX_POOL_SIZE",),
        },
        "CACHE_REDIS_POOL_TIMEOUT": {
            "type": "integer",
            "default": 30,  # seconds
            "envvars": ("KOLIBRI_CACHE_REDIS_POOL_TIMEOUT",),
        },
        # Optional redis settings to overwrite redis.conf
        "CACHE_REDIS_MAXMEMORY": {
            "type": "integer",
            "default": 0,
            "envvars": ("KOLIBRI_CACHE_REDIS_MAXMEMORY",),
        },
        "CACHE_REDIS_MAXMEMORY_POLICY": {
            "type": "option",
            "options": (
                "",
                "allkeys-lru",
                "volatile-lru",
                "allkeys-random",
                "volatile-random",
                "volatile-ttl",
                "noeviction",
            ),
            "default": "",
            "envvars": ("KOLIBRI_CACHE_REDIS_MAXMEMORY_POLICY",),
        },
    },
    # Database engine and connection settings.
    "Database": {
        "DATABASE_ENGINE": {
            "type": "option",
            "options": ("sqlite", "postgres"),
            "default": "sqlite",
            "envvars": ("KOLIBRI_DATABASE_ENGINE",),
        },
        "DATABASE_NAME": {"type": "string", "envvars": ("KOLIBRI_DATABASE_NAME",)},
        "DATABASE_PASSWORD": {
            "type": "string",
            "envvars": ("KOLIBRI_DATABASE_PASSWORD",),
        },
        "DATABASE_USER": {"type": "string", "envvars": ("KOLIBRI_DATABASE_USER",)},
        "DATABASE_HOST": {"type": "string", "envvars": ("KOLIBRI_DATABASE_HOST",)},
        "DATABASE_PORT": {"type": "string", "envvars": ("KOLIBRI_DATABASE_PORT",)},
    },
    # CherryPy web-server tuning and debug switches.
    "Server": {
        "CHERRYPY_START": {
            "type": "boolean",
            "default": True,
            "envvars": ("KOLIBRI_CHERRYPY_START",),
        },
        "CHERRYPY_THREAD_POOL": {
            "type": "integer",
            # Default scales with the host's available memory.
            "default": calculate_thread_pool(),
            "envvars": ("KOLIBRI_CHERRYPY_THREAD_POOL",),
        },
        "CHERRYPY_SOCKET_TIMEOUT": {
            "type": "integer",
            "default": 10,
            "envvars": ("KOLIBRI_CHERRYPY_SOCKET_TIMEOUT",),
        },
        "CHERRYPY_QUEUE_SIZE": {
            "type": "integer",
            "default": 30,
            "envvars": ("KOLIBRI_CHERRYPY_QUEUE_SIZE",),
        },
        "CHERRYPY_QUEUE_TIMEOUT": {
            "type": "float",
            "default": 0.1,
            "envvars": ("KOLIBRI_CHERRYPY_QUEUE_TIMEOUT",),
        },
        "PROFILE": {
            "type": "boolean",
            "default": False,
            "envvars": ("KOLIBRI_SERVER_PROFILE",),
        },
        "DEBUG": {"type": "boolean", "default": False, "envvars": ("KOLIBRI_DEBUG",)},
        "DEBUG_LOG_DATABASE": {
            "type": "boolean",
            "default": False,
            "envvars": ("KOLIBRI_DEBUG_LOG_DATABASE",),
        },
    },
    # Filesystem locations for content.
    "Paths": {
        "CONTENT_DIR": {
            "type": "string",
            "default": "content",
            "envvars": ("KOLIBRI_CONTENT_DIR",),
        },
        "CONTENT_FALLBACK_DIRS": {
            "type": "path_list",
            "default": "",
            "envvars": ("KOLIBRI_CONTENT_FALLBACK_DIRS",),
        },
    },
    # Remote service endpoints.
    "Urls": {
        "CENTRAL_CONTENT_BASE_URL": {
            "type": "string",
            "default": "https://studio.learningequality.org",
            "envvars": (
                "KOLIBRI_CENTRAL_CONTENT_BASE_URL",
                "CENTRAL_CONTENT_DOWNLOAD_BASE_URL",
            ),
        },
        "DATA_PORTAL_SYNCING_BASE_URL": {
            "type": "string",
            "default": "https://kolibridataportal.learningequality.org",
            "envvars": ("KOLIBRI_DATA_PORTAL_SYNCING_BASE_URL",),
        },
    },
    # Deployment-specific serving configuration.
    "Deployment": {
        "HTTP_PORT": {
            "type": "integer",
            "default": 8080,
            "envvars": ("KOLIBRI_HTTP_PORT", "KOLIBRI_LISTEN_PORT"),
        },
        "RUN_MODE": {"type": "string", "envvars": ("KOLIBRI_RUN_MODE",)},
        "DISABLE_PING": {
            "type": "boolean",
            "default": False,
            "envvars": ("KOLIBRI_DISABLE_PING",),
        },
        "URL_PATH_PREFIX": {
            "type": "string",
            "default": "/",
            "envvars": ("KOLIBRI_URL_PATH_PREFIX",),
            # Normalize to exactly one trailing slash and no leading slash.
            "clean": lambda x: x.lstrip("/").rstrip("/") + "/",
        },
        "LANGUAGES": {
            "type": "language_list",
            "default": SUPPORTED_LANGUAGES,
            "envvars": ("KOLIBRI_LANGUAGES",),
        },
        "STATIC_USE_SYMLINKS": {
            "type": "boolean",
            "default": True,
            "envvars": ("KOLIBRI_STATIC_USE_SYMLINKS",),
        },
        "ZIP_CONTENT_HOST": {
            "type": "string",
            "default": "",
            "envvars": ("KOLIBRI_ZIP_CONTENT_HOST",),
        },
        "ZIP_CONTENT_PORT": {
            "type": "integer",
            "default": 8888,
            "envvars": ("KOLIBRI_ZIP_CONTENT_PORT",),
        },
    },
    # Python-level interoperability settings.
    "Python": {
        "PICKLE_PROTOCOL": {
            "type": "integer",
            "default": 2,
            "envvars": ("KOLIBRI_PICKLE_PROTOCOL",),
        }
    },
}
def _get_validator():
    """Build a configobj Validator aware of our custom option types."""
    custom_checks = {"language_list": language_list, "path_list": path_list}
    return Validator(custom_checks)
def _get_logger(KOLIBRI_HOME):
    """Install a minimal default logging config and return this module's logger.

    We define a minimal default logger config here, since we can't yet
    load up Django settings.
    NB! Since logging can be defined by options, the logging from some
    of the functions in this module do not use fully customized logging.
    """
    from kolibri.utils.conf import LOG_ROOT
    from kolibri.utils.logger import get_default_logging_config

    default_config = get_default_logging_config(LOG_ROOT)
    logging.config.dictConfig(default_config)
    return logging.getLogger(__name__)
def _get_option_spec():
    """Combine the default option spec with any options defined in plugins."""
    defaults = base_option_spec
    return extend_config_spec(defaults)
option_spec = SimpleLazyObject(_get_option_spec)
def get_configspec():
    """
    Read the option_spec dict defined above, and turn it into a "configspec"
    object (per the configobj library) so that we can use it to parse the
    options.ini file.
    """
    lines = []
    for section, opts in option_spec.items():
        lines.append("[{}]".format(section))
        for name, attrs in opts.items():
            default = attrs.get("default", "")
            # List defaults need configobj's list(...) syntax.
            if isinstance(default, list):
                default_arg = "default=list('{}')".format("','".join(default))
            else:
                default_arg = "default='{}'".format(default)
            args = ["%r" % op for op in attrs.get("options", [])]
            args.append(default_arg)
            lines.append("{} = {}({})".format(
                name, attrs["type"], ", ".join(args)))
    return ConfigObj(lines, _inspec=True)
def clean_conf(conf):
    """Apply any per-option "clean" callables declared in the option spec."""
    # NOTE: despite the comment this code previously carried, nothing here
    # reads environment variables; it only post-processes parsed values.
    for section, opts in option_spec.items():
        for optname, attrs in opts.items():
            # if any options have clean functions defined, then apply them now
            if "clean" in attrs:
                conf[section][optname] = attrs["clean"](conf[section][optname])
    return conf
def read_options_file(KOLIBRI_HOME, ini_filename="options.ini"):
logger = _get_logger(KOLIBRI_HOME)
ini_path = os.path.join(KOLIBRI_HOME, ini_filename)
conf = ConfigObj(ini_path, configspec=get_configspec())
# validate once up front to ensure section structure is in place
conf.validate(_get_validator())
# keep track of which options were overridden using environment variables, to support error reporting
using_env_vars = {}
# override any values from their environment variables (if set)
for section, opts in option_spec.items():
for optname, attrs in opts.items():
for envvar in attrs.get("envvars", []):
if os.environ.get(envvar):
logger.info(
"Option {optname} in section [{section}] being overridden by environment variable {envvar}".format(
optname=optname, section=section, envvar=envvar
)
)
conf[section][optname] = os.environ[envvar]
using_env_vars[optname] = envvar
break
conf | |
freqstop,
step_size,
sweepname=None,
save_fields=True,
save_rad_fields_only=False,
sweep_type="Interpolating",
interpolation_tol_percent=0.5,
interpolation_max_solutions=250,
use_q3d_for_dc=False,
):
"""Create a sweep with the specified frequency step.
Parameters
----------
setupname : str
Name of the setup to attach to the sweep.
unit : str
Unit of the frequency. For example, ``"MHz"`` or ``"GHz"``.
freqstart : float
Starting frequency of the sweep.
freqstop : float
Stopping frequency of the sweep.
step_size : float
Frequency size of the step.
sweepname : str, optional
Name of the sweep. The default is ``None``.
save_fields : bool, optional
Whether to save fields for a discrete sweep only. The
default is ``True``.
save_rad_fields_only : bool, optional
Whether to save only radiated fields if
``save_fields=True``. The default is ``False``.
sweep_type : str, optional
Type of the sweep. Options are ``"Fast"``,
``"Interpolating"``, and ``"Discrete"``. The default is
``"Interpolating"``.
interpolation_tol_percent : float, optional
Error tolerance threshold for the interpolation
process. The default is ``0.5``.
interpolation_max_solutions : int, optional
Maximum number of solutions evaluated for the
interpolation process. The default is ``250``.
use_q3d_for_dc : bool, optional
Whether to use Q3D to solve the DC point. The default is ``False``.
Returns
-------
:class:`pyaedt.modules.SetupTemplates.SweepHFSS3DLayout` or bool
Sweep object if successful, ``False`` otherwise.
References
----------
>>> oModule.AddSweep
"""
if sweep_type not in ["Discrete", "Interpolating", "Fast"]:
raise AttributeError("Invalid `sweep_type`. It has to be either 'Discrete', 'Interpolating', or 'Fast'")
if sweepname is None:
sweepname = generate_unique_name("Sweep")
interpolation = False
if sweep_type == "Interpolating":
interpolation = True
save_fields = False
if not save_fields:
save_rad_fields_only = False
interpolation_tol = interpolation_tol_percent / 100.0
for s in self.setups:
if s.name == setupname:
setupdata = s
if sweepname in [sweep.name for sweep in setupdata.sweeps]:
oldname = sweepname
sweepname = generate_unique_name(oldname)
self.logger.warning(
"Sweep %s is already present. Sweep has been renamed in %s.", oldname, sweepname
)
sweep = setupdata.add_sweep(sweepname=sweepname)
if not sweep:
return False
sweep.change_range("LinearStep", freqstart, freqstop, step_size, unit)
sweep.props["GenerateSurfaceCurrent"] = save_fields
sweep.props["SaveRadFieldsOnly"] = save_rad_fields_only
sweep.props["FastSweep"] = interpolation
sweep.props["SAbsError"] = interpolation_tol
sweep.props["EnforcePassivity"] = interpolation
sweep.props["UseQ3DForDC"] = use_q3d_for_dc
sweep.props["MaxSolutions"] = interpolation_max_solutions
sweep.update()
self.logger.info("Linear step sweep %s has been correctly created.", sweepname)
return sweep
return False
@pyaedt_function_handler()
def create_single_point_sweep(
    self,
    setupname,
    unit,
    freq,
    sweepname=None,
    save_fields=False,
    save_rad_fields_only=False,
):
    """Create a sweep with a single frequency point.

    Parameters
    ----------
    setupname : str
        Name of the setup.
    unit : str
        Unit of the frequency. For example, ``"MHz"`` or ``"GHz"``.
    freq : float, list
        Frequency of the single point or list of frequencies to create
        distinct single points. The list is not modified.
    sweepname : str, optional
        Name of the sweep. The default is ``None``.
    save_fields : bool, optional
        Whether to save fields for all points and subranges defined in the
        sweep. The default is ``False``.
    save_rad_fields_only : bool, optional
        Whether to save only radiating fields. The default is ``False``.

    Returns
    -------
    :class:`pyaedt.modules.SetupTemplates.SweepHFSS` or bool
        Sweep object if successful, ``False`` otherwise.

    References
    ----------
    >>> oModule.AddSweep
    """
    if sweepname is None:
        sweepname = generate_unique_name("SinglePoint")
    if isinstance(freq, list):
        if not freq:
            raise AttributeError("Frequency list is empty. Specify at least one frequency point.")
        # Slice instead of pop(0) so the caller's list is not mutated.
        freq0 = freq[0]
        extra_points = freq[1:]
    else:
        freq0 = freq
        extra_points = []
    add_subranges = bool(extra_points)
    if setupname not in self.setup_names:
        return False
    for s in self.setups:
        if s.name == setupname:
            setupdata = s
            if sweepname in [sweep.name for sweep in setupdata.sweeps]:
                # Avoid clobbering an existing sweep by picking a fresh name.
                oldname = sweepname
                sweepname = generate_unique_name(oldname)
                self.logger.warning(
                    "Sweep %s is already present. Sweep has been renamed in %s.", oldname, sweepname
                )
            sweepdata = setupdata.add_sweep(sweepname, "Discrete")
            sweepdata.change_range("SinglePoint", freq0, unit=unit)
            sweepdata.props["GenerateSurfaceCurrent"] = save_fields
            sweepdata.props["SaveRadFieldsOnly"] = save_rad_fields_only
            sweepdata.update()
            if add_subranges:
                # Each remaining frequency becomes its own single-point subrange.
                for f in extra_points:
                    sweepdata.add_subrange(rangetype="SinglePoint", start=f, unit=unit)
            self.logger.info("Single point sweep %s has been correctly created.", sweepname)
            return sweepdata
    return False
@pyaedt_function_handler()
def _import_cad(
    self, cad_path, cad_format="gds", aedb_path=None, xml_path=None, set_as_active=True, close_active_project=False
):
    """Import a layout CAD file via the matching AEDT import method.

    Creates an .aedb next to the CAD file unless aedb_path is supplied,
    and optionally makes the imported design active and/or closes the
    previously active project. Returns ``True`` on success, ``False``
    for an unsupported format.
    """
    # Map each supported format to the AEDT importer method name; getattr
    # keeps the lookup lazy, matching the original branch-per-format code.
    importer_names = {
        "gds": "ImportGDSII",
        "dxf": "ImportAutoCAD",
        "gerber": "ImportGerber",
        "awr": "ImportAWRMicrowaveOffice",
        "brd": "ImportExtracta",
        "ipc2581": "ImportIPC",
        "odb++": "ImportODB",
    }
    if cad_format not in importer_names:
        return False
    method = getattr(self.oimport_export, importer_names[cad_format])
    active_project = self.project_name
    root, _ = os.path.splitext(cad_path)
    project_name = os.path.splitext(os.path.basename(cad_path))[0]
    if not aedb_path:
        aedb_path = root + ".aedb"
    if os.path.exists(aedb_path):
        # Never overwrite an existing .aedb; rename the new project instead.
        old_name = project_name
        project_name = generate_unique_name(project_name)
        aedb_path = aedb_path.replace(old_name, project_name)
        self.logger.warning("aedb_exists. Renaming it to %s", project_name)
    if not xml_path:
        xml_path = ""
    if cad_format == "gds":
        method(cad_path, aedb_path, xml_path, "")
    else:
        method(cad_path, aedb_path, xml_path)
    if set_as_active:
        self._close_edb()
        self.__init__(project_name)
    if close_active_project:
        self.odesktop.CloseProject(active_project)
    return True
@pyaedt_function_handler()
def import_gds(self, gds_path, aedb_path=None, control_file=None, set_as_active=True, close_active_project=False):
    """Import GDS file into HFSS 3D Layout and assign the stackup from an XML file if present.

    Parameters
    ----------
    gds_path : str
        Full path to the GDS file.
    aedb_path : str, optional
        Full path to the AEDB file.
    control_file : str, optional
        Path to the XML file with the stackup information. The default is ``None``, in
        which case the stackup is not edited.
    set_as_active : bool, optional
        Whether to set the GDS file as active. The default is ``True``.
    close_active_project : bool, optional
        Whether to close the active project after loading the GDS file.
        The default is ``False``.

    Returns
    -------
    bool
        ``True`` when successful, ``False`` when failed.

    References
    ----------
    >>> oModule.ImportGDSII
    """
    return self._import_cad(gds_path, "gds", aedb_path, control_file, set_as_active, close_active_project)
@pyaedt_function_handler()
def import_dxf(self, dxf_path, aedb_path=None, control_file=None, set_as_active=True, close_active_project=False):
    """Import DXF file into HFSS 3D Layout and assign the stackup from an XML file if present.

    Parameters
    ----------
    dxf_path : str
        Full path to the DXF file.
    aedb_path : str, optional
        Full path to the AEDB file.
    control_file : str, optional
        Path to the XML file with the stackup information. The default is ``None``, in
        which case the stackup is not edited.
    set_as_active : bool, optional
        Whether to set the DXF file as active. The default is ``True``.
    close_active_project : bool, optional
        Whether to close the active project after loading the DXF file.
        The default is ``False``.

    Returns
    -------
    bool
        ``True`` when successful, ``False`` when failed.

    References
    ----------
    >>> oModule.ImportDXF
    """
    return self._import_cad(dxf_path, "dxf", aedb_path, control_file, set_as_active, close_active_project)
@pyaedt_function_handler()
def import_gerber(
    self, gerber_path, aedb_path=None, control_file=None, set_as_active=True, close_active_project=False
):
    """Import Gerber zip into HFSS 3D Layout and assign the stackup from an XML file if present.

    Parameters
    ----------
    gerber_path : str
        Full path to the Gerber zip file.
    aedb_path : str, optional
        Full path to the AEDB file.
    control_file : str, optional
        Path to the XML file with the stackup information. The default is ``None``, in
        which case the stackup is not edited.
    set_as_active : bool, optional
        Whether to set the Gerber file as active. The default is ``True``.
    close_active_project : bool, optional
        Whether to close the active project after loading the Gerber file.
        The default is ``False``.

    Returns
    -------
    bool
        ``True`` when successful, ``False`` when failed.

    References
    ----------
    >>> oModule.ImportGerber
    """
    return self._import_cad(gerber_path, "gerber", aedb_path, control_file, set_as_active, close_active_project)
    @pyaedt_function_handler()
    def import_brd(
        self, input_file, aedb_path=None, set_as_active=True, close_active_project=False
    ):  # pragma: no cover
        """Import a BRD file into HFSS 3D Layout and assign the stackup from an XML file if present.
        Parameters
        ----------
        input_file : str
            Full path to the BRD file.
        aedb_path : str, optional
            Full path to the AEDB file.
        set_as_active : bool, optional
            Whether to set the BRD file as active. The default is ``True``.
        close_active_project : bool, optional
            Whether to close the active project after loading the BRD file.
            The default is ``False``.
        Returns
        -------
        bool
            ``True`` when successful, ``False`` when failed.
        References
        ----------
        >>> oModule.ImportExtracta
        """
        return self._import_cad(input_file, "brd", aedb_path, "", set_as_active, close_active_project)
@pyaedt_function_handler()
def import_awr(
self, input_file, aedb_path=None, control_file=None, set_as_active=True, close_active_project=False
): # pragma: no cover
"""Import AWR Microwave Office file into HFSS 3D Layout and assign the stackup from an XML file if present.
Parameters
----------
input_file : str
Full path to the AWR xml file.
aedb_path : str, optional
Full path to the AEDB file.
control_file : str, optional
Path to the XML file with the stackup information. The default is ``None``, in
which case the stackup is not edited.
set_as_active : bool, | |
NetworkSerializer.get_by_domain_install(domain=asset['parent'])
else:
network_serializer = NetworkSerializer.get_by_asset_vlan(domain=asset['parent'], vlan=asset['provision']['vlan'])
parent_asset = AssetSerializer.get(service_tag=asset['parent'])
si, vcenter, datacenter, cluster = connect_hypervisor_vmware(parent_asset)
network, backing = find_network_by_vlan_vmware(cluster, network_serializer)
vm = find_vm_vmware(si, datacenter, asset)
devices = [
pyVmomi.vim.vm.device.VirtualDeviceSpec(
operation=pyVmomi.vim.vm.device.VirtualDeviceSpec.Operation.edit,
device=[device for device in vm.config.hardware.device if isinstance(device, pyVmomi.vim.vm.device.VirtualEthernetCard)][0],
),
]
devices[0].device.backing = backing
task = vm.ReconfigVM_Task(pyVmomi.vim.vm.ConfigSpec(deviceChange=devices))
wait_for_task_completion_vmware(task)
if task.info.state != pyVmomi.vim.TaskInfo.State.success:
raise Exception("Failed to reconfigure networking on VM %s" % asset['provision']['hostname'])
update = extract_asset_vmware(parent_asset, asset, cluster, vm)
update['log'] = 'Reconfigured network port'
pyVim.connect.Disconnect(si)
return asset_update(asset, update)
def reconfigure_network_port_ovirt(asset):
    """Point the first NIC of an oVirt VM at the network matching the
    asset's current state and record the change on the asset.
    While provisioning the VM sits on the install network of its parent's
    domain; afterwards it moves to the VLAN stored on the asset.
    """
    if asset['provisioning']:
        target_network = NetworkSerializer.get_by_domain_install(domain=asset['parent'])
    else:
        target_network = NetworkSerializer.get_by_asset_vlan(domain=asset['parent'], vlan=asset['provision']['vlan'])
    hypervisor = AssetSerializer.get(service_tag=asset['parent'])
    api, datacenter, cluster = connect_hypervisor_ovirt(hypervisor)
    vm = api.vms.get(id=vm_id_ovirt(asset))
    primary_nic = vm.nics.list()[0]
    wanted = ovirtsdk.xml.params.Network(id=api.networks.get(name=target_network['asset_name']).id)
    # Only touch the NIC when it is actually on the wrong network.
    if wanted.id != primary_nic.network.id:
        primary_nic.network = wanted
        primary_nic.update()
    update = extract_asset_ovirt(hypervisor, asset, api, vm)
    update['log'] = 'Reconfigured network port'
    api.disconnect()
    return asset_update(asset, update)
def reconfigure_network_port_ansible(switch_asset, url, asset):
    """Reconfigure the switch port(s) of ``asset`` by running the
    "reconfigure" Ansible playbook against the owning switch.
    :param switch_asset: asset record of the switch that owns the port
    :param url: parsed ``ansible://`` URL taken from the switch asset
    :param asset: the asset whose port(s) should be reconfigured
    """
    # netloc may be "user@host"; the playbook only needs the host part.
    switch = url.netloc.split("@")[-1]
    ansible_asset = asset_get(asset['service_tag'])
    if asset.get('provisioning', False) or asset.get('maintenance', False):
        # While provisioning or in maintenance the port goes on the install
        # network of the switch's domain.  Ensure both nested dicts exist
        # before writing into them: the previous if/elif chain raised
        # KeyError when 'provision' was missing entirely, because it created
        # the provision dict but never the vlan dict inside it.
        provision = ansible_asset.setdefault('provision', {})
        provision.setdefault('vlan', {})
        provision['vlan']['network'] = NetworkSerializer.get_by_domain_install(domain=switch_asset['switch']['domain'])
        provision.pop('vlans', None)
    elif 'provision' in ansible_asset:
        # Normal operation: resolve the configured VLAN(s) to full network
        # records so the playbook can program the port.
        if 'vlan' in ansible_asset['provision']:
            ansible_asset['provision']['vlan']['network'] = NetworkSerializer.get_by_asset_vlan(domain=switch_asset['switch']['domain'], vlan=ansible_asset['provision']['vlan'])
        for vlan in ansible_asset['provision'].get('vlans', []):
            vlan['network'] = NetworkSerializer.get_by_asset_vlan(domain=switch_asset['switch']['domain'], vlan=vlan)
    additional_vlans = []
    conn = get_connection()
    # Automatically add networks for hypervisors: if this asset hosts an
    # in-use VM cluster, trunk every network of that cluster onto its port.
    try:
        cluster_asset = r.table('assets'). \
            filter({'state': 'in-use', 'asset_type': 'vmcluster'}). \
            filter(lambda x: x['hypervisors'].contains(asset['service_tag'])). \
            nth(0).run(conn)
    except Exception:
        # Best effort: no matching cluster (nth(0) on an empty sequence)
        # simply means no extra VLANs.  Not a bare except, so signals like
        # KeyboardInterrupt/SystemExit still propagate.
        pass
    else:
        additional_vlans = list(r.table('networks').filter(
            lambda network: network['domains'].has_fields(
                cluster_asset['service_tag']
            )
        ).merge(
            lambda network: {'asset_domain': network['domains'][switch_asset['switch']['domain']]}
        ).run(conn))
    # Inter-switch links: carry every network shared between both ends.
    if asset['asset_type'] == 'network' and 'network' in asset and 'device' in asset['network']:
        remote_domain = switch_asset['switch']['domain']
        additional_vlans = list(r.table('networks').filter(
            lambda network: network['domains'].has_fields(
                remote_domain
            ) &
            network['domains'].has_fields(
                asset['network']['device']
            )
        ).merge(
            lambda network: {'asset_domain': network['domains'][remote_domain]}
        ).run(conn))
    elif asset['asset_type'] == 'network' and 'switch' in asset and 'domain' in asset['switch']:
        remote_domain = switch_asset['switch']['domain']
        additional_vlans = list(r.table('networks').filter(
            lambda network: network['domains'].has_fields(
                remote_domain
            ) &
            network['domains'].has_fields(
                asset['switch']['domain']
            )
        ).merge(
            lambda network: {'asset_domain': network['domains'][remote_domain]}
        ).run(conn))
    run_playbook(ansible_asset, url.path.lstrip("/") + "reconfigure.yml",
                 switch=switch, extra_vars={
                     'switch_asset': switch_asset,
                     'url': url,
                     'additional_vlans': additional_vlans,
                 })
@shared_task
def reconfigure_network_port(asset):
    """Dispatch a network-port reconfiguration for an asset.
    For physical assets (server/network/storage) every switch domain that
    the asset's NICs connect to is looked up and the switch's backend is
    invoked; only the "ansible" URL scheme is supported.  For VMs the
    hypervisor-specific implementation is called instead.
    Returns the (possibly updated) asset.
    """
    if asset['asset_type'] in ('server', 'network', 'storage'):
        # Distinct switch domains across all NICs' remote ends.
        domains = set(map(lambda x: x.value, jsonpath_rw_ext.parse('$.nics[*].remote.domain').find(asset)))
        for domain in domains:
            # .next(): first asset whose switch serves this domain
            # (Python 2 iterator protocol).
            switch_asset = AssetSerializer.filter(switch={'domain': domain}).next()
            url = urlparse.urlparse(switch_asset['url'])
            if url.scheme == 'ansible':
                reconfigure_network_port_ansible(switch_asset, url, asset)
            else:
                raise Exception("Unknown switch URL scheme for %s" % switch_asset['service_tag'])
    elif asset['asset_type'] == 'vm':
        if asset['asset_subtype'] == 'vmware':
            asset = reconfigure_network_port_vmware(asset)
        elif asset['asset_subtype'] == 'ovirt':
            asset = reconfigure_network_port_ovirt(asset)
        elif asset['asset_subtype'] == 'libvirt':
            asset = reconfigure_network_port_libvirt(asset)
    return asset
# Redacts credentials from ansible-playbook output before it is logged:
# matches "password=<value>" up to the next space.
ansible_password_hider = re.compile(r'password=([^ ]+)')
@shared_task
def run_playbook(asset, playbook, **kwargs):
    """Run an Ansible playbook for an asset and log its output with
    passwords redacted.
    :param asset: asset dict; supplies extra vars and the default host name
    :param playbook: playbook file name, relative to ANSIBLE_PLAYBOOK_DIR
    Keyword arguments:
        switch -- run against this host instead of the asset's host name
        template -- bytes written to a temp file, exposed as ``ev_template``
        extra_vars -- extra variables merged into the generated set
    :returns: the (unmodified) asset
    :raises Exception: if the playbook exits non-zero
    """
    extra_vars = {'asset': asset}
    if hasattr(settings, 'ANSIBLE_EXTRA_VARS'):
        extra_vars.update(settings.ANSIBLE_EXTRA_VARS)
    if 'extra_vars' in kwargs:
        extra_vars.update(kwargs['extra_vars'])
    hosts = [asset.get('provision', {}).get('hostname', asset['service_tag'] + '.' + settings.SOCRATES_OOB_DOMAIN)]
    # Run with the switch as host if one was passed to the function.
    if 'switch' in kwargs:
        hosts = [kwargs.pop('switch')]
    # Temp file for an optional template payload; created unconditionally
    # so the cleanup below can be unconditional too.
    template = NamedTemporaryFile(delete=False)
    if 'template' in kwargs:
        template.write(kwargs.pop('template'))
        extra_vars['ev_template'] = template.name
    template.close()
    extra_vars_temp = NamedTemporaryFile(delete=False, suffix='.json')
    json.dump(extra_vars, extra_vars_temp, cls=JSONEncoder)
    extra_vars_temp.close()
    # Remove the temp files in every exit path: the previous version
    # leaked both files when ANSIBLE_PLAYBOOK_DIR was unset and when the
    # playbook failed (the raise happened before os.remove).
    try:
        if settings.ANSIBLE_PLAYBOOK_DIR is None:
            return asset
        p = subprocess.Popen([
            "ansible-playbook",
            "-i", ",".join(hosts) + ",",
            "-e", "@" + extra_vars_temp.name,
            os.path.join(settings.ANSIBLE_PLAYBOOK_DIR, playbook)
        ], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        prefix = "%s: %s: " % (asset['service_tag'], playbook)
        for line in stdout.splitlines():
            if line:
                # Never log credentials passed on ansible command lines.
                line = ansible_password_hider.sub("password=HIDDEN", line)
                logger.info(prefix + line)
        for line in stderr.splitlines():
            if line:
                line = ansible_password_hider.sub("password=HIDDEN", line)
                logger.error(prefix + line)
        if p.returncode != 0:
            raise Exception("Playbook run of %s failed on %s with %d" % (playbook, asset['service_tag'], p.returncode))
    finally:
        os.remove(extra_vars_temp.name)
        os.remove(template.name)
    return asset
@shared_task
def run_playbook_with_output(*args, **kwargs):
    """Run a playbook and return the JSON it writes to a temp file.
    The temp file path is handed to the playbook as the
    ``socrates_output`` extra var; the playbook is expected to fill it
    with JSON, which is parsed and returned.
    """
    output = NamedTemporaryFile(delete=False)
    output.close()
    kwargs.setdefault('extra_vars', {})['socrates_output'] = output.name
    # Remove the temp file in every exit path: the previous version leaked
    # it whenever run_playbook raised.
    try:
        run_playbook(*args, **kwargs)
        with open(output.name) as f:
            data = json.load(f)
    finally:
        os.unlink(output.name)
    return data
def get_ipam(asset, forward_user=True, username=None):
    """Instantiate the configured IPAM backend.
    When no username is given and forwarding is enabled, the most recent
    history entry of the asset that carries a username is used, so IPAM
    changes are attributed to the person who last touched the asset.
    """
    if forward_user and username is None:
        entries = HistorySerializer.filter({'object': {'id': asset['id']}})
        newest_first = sorted(entries, key=lambda e: e['object']['version'], reverse=True)
        for entry in newest_first:
            if 'username' in entry and entry['username']:
                username = entry['username']
                break
    return import_string(settings.SOCRATES_IPAM)(settings, username)
@shared_task
def add_to_dns(asset, old_asset=None):
    """Allocate or refresh the DNS/IPAM records for an asset's provision.
    Handles four situations: a brand-new host, a move to a different
    network (CIDR change), a hostname change, and changes to the set of
    additional suffixed VLANs.  CNAME aliases are reconciled at the end.
    :param asset: the asset in its desired state
    :param old_asset: previous state of the asset, or None for a new host
    :returns: the (possibly updated) asset
    """
    ipam = get_ipam(asset)
    update = {'log': 'Provisioned to DNS', 'provision': copy.deepcopy(asset['provision'])}
    changed = False
    old_vlans = {}
    # Additional VLANs keyed by hostname suffix; entries without a suffix
    # get no DNS record of their own.
    now_vlans = dict(map(lambda x: (x['suffix'], x), filter(lambda x: 'suffix' in x, update['provision'].get('vlans', []))))
    # NOTE(review): the '' default turns `'cidr' in ...` into a substring
    # test (always False) when no vlan exists -- works, but {} would be clearer.
    if old_asset is not None and 'provision' in old_asset and 'cidr' in old_asset['provision'].get('vlan', ''):
        # Handle moves to a new network
        if asset['provision']['vlan']['cidr'] != old_asset['provision']['vlan']['cidr']:
            changed = True
            old_network = NetworkSerializer.get_by_asset_vlan(old_asset, old_asset['provision']['vlan'])
            if 'ip' in old_asset['provision']['vlan']:
                ipam.ip_address_remove(old_network, asset, old_asset['provision']['hostname'], old_asset['provision']['vlan']['ip'])
            new_network = NetworkSerializer.get_by_asset_vlan(asset, asset['provision']['vlan'])
            kwargs = {}
            # Keep the requested IP only if it fits inside the new network.
            if 'ip' in asset['provision']['vlan'] and ipv4_network_contains(asset['provision']['vlan']['cidr'], asset['provision']['vlan']['ip']):
                kwargs['ip'] = asset['provision']['vlan']['ip']
            if 'ports' in asset['provision']['vlan']:
                kwargs['mac'] = [nic['mac'] for nic in asset['nics'] if nic['name'] in asset['provision']['vlan']['ports']]
            else:
                kwargs['mac'] = [nic['mac'] for nic in asset['nics']]
            update['provision']['vlan']['ip'] = ipam.ip_address_allocate(new_network, asset, asset['provision']['hostname'], **kwargs)
        # Handle changing hostname
        elif asset['provision']['hostname'] != old_asset['provision']['hostname']:
            network = NetworkSerializer.get_by_asset_vlan(asset, asset['provision']['vlan'])
            ipam.ip_address_update(network, asset, asset['provision']['hostname'], asset['provision']['vlan']['ip'])
            # Rename the suffixed records of the additional VLANs too.
            # NOTE(review): loop variable is named `cidr` but now_vlans is
            # keyed by suffix (see above).
            for cidr, vlan in now_vlans.iteritems():
                network = NetworkSerializer.get_by_asset_vlan(asset, vlan)
                shortname, domain = asset['provision']['hostname'].split(".", 1)
                hostname = "%s%s.%s" % (shortname, vlan['suffix'], domain)
                ipam.ip_address_update(network, asset, hostname, vlan['ip'])
        # Remove old additional VLANs
        old_vlans = dict(map(lambda x: (x['suffix'], x), filter(lambda x: 'suffix' in x, old_asset['provision'].get('vlans', []))))
        for suffix in set(old_vlans.keys()) - set(now_vlans.keys()):
            vlan = old_vlans[suffix]
            network = NetworkSerializer.get_by_asset_vlan(old_asset, vlan)
            shortname, domain = old_asset['provision']['hostname'].split(".", 1)
            hostname = "%s%s.%s" % (shortname, vlan['suffix'], domain)
            ipam.ip_address_remove(network, asset, hostname, vlan['ip'])
    else:
        # Add new host
        changed = True
        network = NetworkSerializer.get_by_asset_vlan(asset, asset['provision']['vlan'])
        kwargs = {}
        if 'ip' in asset['provision']['vlan']:
            kwargs['ip'] = asset['provision']['vlan']['ip']
        if 'ports' in asset['provision']['vlan']:
            kwargs['mac'] = [nic['mac'] for nic in asset['nics'] if nic['name'] in asset['provision']['vlan']['ports']]
        else:
            kwargs['mac'] = [nic['mac'] for nic in asset['nics']]
        update['provision']['vlan']['ip'] = ipam.ip_address_allocate(network, asset, asset['provision']['hostname'], **kwargs)
    # Add new additional VLANs
    for suffix in set(now_vlans.keys()) - set(old_vlans.keys()):
        changed = True
        vlan = now_vlans[suffix]
        network = NetworkSerializer.get_by_asset_vlan(asset, vlan)
        shortname, domain = asset['provision']['hostname'].split(".", 1)
        hostname = "%s%s.%s" % (shortname, vlan['suffix'], domain)
        kwargs = {}
        if 'ip' in vlan:
            kwargs['ip'] = vlan['ip']
        if 'ports' in vlan and vlan.get('native', False):
            kwargs['mac'] = [nic['mac'] for nic in asset['nics'] if nic['name'] in vlan['ports']]
        vlan['ip'] = ipam.ip_address_allocate(network, asset, hostname, **kwargs)
    if changed:
        asset = asset_update(asset, update)
    # Reconcile CNAME aliases: add the new ones, drop the stale ones.
    now_aliases = set(asset['provision'].get('aliases', []))
    old_aliases = set()
    if old_asset is not None and 'provision' in old_asset:
        old_aliases = set(old_asset['provision'].get('aliases', []))
    for alias in now_aliases - old_aliases:
        ipam.cname_add(asset, alias, asset['provision']['hostname'])
    for alias in old_aliases - now_aliases:
        ipam.cname_remove(asset, alias)
    return asset
def remove_ip_from_asset(asset):
    """Strip allocated IP addresses from an asset's provisioning data and
    persist the result via asset_replace.
    Only acts when the primary VLAN actually carries an 'ip' key; in that
    case the 'ip' entry is dropped from the primary VLAN and from every
    additional VLAN, and the change is logged as a DNS removal.
    """
    new_asset = copy.deepcopy(asset)
    provision = asset.get('provision', {})
    if 'ip' in provision.get('vlan', {}):
        without_ip = lambda mapping: {k: v for k, v in mapping.iteritems() if k != "ip"}
        new_asset['provision']['vlan'] = without_ip(provision['vlan'])
        new_asset['provision']['vlans'] = [without_ip(vlan) for vlan in provision.get('vlans', [])]
        new_asset['log'] = 'Removed from DNS'
    return asset_replace(asset, new_asset)
@shared_task
def remove_from_dns(asset):
    """Delete every DNS/IPAM record belonging to an asset, then strip the
    allocated IPs from its provisioning data.
    """
    ipam = get_ipam(asset)
    provision = asset['provision']
    # Primary VLAN first, then any suffixed additional VLANs.
    for vlan in [provision.get('vlan', {})] + provision.get('vlans', []):
        network = NetworkSerializer.get_by_asset_vlan(asset, vlan) if 'cidr' in vlan else None
        shortname, domain = provision['hostname'].split(".", 1)
        fqdn = "%s%s.%s" % (shortname, vlan.get('suffix', ""), domain)
        ipam.ip_address_remove(network, asset, fqdn, vlan.get('ip', None))
    for alias in provision.get('aliases', []):
        ipam.cname_remove(asset, alias)
    return remove_ip_from_asset(asset)
def remove_vm_service_tags(asset, present_service_tags):
    """Mark VMs that disappeared from a hypervisor as deleted.
    Compares the non-deleted VMs recorded under the hypervisor ``asset``
    against ``present_service_tags`` (tags actually seen on the
    hypervisor) and flags the missing ones as deleted.
    """
    conn = get_connection()
    rows = r.table('assets'). \
        get_all(asset['service_tag'], index='parent'). \
        filter(lambda row: row['state'] != "deleted"). \
        pluck("service_tag"). \
        run(conn)
    known_vms = set(row['service_tag'] for row in rows)
    for service_tag in known_vms - present_service_tags:
        vm_asset = AssetSerializer.get(service_tag=service_tag)
        asset_update(vm_asset, {
            'state': 'deleted',
            'provisioning': False,
            'log': 'Removed VM',
        })
def connect_hypervisor_vmware(parent_asset):
    """Connect to the vCenter referenced by a hypervisor asset's URL.
    The URL path is expected as ``/<datacenter>/<cluster>``.
    Returns (service_instance, content, datacenter, cluster).
    """
    url = urlparse.urlparse(parent_asset['url'])
    # NOTE(review): PROTOCOL_TLSv1 pins the connection to TLS 1.0, which is
    # deprecated and rejected by modern vCenter builds -- consider
    # ssl.PROTOCOL_TLS / create_default_context(); confirm against the
    # vSphere versions in use before changing.
    sslcontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    if hasattr(settings, 'SOCRATES_VMWARE_CERT_VERIFY') and settings.SOCRATES_VMWARE_CERT_VERIFY:
        sslcontext.verify_mode = ssl.CERT_REQUIRED
    else:
        # Unverified TLS: encrypted but the peer is not authenticated.
        sslcontext.verify_mode = ssl.CERT_NONE
    si = pyVim.connect.SmartConnect(host=url.netloc, user=settings.SOCRATES_VMWARE_USERNAME, pwd=settings.SOCRATES_VMWARE_PASSWORD, sslContext=sslcontext)
    vcenter = si.RetrieveContent()
    _, datacenter_name, cluster_name = url.path.split("/")
    # Python 2 filter() returns a list; [0] raises IndexError when the
    # named datacenter/cluster does not exist.
    datacenter = filter(lambda x: x.name == datacenter_name, vcenter.rootFolder.childEntity)[0]
    cluster = filter(lambda x: x.name == cluster_name, datacenter.hostFolder.childEntity)[0]
    return si, vcenter, datacenter, cluster
def wait_for_task_completion_vmware(tasks):
    """Block until every given vSphere task has left the queued/running
    states, polling every five seconds.
    Accepts either a single task or a list of tasks; always returns a list.
    """
    if not isinstance(tasks, list):
        tasks = [tasks]
    pending_states = (pyVmomi.vim.TaskInfo.State.queued,
                      pyVmomi.vim.TaskInfo.State.running)
    while any(task.info.state in pending_states for task in tasks):
        time.sleep(5)
    return tasks
def find_network_name_by_backing_vmware(cluster, backing):
    """Resolve a VM NIC backing to the name of the cluster network it
    points at.
    Distributed vSwitch port-group backings are matched by switch UUID and
    portgroup key; standard network backings are matched by network name.
    Returns None when no cluster network matches.
    """
    for possible_network in cluster.network:
        # Distributed vSwitch case: backing references the portgroup by key.
        if (isinstance(backing, pyVmomi.vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo) and
                isinstance(possible_network, pyVmomi.vim.dvs.DistributedVirtualPortgroup)):
            if (possible_network.config.distributedVirtualSwitch.uuid == backing.port.switchUuid and
                    possible_network.key == backing.port.portgroupKey):
                return possible_network.name
        # Standard vSwitch case: backing references the network directly.
        elif (isinstance(backing, pyVmomi.vim.vm.device.VirtualEthernetCard.NetworkBackingInfo) and
                isinstance(possible_network, pyVmomi.vim.Network)):
            if possible_network.name == backing.network.name:
                return possible_network.name
    return None
def find_network_by_vlan_vmware(cluster, network_serializer):
pg_lookup = {}
for pg in cluster.host[0].config.network.portgroup:
pg_lookup[pg.spec.name] = pg.spec.vlanId
for possible_network in cluster.network:
if isinstance(possible_network, pyVmomi.vim.dvs.DistributedVirtualPortgroup):
if possible_network.config.defaultPortConfig.vlan.vlanId == network_serializer['asset_domain']['vlan_id']:
return possible_network, pyVmomi.vim.VirtualEthernetCardDistributedVirtualPortBackingInfo(
port=pyVmomi.vim.DistributedVirtualSwitchPortConnection(
portgroupKey=possible_network.key,
switchUuid=possible_network.config.distributedVirtualSwitch.uuid,
)
)
elif isinstance(possible_network, pyVmomi.vim.Network):
if possible_network.name not in pg_lookup.keys():
continue
if pg_lookup[possible_network.name] == network_serializer['asset_domain']['vlan_id']:
return possible_network, pyVmomi.vim.VirtualEthernetCardNetworkBackingInfo(
deviceName=possible_network.name,
network=possible_network,
)
else:
raise Exception("Network %s couldn't | |
is listening
self.isActive = False #Is this the current patch?
# Has this channel been disabled?
# Usually should only happen if another channel is
# listening for a new trigger/fader
self.isDisabled = False
self.errmsg = None #Returns message if something goes wrong
# Various Tk frames to get the UI element placements right.
self.container = ttk.Frame(container)
self.bottomside = ttk.Frame(self.container)
self.rightside = ttk.Frame(self.bottomside)
self.faderbuttons = ttk.Frame(self.rightside)
self.triggerbuttons = ttk.Frame(self.rightside)
self.type = type
# Channel label
if self.type == COL_NORMAL:
self.gui_channellabel = ttk.Label(self.rightside,
text='CH ' + str(channel),
justify=LEFT)
else:
self.gui_channellabel = ttk.Label(self.rightside,
text='PAD', justify=LEFT)
self.gui_channellabel.grid(column=0, row=0, sticky=(W))
# Status "LED" indicator.
# Grey=Not Configured/Disabled
# Red=Error
# Dark Green=Inactive
# Light Green=Active
# Yellow=Listening
self.gui_led = tkinter.Label(self.rightside, width=2, bg='grey')
self.gui_led.grid(column=1, row=0, sticky=(E))
# Fader label (Looks like: "F: CC128")
self.gui_faderlabel = ttk.Label(self.rightside, text='F:')
self.gui_faderlabel.grid(column=0, row=1, sticky=(E))
self.gui_fadervalue = ttk.Label(self.rightside, text='N/A', width=6)
self.gui_fadervalue.grid(column=1, row=1, sticky=(W))
# Fader listen button
# This is kinda crazy here. We want to handle these buttons
# with the RowElement. This makes saving/disabling/re-enabling
# all the columns at once much easier This gives our class
# access to the main loop to hit the upper callback function
# for all button events. Lambda is used here to pass args. to
# our callback. Sorry these lines break PEP-8 line length rules.
self.gui_faderlisten = ttk.Button(self.faderbuttons,
text='L', width=1,
command=lambda:callback('F', channel, self.gui_faderlisten))
# This button clears any fader bindings
self.gui_faderclear = ttk.Button(self.faderbuttons, text='X',
width=1,
command=self.deleteFader)
self.gui_faderlisten.grid(column=0, row=0)
self.gui_faderclear.grid(column=1, row=0)
self.faderbuttons.grid(column=0, row=2, columnspan=2)
self.gui_padchannel = ttk.Entry(self.rightside, width=3,
validate='key',
validatecommand=updatePadChannel)
# Trigger listen buttons / labels
# If this channel isn't a pad channel...
if self.type == COL_NORMAL:
# Trigger Label (Looks like: "T: CC48")
self.gui_triggerlabel = ttk.Label(self.rightside, text='T:')
self.gui_triggerlabel.grid(column=0, row=3, sticky=(E))
self.gui_triggervalue = ttk.Label(self.rightside,
text='N/A', width=6)
self.gui_triggervalue.grid(column=1, row=3, sticky=(W))
self.gui_triggerlisten = ttk.Button(self.triggerbuttons,
text='L', width=1,
command=lambda:callback('T', channel, self.gui_triggerlisten)) # Haha, passing the button itself as an argument! How meta!
self.gui_triggerclear = ttk.Button(self.triggerbuttons,
text='X', width=1,
command=self.deleteTrigger)
self.gui_triggerlisten.grid(column=0, row=0)
self.gui_triggerclear.grid(column=1, row=0)
self.triggerbuttons.grid(column=0, row=4, columnspan=2)
# If it's a pad channel, we don't need to trigger it, so those
# UI elements aren't displayed.
else:
# Literally just the word "Channel" above the entry box
self.gui_channelBoxHint = ttk.Label(self.rightside,
text='Channel',
justify=CENTER)
self.gui_channelBoxHint.grid(column=0, row=3, columnspan=2)
self.gui_padchannel.grid(column=0, row=4, columnspan=2)
self.rightside.grid(column=1, row=1, sticky=(E))
# Separator between columns
ttk.Separator(self.bottomside, orient=VERTICAL).grid(column=0,
row=1,
sticky=(N,S),
padx=LAYOUT_PAD_X)
self.bottomside.pack()
self.container.grid(row=0, column=(channel-1))
# This is a list of all the elements that could be hidden when
# SwitchBox goes into "minimized" mode.
self.minimizeableElements = [self.faderbuttons,
self.triggerbuttons,
self.gui_faderlabel,
self.gui_fadervalue]
if self.type == COL_NORMAL:
self.minimizeableElements.extend([self.gui_triggerlabel,
self.gui_triggervalue])
else:
self.minimizeableElements.extend([self.gui_padchannel,
self.gui_channelBoxHint])
# Give an update before we finish initializing.
self.checkStatus()
"""Updates the status and reports back any errors.
Basically makes the blinkenlights show the right colors, and
updates the labels
"""
    def checkStatus(self):
        self.errmsg = None
        # Reset the listen-button labels whenever we're not capturing MIDI.
        if not self.listening:
            self.gui_faderlisten['text'] = 'L'
            if self.type == COL_NORMAL:
                self.gui_triggerlisten['text'] = 'L'
        # LED color selection.  The order of these branches matters:
        # listening wins, then the "unconfigured"/disabled grey cases,
        # then the error cases, then active/inactive.
        if self.listening:
            self.gui_led['bg'] = YELLOW
            self.errmsg = 'is listening'
            logging.info('Is Listening')
        # If a channel has no fader and no trigger, that's fine. It
        # just won't do anything.
        elif self.fader is None and self.trigger is None:
            self.gui_led['bg'] = GRAY
        elif self.isDisabled:
            self.gui_led['bg'] = GRAY
        # An entirely unconfigured pad channel is also just idle.
        elif (self.type == COL_PAD and
              self.padchannel is None and
              self.fader is None):
            self.gui_led['bg'] = GRAY
        # A channel has to have a trigger. If there's also a fader
        # paired, it can't be re-routed, which is a problem.
        elif self.trigger is None and self.type == COL_NORMAL:
            self.errmsg = 'has a Fader, but no Trigger'
            self.gui_led['bg'] = RED
        # A pad channel only works when it has a channel number assigned
        # Otherwise, it wouldn't make sense to have a fader paired
        # since it would never be re-routed.
        elif (self.type == COL_PAD and
              self.padchannel is None and
              self.fader is not None):
            self.errmsg = 'has a Fader, but no Pad Channel.'
            self.gui_led['bg'] = RED
        # Because pad channels are always active.
        elif self.type == COL_PAD:
            self.gui_led['bg'] = LIGHTGREEN
        elif self.isActive:
            self.gui_led['bg'] = LIGHTGREEN
        else:
            self.gui_led['bg'] = GREEN
        # This grays out widgets if current channel is disabled.
        if self.isDisabled:
            self.gui_faderclear['state'] = DISABLED
            self.gui_faderlisten['state'] = DISABLED
            if self.type == COL_NORMAL:
                self.gui_triggerclear['state'] = DISABLED
                self.gui_triggerlisten['state'] = DISABLED
            else:
                self.gui_padchannel['state'] = DISABLED
        else:
            self.gui_faderclear['state'] = NORMAL
            self.gui_faderlisten['state'] = NORMAL
            if self.type == COL_NORMAL:
                self.gui_triggerclear['state'] = NORMAL
                self.gui_triggerlisten['state'] = NORMAL
            else:
                self.gui_padchannel['state'] = NORMAL
        # Finally refresh the binding labels from the stored CC numbers.
        if self.type == COL_NORMAL:
            if self.trigger is None:
                self.gui_triggervalue['text'] = 'N/A'
            else:
                self.gui_triggervalue['text'] = 'CC' + str(self.trigger)
        if self.fader is None:
            self.gui_fadervalue['text'] = 'N/A'
        else:
            self.gui_fadervalue['text'] = 'CC' + str(self.fader)
""" Button handler for when the "delete" button is pressed on a
trigger binding.
"""
    def deleteTrigger(self):
        # Drop the trigger binding and cancel any listen in progress.
        self.trigger = None
        self.listening = False
        # Refresh the LED/labels before notifying the owner.
        self.checkStatus()
        # 'T' = trigger event.  NOTE(review): assumes self.callback and
        # self.channel were stored by __init__ (set outside this view) --
        # confirm.
        self.callback('T', self.channel, self.gui_triggerclear)
"""Same thing but for faders.
"""
    def deleteFader(self):
        # Drop the fader binding and cancel any listen in progress.
        self.fader = None
        self.listening = False
        # Refresh the LED/labels before notifying the owner.
        self.checkStatus()
        # 'F' = fader event.  NOTE(review): assumes self.callback and
        # self.channel were stored by __init__ (set outside this view) --
        # confirm.
        self.callback('F', self.channel, self.gui_faderclear)
"""Puts this ColumnElement into a "minimized" state.
This makes the column more compact by only showing essential info.
"""
def minimize(self):
for elements in self.minimizeableElements:
elements.grid_remove()
"""The opposite of the above function.
"""
def maximize(self):
for elements in self.minimizeableElements:
elements.grid()
"""The App class is basically a Tkinter frame that runs the whole show.
It contains all the RowElements and handles "top-level" operations,
like scanning MIDI ports, keyboard shortcuts, and XML read/write
operations.
"""
class App(ttk.Frame):
"""Initialize the App
Arguments:
master -- A Tk Frame that holds the App. Most likely a top-level
window.
"""
    def __init__(self, master):
        ttk.Frame.__init__(self, master)
        # Build the menu bar (Help / About, platform-dependent placement).
        self.menu = Menu(self.master)
        helpmenu = Menu(self.menu)
        helpmenu.add_command(label='SwitchBox Help',
                             command=self.help, accelerator='F1')
        # NOTE(review): the key binding passes an event argument while the
        # menu command calls self.help() with none -- confirm self.help
        # accepts an optional event parameter (defined outside this view).
        self.bind_all('<F1>', self.help)
        # If on a Mac, make the "About" menu show up in the menu with
        # the application's name in it (Apple menu). Otherwise, make it
        # show up in the "Help" menu.
        if isaMac:
            applemenu = Menu(self.menu, name='apple')
            applemenu.add_command(label='About SwitchBox',
                                  command=self.on_about_action)
            self.menu.add_cascade(menu=applemenu)
        else:
            helpmenu.add_command(label='About SwitchBox',
                                 command=self.on_about_action)
        self.menu.add_cascade(menu=helpmenu, label='Help')
        master.config(menu=self.menu)
        # Sets up keyboard shortcuts (Cmd-W on macOS, Ctrl-Q/Ctrl-W elsewhere)
        if isaMac:
            master.bind('<Mod1-w>', self.onApplicationClose)
        else:
            master.bind('<Control-q>', self.onApplicationClose)
            master.bind('<Control-w>', self.onApplicationClose)
        # Set up window close event handler
        master.protocol('WM_DELETE_WINDOW', self.onApplicationClose)
        self.rowlist = []
        # Holds the RowElements and "top bar"
        self.windowUpper = ttk.Frame(self.master)
        # Holds the horizontal separator and +/- buttons
        self.windowLower = ttk.Frame(self.master)
        # Holds maximize/minimize button and status text
        self.topbar = ttk.Frame(self.windowUpper)
        # Holds +/- buttons
        self.bottombar = ttk.Frame(self.windowLower)
        # Maximize/Minimize button
        self.isExpanded = True
        self.expand = ttk.Button(self.topbar, text='Minimize',
                                 command=self.on_expand_pressed)
        self.expand.grid(column=0, row=0, padx=LAYOUT_PAD_X,
                         pady=LAYOUT_PAD_Y)
        # Status text on topbar
        self.gui_errmsg = ttk.Label(self.topbar)
        self.gui_errmsg.grid(column=1, row=0, padx=LAYOUT_PAD_X,
                             pady=LAYOUT_PAD_Y)
        self.topbar.grid(column=0, row=0, sticky=(W,E))
        # Buttons for adding/removing a slot
        self.gui_add = ttk.Button(self.bottombar, text='+', width=1,
                                  command=self.addRow)
        self.gui_sub = ttk.Button(self.bottombar, text='-', width=1,
                                  command=self.delRow)
        self.gui_add.grid(column=1, row=0, padx=LAYOUT_PAD_X,
                          pady=LAYOUT_PAD_Y)
        self.gui_sub.grid(column=2, row=0, pady=LAYOUT_PAD_Y)
        # Bottom separator to divide RowElements from +/- buttons
        ttk.Separator(self.windowLower,
                      orient=HORIZONTAL).pack(fill='x',
                                              padx=LAYOUT_PAD_X)
        self.bottombar.pack(fill='x')
        # Populate self.rowlist (and self.myTree) from the saved XML file.
        self.readState()
        # Prevents you from deleting rows when there's only one left.
        if len(self.rowlist) <= 1:
            self.gui_sub['state'] = DISABLED
        self.windowUpper.pack()
        self.windowLower.pack(fill='x')
        # Cached XML root of the tree loaded by readState().
        self.myXML = self.myTree.getroot()
        logging.info('About to enter loop!')
        # Kick off the periodic update loop on the Tk event queue.
        master.after(INTERVAL_CHECKNEW_MS, self.onUpdateTick)
"""Load savefile from XML file
"""
def readState(self):
# Try to open file; Creates a brand new one if it can't
try:
self.myTree = etree.ElementTree(file=PATH_CURRENT_XML)
logging.info('Successfully read XML')
except:
logging.warning('Cannot read savefile; Creating new file')
myRoot = etree.Element('swr')
myRoot.set('title', 'Auto-Generated Save File')
etree.SubElement(myRoot, 'row')
self.myTree = etree.ElementTree(element=myRoot)
self.myTree.write(PATH_CURRENT_XML, pretty_print=True)
# This is where the XML loading magic happens
for rows in self.myTree.getroot():
logging.info('Reading row from XML...')
# Pass the XML element to the | |
#!/usr/bin/env python
"""create.py - create the world.
"""
from __future__ import print_function
import argparse
from builtins import input
import codecs
from munch import DefaultMunch
import os
import sys
import yaml
from utils import io, templater
# Environment variable used to hold the OKD admin and developer passwords.
OKD_ADMIN_PASSWORD_ENV = 'TF_VAR_okd_<PASSWORD>'
OKD_DEVELOPER_PASSWORD_ENV = 'TF_VAR_okd_developer_password'
OKD_KEYPAIR_NAME_ENV = 'TF_VAR_keypair_name'
OKD_CERTBOT_EMAIL_ENV = 'TF_VAR_master_certbot_email'
OKD_DEPLOYMENTS_DIRECTORY = io.get_deployments_directory()
OKD_DEFAULT_CLUSTER_SSH_USER = 'centos'
# The list of supported deployment configuration file versions.
# The config file contains a version number, we only handle
# those that are in this list.
#
# As soon as backwards compatibility is lost (a version becomes
# unsupported), that version must be removed from the list and replaced
# by a new one.
SUPPORTED_DEPLOYMENT_VERSIONS = [1]
def _main(cli_args, chosen_deployment_name):
"""Deployment entry point.
:param cli_args: The command-line arguments
:type cli_args: ``list``
:param chosen_deployment_name: The deployment file
:type chosen_deployment_name: ``str``
:returns: True on success
:rtype: ``bool``
"""
config_file = os.path.join(OKD_DEPLOYMENTS_DIRECTORY,
chosen_deployment_name,
io.get_deployment_config_filename(
chosen_deployment_name))
if not os.path.isfile(config_file):
print('Config file does not exist ({})'.
format(chosen_deployment_name))
return False
with codecs.open(config_file, 'r', 'utf8') as stream:
deployment = DefaultMunch.fromDict(yaml.load(stream))
# First check:
# is the version present
# and do we support it?
if 'version' not in deployment:
print('The deployment configuration has no version.')
return False
if deployment.version not in SUPPORTED_DEPLOYMENT_VERSIONS:
supported_versions = str(SUPPORTED_DEPLOYMENT_VERSIONS[0])
for version in SUPPORTED_DEPLOYMENT_VERSIONS[1:]:
supported_versions += ', {}'.format(version)
print('The deployment configuration file version ({})'
' is not supported.'.format(deployment.version))
print('Supported versions are: {}'.format(supported_versions))
return False
# There must be an okd/inventories directory
inventory_dir = deployment.okd.inventory_dir
if not os.path.isdir('okd/inventories/{}'.format(inventory_dir)):
print('Missing "okd/inventories" directory')
print('Expected to find the inventory directory "{}"'
' but it was not there.'.format(inventory_dir))
print('Every deployment must have an "inventories" directory')
return False
# If the cluster SSH user is not defined,
# insert it.
if 'ssh_user' not in deployment.cluster:
print('Setting default SSH user "{}"'.
format(OKD_DEFAULT_CLUSTER_SSH_USER))
deployment.cluster.ssh_user = OKD_DEFAULT_CLUSTER_SSH_USER
# -----
# Hello
# -----
io.banner(deployment.name, full_heading=True, quiet=False)
if not cli_args.auto_acknowledge and not cli_args.just_plan:
# Display the orchestration description
# (f there is one)
if deployment.description:
io.description(deployment.description)
confirmation_word = io.get_confirmation_word()
target = 'CREATE the Cluster' \
if cli_args.cluster else 'INSTALL OpenShift/OKD'
confirmation = input('Enter "{}" to {}: '.
format(confirmation_word, target))
if confirmation != confirmation_word:
print('Phew! That was close!')
return True
# Some key information...
okd_admin_password = os.environ.get(OKD_ADMIN_PASSWORD_ENV)
if not okd_admin_password:
io.error('You must define {}'.format(OKD_ADMIN_PASSWORD_ENV))
okd_api_hostname = deployment.cluster.public_hostname
okd_api_port = deployment.cluster.api_port
# -------
# Ansible (A specific version)
# -------
# Install the ansible version name in the deployment file
cmd = 'pip install --upgrade pip setuptools --user'
rv, _ = io.run(cmd, '.', cli_args.quiet)
if not rv:
return False
cmd = 'pip install ansible=={} --user'. \
format(deployment.okd.ansible_version)
rv, _ = io.run(cmd, '.', cli_args.quiet)
if not rv:
return False
t_dir = deployment.cluster.terraform_dir
if cli_args.cluster:
# ------
# Render (jinja2 files)
# ------
# Translate content of Jinja2 template files
# using the deployment configuration's YAML file content.
if not cli_args.skip_rendering:
cmd = './render.py {} --ssh-user {}'.\
format(chosen_deployment_name,
deployment.cluster.ssh_user)
cwd = '.'
rv, _ = io.run(cmd, cwd, cli_args.quiet)
if not rv:
return False
# If the deployment file has a 'my_machines' section
# then we assume the user's provided their own cluster
# and the Terraform step is not needed.
if 'my_machines' in deployment:
# -----------------
# Manual Templating
# -----------------
# The user has provided their own cluster
# and defined it in the my_machines section
# of their deployment configuration.
#
# Here we process the rendered inventory files
# just as Terraform would do.
io.banner('Templating ...')
print('inventory')
if not templater.render(deployment):
return False
print('bastion/inventory')
file_name = 'ansible/bastion/inventory.yaml.tpl'
if not templater.\
render(deployment,
template_file_name=file_name):
return False
print('post-okd/inventory')
file_name = 'ansible/post-okd/inventory.yaml.tpl'
if not templater. \
render(deployment,
template_file_name=file_name,
admin_password=okd_admin_password):
return False
else:
# ---------
# Terraform
# ---------
# Create compute instances for the cluster.
cmd = 'terraform init'
cwd = 'terraform/{}'.format(t_dir)
rv, _ = io.run(cmd, cwd, cli_args.quiet)
if not rv:
return False
# Plan or Apply?
action = 'plan' if cli_args.just_plan else 'apply -auto-approve'
cmd = 'terraform {}' \
' -state=.terraform.{}'.format(action,
chosen_deployment_name)
cwd = 'terraform/{}'.format(t_dir)
rv, _ = io.run(cmd, cwd, cli_args.quiet)
if not rv:
return False
if cli_args.just_plan:
# Just plan means just that...
return True
# -------
# Ansible
# -------
# Run the bastion site file.
if not cli_args.skip_pre_okd:
extra_env = ''
if deployment.okd.certificates:
if deployment.okd.certificates.generate_api_cert:
certbot_email = os.environ.get(OKD_CERTBOT_EMAIL_ENV)
if not certbot_email:
io.error('You must define {}'.
format(OKD_CERTBOT_EMAIL_ENV))
return False
extra_env += ' -e master_cert_email="{}"'.\
format(certbot_email)
extra_env += ' -e public_hostname="{}"'. \
format(deployment.cluster.public_hostname)
elif (deployment.okd.certificates.wildcard_cert or
deployment.okd.certificates.master_api_cert):
# User-supplied certificates -
# expect a vault password file
# in the deployment directory
extra_env += ' --vault-password-file' \
' {}/{}/vault-pass.txt'.\
format(OKD_DEPLOYMENTS_DIRECTORY,
chosen_deployment_name)
if OKD_DEPLOYMENTS_DIRECTORY != 'deployments':
extra_env += ' -e deployments_directory="{}"'.\
format(OKD_DEPLOYMENTS_DIRECTORY)
else:
extra_env += ' -e deployments_directory="../../deployments"'
keypair_name = os.environ.get(OKD_KEYPAIR_NAME_ENV)
if not keypair_name:
io.error('You must define {}'.format(OKD_KEYPAIR_NAME_ENV))
return False
cmd = 'ansible-playbook site.yaml' \
' {}' \
' -e keypair_name={}' \
' -e inventory_dir={}' \
' -e cluster_ssh_user={}' \
' -e deployment_name={}'.format(extra_env,
keypair_name,
deployment.okd.inventory_dir,
deployment.cluster.ssh_user,
chosen_deployment_name)
cwd = 'ansible/bastion'
rv, _ = io.run(cmd, cwd, cli_args.quiet)
if not rv:
return False
# Now expose the Bastion's IP...
if 'my_machines' in deployment:
# Simulate the final step in Terraform,
# i.e. exposing the bastion.
# Doing this simplifies things for the user
# i.e. "it looks and feels the same"
io.banner('terraform output ...')
print('bastion_ip = {}'.format(deployment.my_machines.bastion))
else:
cmd = 'terraform output' \
' -state=.terraform.{}'.format(chosen_deployment_name)
cwd = 'terraform/{}'.format(t_dir)
rv, _ = io.run(cmd, cwd, cli_args.quiet)
if not rv:
return False
# Leave.
return True
# If we get here we're installing OpenShift/OKD
# (on a cluster that is assumed to exist)...
# -----
# Clone (OpenShift Ansible Repo)
# -----
# ...and checkout the revision defined by the deployment tag.
if not cli_args.skip_okd:
# If the expected clone directory does not exist
# then clone OpenShift Ansible.
if not os.path.exists('openshift-ansible'):
cmd = 'git clone' \
' https://github.com/openshift/openshift-ansible.git' \
' --no-checkout'
cwd = '.'
rv, _ = io.run(cmd, cwd, cli_args.quiet)
if not rv:
return False
# Checkout the required OpenShift Ansible TAG
cmd = 'git checkout tags/{}'. \
format(deployment.okd.ansible_tag)
cwd = 'openshift-ansible'
rv, _ = io.run(cmd, cwd, cli_args.quiet)
if not rv:
return False
# -------
# Ansible (Pre-OKD)
# -------
if not cli_args.skip_pre_okd:
extra_env = ''
if deployment.okd.certificates and\
deployment.okd.certificates.generate_api_cert:
extra_env += ' -e public_hostname={}'. \
format(deployment.cluster.public_hostname)
cmd = 'ansible-playbook site.yaml' \
' {}' \
' -i ../../okd/inventories/{}/inventory.yaml'.\
format(extra_env, inventory_dir)
cwd = 'ansible/pre-okd'
rv, _ = io.run(cmd, cwd, cli_args.quiet)
if not rv:
return False
# -------
# Ansible (OKD)
# -------
# Deploy using the playbooks named in the deployment
# (from the checked-out version).
if not cli_args.skip_okd:
for play in deployment.okd.play:
cmd = 'ansible-playbook ../openshift-ansible/playbooks/{}.yml' \
' -i inventories/{}/inventory.yaml'.\
format(play, inventory_dir)
cwd = 'okd'
rv, _ = io.run(cmd, cwd, cli_args.quiet)
if not rv:
return False
# -------
# Ansible (Post-OKD)
# -------
if not cli_args.skip_post_okd:
# Always run the 'site' playbook.
# This adds the OKD admin and (optional) developer user accounts
# and other common things like template deployment.
#
# The following variables are made available to all the playbooks: -
#
# - okd_api_hostname
# - okd_admin
# - okd_admin_password
extra_env = ''
dev_password = os.environ.get(OKD_DEVELOPER_PASSWORD_ENV)
if dev_password:
extra_env += ' -e okd_developer_password={}'.format(dev_password)
# The template namespace
# (optionally defined in the configuration)
if deployment.okd.template and deployment.okd.template.namespace:
template_namespace = deployment.okd.template.namespace
extra_env += ' -e template_namespace={}'.format(template_namespace)
cmd = 'ansible-playbook site.yaml' \
'{}' \
' -e okd_api_hostname=https://{}:{}' \
' -e okd_admin=admin' \
' -e okd_admin_password={}' \
' -e okd_deployment={}'. \
format(extra_env,
okd_api_hostname, okd_api_port,
okd_admin_password, chosen_deployment_name)
cwd = 'ansible/post-okd'
rv, _ = io.run(cmd, cwd, cli_args.quiet)
if not rv:
return False
# Now iterate through the plays listed in the cluster's
# 'post_okd' list...
if deployment.okd.post_okd:
for play in deployment.okd.post_okd:
# Any user-defined 'extra' variables?
play_vars = ''
if play.vars:
for var in play.vars:
play_vars += '-e {} '.format(var)
play_vars = play_vars[:-1]
# Run the user playbook...
cmd = 'ansible-playbook playbooks/{}/deploy.yaml' \
' -e okd_api_hostname=https://{}:{}' \
' -e okd_admin=admin' \
' -e okd_admin_password={}' \
' -e okd_deployment={}' \
' {}'.\
format(play.play,
okd_api_hostname, okd_api_port,
| |
request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError("If the `request` argument is set, then none of "
"the individual field arguments should be set.")
request = analytics_admin.DeleteDisplayVideo360AdvertiserLinkRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_display_video360_advertiser_link,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
async def update_display_video360_advertiser_link(self,
        request: analytics_admin.UpdateDisplayVideo360AdvertiserLinkRequest = None,
        *,
        display_video_360_advertiser_link: resources.DisplayVideo360AdvertiserLink = None,
        update_mask: field_mask_pb2.FieldMask = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> resources.DisplayVideo360AdvertiserLink:
    r"""Updates a DisplayVideo360AdvertiserLink on a property.

    Args:
        request (:class:`google.analytics.admin_v1alpha.types.UpdateDisplayVideo360AdvertiserLinkRequest`):
            The request object. Request message for
            UpdateDisplayVideo360AdvertiserLink RPC.
        display_video_360_advertiser_link (:class:`google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLink`):
            The DisplayVideo360AdvertiserLink to update.
            This corresponds to the ``display_video_360_advertiser_link`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
            Required. The list of fields to be updated. Omitted
            fields will not be updated. To replace the entire
            entity, use one path with the string "*" to match all
            fields.
            This corresponds to the ``update_mask`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLink:
            A link between a GA4 property and a
            Display & Video 360 advertiser.
    """
    # The flattened keyword arguments are an alternative to supplying a
    # pre-built request object; using both at once is ambiguous.
    flattened = (display_video_360_advertiser_link, update_mask)
    if request is not None and any(flattened):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Coerce whatever we were given into a proper protobuf request.
    request = analytics_admin.UpdateDisplayVideo360AdvertiserLinkRequest(request)

    # Fold the flattened arguments (if any) into the request object.
    if display_video_360_advertiser_link is not None:
        request.display_video_360_advertiser_link = display_video_360_advertiser_link
    if update_mask is not None:
        request.update_mask = update_mask

    # Bind the transport method together with retry/timeout defaults
    # and friendly error translation.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.update_display_video360_advertiser_link,
        default_timeout=None,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # Routing: the target resource name travels in the metadata header.
    routing_fields = (
        ("display_video_360_advertiser_link.name",
         request.display_video_360_advertiser_link.name),
    )
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(routing_fields),
    )

    # Issue the RPC and hand back the updated resource.
    return await rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
async def get_display_video360_advertiser_link_proposal(self,
        request: analytics_admin.GetDisplayVideo360AdvertiserLinkProposalRequest = None,
        *,
        name: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> resources.DisplayVideo360AdvertiserLinkProposal:
    r"""Lookup for a single DisplayVideo360AdvertiserLinkProposal.

    Args:
        request (:class:`google.analytics.admin_v1alpha.types.GetDisplayVideo360AdvertiserLinkProposalRequest`):
            The request object. Request message for
            GetDisplayVideo360AdvertiserLinkProposal RPC.
        name (:class:`str`):
            Required. The name of the
            DisplayVideo360AdvertiserLinkProposal to
            get. Example format:
            properties/1234/displayVideo360AdvertiserLinkProposals/5678
            This corresponds to the ``name`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLinkProposal:
            A proposal for a link between an GA4
            property and a Display & Video 360
            advertiser.
            A proposal is converted to a
            DisplayVideo360AdvertiserLink once
            approved. Google Analytics admins
            approve inbound proposals while Display
            & Video 360 admins approve outbound
            proposals.
    """
    # A fully-formed request object and the flattened ``name`` argument
    # are mutually exclusive ways of describing the call.
    if request is not None and any([name]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Normalise the input into a protobuf request message.
    request = analytics_admin.GetDisplayVideo360AdvertiserLinkProposalRequest(request)
    if name is not None:
        request.name = name

    # Wrap the transport method with retry/timeout defaults and
    # friendly error translation.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.get_display_video360_advertiser_link_proposal,
        default_timeout=None,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The resource name is routed through the metadata header.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("name", request.name),)
        ),
    )

    # Issue the RPC and return the fetched proposal.
    return await rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
async def list_display_video360_advertiser_link_proposals(self,
        request: analytics_admin.ListDisplayVideo360AdvertiserLinkProposalsRequest = None,
        *,
        parent: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> pagers.ListDisplayVideo360AdvertiserLinkProposalsAsyncPager:
    r"""Lists DisplayVideo360AdvertiserLinkProposals on a property.

    Args:
        request (:class:`google.analytics.admin_v1alpha.types.ListDisplayVideo360AdvertiserLinkProposalsRequest`):
            The request object. Request message for
            ListDisplayVideo360AdvertiserLinkProposals RPC.
        parent (:class:`str`):
            Required. Example format:
            properties/1234
            This corresponds to the ``parent`` field
            on the ``request`` instance; if ``request`` is provided, this
            should not be set.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.analytics.admin_v1alpha.services.analytics_admin_service.pagers.ListDisplayVideo360AdvertiserLinkProposalsAsyncPager:
            Response message for
            ListDisplayVideo360AdvertiserLinkProposals
            RPC. Iterating over this object will
            yield results and resolve additional
            pages automatically.
    """
    # Either a pre-built request object or the flattened ``parent``
    # argument may be supplied -- never both.
    if request is not None and any([parent]):
        raise ValueError("If the `request` argument is set, then none of "
                         "the individual field arguments should be set.")

    # Normalise the input into a protobuf request message and apply
    # the flattened argument, if present.
    request = analytics_admin.ListDisplayVideo360AdvertiserLinkProposalsRequest(request)
    if parent is not None:
        request.parent = parent

    # Wrap the transport method with retry/timeout defaults and
    # friendly error translation.
    rpc = gapic_v1.method_async.wrap_method(
        self._client._transport.list_display_video360_advertiser_link_proposals,
        default_timeout=None,
        client_info=DEFAULT_CLIENT_INFO,
    )

    # The parent resource name is routed through the metadata header.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata(
            (("parent", request.parent),)
        ),
    )

    # Fetch the first page of results.
    first_page = await rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # The RPC is paginated: wrap the first page in an async pager that
    # resolves further pages on demand during ``__aiter__``.
    return pagers.ListDisplayVideo360AdvertiserLinkProposalsAsyncPager(
        method=rpc,
        request=request,
        response=first_page,
        metadata=metadata,
    )
async def create_display_video360_advertiser_link_proposal(self,
request: analytics_admin.CreateDisplayVideo360AdvertiserLinkProposalRequest = None,
*,
parent: str = None,
display_video_360_advertiser_link_proposal: resources.DisplayVideo360AdvertiserLinkProposal = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> resources.DisplayVideo360AdvertiserLinkProposal:
r"""Creates a DisplayVideo360AdvertiserLinkProposal.
Args:
request (:class:`google.analytics.admin_v1alpha.types.CreateDisplayVideo360AdvertiserLinkProposalRequest`):
The request object. Request message for
CreateDisplayVideo360AdvertiserLinkProposal RPC.
parent (:class:`str`):
Required. Example format:
properties/1234
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
display_video_360_advertiser_link_proposal (:class:`google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLinkProposal`):
Required. The
DisplayVideo360AdvertiserLinkProposal to
create.
This corresponds to the ``display_video_360_advertiser_link_proposal`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.analytics.admin_v1alpha.types.DisplayVideo360AdvertiserLinkProposal:
A proposal for a link between an GA4
property and a Display & Video 360
advertiser.
A proposal is converted to a
DisplayVideo360AdvertiserLink once
approved. Google Analytics admins
approve inbound proposals while Display
& Video 360 admins approve outbound
proposals.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we | |
= (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/ARB/ARB_sample_locations.txt # GL_SAMPLE_LOCATION_ARB
_m[0x8E50] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_sample_locations.txt # GL_SAMPLE_LOCATION_NV
_m[0x933F] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/ARB/ARB_sample_locations.txt # GL_SAMPLE_LOCATION_PIXEL_GRID_HEIGHT_ARB
_m[0x933F] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_sample_locations.txt # GL_SAMPLE_LOCATION_PIXEL_GRID_HEIGHT_NV
_m[0x933E] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/ARB/ARB_sample_locations.txt # GL_SAMPLE_LOCATION_PIXEL_GRID_WIDTH_ARB
_m[0x933E] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_sample_locations.txt # GL_SAMPLE_LOCATION_PIXEL_GRID_WIDTH_NV
_m[0x933D] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/ARB/ARB_sample_locations.txt # GL_SAMPLE_LOCATION_SUBPIXEL_BITS_ARB
_m[0x933D] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_sample_locations.txt # GL_SAMPLE_LOCATION_SUBPIXEL_BITS_NV
_m[0x8E51] = (1,) # GL_SAMPLE_MASK
_m[0x80A0] = (1,) # GL_SAMPLE_MASK_EXT
_m[0x80AB] = (1,) # GL_SAMPLE_MASK_INVERT_SGIS
_m[0x8E51] = (1,) # GL_SAMPLE_MASK_NV
_m[0x80A0] = (1,) # GL_SAMPLE_MASK_SGIS
_m[0x8E52] = (1,) # GL_SAMPLE_MASK_VALUE
_m[0x80AA] = (1,) # GL_SAMPLE_MASK_VALUE_SGIS
_m[0x80AC] = (1,) # GL_SAMPLE_PATTERN_EXT
_m[0x80AC] = (1,) # GL_SAMPLE_PATTERN_SGIS
_m[0x8E50] = (2,) # GL_SAMPLE_POSITION
_m[0x8C36] = (1,) # GL_SAMPLE_SHADING_ARB
_m[0x8C36] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/OES/OES_sample_shading.txt # GL_SAMPLE_SHADING_OES
_m[0x0C10] = (4,) # GL_SCISSOR_BOX
_m[0x9556] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_scissor_exclusive.txt # GL_SCISSOR_BOX_EXCLUSIVE_NV
_m[0x0C11] = (1,) # GL_SCISSOR_TEST
_m[0x9555] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_scissor_exclusive.txt # GL_SCISSOR_TEST_EXCLUSIVE_NV
_m[0x845E] = (1,) # GL_SECONDARY_COLOR_ARRAY
_m[0x889C] = (1,) # GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING
_m[0x889C] = (1,) # GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB
_m[0x8F31] = (1,) # GL_SECONDARY_COLOR_ARRAY_LENGTH_NV
_m[0x845D] = (1,)#TODO Review http://www.opengl.org/registry/specs//EXT/secondary_color.txt # GL_SECONDARY_COLOR_ARRAY_POINTER_EXT
_m[0x845A] = (1,) # GL_SECONDARY_COLOR_ARRAY_SIZE
_m[0x845A] = (1,) # GL_SECONDARY_COLOR_ARRAY_SIZE_EXT
_m[0x845C] = (1,) # GL_SECONDARY_COLOR_ARRAY_STRIDE
_m[0x845C] = (1,) # GL_SECONDARY_COLOR_ARRAY_STRIDE_EXT
_m[0x845B] = (1,) # GL_SECONDARY_COLOR_ARRAY_TYPE
_m[0x845B] = (1,) # GL_SECONDARY_COLOR_ARRAY_TYPE_EXT
_m[0x0DF3] = (1,) # GL_SELECTION_BUFFER_POINTER
_m[0x0DF4] = (1,) # GL_SELECTION_BUFFER_SIZE
_m[0x8012] = (1,) # GL_SEPARABLE_2D
_m[0x8012] = (1,) # GL_SEPARABLE_2D_EXT
_m[0x8DF8] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/ES2_compatibility.txt # GL_SHADER_BINARY_FORMATS
_m[0x8DFA] = (1,) # GL_SHADER_COMPILER
_m[0x82A6] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_SHADER_IMAGE_ATOMIC
_m[0x82A4] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_SHADER_IMAGE_LOAD
_m[0x82A5] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_SHADER_IMAGE_STORE
_m[0x86DF] = (1,) # GL_SHADER_OPERATION_NV
_m[0x8F64] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/EXT/EXT_shader_pixel_local_storage.txt # GL_SHADER_PIXEL_LOCAL_STORAGE_EXT
_m[0x8B88] = (1,) # GL_SHADER_SOURCE_LENGTH
_m[0x90D2] = (1,) # GL_SHADER_STORAGE_BUFFER
_m[0x90D3] = (1,) # GL_SHADER_STORAGE_BUFFER_BINDING
_m[0x90DF] = (1,) # GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT
_m[0x90D5] = (1,) # GL_SHADER_STORAGE_BUFFER_SIZE
_m[0x90D4] = (1,) # GL_SHADER_STORAGE_BUFFER_START
_m[0x8B4F] = (1,) # GL_SHADER_TYPE
_m[0x0B54] = (1,) # GL_SHADE_MODEL
_m[0x8B8C] = (1,) # GL_SHADING_LANGUAGE_VERSION
_m[0x955B] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_shading_rate_image.txt # GL_SHADING_RATE_IMAGE_BINDING_NV
_m[0x9563] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_shading_rate_image.txt # GL_SHADING_RATE_IMAGE_NV
_m[0x955E] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_shading_rate_image.txt # GL_SHADING_RATE_IMAGE_PALETTE_SIZE_NV
_m[0x955D] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_shading_rate_image.txt # GL_SHADING_RATE_IMAGE_TEXEL_HEIGHT_NV
_m[0x955C] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_shading_rate_image.txt # GL_SHADING_RATE_IMAGE_TEXEL_WIDTH_NV
_m[0x1601] = (1,) # GL_SHININESS
_m[0x82AC] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST
_m[0x82AE] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE
_m[0x82AD] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST
_m[0x82AF] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE
_m[0x0B23] = (1,) # GL_SMOOTH_LINE_WIDTH_GRANULARITY
_m[0x0B22] = (2,) # GL_SMOOTH_LINE_WIDTH_RANGE
_m[0x0B13] = (1,) # GL_SMOOTH_POINT_SIZE_GRANULARITY
_m[0x0B12] = (2,) # GL_SMOOTH_POINT_SIZE_RANGE
_m[0x933B] = (1,)#TODO Review http://www.opengl.org/registry/specs//NV/shader_thread_group.txt # GL_SM_COUNT_NV
_m[0x858B] = (1,) # GL_SOURCE3_ALPHA_NV
_m[0x8583] = (1,) # GL_SOURCE3_RGB_NV
_m[0x82F8] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/sparse_buffer.txt # GL_SPARSE_BUFFER_PAGE_SIZE_ARB
_m[0x91A9] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/sparse_texture.txt # GL_SPARSE_TEXTURE_FULL_ARRAY_CUBE_MIPMAPS_ARB
_m[0x91A9] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/EXT/EXT_sparse_texture.txt # GL_SPARSE_TEXTURE_FULL_ARRAY_CUBE_MIPMAPS_EXT
_m[0x1202] = (4,) # GL_SPECULAR
_m[0x9552] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/ARB/ARB_gl_spirv.txt # GL_SPIR_V_BINARY_ARB
_m[0x1206] = (1,) # GL_SPOT_CUTOFF
_m[0x1204] = (3,) # GL_SPOT_DIRECTION
_m[0x1205] = (1,) # GL_SPOT_EXPONENT
_m[0x814A] = (3,) # GL_SPRITE_AXIS_SGIX
_m[0x8149] = (1,) # GL_SPRITE_MODE_SGIX
_m[0x8148] = (1,) # GL_SPRITE_SGIX
_m[0x814B] = (3,) # GL_SPRITE_TRANSLATION_SGIX
_m[0x8588] = (1,) # GL_SRC0_ALPHA
_m[0x8580] = (1,) # GL_SRC0_RGB
_m[0x8589] = (1,) # GL_SRC1_ALPHA
_m[0x8581] = (1,) # GL_SRC1_RGB
_m[0x858A] = (1,) # GL_SRC2_ALPHA
_m[0x8582] = (1,) # GL_SRC2_RGB
_m[0x8299] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_SRGB_DECODE_ARB
_m[0x8297] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_SRGB_READ
_m[0x8298] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_SRGB_WRITE
_m[0x8801] = (1,) # GL_STENCIL_BACK_FAIL
_m[0x8801] = (1,) # GL_STENCIL_BACK_FAIL_ATI
_m[0x8800] = (1,) # GL_STENCIL_BACK_FUNC
_m[0x8800] = (1,) # GL_STENCIL_BACK_FUNC_ATI
_m[0x874D] = (1,) # GL_STENCIL_BACK_OP_VALUE_AMD
_m[0x8802] = (1,) # GL_STENCIL_BACK_PASS_DEPTH_FAIL
_m[0x8802] = (1,) # GL_STENCIL_BACK_PASS_DEPTH_FAIL_ATI
_m[0x8803] = (1,) # GL_STENCIL_BACK_PASS_DEPTH_PASS
_m[0x8803] = (1,) # GL_STENCIL_BACK_PASS_DEPTH_PASS_ATI
_m[0x8CA3] = (1,) # GL_STENCIL_BACK_REF
_m[0x8CA4] = (1,) # GL_STENCIL_BACK_VALUE_MASK
_m[0x8CA5] = (1,) # GL_STENCIL_BACK_WRITEMASK
_m[0x0D57] = (1,) # GL_STENCIL_BITS
_m[0x88F3] = (1,) # GL_STENCIL_CLEAR_TAG_VALUE_EXT
_m[0x0B91] = (1,) # GL_STENCIL_CLEAR_VALUE
_m[0x8285] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_STENCIL_COMPONENTS
_m[0x0B94] = (1,) # GL_STENCIL_FAIL
_m[0x0B92] = (1,) # GL_STENCIL_FUNC
_m[0x874C] = (1,) # GL_STENCIL_OP_VALUE_AMD
_m[0x0B95] = (1,) # GL_STENCIL_PASS_DEPTH_FAIL
_m[0x0B96] = (1,) # GL_STENCIL_PASS_DEPTH_PASS
_m[0x0B97] = (1,) # GL_STENCIL_REF
_m[0x8288] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_STENCIL_RENDERABLE
_m[0x932E] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_framebuffer_mixed_samples.txt # GL_STENCIL_SAMPLES_NV
_m[0x88F2] = (1,) # GL_STENCIL_TAG_BITS_EXT
_m[0x0B90] = (1,) # GL_STENCIL_TEST
_m[0x8910] = (1,) # GL_STENCIL_TEST_TWO_SIDE_EXT
_m[0x0B93] = (1,) # GL_STENCIL_VALUE_MASK
_m[0x0B98] = (1,) # GL_STENCIL_WRITEMASK
_m[0x0C33] = (1,) # GL_STEREO
_m[0x00000004] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/KHR/KHR_shader_subgroup.txt # GL_SUBGROUP_FEATURE_ARITHMETIC_BIT_KHR
_m[0x00000008] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/KHR/KHR_shader_subgroup.txt # GL_SUBGROUP_FEATURE_BALLOT_BIT_KHR
_m[0x00000001] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/KHR/KHR_shader_subgroup.txt # GL_SUBGROUP_FEATURE_BASIC_BIT_KHR
_m[0x00000040] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/KHR/KHR_shader_subgroup.txt # GL_SUBGROUP_FEATURE_CLUSTERED_BIT_KHR
_m[0x00000100] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_shader_subgroup_partitioned.txt # GL_SUBGROUP_FEATURE_PARTITIONED_BIT_NV
_m[0x00000080] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/KHR/KHR_shader_subgroup.txt # GL_SUBGROUP_FEATURE_QUAD_BIT_KHR
_m[0x00000010] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/KHR/KHR_shader_subgroup.txt # GL_SUBGROUP_FEATURE_SHUFFLE_BIT_KHR
_m[0x00000020] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/KHR/KHR_shader_subgroup.txt # GL_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT_KHR
_m[0x00000002] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/KHR/KHR_shader_subgroup.txt # GL_SUBGROUP_FEATURE_VOTE_BIT_KHR
_m[0x9535] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/KHR/KHR_shader_subgroup.txt # GL_SUBGROUP_QUAD_ALL_STAGES_KHR
_m[0x9532] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/KHR/KHR_shader_subgroup.txt # GL_SUBGROUP_SIZE_KHR
_m[0x9534] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/KHR/KHR_shader_subgroup.txt # GL_SUBGROUP_SUPPORTED_FEATURES_KHR
_m[0x9533] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/KHR/KHR_shader_subgroup.txt # GL_SUBGROUP_SUPPORTED_STAGES_KHR
_m[0x0D50] = (1,) # GL_SUBPIXEL_BITS
_m[0x9347] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_conservative_raster.txt # GL_SUBPIXEL_PRECISION_BIAS_X_BITS_NV
_m[0x9348] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_conservative_raster.txt # GL_SUBPIXEL_PRECISION_BIAS_Y_BITS_NV
_m[0x883F] = (1,)#TODO Review http://www.opengl.org/registry/specs//AMD/sample_positions.txt # GL_SUBSAMPLE_DISTANCE_AMD
_m[0x9372] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_internalformat_sample_query.txt # GL_SUPERSAMPLE_SCALE_X_NV
_m[0x9373] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_internalformat_sample_query.txt # GL_SUPERSAMPLE_SCALE_Y_NV
_m[0x91B7] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/AMD/AMD_framebuffer_multisample_advanced.txt # GL_SUPPORTED_MULTISAMPLE_MODES_AMD
_m[0x9113] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/sync.txt # GL_SYNC_CONDITION
_m[0x9115] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/sync.txt # GL_SYNC_FLAGS
_m[0x9114] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/sync.txt # GL_SYNC_STATUS
_m[0x8439] = (1,) # GL_TANGENT_ARRAY_EXT
_m[0x8442] = (1,) # GL_TANGENT_ARRAY_POINTER_EXT
_m[0x843F] = (1,) # GL_TANGENT_ARRAY_STRIDE_EXT
_m[0x843E] = (1,) # GL_TANGENT_ARRAY_TYPE_EXT
_m[0x953F] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/NV/NV_mesh_shader.txt # GL_TASK_WORK_GROUP_SIZE_NV
_m[0x9004] = (1,) # GL_TESSELLATION_MODE_AMD
_m[0x8E75] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/tessellation_shader.txt # GL_TESS_CONTROL_OUTPUT_VERTICES
_m[0x891E] = (1,)#TODO Review http://www.opengl.org/registry/specs//NV/tessellation_program5.txt # GL_TESS_CONTROL_PROGRAM_NV
_m[0x8E88] = (1,) # GL_TESS_CONTROL_SHADER
_m[0x829C] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_TESS_CONTROL_TEXTURE
_m[0x891F] = (1,)#TODO Review http://www.opengl.org/registry/specs//NV/tessellation_program5.txt # GL_TESS_EVALUATION_PROGRAM_NV
_m[0x8E87] = (1,) # GL_TESS_EVALUATION_SHADER
_m[0x829D] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_TESS_EVALUATION_TEXTURE
_m[0x8E76] = (1,) # GL_TESS_GEN_MODE
_m[0x8E79] = (1,) # GL_TESS_GEN_POINT_MODE
_m[0x8E77] = (1,) # GL_TESS_GEN_SPACING
_m[0x8E78] = (1,) # GL_TESS_GEN_VERTEX_ORDER
_m[0x0DE0] = (1,) # GL_TEXTURE_1D
_m[0x8C18] = (1,) # GL_TEXTURE_1D_ARRAY
_m[0x8068] = (1,) # GL_TEXTURE_1D_BINDING_EXT
_m[0x875D] = (1,) # GL_TEXTURE_1D_STACK_BINDING_MESAX
_m[0x8759] = (1,) # GL_TEXTURE_1D_STACK_MESAX
_m[0x0DE1] = (1,) # GL_TEXTURE_2D
_m[0x8C1A] = (1,) # GL_TEXTURE_2D_ARRAY
_m[0x8069] = (1,) # GL_TEXTURE_2D_BINDING_EXT
_m[0x875E] = (1,) # GL_TEXTURE_2D_STACK_BINDING_MESAX
_m[0x875A] = (1,) # GL_TEXTURE_2D_STACK_MESAX
_m[0x806F] = (1,) # GL_TEXTURE_3D
_m[0x806A] = (1,) # GL_TEXTURE_3D_BINDING_EXT
_m[0x806F] = (1,) # GL_TEXTURE_3D_EXT
_m[0x806F] = (1,) # GL_TEXTURE_3D_OES
_m[0x814F] = (1,) # GL_TEXTURE_4D_BINDING_SGIS
_m[0x8134] = (1,) # GL_TEXTURE_4D_SGIS
_m[0x805F] = (1,) # GL_TEXTURE_ALPHA_SIZE
_m[0x8C13] = (1,) # GL_TEXTURE_ALPHA_TYPE
_m[0x834F] = (1,) # GL_TEXTURE_APPLICATION_MODE_EXT
_m[0x813C] = (1,) # GL_TEXTURE_BASE_LEVEL
_m[0x813C] = (1,) # GL_TEXTURE_BASE_LEVEL_SGIS
_m[0x8068] = (1,) # GL_TEXTURE_BINDING_1D
_m[0x8C1C] = (1,) # GL_TEXTURE_BINDING_1D_ARRAY
_m[0x8C1C] = (1,) # GL_TEXTURE_BINDING_1D_ARRAY_EXT
_m[0x8069] = (1,) # GL_TEXTURE_BINDING_2D
_m[0x8C1D] = (1,) # GL_TEXTURE_BINDING_2D_ARRAY
_m[0x8C1D] = (1,) # GL_TEXTURE_BINDING_2D_ARRAY_EXT
_m[0x9104] = (1,) # GL_TEXTURE_BINDING_2D_MULTISAMPLE
_m[0x9105] = (1,) # GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY
_m[0x806A] = (1,) # GL_TEXTURE_BINDING_3D
_m[0x8C2C] = (1,) # GL_TEXTURE_BINDING_BUFFER
_m[0x8C2C] = (1,) # GL_TEXTURE_BINDING_BUFFER_ARB
_m[0x8C2C] = (1,) # GL_TEXTURE_BINDING_BUFFER_EXT
_m[0x8514] = (1,) # GL_TEXTURE_BINDING_CUBE_MAP
_m[0x8514] = (1,) # GL_TEXTURE_BINDING_CUBE_MAP_ARB
_m[0x900A] = (1,) # GL_TEXTURE_BINDING_CUBE_MAP_ARRAY
_m[0x900A] = (1,) # GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_ARB
_m[0x8D67] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/OES/OES_EGL_image_external.txt # GL_TEXTURE_BINDING_EXTERNAL_OES
_m[0x84F6] = (1,) # GL_TEXTURE_BINDING_RECTANGLE
_m[0x84F6] = (1,) # GL_TEXTURE_BINDING_RECTANGLE_ARB
_m[0x84F6] = (1,) # GL_TEXTURE_BINDING_RECTANGLE_NV
_m[0x8E53] = (1,) # GL_TEXTURE_BINDING_RENDERBUFFER_NV
_m[0x805E] = (1,) # GL_TEXTURE_BLUE_SIZE
_m[0x8C12] = (1,) # GL_TEXTURE_BLUE_TYPE
_m[0x1005] = (1,) # GL_TEXTURE_BORDER
_m[0x1004] = (4,) # GL_TEXTURE_BORDER_COLOR
_m[0x1004] = (4,) # GL_TEXTURE_BORDER_COLOR_NV
_m[0x8C2A] = (1,) # GL_TEXTURE_BUFFER
_m[0x8C2A] = (1,) # GL_TEXTURE_BUFFER_ARB
_m[0x8C2A] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/EXT/EXT_texture_buffer.txt # GL_TEXTURE_BUFFER_BINDING_EXT
_m[0x8C2A] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/OES/OES_texture_buffer.txt # GL_TEXTURE_BUFFER_BINDING_OES
_m[0x8C2D] = (1,) # GL_TEXTURE_BUFFER_DATA_STORE_BINDING
_m[0x8C2D] = (1,) # GL_TEXTURE_BUFFER_DATA_STORE_BINDING_ARB
_m[0x8C2D] = (1,) # GL_TEXTURE_BUFFER_DATA_STORE_BINDING_EXT
_m[0x8C2A] = (1,) # GL_TEXTURE_BUFFER_EXT
_m[0x8C2E] = (1,) # GL_TEXTURE_BUFFER_FORMAT_ARB
_m[0x8C2E] = (1,) # GL_TEXTURE_BUFFER_FORMAT_EXT
_m[0x919D] = (1,) # GL_TEXTURE_BUFFER_OFFSET
_m[0x919F] = (1,) # GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT
_m[0x919E] = (1,) # GL_TEXTURE_BUFFER_SIZE
_m[0x8171] = (2,) # GL_TEXTURE_CLIPMAP_CENTER_SGIX
_m[0x8176] = (1,) # GL_TEXTURE_CLIPMAP_DEPTH_SGIX
_m[0x8172] = (1,) # GL_TEXTURE_CLIPMAP_FRAME_SGIX
_m[0x8173] = (2,) # GL_TEXTURE_CLIPMAP_OFFSET_SGIX
_m[0x8174] = (3,) # GL_TEXTURE_CLIPMAP_VIRTUAL_DEPTH_SGIX
_m[0x9046] = (1,)#TODO Review http://www.opengl.org/registry/specs//NV/texture_multisample.txt # GL_TEXTURE_COLOR_SAMPLES_NV
_m[0x80BC] = (1,) # GL_TEXTURE_COLOR_TABLE_SGI
_m[0x81EF] = (4,) # GL_TEXTURE_COLOR_WRITEMASK_SGIS
_m[0x80BF] = (1,) # GL_TEXTURE_COMPARE_FAIL_VALUE_ARB
_m[0x884D] = (1,) # GL_TEXTURE_COMPARE_FUNC
_m[0x884C] = (1,) # GL_TEXTURE_COMPARE_MODE
_m[0x819B] = (1,) # GL_TEXTURE_COMPARE_OPERATOR_SGIX
_m[0x819A] = (1,) # GL_TEXTURE_COMPARE_SGIX
_m[0x86A1] = (1,) # GL_TEXTURE_COMPRESSED
_m[0x82B2] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_TEXTURE_COMPRESSED_BLOCK_HEIGHT
_m[0x82B3] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_TEXTURE_COMPRESSED_BLOCK_SIZE
_m[0x82B1] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_TEXTURE_COMPRESSED_BLOCK_WIDTH
_m[0x86A0] = (1,) # GL_TEXTURE_COMPRESSED_IMAGE_SIZE
_m[0x84EF] = (1,) # GL_TEXTURE_COMPRESSION_HINT
_m[0x84EF] = (1,) # GL_TEXTURE_COMPRESSION_HINT_ARB
_m[0x8078] = (1,) # GL_TEXTURE_COORD_ARRAY
_m[0x889A] = (1,) # GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING
_m[0x889A] = (1,) # GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB
_m[0x808B] = (1,) # GL_TEXTURE_COORD_ARRAY_COUNT_EXT
_m[0x8078] = (1,) # GL_TEXTURE_COORD_ARRAY_EXT
_m[0x83F8] = (1,)#TODO Review http://www.opengl.org/registry/specs//INTEL/parallel_arrays.txt # GL_TEXTURE_COORD_ARRAY_PARALLEL_POINTERS_INTEL
_m[0x8092] = (1,) # GL_TEXTURE_COORD_ARRAY_POINTER
_m[0x8088] = (1,) # GL_TEXTURE_COORD_ARRAY_SIZE
_m[0x8088] = (1,) # GL_TEXTURE_COORD_ARRAY_SIZE_EXT
_m[0x808A] = (1,) # GL_TEXTURE_COORD_ARRAY_STRIDE
_m[0x808A] = (1,) # GL_TEXTURE_COORD_ARRAY_STRIDE_EXT
_m[0x8089] = (1,) # GL_TEXTURE_COORD_ARRAY_TYPE
_m[0x8089] = (1,) # GL_TEXTURE_COORD_ARRAY_TYPE_EXT
_m[0x9045] = (1,)#TODO Review http://www.opengl.org/registry/specs//NV/texture_multisample.txt # GL_TEXTURE_COVERAGE_SAMPLES_NV
_m[0x8B9D] = (4,) # GL_TEXTURE_CROP_RECT_OES
_m[0x8513] = (1,) # GL_TEXTURE_CUBE_MAP
_m[0x8513] = (1,) # GL_TEXTURE_CUBE_MAP_ARB
_m[0x9009] = (1,) # GL_TEXTURE_CUBE_MAP_ARRAY
_m[0x884F] = (1,) # GL_TEXTURE_CUBE_MAP_SEAMLESS
_m[0x8071] = (1,) # GL_TEXTURE_DEPTH
_m[0x8071] = (1,) # GL_TEXTURE_DEPTH_EXT
_m[0x884A] = (1,) # GL_TEXTURE_DEPTH_SIZE
_m[0x8C16] = (1,) # GL_TEXTURE_DEPTH_TYPE
_m[0x871D] = (1,)#TODO Review http://www.opengl.org/registry/specs//NV/texture_shader.txt # GL_TEXTURE_DS_SIZE_NV
_m[0x871E] = (1,)#TODO Review http://www.opengl.org/registry/specs//NV/texture_shader.txt # GL_TEXTURE_DT_SIZE_NV
_m[0x2201] = (4,) # GL_TEXTURE_ENV_COLOR
_m[0x2200] = (1,) # GL_TEXTURE_ENV_MODE
_m[0x9107] = (1,) # GL_TEXTURE_FIXED_SAMPLE_LOCATIONS
_m[0x888C] = (1,)#TODO Review http://www.opengl.org/registry/specs//NV/float_buffer.txt # GL_TEXTURE_FLOAT_COMPONENTS_NV
_m[0x8BFD] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/QCOM/QCOM_texture_foveated.txt # GL_TEXTURE_FOVEATED_FEATURE_QUERY_QCOM
_m[0x8BFE] = (1,)#TODO Review /home/mcfletch/OpenGL-dev/pyopengl/src/khronosapi/extensions/QCOM/QCOM_texture_foveated.txt # GL_TEXTURE_FOVEATED_NUM_FOCAL_POINTS_QUERY_QCOM
_m[0x87FC] = (1,) # GL_TEXTURE_FREE_MEMORY_ATI
_m[0x82A2] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_TEXTURE_GATHER
_m[0x82A3] = (1,)#TODO Review http://www.opengl.org/registry/specs//ARB/internalformat_query2.txt # GL_TEXTURE_GATHER_SHADOW
_m[0x2500] = (1,) # GL_TEXTURE_GEN_MODE
_m[0x0C63] = (1,) # GL_TEXTURE_GEN_Q
_m[0x0C62] = (1,) | |
# Copyright 2018 The MACE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import math
import numpy as np
import six
import tensorflow as tf
from enum import Enum
from py_proto import mace_pb2
from transform import base_converter
from transform.base_converter import PoolingType
from transform.base_converter import PaddingMode
from transform.base_converter import ActivationType
from transform.base_converter import EltwiseType
from transform.base_converter import PadType
from transform.base_converter import FrameworkType
from transform.base_converter import ReduceType
from transform.base_converter import DataFormat
from transform.base_converter import MaceOp
from transform.base_converter import MaceKeyword
from transform.base_converter import ConverterUtil
from utils.util import mace_check
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.tools.graph_transforms import TransformGraph
# Names of the TF NodeDef attributes that the converter methods below read
# via tf_op.get_attr(...).
tf_padding_str = 'padding'
tf_strides_str = 'strides'
tf_dilations_str = 'dilations'
tf_data_format_str = 'data_format'
tf_kernel_str = 'ksize'
tf_epsilon_str = 'epsilon'
tf_alpha_str = 'alpha'
tf_is_training_str = 'is_training'
tf_align_corners = 'align_corners'
tf_block_size = 'block_size'
tf_squeeze_dims = 'squeeze_dims'
tf_axis = 'axis'
# Every TF op type the converter understands.  convert_ops() rejects any
# graph node whose type is not in this list (via the _op_converters table).
TFSupportedOps = [
    'Conv2D',
    'DepthwiseConv2dNative',
    'Conv2DBackpropInput',
    'BiasAdd',
    'Add',
    'Sub',
    'Mul',
    'Div',
    'Min',
    'Minimum',
    'Max',
    'Maximum',
    'Neg',
    'Abs',
    'Pow',
    'RealDiv',
    'Square',
    'SquaredDifference',
    'Rsqrt',
    'Sum',
    'Equal',
    'Relu',
    'LeakyRelu',
    'Relu6',
    'Tanh',
    'Sigmoid',
    'Fill',
    'FusedBatchNorm',
    'AvgPool',
    'MaxPool',
    'ExpandDims',
    'Squeeze',
    'MatMul',
    'BatchMatMul',
    'Identity',
    'Reshape',
    'Shape',
    'Transpose',
    'Softmax',
    'ResizeBicubic',
    'ResizeBilinear',
    'ResizeNearestNeighbor',
    'Placeholder',
    'SpaceToBatchND',
    'BatchToSpaceND',
    'DepthToSpace',
    'SpaceToDepth',
    'Pad',
    'PadV2',
    'ConcatV2',
    'Mean',
    'Prod',
    'Const',
    'Gather',
    'GatherV2',
    'StridedSlice',
    'Slice',
    'ReverseV2',
    'Stack',
    'Pack',
    'Unstack',
    'Unpack',
    'Cast',
    'ArgMax',
    'Split',
    'FakeQuantWithMinMaxVars',
    'FakeQuantWithMinMaxArgs',
    'FloorDiv',
    'Sqrt',
    'MirrorPad',
    'Cumsum',
    'OneHot',
    'Tile',
]
# String-valued enum so that TFOpType.Conv2D.name == 'Conv2D' and members
# compare equal to plain strings (type=str mixin).
TFOpType = Enum('TFOpType', [(op, op) for op in TFSupportedOps], type=str)
# Rebind as byte strings (six.b) — presumably because TF returns op names as
# bytes under some Python/TF combinations; TODO confirm where this list is
# consumed (not visible in this chunk).
TFSupportedOps = [six.b(op) for op in TFSupportedOps]
# Passes applied through tensorflow.tools.graph_transforms.TransformGraph
# before conversion (constant folding, batch-norm folding, dead-node strip).
TFTransformGraphOptions = [
    'strip_unused_nodes',
    'remove_nodes(op=Identity, op=CheckNumerics)',
    'fold_constants(ignore_errors=true)',
    'fold_batch_norms',
    'fold_old_batch_norms',
    'remove_control_dependencies',
    'strip_unused_nodes',
    'sort_by_execution_order'
]
class TensorflowConverter(base_converter.ConverterInterface):
"""A class for convert tensorflow frozen model to mace model.
We use tensorflow engine to infer op output shapes, since they are of
too many types."""
padding_mode = {
'VALID': PaddingMode.VALID,
'SAME': PaddingMode.SAME,
'FULL': PaddingMode.FULL
}
padding_mode = {six.b(k): v for k, v in six.iteritems(padding_mode)}
pooling_type_mode = {
TFOpType.AvgPool.name: PoolingType.AVG,
TFOpType.MaxPool.name: PoolingType.MAX
}
eltwise_type = {
TFOpType.Add.name: EltwiseType.SUM,
TFOpType.Sub.name: EltwiseType.SUB,
TFOpType.Mul.name: EltwiseType.PROD,
TFOpType.Div.name: EltwiseType.DIV,
TFOpType.Minimum.name: EltwiseType.MIN,
TFOpType.Maximum.name: EltwiseType.MAX,
TFOpType.Neg.name: EltwiseType.NEG,
TFOpType.Abs.name: EltwiseType.ABS,
TFOpType.Pow.name: EltwiseType.POW,
TFOpType.RealDiv.name: EltwiseType.DIV,
TFOpType.FloorDiv.name: EltwiseType.FLOOR_DIV,
TFOpType.SquaredDifference.name: EltwiseType.SQR_DIFF,
TFOpType.Square.name: EltwiseType.POW,
TFOpType.Rsqrt.name: EltwiseType.POW,
TFOpType.Sqrt.name: EltwiseType.POW,
TFOpType.Equal.name: EltwiseType.EQUAL,
}
activation_type = {
TFOpType.Relu.name: ActivationType.RELU,
TFOpType.Relu6.name: ActivationType.RELUX,
TFOpType.Tanh.name: ActivationType.TANH,
TFOpType.Sigmoid.name: ActivationType.SIGMOID,
TFOpType.LeakyRelu.name: ActivationType.LEAKYRELU,
}
reduce_math_type = {
TFOpType.Min.name: ReduceType.MIN,
TFOpType.Max.name: ReduceType.MAX,
TFOpType.Mean.name: ReduceType.MEAN,
TFOpType.Prod.name: ReduceType.PROD,
TFOpType.Sum.name: ReduceType.SUM,
}
pad_type = {
'CONSTANT': PadType.CONSTANT,
'REFLECT': PadType.REFLECT,
'SYMMETRIC': PadType.SYMMETRIC
}
    def __init__(self, option, src_model_file):
        """Load a frozen TF graph, run graph transforms, and prepare it
        for conversion.

        Args:
            option: conversion options; provides input_nodes/output_nodes
                (name -> node info) and data_type.
            src_model_file: path to the frozen .pb model file.
        """
        # Dispatch table: TF op type name -> bound converter method.
        # convert_ops() looks ops up here; anything missing is rejected.
        self._op_converters = {
            TFOpType.Conv2D.name: self.convert_conv2d,
            TFOpType.DepthwiseConv2dNative.name: self.convert_conv2d,
            TFOpType.Conv2DBackpropInput.name: self.convert_conv2d,
            TFOpType.BiasAdd.name: self.convert_biasadd,
            TFOpType.Add.name: self.convert_add,
            TFOpType.Sub.name: self.convert_elementwise,
            TFOpType.Mul.name: self.convert_elementwise,
            TFOpType.Div.name: self.convert_elementwise,
            TFOpType.Minimum.name: self.convert_elementwise,
            TFOpType.Maximum.name: self.convert_elementwise,
            TFOpType.Neg.name: self.convert_elementwise,
            TFOpType.Abs.name: self.convert_elementwise,
            TFOpType.Pow.name: self.convert_elementwise,
            TFOpType.RealDiv.name: self.convert_elementwise,
            TFOpType.SquaredDifference.name: self.convert_elementwise,
            TFOpType.Square.name: self.convert_elementwise,
            TFOpType.Rsqrt.name: self.convert_elementwise,
            TFOpType.Equal.name: self.convert_elementwise,
            TFOpType.Min.name: self.convert_reduce,
            TFOpType.Max.name: self.convert_reduce,
            TFOpType.Mean.name: self.convert_reduce,
            TFOpType.Prod.name: self.convert_reduce,
            TFOpType.Relu.name: self.convert_activation,
            TFOpType.LeakyRelu.name: self.convert_activation,
            TFOpType.Relu6.name: self.convert_activation,
            TFOpType.Tanh.name: self.convert_activation,
            TFOpType.Sigmoid.name: self.convert_activation,
            TFOpType.Fill.name: self.convert_fill,
            TFOpType.FusedBatchNorm.name: self.convert_fused_batchnorm,
            TFOpType.AvgPool.name: self.convert_pooling,
            TFOpType.MaxPool.name: self.convert_pooling,
            TFOpType.MatMul.name: self.convert_matmul,
            TFOpType.BatchMatMul.name: self.convert_matmul,
            TFOpType.Identity.name: self.convert_identity,
            TFOpType.Reshape.name: self.convert_reshape,
            TFOpType.Shape.name: self.convert_shape,
            TFOpType.ExpandDims.name: self.convert_expand_dims,
            TFOpType.Squeeze.name: self.convert_squeeze,
            TFOpType.Transpose.name: self.convert_transpose,
            TFOpType.Softmax.name: self.convert_softmax,
            TFOpType.ResizeBicubic.name: self.convert_resize_bicubic,
            TFOpType.ResizeBilinear.name: self.convert_resize_bilinear,
            TFOpType.ResizeNearestNeighbor.name: self.convert_resize_nearest_neighbor,  # noqa
            TFOpType.Placeholder.name: self.convert_nop,
            TFOpType.SpaceToBatchND.name: self.convert_space_batch,
            TFOpType.BatchToSpaceND.name: self.convert_space_batch,
            TFOpType.DepthToSpace.name: self.convert_space_depth,
            TFOpType.SpaceToDepth.name: self.convert_space_depth,
            TFOpType.Pad.name: self.convert_pad,
            TFOpType.PadV2.name: self.convert_pad,
            TFOpType.ConcatV2.name: self.convert_concat,
            TFOpType.Const.name: self.convert_nop,
            TFOpType.Gather.name: self.convert_gather,
            TFOpType.GatherV2.name: self.convert_gather,
            TFOpType.StridedSlice.name: self.convert_stridedslice,
            TFOpType.Slice.name: self.convert_slice,
            TFOpType.ReverseV2.name: self.convert_reverse,
            TFOpType.Pack.name: self.convert_stack,
            TFOpType.Stack.name: self.convert_stack,
            TFOpType.Unpack.name: self.convert_unstack,
            TFOpType.Unstack.name: self.convert_unstack,
            TFOpType.Cast.name: self.convert_cast,
            TFOpType.ArgMax.name: self.convert_argmax,
            TFOpType.Split.name: self.convert_split,
            TFOpType.FakeQuantWithMinMaxVars.name: self.convert_fake_quantize,
            TFOpType.FakeQuantWithMinMaxArgs.name: self.convert_fake_quantize,
            TFOpType.FloorDiv.name: self.convert_elementwise,
            TFOpType.Sqrt.name: self.convert_elementwise,
            TFOpType.MirrorPad.name: self.convert_pad,
            TFOpType.Cumsum.name: self.convert_cumsum,
            TFOpType.OneHot.name: self.convert_one_hot,
            TFOpType.Sum.name: self.convert_reduce,
            TFOpType.Tile.name: self.convert_tile,
        }
        self._option = option
        self._mace_net_def = mace_pb2.NetDef()
        # TF filters are HWIO; the produced MACE net is NHWC.
        ConverterUtil.set_filter_format(self._mace_net_def, DataFormat.HWIO)
        ConverterUtil.add_data_format_arg(self._mace_net_def, DataFormat.NHWC)
        # import tensorflow graph (TF 1.x-style GraphDef / gfile APIs)
        tf_graph_def = tf.GraphDef()
        with tf.gfile.Open(src_model_file, 'rb') as f:
            tf_graph_def.ParseFromString(f.read())
        self._placeholders = {}   # input tensor name -> zero feed value
        self._skip_tensor = set()  # const tensors to omit in convert_tensors
        self._output_shape = {}   # tensor name -> concrete shape
        print("Run transform_graph: %s" % TFTransformGraphOptions)
        try:
            print("output keys: ", option.output_nodes.keys())
            transformed_graph_def = TransformGraph(tf_graph_def,
                                                   option.input_nodes.keys(),
                                                   option.output_nodes.keys(),
                                                   TFTransformGraphOptions)
        except Exception as ex:
            # Deliberate best-effort: fall back to the untransformed graph.
            print("Failed to transform graph using tf tool: %s" % ex)
            transformed_graph_def = tf_graph_def
        # To check optimized model, uncomment following code.
        # tf.io.write_graph(
        #     transformed_graph_def,
        #     ".",
        #     os.path.basename(src_model_file)[:-3] + "_opt.pb",
        #     as_text=False
        # )
        # Pin placeholder shapes so shape inference below produces concrete
        # dimensions.
        self.add_shape_info(transformed_graph_def)
        # reset default graph to clear earlier import
        tf.reset_default_graph()
        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(transformed_graph_def, name='')
                self._tf_graph = graph
                # Evaluates tf.shape for every tensor (adds extra ops).
                self.update_output_shapes(session)
        # we have polluted graph with 'shape' ops, so reset it and reload it
        # again
        tf.reset_default_graph()
        with tf.Session() as session:
            with session.graph.as_default() as graph:
                tf.import_graph_def(transformed_graph_def, name='')
                self._tf_graph = graph
def run(self):
with tf.Session() as session:
self.convert_ops(session)
self.replace_input_output_tensor_name()
return self._mace_net_def
def replace_input_output_tensor_name(self):
for op in self._mace_net_def.op:
for i in six.moves.range(len(op.input)):
if op.input[i][-2:] == ':0':
op_name = op.input[i][:-2]
if op_name in self._option.input_nodes \
or op_name in self._option.output_nodes:
op.input[i] = op_name
for i in six.moves.range(len(op.output)):
if op.output[i][-2:] == ':0':
op_name = op.output[i][:-2]
if op_name in self._option.output_nodes:
op.output[i] = op_name
def add_shape_info(self, tf_graph_def):
for node in tf_graph_def.node:
for input_node in self._option.input_nodes.values():
if node.name == input_node.name \
or node.name + ':0' == input_node.name:
input_shape = input_node.shape
if input_node.data_format == DataFormat.OIHW \
and len(input_shape) == 4:
# OIHW -> HWIO
input_shape = [input_shape[2], input_shape[3],
input_shape[1], input_shape[0]]
del node.attr['shape'].shape.dim[:]
node.attr['shape'].shape.dim.extend([
tensor_shape_pb2.TensorShapeProto.Dim(size=i) for i in
input_shape
])
self._placeholders[node.name + ':0'] = \
np.zeros(shape=input_shape, dtype=float)
@staticmethod
def get_scope(tensor_name):
idx = tensor_name.rfind('/')
if idx == -1:
return tensor_name
else:
return tensor_name[:idx]
def update_output_shapes(self, sess):
tensors = []
shape_tensors = []
for tf_op in self._tf_graph.get_operations():
for output in tf_op.outputs:
tensors.append(output.name)
shape_tensors.append(tf.shape(output))
tensor_shapes = sess.run(shape_tensors,
feed_dict=self._placeholders)
for i in range(len(tensors)):
self._output_shape[tensors[i]] = tensor_shapes[i]
def convert_ops(self, sess):
for tf_op in self._tf_graph.get_operations():
mace_check(tf_op.type in self._op_converters,
"Mace does not support tensorflow op type %s yet"
% tf_op.type)
self._op_converters[tf_op.type](tf_op)
self.convert_tensors()
def convert_tensors(self):
for tf_op in self._tf_graph.get_operations():
if tf_op.type != TFOpType.Const.name:
continue
output_name = tf_op.outputs[0].name
if output_name not in self._skip_tensor:
tensor = self._mace_net_def.tensors.add()
tensor.name = tf_op.outputs[0].name
tf_tensor = tf_op.outputs[0].eval()
tensor.dims.extend(list(tf_tensor.shape))
tf_dt = tf_op.get_attr('dtype')
if tf_dt == tf.float32:
tensor.data_type = mace_pb2.DT_FLOAT
tensor.float_data.extend(tf_tensor.astype(np.float32).flat)
elif tf_dt == tf.int32:
tensor.data_type = mace_pb2.DT_INT32
tensor.int32_data.extend(tf_tensor.astype(np.int32).flat)
else:
mace_check(False,
"Not supported tensor type: %s" % tf_dt.name)
def add_tensor(self, name, shape, data_type, value):
tensor = self._mace_net_def.tensors.add()
tensor.name = name
tensor.dims.extend(list(shape))
tensor.data_type = data_type
tensor.float_data.extend(value.flat)
# this function tries to infer tensor shape, but some dimension shape
# may be undefined due to variance of input length
def infer_tensor_shape(self, tensor, output_shape=None):
shape = None
if tensor.name in self._output_shape:
shape = self._output_shape[tensor.name]
else:
shape = tensor.shape.as_list()
if output_shape:
output_shape.dims.extend(shape)
return shape
def convert_nop(self, tf_op):
pass
    def convert_general_op(self, tf_op):
        """Create a MACE op mirroring *tf_op*'s name, type, inputs, outputs
        and inferred output shapes, plus the common args (data type,
        framework type, data format).  Returns the new op so callers can
        specialize it."""
        op = self._mace_net_def.op.add()
        op.name = tf_op.name
        op.type = tf_op.type
        op.input.extend([tf_input.name for tf_input in tf_op.inputs])
        op.output.extend([tf_output.name for tf_output in tf_op.outputs])
        for tf_output in tf_op.outputs:
            output_shape = op.output_shape.add()
            self.infer_tensor_shape(tf_output, output_shape)
        # Resolve the op's data type: try attr 'T' first, then 'SrcT'
        # (used by Cast); get_attr raises ValueError when the attr is
        # absent, which drives the fallback chain below.
        data_type_arg = op.arg.add()
        data_type_arg.name = 'T'
        try:
            dtype = tf_op.get_attr('T')
            if dtype == tf.int32:
                data_type_arg.i = mace_pb2.DT_INT32
            elif dtype == tf.float32:
                data_type_arg.i = self._option.data_type
            else:
                mace_check(False, "data type %s not supported" % dtype)
        except ValueError:
            try:
                dtype = tf_op.get_attr('SrcT')
                # bool is widened to int32 for MACE.
                if dtype == tf.int32 or dtype == tf.bool:
                    data_type_arg.i = mace_pb2.DT_INT32
                elif dtype == tf.float32:
                    data_type_arg.i = self._option.data_type
                else:
                    mace_check(False, "data type %s not supported" % dtype)
            except ValueError:
                # No dtype attr at all: use the configured default.
                data_type_arg.i = self._option.data_type
        framework_type_arg = op.arg.add()
        framework_type_arg.name = MaceKeyword.mace_framework_type_str
        framework_type_arg.i = FrameworkType.TENSORFLOW.value
        ConverterUtil.add_data_format_arg(op, DataFormat.NHWC)
        return op
def convert_identity(self, tf_op):
op = self.convert_general_op(tf_op)
op.type = 'Identity'
def convert_conv2d(self, tf_op):
op = self.convert_general_op(tf_op)
if tf_op.type == TFOpType.DepthwiseConv2dNative.name:
op.type = MaceOp.DepthwiseConv2d.name
elif tf_op.type == TFOpType.Conv2DBackpropInput.name:
op.type = MaceOp.Deconv2D.name
else:
op.type = MaceOp.Conv2D.name
padding_arg = op.arg.add()
padding_arg.name = MaceKeyword.mace_padding_str
padding_arg.i = self.padding_mode[tf_op.get_attr(tf_padding_str)].value
strides_arg = op.arg.add()
strides_arg.name = MaceKeyword.mace_strides_str
strides_arg.ints.extend(tf_op.get_attr(tf_strides_str)[1:3])
if op.type != MaceOp.Deconv2D.name:
dilation_arg = op.arg.add()
dilation_arg.name = MaceKeyword.mace_dilations_str
try:
dilation_val = tf_op.get_attr(tf_dilations_str)[1:3]
except ValueError:
dilation_val = [1, 1]
dilation_arg.ints.extend(dilation_val)
else:
try:
dilation_val = tf_op.get_attr(tf_dilations_str)[1:3]
except ValueError:
dilation_val = [1, 1]
mace_check(dilation_val[0] == 1 and dilation_val[1] == 1,
"Mace only supports dilation == 1 conv2d_transpose.")
mace_check(len(tf_op.inputs) >= 3,
"deconv should have (>=) 3 inputs.")
del op.input[:]
op.input.extend([tf_op.inputs[2].name,
tf_op.inputs[1].name,
tf_op.inputs[0].name])
def convert_elementwise(self, tf_op):
op = self.convert_general_op(tf_op)
op.type = MaceOp.Eltwise.name
type_arg = op.arg.add()
type_arg.name = MaceKeyword.mace_element_type_str
type_arg.i = self.eltwise_type[tf_op.type].value
def check_is_scalar(tf_op):
if len(tf_op.inputs) == 1:
return len(self.infer_tensor_shape(tf_op.inputs[0])) == 0
elif len(tf_op.inputs) == 2:
return len(self.infer_tensor_shape(tf_op.inputs[0])) == 0 and \
len(self.infer_tensor_shape(tf_op.inputs[1])) == 0
if check_is_scalar(tf_op):
op.type = MaceOp.ScalarMath.name
else:
op.type = MaceOp.Eltwise.name
if | |
header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = []
response = self.api_client.call_api(resource_path, method,
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=files,
response_type='JsonWatchEvent',
auth_settings=auth_settings,
callback=params.get('callback'))
return response
    def watch_namespaced_limit_range(self, namespace, name, **kwargs):
        """
        watch changes to an object of kind LimitRange
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.watch_namespaced_limit_range(namespace, name, callback=callback_function)
        :param callback function: The callback function
                 for asynchronous request. (optional)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str name: name of the LimitRange (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
        :param int timeout_seconds: Timeout for the list/watch call.
        :return: JsonWatchEvent
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # NOTE(review): swagger-generated code.  locals() below snapshots the
        # current local namespace (the declared args, kwargs and all_params),
        # so no new locals may be introduced above that call.
        all_params = ['namespace', 'name', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']
        all_params.append('callback')
        params = locals()
        # Reject unknown keyword arguments, then flatten kwargs into params.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method watch_namespaced_limit_range" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `watch_namespaced_limit_range`")
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `watch_namespaced_limit_range`")
        resource_path = '/api/v1/watch/namespaces/{namespace}/limitranges/{name}'.replace('{format}', 'json')
        method = 'GET'
        # Path parameters are substituted into resource_path by the client.
        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        if 'name' in params:
            path_params['name'] = params['name']
        # Query parameters: snake_case locals map to camelCase API names.
        query_params = {}
        if 'pretty' in params:
            query_params['pretty'] = params['pretty']
        if 'label_selector' in params:
            query_params['labelSelector'] = params['label_selector']
        if 'field_selector' in params:
            query_params['fieldSelector'] = params['field_selector']
        if 'watch' in params:
            query_params['watch'] = params['watch']
        if 'resource_version' in params:
            query_params['resourceVersion'] = params['resource_version']
        if 'timeout_seconds' in params:
            query_params['timeoutSeconds'] = params['timeout_seconds']
        header_params = {}
        form_params = {}
        files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = []
        # Synchronous unless a 'callback' was supplied (then returns a thread).
        response = self.api_client.call_api(resource_path, method,
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=files,
                                            response_type='JsonWatchEvent',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
    def watch_namespaced_persistent_volume_claim_list(self, namespace, **kwargs):
        """
        watch individual changes to a list of PersistentVolumeClaim
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.watch_namespaced_persistent_volume_claim_list(namespace, callback=callback_function)
        :param callback function: The callback function
                 for asynchronous request. (optional)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
        :param int timeout_seconds: Timeout for the list/watch call.
        :return: JsonWatchEvent
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['namespace', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']
        all_params.append('callback')
        # locals() snapshots {self, namespace, kwargs}; validated kwargs are
        # merged into the same dict and the raw 'kwargs' entry dropped below.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method watch_namespaced_persistent_volume_claim_list" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `watch_namespaced_persistent_volume_claim_list`")
        resource_path = '/api/v1/watch/namespaces/{namespace}/persistentvolumeclaims'.replace('{format}', 'json')
        method = 'GET'
        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        # Optional query parameters are only forwarded when supplied;
        # note the snake_case -> camelCase rename for the wire format.
        query_params = {}
        if 'pretty' in params:
            query_params['pretty'] = params['pretty']
        if 'label_selector' in params:
            query_params['labelSelector'] = params['label_selector']
        if 'field_selector' in params:
            query_params['fieldSelector'] = params['field_selector']
        if 'watch' in params:
            query_params['watch'] = params['watch']
        if 'resource_version' in params:
            query_params['resourceVersion'] = params['resource_version']
        if 'timeout_seconds' in params:
            query_params['timeoutSeconds'] = params['timeout_seconds']
        header_params = {}
        form_params = {}
        files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # An empty Accept header is omitted entirely rather than sent blank.
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = []
        # call_api returns the response synchronously, or the request thread
        # when a 'callback' kwarg was supplied.
        response = self.api_client.call_api(resource_path, method,
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=files,
                                            response_type='JsonWatchEvent',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
    def watch_namespaced_persistent_volume_claim(self, namespace, name, **kwargs):
        """
        watch changes to an object of kind PersistentVolumeClaim
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.watch_namespaced_persistent_volume_claim(namespace, name, callback=callback_function)
        :param callback function: The callback function
                 for asynchronous request. (optional)
        :param str namespace: object name and auth scope, such as for teams and projects (required)
        :param str name: name of the PersistentVolumeClaim (required)
        :param str pretty: If 'true', then the output is pretty printed.
        :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
        :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
        :param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
        :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history.
        :param int timeout_seconds: Timeout for the list/watch call.
        :return: JsonWatchEvent
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['namespace', 'name', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']
        all_params.append('callback')
        # locals() snapshots {self, namespace, name, kwargs}; validated kwargs
        # are merged into the same dict and the raw 'kwargs' entry dropped.
        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method watch_namespaced_persistent_volume_claim" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'namespace' is set
        if ('namespace' not in params) or (params['namespace'] is None):
            raise ValueError("Missing the required parameter `namespace` when calling `watch_namespaced_persistent_volume_claim`")
        # verify the required parameter 'name' is set
        if ('name' not in params) or (params['name'] is None):
            raise ValueError("Missing the required parameter `name` when calling `watch_namespaced_persistent_volume_claim`")
        resource_path = '/api/v1/watch/namespaces/{namespace}/persistentvolumeclaims/{name}'.replace('{format}', 'json')
        method = 'GET'
        path_params = {}
        if 'namespace' in params:
            path_params['namespace'] = params['namespace']
        if 'name' in params:
            path_params['name'] = params['name']
        # Optional query parameters are only forwarded when supplied;
        # note the snake_case -> camelCase rename for the wire format.
        query_params = {}
        if 'pretty' in params:
            query_params['pretty'] = params['pretty']
        if 'label_selector' in params:
            query_params['labelSelector'] = params['label_selector']
        if 'field_selector' in params:
            query_params['fieldSelector'] = params['field_selector']
        if 'watch' in params:
            query_params['watch'] = params['watch']
        if 'resource_version' in params:
            query_params['resourceVersion'] = params['resource_version']
        if 'timeout_seconds' in params:
            query_params['timeoutSeconds'] = params['timeout_seconds']
        header_params = {}
        form_params = {}
        files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        # An empty Accept header is omitted entirely rather than sent blank.
        if not header_params['Accept']:
            del header_params['Accept']
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['*/*'])
        # Authentication setting
        auth_settings = []
        # call_api returns the response synchronously, or the request thread
        # when a 'callback' kwarg was supplied.
        response = self.api_client.call_api(resource_path, method,
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=files,
                                            response_type='JsonWatchEvent',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
def watch_namespaced_pod_list(self, namespace, **kwargs):
"""
watch individual changes to a list of Pod
This method makes a synchronous | |
= secret_store_id
self.egress_filter = egress_filter
self.hostname = hostname
self.username = username
self.password = password
self.database = database
self.port_override = port_override
self.port = port
self.override_database = override_database
def __repr__(self):
return '<sdm.AuroraPostgres ' + \
'id: ' + repr(self.id) + ' ' +\
'name: ' + repr(self.name) + ' ' +\
'healthy: ' + repr(self.healthy) + ' ' +\
'tags: ' + repr(self.tags) + ' ' +\
'secret_store_id: ' + repr(self.secret_store_id) + ' ' +\
'egress_filter: ' + repr(self.egress_filter) + ' ' +\
'hostname: ' + repr(self.hostname) + ' ' +\
'username: ' + repr(self.username) + ' ' +\
'password: ' + repr(self.password) + ' ' +\
'database: ' + repr(self.database) + ' ' +\
'port_override: ' + repr(self.port_override) + ' ' +\
'port: ' + repr(self.port) + ' ' +\
'override_database: ' + repr(self.override_database) + ' ' +\
'>'
def to_dict(self):
return {
'id': self.id,
'name': self.name,
'healthy': self.healthy,
'tags': self.tags,
'secret_store_id': self.secret_store_id,
'egress_filter': self.egress_filter,
'hostname': self.hostname,
'username': self.username,
'password': <PASSWORD>,
'database': self.database,
'port_override': self.port_override,
'port': self.port,
'override_database': self.override_database,
}
@classmethod
def from_dict(cls, d):
return cls(
id=d.get('id'),
name=d.get('name'),
healthy=d.get('healthy'),
tags=d.get('tags'),
secret_store_id=d.get('secret_store_id'),
egress_filter=d.get('egress_filter'),
hostname=d.get('hostname'),
username=d.get('username'),
password=<PASSWORD>('password'),
database=d.get('database'),
port_override=d.get('port_override'),
port=d.get('port'),
override_database=d.get('override_database'),
)
class Greenplum:
    """
    :param id: Unique identifier of the Resource.
    :param name: Unique human-readable name of the Resource.
    :param healthy: True if the datasource is reachable and the credentials are valid.
    :param tags: Tags is a map of key, value pairs.
    :param secret_store_id: ID of the secret store containing credentials for this resource, if any.
    :param egress_filter: A filter applied to the routing logic to pin datasource to nodes.
    :param hostname: server hostname or address.
    :param username: login user name.
    :param password: login password; may be None when a secret store is used.
    :param database: database name.
    :param port_override:
    :param port: server port.
    :param override_database:
    """
    __slots__ = [
        'id',
        'name',
        'healthy',
        'tags',
        'secret_store_id',
        'egress_filter',
        'hostname',
        'username',
        'password',
        'database',
        'port_override',
        'port',
        'override_database',
    ]

    def __init__(
        self,
        id=None,
        name=None,
        healthy=None,
        tags=None,
        secret_store_id=None,
        egress_filter=None,
        hostname=None,
        username=None,
        # Fixed: the '<PASSWORD>' redaction placeholder was not valid Python;
        # None matches every other optional field's default.
        password=None,
        database=None,
        port_override=None,
        port=None,
        override_database=None,
    ):
        self.id = id
        self.name = name
        self.healthy = healthy
        self.tags = tags
        self.secret_store_id = secret_store_id
        self.egress_filter = egress_filter
        self.hostname = hostname
        self.username = username
        self.password = password
        self.database = database
        self.port_override = port_override
        self.port = port
        self.override_database = override_database

    def __repr__(self):
        return '<sdm.Greenplum ' + \
            'id: ' + repr(self.id) + ' ' +\
            'name: ' + repr(self.name) + ' ' +\
            'healthy: ' + repr(self.healthy) + ' ' +\
            'tags: ' + repr(self.tags) + ' ' +\
            'secret_store_id: ' + repr(self.secret_store_id) + ' ' +\
            'egress_filter: ' + repr(self.egress_filter) + ' ' +\
            'hostname: ' + repr(self.hostname) + ' ' +\
            'username: ' + repr(self.username) + ' ' +\
            'password: ' + repr(self.password) + ' ' +\
            'database: ' + repr(self.database) + ' ' +\
            'port_override: ' + repr(self.port_override) + ' ' +\
            'port: ' + repr(self.port) + ' ' +\
            'override_database: ' + repr(self.override_database) + ' ' +\
            '>'

    def to_dict(self):
        """Serialize this resource to a plain dict keyed by field name."""
        return {
            'id': self.id,
            'name': self.name,
            'healthy': self.healthy,
            'tags': self.tags,
            'secret_store_id': self.secret_store_id,
            'egress_filter': self.egress_filter,
            'hostname': self.hostname,
            'username': self.username,
            'password': self.password,
            'database': self.database,
            'port_override': self.port_override,
            'port': self.port,
            'override_database': self.override_database,
        }

    @classmethod
    def from_dict(cls, d):
        """Build an instance from a dict shaped like :meth:`to_dict` output."""
        return cls(
            id=d.get('id'),
            name=d.get('name'),
            healthy=d.get('healthy'),
            tags=d.get('tags'),
            secret_store_id=d.get('secret_store_id'),
            egress_filter=d.get('egress_filter'),
            hostname=d.get('hostname'),
            username=d.get('username'),
            password=d.get('password'),
            database=d.get('database'),
            port_override=d.get('port_override'),
            port=d.get('port'),
            override_database=d.get('override_database'),
        )
class Cockroach:
    """
    :param id: Unique identifier of the Resource.
    :param name: Unique human-readable name of the Resource.
    :param healthy: True if the datasource is reachable and the credentials are valid.
    :param tags: Tags is a map of key, value pairs.
    :param secret_store_id: ID of the secret store containing credentials for this resource, if any.
    :param egress_filter: A filter applied to the routing logic to pin datasource to nodes.
    :param hostname: server hostname or address.
    :param username: login user name.
    :param password: login password; may be None when a secret store is used.
    :param database: database name.
    :param port_override:
    :param port: server port.
    :param override_database:
    """
    __slots__ = [
        'id',
        'name',
        'healthy',
        'tags',
        'secret_store_id',
        'egress_filter',
        'hostname',
        'username',
        'password',
        'database',
        'port_override',
        'port',
        'override_database',
    ]

    def __init__(
        self,
        id=None,
        name=None,
        healthy=None,
        tags=None,
        secret_store_id=None,
        egress_filter=None,
        hostname=None,
        username=None,
        # Fixed: the '<PASSWORD>' redaction placeholder was not valid Python;
        # None matches every other optional field's default.
        password=None,
        database=None,
        port_override=None,
        port=None,
        override_database=None,
    ):
        self.id = id
        self.name = name
        self.healthy = healthy
        self.tags = tags
        self.secret_store_id = secret_store_id
        self.egress_filter = egress_filter
        self.hostname = hostname
        self.username = username
        self.password = password
        self.database = database
        self.port_override = port_override
        self.port = port
        self.override_database = override_database

    def __repr__(self):
        return '<sdm.Cockroach ' + \
            'id: ' + repr(self.id) + ' ' +\
            'name: ' + repr(self.name) + ' ' +\
            'healthy: ' + repr(self.healthy) + ' ' +\
            'tags: ' + repr(self.tags) + ' ' +\
            'secret_store_id: ' + repr(self.secret_store_id) + ' ' +\
            'egress_filter: ' + repr(self.egress_filter) + ' ' +\
            'hostname: ' + repr(self.hostname) + ' ' +\
            'username: ' + repr(self.username) + ' ' +\
            'password: ' + repr(self.password) + ' ' +\
            'database: ' + repr(self.database) + ' ' +\
            'port_override: ' + repr(self.port_override) + ' ' +\
            'port: ' + repr(self.port) + ' ' +\
            'override_database: ' + repr(self.override_database) + ' ' +\
            '>'

    def to_dict(self):
        """Serialize this resource to a plain dict keyed by field name."""
        return {
            'id': self.id,
            'name': self.name,
            'healthy': self.healthy,
            'tags': self.tags,
            'secret_store_id': self.secret_store_id,
            'egress_filter': self.egress_filter,
            'hostname': self.hostname,
            'username': self.username,
            # Fixed: the '<PASSWORD>' redaction placeholder was not valid
            # Python; sibling resource classes emit the attribute directly.
            'password': self.password,
            'database': self.database,
            'port_override': self.port_override,
            'port': self.port,
            'override_database': self.override_database,
        }

    @classmethod
    def from_dict(cls, d):
        """Build an instance from a dict shaped like :meth:`to_dict` output."""
        return cls(
            id=d.get('id'),
            name=d.get('name'),
            healthy=d.get('healthy'),
            tags=d.get('tags'),
            secret_store_id=d.get('secret_store_id'),
            egress_filter=d.get('egress_filter'),
            hostname=d.get('hostname'),
            username=d.get('username'),
            password=d.get('password'),
            database=d.get('database'),
            port_override=d.get('port_override'),
            port=d.get('port'),
            override_database=d.get('override_database'),
        )
class Redshift:
    """
    :param id: Unique identifier of the Resource.
    :param name: Unique human-readable name of the Resource.
    :param healthy: True if the datasource is reachable and the credentials are valid.
    :param tags: Tags is a map of key, value pairs.
    :param secret_store_id: ID of the secret store containing credentials for this resource, if any.
    :param egress_filter: A filter applied to the routing logic to pin datasource to nodes.
    :param hostname: server hostname or address.
    :param username: login user name.
    :param password: login password; may be None when a secret store is used.
    :param database: database name.
    :param port_override:
    :param port: server port.
    :param override_database:
    """
    __slots__ = [
        'id',
        'name',
        'healthy',
        'tags',
        'secret_store_id',
        'egress_filter',
        'hostname',
        'username',
        'password',
        'database',
        'port_override',
        'port',
        'override_database',
    ]

    def __init__(
        self,
        id=None,
        name=None,
        healthy=None,
        tags=None,
        secret_store_id=None,
        egress_filter=None,
        hostname=None,
        username=None,
        # Fixed: the '<PASSWORD>' redaction placeholder was not valid Python;
        # None matches every other optional field's default.
        password=None,
        database=None,
        port_override=None,
        port=None,
        override_database=None,
    ):
        self.id = id
        self.name = name
        self.healthy = healthy
        self.tags = tags
        self.secret_store_id = secret_store_id
        self.egress_filter = egress_filter
        self.hostname = hostname
        self.username = username
        self.password = password
        self.database = database
        self.port_override = port_override
        self.port = port
        self.override_database = override_database

    def __repr__(self):
        return '<sdm.Redshift ' + \
            'id: ' + repr(self.id) + ' ' +\
            'name: ' + repr(self.name) + ' ' +\
            'healthy: ' + repr(self.healthy) + ' ' +\
            'tags: ' + repr(self.tags) + ' ' +\
            'secret_store_id: ' + repr(self.secret_store_id) + ' ' +\
            'egress_filter: ' + repr(self.egress_filter) + ' ' +\
            'hostname: ' + repr(self.hostname) + ' ' +\
            'username: ' + repr(self.username) + ' ' +\
            'password: ' + repr(self.password) + ' ' +\
            'database: ' + repr(self.database) + ' ' +\
            'port_override: ' + repr(self.port_override) + ' ' +\
            'port: ' + repr(self.port) + ' ' +\
            'override_database: ' + repr(self.override_database) + ' ' +\
            '>'

    def to_dict(self):
        """Serialize this resource to a plain dict keyed by field name."""
        return {
            'id': self.id,
            'name': self.name,
            'healthy': self.healthy,
            'tags': self.tags,
            'secret_store_id': self.secret_store_id,
            'egress_filter': self.egress_filter,
            'hostname': self.hostname,
            'username': self.username,
            'password': self.password,
            'database': self.database,
            'port_override': self.port_override,
            'port': self.port,
            'override_database': self.override_database,
        }

    @classmethod
    def from_dict(cls, d):
        """Build an instance from a dict shaped like :meth:`to_dict` output."""
        return cls(
            id=d.get('id'),
            name=d.get('name'),
            healthy=d.get('healthy'),
            tags=d.get('tags'),
            secret_store_id=d.get('secret_store_id'),
            egress_filter=d.get('egress_filter'),
            hostname=d.get('hostname'),
            username=d.get('username'),
            # Fixed: the '<PASSWORD>' redaction placeholder was not valid
            # Python; every other field reads from `d` via d.get(...).
            password=d.get('password'),
            database=d.get('database'),
            port_override=d.get('port_override'),
            port=d.get('port'),
            override_database=d.get('override_database'),
        )
class Citus:
"""
:param id: Unique identifier of the Resource.
:param name: Unique human-readable name of the Resource.
:param healthy: True if the datasource is reachable and the credentials are valid.
:param tags: Tags is a map of key, value pairs.
:param secret_store_id: ID of the secret store containing credentials for this resource, if any.
:param egress_filter: A filter applied to the routing logic to pin datasource to nodes.
:param hostname:
:param username:
:param password:
:param database:
:param port_override:
:param port:
:param override_database:
"""
__slots__ = [
'id',
'name',
'healthy',
'tags',
'secret_store_id',
'egress_filter',
'hostname',
'username',
'password',
'database',
'port_override',
'port',
'override_database',
]
def __init__(
self,
id=None,
name=None,
healthy=None,
| |
# Copyright (c) 2017 Intel Corporation. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
import os
import logging
import settings
import threading
from chroma_core.lib.storage_plugin.base_resource import BaseStorageResource
from chroma_core.services.stats import StatsQueue
from chroma_core.lib.storage_plugin.api import identifiers
class ResourceNotFound(Exception):
    """Raised by ResourceIndex.get when no resource matches the identity."""
    pass
class ResourceIndex(object):
    """Double index over one plugin instance's resources.

    Resources are reachable either by their locally-assigned handle
    (``resource._handle``) or by the globally-identifying pair
    ``(resource.id_tuple(), resource.__class__)``.
    """

    def __init__(self):
        # Map (local_id) to resource
        self._local_id_to_resource = {}
        # Map (id_tuple, klass) to resource
        self._resource_id_to_resource = {}

    def add(self, resource):
        """Insert *resource* into both indices.

        :raises RuntimeError: if a resource with the same identity is
            already indexed.
        """
        self._local_id_to_resource[resource._handle] = resource
        # Why don't we need a scope resource in here?
        # Because if it's a ScopedId then only items for that
        # scannable will be in this ResourceIndex (index is per
        # plugin instance), and if it's a GlobalId then it doesn't
        # have a scope.
        resource_id = (resource.id_tuple(), resource.__class__)
        if resource_id in self._resource_id_to_resource:
            raise RuntimeError("Duplicate resource added to index")
        self._resource_id_to_resource[resource_id] = resource

    def remove(self, resource):
        """Drop *resource* from both indices.

        :raises RuntimeError: if the resource was never added.
        """
        resource_id = (resource.id_tuple(), resource.__class__)
        # Idiom fix: was `if not resource_id in ...`.
        if resource_id not in self._resource_id_to_resource:
            raise RuntimeError("Remove non-existent resource")
        del self._local_id_to_resource[resource._handle]
        del self._resource_id_to_resource[resource_id]

    def get(self, klass, **attrs):
        """Look up by exact identity; *attrs* must fully determine the id.

        :raises ResourceNotFound: if no such resource is indexed.
        """
        id_tuple = klass(**attrs).id_tuple()
        try:
            return self._resource_id_to_resource[(id_tuple, klass)]
        except KeyError:
            raise ResourceNotFound()

    def find_by_attr(self, klass, **attrs):
        """Yield every indexed resource of *klass* whose id matches *attrs*."""
        for resource_tuple in self._resource_id_to_resource:
            # Must compare klass before values, because values will only be valid for that klass.
            if resource_tuple[1] == klass and klass.compare_id_tuple(resource_tuple[0],
                                                                     klass.attrs_to_id_tuple(attrs, True),
                                                                     True):
                yield self._resource_id_to_resource[resource_tuple]

    def all(self):
        """Return a view of every indexed resource."""
        return self._local_id_to_resource.values()
class BaseStoragePlugin(object):
    """Base class for storage plugins.

    Subclasses implement the hook methods declared at the bottom of the
    class (initial_scan, update_scan, agent_session_start/continue); the
    ``do_*`` wrappers drive those hooks and commit the resulting resource,
    statistics and alert deltas to the resource manager.
    """
    #: Set to true for plugins which should not be shown in the user interface
    internal = False
    _log = None  # per-plugin logger — assigned outside this block, TODO confirm
    _log_format = None  # log format string — assigned outside this block, TODO confirm
    def __init__(self, resource_manager, scannable_id=None):
        """Set up plugin state and register with the global plugin manager.

        :param resource_manager: manager that receives session, resource,
            statistics and alert updates from this plugin.
        :param scannable_id: database id of the root (scannable) resource.
        """
        # Imported here (not at module scope) — presumably to avoid an import
        # cycle with the manager module; TODO confirm.
        from chroma_core.lib.storage_plugin.manager import storage_plugin_manager
        storage_plugin_manager.register_plugin(self)
        self._initialized = False
        self._resource_manager = resource_manager
        # Locks guarding handle allocation, whole-instance state and the
        # resource delta queues below.
        self._handle_lock = threading.Lock()
        self._instance_lock = threading.Lock()
        self._handle_counter = 0
        self._index = ResourceIndex()
        self._scannable_id = scannable_id
        self._resource_lock = threading.Lock()
        # Pending changes accumulated between commits to the resource manager.
        self._delta_new_resources = []
        self._delta_delete_local_resources = []
        self._delta_delete_global_resources = []
        self._alerts_lock = threading.Lock()
        self._delta_alerts = set()
        self._alerts = {}
        # Should changes to resources be delta'd so that only changes are reported to the resource manager. This
        # required because it may be that at boot time (for example) we want all the changes everytime but once the
        # system is quiescent we only want the deltas.
        self._calc_changes_delta = True
        self._session_open = False
        self._update_period = settings.PLUGIN_DEFAULT_UPDATE_PERIOD
        from chroma_core.lib.storage_plugin.query import ResourceQuery
        # Load the root resource from the DB and give it a local handle.
        root_resource = ResourceQuery().get_resource(scannable_id)
        root_resource._handle = self._generate_handle()
        root_resource._handle_global = False
        self._root_resource = root_resource
##############################################################################################
# Methods below are not called from within IML and not to be used by the plugins themselves. #
##############################################################################################
def do_agent_session_start(self, data):
"""
Start a session based on information sent from an agent plugin.
:param data: Arbitrary JSON-serializable data sent by plugin.
:return No return value
"""
self._initial_populate(self.agent_session_start, self._root_resource.host_id, data)
def do_agent_session_continue(self, data):
"""
Continue a session using information sent from an agent plugin.
This will only ever be called on Plugin instances where `agent_session_start` has
already been called.
:param data: Arbitrary JSON-serializable data sent by plugin.
:return No return value
"""
self._update(self.agent_session_continue, self._root_resource.host_id, data)
def do_initial_scan(self):
"""
Identify all resources present at this time and call register_resource on them.
If you return from this function you must have succeeded in communicating with the scannable resource. Any
resources which were present previously and are absent when initial_scan returns are assumed to be
permanently absent and are deleted. If for any reason you cannot return all resources (for example,
communication failure with a controller), you must raise an exception.
:return: No return value
"""
self._initial_populate(self.initial_scan, self._root_resource)
def do_periodic_update(self):
"""
Perform any required periodic refresh of data and update any resource instances. It is guaranteed that
initial_scan will have been called before this.
:return: No return value
"""
self._update(self.update_scan, self._root_resource)
def do_teardown(self):
"""
Perform any teardown required before terminating.
Guaranteed not to be called concurrently with initial_scan or update_scan.
Guaranteed that initial_scan or update_scan will not be called after this.
Guaranteed that once initial_scan has been entered this function will later be called unless the whole process
terminates prematurely.
This function will be called even if initial_scan or update_scan raises an exception.
:return: No return value
"""
self.teardown()
if self._session_open:
self._resource_manager.session_close(self._scannable_id)
self._session_open = False
##################################################################################################
# Methods below are not used by the plugins themselves, but are private to its internal workings #
##################################################################################################
    def _initial_populate(self, fn, *args):
        """One-time population: run *fn*, then open the session with everything it found.

        :param fn: scan callable (``initial_scan`` or ``agent_session_start``).
        :param args: arguments forwarded to *fn*.
        :raises RuntimeError: if called twice on the same instance.
        """
        if self._initialized:
            raise RuntimeError("Tried to initialize %s twice!" % self)
        self._initialized = True
        self._index.add(self._root_resource)
        fn(*args)
        # Everything registered during fn() is handed over wholesale here.
        self._resource_manager.session_open(
            self,
            self._scannable_id,
            self._index.all(),
            self._update_period)
        self._session_open = True
        # Queue is cleared because session_open already consumed the full set.
        self._delta_new_resources = []
        # Creates, deletes, attrs, parents are all handled in session_open
        # the rest we do manually.
        self._commit_resource_statistics()
        self._check_alert_conditions()
        self._commit_alerts()
def _generate_handle(self):
with self._handle_lock:
self._handle_counter += 1
return self._handle_counter
    def _update(self, fn, *args):
        """Run a scan callable, then flush every accumulated delta.

        Commit order matters: creates go first so later phases can refer to
        newly-registered resources.

        :param fn: scan callable (``update_scan`` or ``agent_session_continue``).
        :param args: arguments forwarded to *fn*.
        """
        # Be sure that the session and the plugin match - HYD-3068 was a case where they didn't.
        assert(self == self._resource_manager._sessions[self._scannable_id]._plugin_instance)
        fn(*args)
        # Resources created since last update
        with self._resource_lock:
            self._commit_resource_creates()
            self._commit_resource_deletes()
            self._commit_resource_updates()
            self._commit_resource_statistics()
            self._check_alert_conditions()
            self._commit_alerts()
def _check_alert_conditions(self):
for resource in self._index.all():
# Check if any AlertConditions are matched
for ac in resource._meta.alert_conditions:
alert_list = ac.test(resource)
for name, attribute, active, severity in alert_list:
self._notify_alert(active, severity, resource, name, attribute)
def _commit_resource_creates(self):
if len(self._delta_new_resources) > 0:
self._resource_manager.session_add_resources(self._scannable_id, self._delta_new_resources)
self._delta_new_resources = []
def _commit_resource_deletes(self):
# Resources deleted since last update
if len(self._delta_delete_local_resources) > 0:
self._resource_manager.session_remove_local_resources(self._scannable_id,
self._delta_delete_local_resources)
self._delta_delete_local_resources = []
if len(self._delta_delete_global_resources) > 0:
self._resource_manager.session_remove_global_resources(self._scannable_id,
self._delta_delete_global_resources)
self._delta_delete_global_resources = []
    def _commit_resource_updates(self):
        """Flush per-resource attribute and parent-link deltas to the session.

        A parent in the delta list that is still present in
        ``resource._parents`` was added; one that is absent was removed.
        """
        # Resources with changed attributes
        for resource in self._index.all():
            deltas = resource.flush_deltas()
            # If there were changes to attributes
            if len(deltas['attributes']) > 0:
                self._resource_manager.session_update_resource(
                    self._scannable_id, resource._handle, deltas['attributes'])
            # If there were parents added or removed
            if len(deltas['parents']) > 0:
                for parent_resource in deltas['parents']:
                    if parent_resource in resource._parents:
                        # If it's in the parents of the resource then it's an add
                        self._resource_manager.session_resource_add_parent(
                            self._scannable_id, resource._handle,
                            parent_resource._handle)
                    else:
                        # Otherwise it's a remove
                        self._resource_manager.session_resource_remove_parent(
                            self._scannable_id, resource._handle,
                            parent_resource._handle)
def _commit_alerts(self):
with self._alerts_lock:
for (resource, attribute, alert_class, severity) in self._delta_alerts:
active = self._alerts[(resource, attribute, alert_class, severity)]
self._resource_manager.session_notify_alert(
self._scannable_id, resource._handle,
active, severity, alert_class, attribute)
self._delta_alerts.clear()
def _commit_resource_statistics(self):
samples = []
for resource in self._index.all():
r_stats = resource.flush_stats()
if r_stats and settings.STORAGE_PLUGIN_ENABLE_STATS:
samples += self._resource_manager.session_get_stats(self._scannable_id, resource._handle, r_stats)
if samples:
StatsQueue().put(samples)
return len(samples)
def _notify_alert(self, active, severity, resource, alert_name, attribute = None):
# This will be flushed through to the database by update_scan
key = (resource, attribute, alert_name, severity)
with self._alerts_lock:
try:
existing = self._alerts[key]
if existing == active:
return
except KeyError:
pass
self._alerts[key] = active
self._delta_alerts.add(key)
def _register_resource(self, resource):
"""Register a newly created resource:
* Assign it a local ID
* Add it to the local indices
* Mark it for inclusion in the next update to global state"""
assert(isinstance(resource, BaseStorageResource))
assert(not resource._handle)
resource.validate()
resource._handle = self._generate_handle()
resource._handle_global = False
self._index.add(resource)
self._delta_new_resources.append(resource)
############################################################
# Methods below are implemented by the plugins themselves. #
############################################################
def initial_scan(self, root_resource):
"""
Required
Identify all resources present at this time and call register_resource on them.
If you return from this function you must have succeeded in communicating with the scannable resource. Any
resources which were present previously and are absent when initial_scan returns are assumed to be
permanently absent and are deleted. If for any reason you cannot return all resources (for example,
communication failure with a controller), you must raise an exception.
:param root_resource: All resources of the plugin for each host are children of this resource.
:return: No return value
"""
raise NotImplementedError
def update_scan(self, root_resource):
"""
Optional
Perform any required periodic refresh of data and update any resource instances. It is guaranteed that
initial_scan will have been called before this.
:param root_resource: All resources of the plugin for each host are children of this resource.
:return: No return value
"""
pass
def agent_session_start(self, host_id, data):
"""
Optional
Start a session based on information sent from an agent plugin.
:param host_id: ID of the host from which the agent information was sent -- this is
a database identifier which is mainly useful for constructing DeviceNode
resources.
:param data: Arbitrary JSON-serializable data sent by plugin.
:return No return value
"""
pass
def agent_session_continue(self, host_id, data):
"""
Optional
Continue a session using information sent from an agent plugin.
This will only ever be called | |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""main module."""
import logging
import os
import pathlib
import platform
import pprint
import shutil
import traceback
from logging.handlers import TimedRotatingFileHandler
from tempfile import NamedTemporaryFile
from typing import Any, Dict, Iterator, List, Optional, Tuple
from urllib.parse import urlparse
import cfscrape
import click
import mechanicalsoup
import requests
import structlog
import werkzeug
from bs4 import BeautifulSoup
from flask import Flask
from flask import __version__ as flask_version # type: ignore
from flask import cli as flask_cli
from flask import send_from_directory
from flask_admin import Admin
from flask_restful import Api
from hydrus import Client
from hydrus.utils import yield_chunks
from . import models, parse, views
from .__init__ import __version__, db_version
from .models import iqdb_url_dict
from .utils import default_db_path, thumb_folder, user_data_dir
# Module-level defaults; NOTE(review): these look like user-tunable settings.
db = "~/images/! tagged"  # default tagged-image folder (HOME-relative path)
DEFAULT_PLACE = "iqdb"  # default lookup place key (see models.iqdb_url_dict)
minsim = 75  # minimum similarity threshold — presumably percent; TODO confirm
services = ["1", "2", "3", "4", "5", "6", "10", "11"]  # backend service id codes — semantics defined by the remote API; TODO confirm
forcegray = False  # force grayscale before searching — TODO confirm
log = structlog.getLogger()  # module-wide structured logger
def get_iqdb_result(image: str, iqdb_url: str = "http://iqdb.org/") -> Any:
    """Upload *image* to an iqdb service and return the parsed match result.

    :param image: path of the image file to search for.
    :param iqdb_url: base URL of the iqdb instance to query.
    :return: whatever ``parse.parse_result`` yields for the result page.
    """
    # Fixed: open the file in a context manager so the handle is always
    # closed; the original left it open until garbage collection.
    with open(image, "rb") as image_file:
        resp = requests.post(iqdb_url, files={"file": image_file}, timeout=10)
    html_text = BeautifulSoup(resp.text, "lxml")
    return parse.parse_result(html_text)
def init_program(db_path: str = default_db_path) -> None:
    """Prepare the user data directories and initialise the database.

    Args:
        db_path: location of the database file to initialise.
    """
    # make sure both the user data dir and the thumbnail cache exist
    for folder in (user_data_dir, thumb_folder):
        pathlib.Path(folder).mkdir(parents=True, exist_ok=True)
    models.init_db(db_path, db_version)
def write_url_from_match_result(match_result: "models.ImageMatch", folder: Optional[str] = None) -> None:
    """Append the match's link to a per-site text file.

    The file is named after the link's network location with dots replaced
    by underscores (e.g. ``iqdb_org.txt``).

    Args:
        match_result: match object; only its ``link`` attribute is read.
        folder: directory to write the file into; current directory when None.
    """
    # fixed implicit-Optional annotation (was ``folder: str = None``)
    netloc = urlparse(match_result.link).netloc
    sanitized_netloc = netloc.replace(".", "_")
    text_file_basename = sanitized_netloc + ".txt"
    text_file = os.path.join(folder, text_file_basename) if folder is not None else text_file_basename
    # append so repeated runs accumulate urls instead of overwriting
    with open(text_file, "a") as f:
        f.write(match_result.link)
        f.write("\n")
def get_result_on_windows(
    image: str,
    place: str,
    resize: Optional[bool] = False,
    size: Optional[Tuple[int, int]] = None,
    browser: Optional[mechanicalsoup.StatefulBrowser] = None,
) -> List[models.ImageMatch]:
    """Get result on Windows.

    Windows cannot reopen a NamedTemporaryFile while it is held open, so
    the temp files are created with ``delete=False`` and removed manually
    after the ``with`` block closes them.

    Args:
        image: image path
        place: iqdb place code
        resize: resize the image
        size: resized image size
        browser: browser instance

    Returns:
        matching items
    """
    result = []
    # temp_f
    with NamedTemporaryFile(mode="w+t", delete=False) as temp_f, NamedTemporaryFile(mode="w+t", delete=False) as thumb_temp_f:
        temp_file_name = temp_f.name
        thumb_temp_file_name = thumb_temp_f.name
        # copy to temp file
        shutil.copyfile(image, temp_f.name)
        # get image to be posted based on user input
        try:
            post_img = models.get_posted_image(img_path=temp_f.name, resize=resize, size=size, thumb_path=thumb_temp_f.name)
        except OSError as e:
            # re-raise with the offending path so the caller knows which image failed
            raise OSError(str(e) + " when processing {}".format(image)) from e
        # collect any cached matches for this place
        for img_m_rel_set in post_img.imagematchrelationship_set:
            for item_set in img_m_rel_set.imagematch_set:
                if item_set.search_place_verbose == place:
                    result.append(item_set)
        if not result:
            url, im_place = iqdb_url_dict[place]
            # plain requests are used for every place except e621
            use_requests = place != "e621"
            post_img_path = temp_f.name if not resize else thumb_temp_f.name
            page = models.get_page_result(image=post_img_path, url=url, browser=browser, use_requests=use_requests)
            # if ok, will output: <Response [200]>
            page_soup = BeautifulSoup(page, "lxml")
            # each entry is a (match, created) pair; keep only the match objects
            result = list(parse.get_or_create_image_match_from_page(page=page_soup, image=post_img, place=im_place))
            result = [x[0] for x in result]
    # manual cleanup of the delete=False temp files (after they are closed)
    for item in [temp_file_name, thumb_temp_file_name]:
        try:
            os.remove(item)
        except Exception:  # pylint: disable=broad-except
            log.exception("error removing {}".format(item))
    return result
def run_program_for_single_img(  # pylint: disable=too-many-branches, too-many-statements
    image: str,
    resize: bool = False,
    size: Optional[Tuple[int, int]] = None,
    place: str = DEFAULT_PLACE,
    match_filter: Optional[str] = None,
    browser: Optional[mechanicalsoup.StatefulBrowser] = None,
    scraper: Optional[cfscrape.CloudflareScraper] = None,
    disable_tag_print: Optional[bool] = False,
    write_tags: Optional[bool] = False,
    write_url: Optional[bool] = False,
    minimum_similarity: Optional[int] = None,
) -> Dict[str, Any]:
    """Run program for single image.

    Args:
        image: image path
        resize: resize the image
        size: resized image size
        place: iqdb place, see `iqdb_url_dict`
        match_filter: whitelist matched items
        browser: mechanicalsoup browser instance
        scraper: cfscrape instance
        disable_tag_print: don't print the tag
        write_tags: write tags as hydrus tag file
        write_url: write matching items' url to file
        minimum_similarity: filter result items with minimum similarity

    Returns:
        iqdb result and collected errors
    """
    # compatibility
    br = browser  # type: ignore
    error_set = []  # List[Exception]
    tag_textfile = image + ".txt"
    folder = os.path.dirname(image)
    result = []  # type: List[models.ImageMatch]
    if platform.system() == "Windows":
        # Windows needs special temp-file handling, see get_result_on_windows
        result = get_result_on_windows(image, place, resize=resize, size=size, browser=br)
    else:
        # NOTE(review): these delete=False temp files are never removed on
        # the non-Windows path -- confirm whether cleanup was intended here.
        with NamedTemporaryFile(delete=False) as temp, NamedTemporaryFile(delete=False) as thumb_temp:
            shutil.copyfile(image, temp.name)
            try:
                post_img = models.get_posted_image(
                    img_path=temp.name,
                    resize=resize,
                    size=size,
                    thumb_path=thumb_temp.name,
                )
            except OSError as e:
                # re-raise with the offending path for easier debugging
                raise OSError(str(e) + " when processing {}".format(image)) from e
            # collect any cached matches for this place
            for img_m_rel_set in post_img.imagematchrelationship_set:
                for item_set in img_m_rel_set.imagematch_set:
                    if item_set.search_place_verbose == place:
                        result.append(item_set)
            if not result:
                url, im_place = iqdb_url_dict[place]
                # plain requests are used for every place except e621
                use_requests = place != "e621"
                post_img_path = temp.name if not resize else thumb_temp.name
                page = models.get_page_result(image=post_img_path, url=url, browser=br, use_requests=use_requests)
                # if ok, will output: <Response [200]>
                page_soup = BeautifulSoup(page, "lxml")
                # each entry is a (match, created) pair; keep only the match objects
                result = list(parse.get_or_create_image_match_from_page(page=page_soup, image=post_img, place=im_place))
                result = [x[0] for x in result]
    if match_filter == "best-match":
        result = [x for x in result if x.status == x.STATUS_BEST_MATCH]
    if minimum_similarity:
        result = [x for x in result if float(x.similarity) >= minimum_similarity]
    log.debug("Number of valid result", n=len(result))
    match_result_tag_pairs = []  # type: List[Tuple[models.Match, List[models.Tag]]]
    for item in result:
        match_result = item.match.match_result  # type: models.Match
        url = match_result.link
        log.debug("match status", similarity=item.similarity, status=item.status_verbose)
        log.debug("url", v=url)
        try:
            tags = models.get_tags_from_match_result(match_result, browser, scraper)
            tags_verbose = [x.full_name for x in tags]
            match_result_tag_pairs.append((match_result, tags))
            log.debug("{} tag(s) founds".format(len(tags_verbose)))
            if tags and not disable_tag_print:
                print("\n".join(tags_verbose))
            if tags and write_tags:
                # append to a hydrus-style <image>.txt sidecar file
                with open(tag_textfile, "a") as f:
                    f.write("\n".join(tags_verbose))
                    f.write("\n")
                log.debug("tags written")
            if write_url:
                write_url_from_match_result(match_result, folder)
        except Exception as e:  # pylint:disable=broad-except
            # collect per-match errors instead of aborting the whole image
            log.error("Error", e=str(e))
            error_set.append(e)
    return {"error": error_set, "match result tag pairs": match_result_tag_pairs}
def thumb(basename: str) -> Any:
    """Serve a thumbnail file from the thumbnail cache folder.

    Args:
        basename: file name of the thumbnail inside ``thumb_folder``.

    Returns:
        a Flask response streaming the requested file.
    """
    response = send_from_directory(thumb_folder, basename)
    return response
def create_app(script_info: Optional[Any] = None) -> Any:
    """Create and configure the Flask application.

    Sets up file logging, reads configuration from IQDB_TAGGER_* environment
    variables, initialises the database, and registers API, admin views and
    routes.

    Args:
        script_info: flask CLI ScriptInfo object (only echoed in debug mode).

    Returns:
        the configured Flask app.
    """
    app = Flask(__name__)
    # logging
    if not os.path.exists(user_data_dir):
        os.makedirs(user_data_dir)
    log_dir = os.path.join(user_data_dir, "log")
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    peewee_logger = logging.getLogger("peewee")
    peewee_logger.setLevel(logging.INFO)
    chardet_logger = logging.getLogger("chardet")
    chardet_logger.setLevel(logging.INFO)
    default_log_file = os.path.join(log_dir, "iqdb_tagger_server.log")
    # rotate the server log file at midnight
    file_handler = TimedRotatingFileHandler(default_log_file, "midnight")
    file_handler.setLevel(logging.WARNING)
    file_handler.setFormatter(logging.Formatter("<%(asctime)s> <%(levelname)s> %(message)s"))
    app.logger.addHandler(file_handler)
    # NOTE(review): the two calls below pass Logger objects where addHandler
    # expects logging.Handler instances -- confirm intended behavior.
    app.logger.addHandler(peewee_logger)
    app.logger.addHandler(chardet_logger)
    # reloader: env var IQDB_TAGGER_RELOADER enables template auto-reload
    reloader = app.config["TEMPLATES_AUTO_RELOAD"] = bool(os.getenv("IQDB_TAGGER_RELOADER")) or app.config["TEMPLATES_AUTO_RELOAD"]  # NOQA
    if reloader:
        app.jinja_env.auto_reload = True
    # secret key from env, or a random per-process value
    app.config["SECRET_KEY"] = os.getenv("IQDB_TAGGER_SECRET_KEY") or os.urandom(24)
    app.config["WTF_CSRF_ENABLED"] = False
    # debug
    debug = app.config["DEBUG"] = bool(os.getenv("IQDB_TAGGER_DEBUG")) or app.config["DEBUG"]
    if debug:
        app.config["DEBUG"] = True
        app.config["LOGGER_HANDLER_POLICY"] = "debug"
        logging.basicConfig(level=logging.DEBUG)
        pprint.pprint(app.config)
        print("Log file: {}".format(default_log_file))
        print("script info:{}".format(script_info))
    db_path = os.getenv("IQDB_TAGGER_DB_PATH") or default_db_path
    # NOTE(review): init_program() is called without db_path, so it always
    # initialises the default database before init_db re-runs with db_path;
    # confirm whether init_program(db_path) was intended instead.
    init_program()
    models.init_db(db_path)
    # app and db
    app.app_context().push()

    @app.shell_context_processor
    def shell_context() -> Dict["str", Any]:  # pylint: disable=unused-variable
        return {"app": app}

    # api
    api = Api(app)
    api.add_resource(views.MatchViewList, "/api/matchview")
    # flask-admin
    app_admin = Admin(
        app,
        name="IQDB Tagger",
        template_mode="bootstrap3",
        index_view=views.HomeView(name="Home", template="iqdb_tagger/index.html", url="/"),
    )
    app_admin.add_view(views.MatchView())
    # app_admin.add_view(ModelView(ImageMatch, category='DB'))
    # app_admin.add_view(ModelView(ImageMatchRelationship, category='DB'))
    # app_admin.add_view(ModelView(ImageModel, category='DB'))
    # app_admin.add_view(ModelView(MatchTagRelationship, category='DB'))
    # routing
    app.add_url_rule("/thumb/<path:basename>", view_func=thumb)
    return app
class FlaskGroup(flask_cli.FlaskGroup):
    """Custom Flask Group."""

    def __init__(self, **kwargs: Any) -> None:
        """Class init.

        Replaces the first click parameter's help text and callback so the
        ``--version`` flag reports this application's version instead of
        Flask's default.
        """
        super().__init__(**kwargs)
        # NOTE(review): assumes params[0] is the version option added by
        # flask's FlaskGroup -- confirm this holds across flask versions
        if hasattr(self.params[0], "help"):
            self.params[0].help = "Show the program version"  # type: ignore
            self.params[0].callback = get_version
def get_version(ctx: Any, _: Any, value: Any):
    """Click callback: print app, Python, Flask and Werkzeug versions.

    Args:
        ctx: click context.
        _: the triggering click parameter (unused).
        value: flag value; falsy means the option was not supplied.
    """
    # do nothing when the flag is absent or click is parsing resiliently
    if not value or ctx.resilient_parsing:
        return
    message = "%(app_name)s %(app_version)s\nPython %(python)s\nFlask %(flask)s\nWerkzeug %(werkzeug)s"
    click.echo(
        message
        % {
            "app_name": "Iqdb-Tagger",
            "app_version": __version__,
            "python": platform.python_version(),
            "flask": flask_version,
            "werkzeug": werkzeug.__version__,
        },
        color=ctx.color,
    )
    # stop further command processing after printing the version
    ctx.exit()
# Entry point: a click group wiring the custom FlaskGroup to create_app.
# (The docstring below is the CLI help text shown to users; left unchanged.)
@click.group(cls=FlaskGroup, create_app=create_app)
def cli() -> None:
    """Run cli. This is a management script for application."""
@cli.command()
@click.version_option()
@click.option(
"--place",
type=click.Choice(iqdb_url_dict.<EMAIL>()),
default=DEFAULT_PLACE,
help="Specify iqdb place, default:{}".format(DEFAULT_PLACE),
)
@click.option("--minimum-similarity", type=float, help="Minimum similarity.")
@click.option("--resize", is_flag=True, help="Use resized image.")
@click.option("--size", help="Specify resized image, format: 'w,h'.")
@click.option("--db-path", help="Specify Database path.")
@click.option(
"--match-filter",
type=click.Choice(["default", "best-match"]),
default="default",
help="Filter the result.",
)
@click.option("--write-tags", is_flag=True, help="Write best match's tags to text.")
@click.option("--write-url", is_flag=True, help="Write match url to text.")
@click.option(
"--input-mode",
type=click.Choice(["default", "folder"]),
default="default",
help="Set input mode.",
)
@click.option("--verbose", "-v", is_flag=True, help="Verbose output.")
@click.option("--debug", "-d", is_flag=True, help="Print debug output.")
@click.option("--abort-on-error", is_flag=True, help="Stop program when error occured") # pylint: disable=too-many-branches
@click.argument("prog-input")
def cli_run(
prog_input: str = None,
resize: bool = False,
size: Optional[str] = None,
db_path: str = default_db_path,
place: str = DEFAULT_PLACE,
match_filter: str = "default",
input_mode: str = "default",
verbose: bool = False,
debug: bool = False,
abort_on_error: bool = False,
write_tags: bool = False,
write_url: bool = False,
minimum_similarity: bool = None,
) -> None:
"""Get similar image from iqdb."""
assert prog_input is not None, "Input is not a valid path"
# logging
log_level = None
if verbose:
log_level = logging.INFO
if debug:
log_level = logging.DEBUG
if log_level:
logging.basicConfig(
handlers=[logging.FileHandler(os.path.join(user_data_dir, "output.log"), "w", "utf-8")],
level=log_level,
)
init_program(db_path)
br = mechanicalsoup.StatefulBrowser(soup_config={"features": "lxml"})
br.raise_on_404 = True
scraper = cfscrape.CloudflareScraper()
# variable used in both input mode
error_set = []
size_tuple: Optional[Tuple[int, int]] = None
if size is not None:
size_tuple = tuple(map(int, size.split(",", 1))) # type: ignore
if input_mode == "folder":
assert os.path.isdir(prog_input), "Input is not valid folder"
files = [os.path.join(prog_input, x) for x in os.listdir(prog_input)]
if not files:
print("No files found.")
return
sorted_files = sorted(files, key=lambda x: os.path.splitext(x)[1])
for idx, ff in enumerate(sorted_files):
log.debug("file", f=os.path.basename(ff), idx=idx, total=len(files))
result = {}
try:
result = run_program_for_single_img(
ff,
resize,
size_tuple,
place,
match_filter,
browser=br,
scraper=scraper,
disable_tag_print=True,
write_tags=write_tags,
write_url=write_url,
minimum_similarity=minimum_similarity,
)
except Exception as e: # pylint:disable=broad-except
if abort_on_error:
raise e
error_set.append((ff, e))
if result is not None and result.get("error"):
| |
# Copyright (c) 2007-2019 The Psi4 Developers.
# Copyright (c) 2014-2018, The Psi4NumPy Developers.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Psi4NumPy Developers nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import psi4
def build_grid(wfn, L, D):
    """
    Creates origin and extent of the cube file

    Parameters
    ----------
    wfn : psi4.core.Wavefunction
        Wavefunction object from Psi4 energy calculation
    L : List
        Spatial extent (padding beyond the molecule) for x,y,z directions
    D : List
        Grid spacing in bohrs for x,y,z directions

    Returns
    -------
    O : numpy.ndarray
        Origin for the cubefile
    N : numpy.ndarray
        Number of points for each coordinate
    """
    geometry = wfn.molecule().full_geometry().np
    # Bounding box of the molecule along each axis, vectorized.
    # (The original looped over every atom per axis and also computed an
    # unused Xdel array; both removed.)
    coords = np.asarray(geometry)[:, :3]
    Xmin = coords.min(axis=0)
    Xmax = coords.max(axis=0)
    N = np.zeros(3)
    O = np.zeros(3)
    for k in (0, 1, 2):
        span = Xmax[k] - Xmin[k]
        # number of points covering the molecule plus 2*L[k] of padding
        N[k] = int((span + 2.0 * L[k]) / D[k])
        # add one point when the truncated count does not cover the padded box
        if D[k] * N[k] < (span + 2.0 * L[k]):
            N[k] += 1
        # center the grid on the molecule
        O[k] = Xmin[k] - (D[k] * N[k] - span) / 2.0
    return O, N
def populate_grid(wfn, O, N, D):
    """
    Build cube grid

    Parameters
    ----------
    wfn : psi4.core.Wavefunction
        Wavefunction object from Psi4 energy calculation
    O : List
        Origin for the cubefile
    N : List
        Number of points for each coordinate
    D : List
        Grid Spacing in bohrs for x,y,z directions

    Returns
    -------
    block : List
        Set of psi4.core.BlockOPoints for cube grid
    points : psi4.core.RKSFunctions
    nxyz : integer
        number of points in each direction for rectangular grid
    npoints : int
        total number of points in grid
    """
    epsilon = psi4.core.get_global_option("CUBIC_BASIS_TOLERANCE")
    basis = psi4.core.BasisSet.build(wfn.molecule(), 'ORBITAL', wfn.basisset().name())
    extens = psi4.core.BasisExtents(basis, epsilon)
    npoints = (N[0]) * (N[1]) * (N[2])
    x = np.zeros(int(npoints))
    y = np.zeros(int(npoints))
    z = np.zeros(int(npoints))
    w = np.zeros(int(npoints))
    # BUG FIX: the option name was misspelled "CUBIC_BlOCK_MAX_POINTS";
    # spell it correctly (psi4 may uppercase keys, but do not rely on that).
    max_points = psi4.core.get_global_option("CUBIC_BLOCK_MAX_POINTS")
    # cubic blocks of roughly max_points points: edge length is its cube root
    nxyz = int(np.round(max_points**(1/3)))
    block = []
    offset = 0
    i_start = 0
    j_start = 0
    k_start = 0
    # walk the grid in nxyz-sized cubes; the last cube along each axis is
    # truncated to the remaining N[k] - start points
    for i in range(i_start, int(N[0] + 1), nxyz):
        ni = int(N[0]) - i if i + nxyz > N[0] else nxyz
        for j in range(j_start, int(N[1] + 1), nxyz):
            nj = int(N[1]) - j if j + nxyz > N[1] else nxyz
            for k in range(k_start, int(N[2] + 1), nxyz):
                nk = int(N[2]) - k if k + nxyz > N[2] else nxyz
                x_in, y_in, z_in, w_in = [], [], [], []
                block_size = 0
                for ii in range(i, i + ni):
                    for jj in range(j, j + nj):
                        for kk in range(k, k + nk):
                            x[offset] = O[0] + ii * D[0]
                            y[offset] = O[1] + jj * D[1]
                            z[offset] = O[2] + kk * D[2]
                            # quadrature weight: cell volume
                            w[offset] = D[0] * D[1] * D[2]
                            x_in.append(x[offset])
                            y_in.append(y[offset])
                            z_in.append(z[offset])
                            w_in.append(w[offset])
                            offset += 1
                            block_size += 1
                x_out = psi4.core.Vector.from_array(np.array(x_in))
                y_out = psi4.core.Vector.from_array(np.array(y_in))
                z_out = psi4.core.Vector.from_array(np.array(z_in))
                w_out = psi4.core.Vector.from_array(np.array(w_in))
                block.append(psi4.core.BlockOPoints(x_out, y_out, z_out, w_out, extens))
    # size the point workers for the largest block's local basis functions
    max_functions = 0
    for i in range(max_functions, len(block)):
        max_functions = max_functions if max_functions > len(block[i].functions_local_to_global()) else len(block[i].functions_local_to_global())
    points = psi4.core.RKSFunctions(basis, int(npoints), max_functions)
    points.set_ansatz(0)
    return block, points, nxyz, npoints
def add_density(npoints, points, block, matrix):
    """
    Computes density in new grid

    Parameters
    ----------
    npoints: int
        total number of points
    points : psi4.core.RKSFunctions
    block : list
        Set of psi4.core.BlockOPoints for cube grid
    matrix : psi4.core.Matrix
        One-particle density matrix

    Returns
    -------
    v : numpy array
        Array with density values on the grid
    """
    v = np.zeros(int(npoints))
    points.set_pointers(matrix)
    # RHO_A buffer is fetched once; NOTE(review): presumably compute_points
    # refreshes it in place for every block -- confirm against psi4 docs
    rho = points.point_values()["RHO_A"]
    offset = 0
    for i in range(len(block)):
        points.compute_points(block[i])
        n_points = block[i].npoints()
        offset += n_points
        # copy this block's slice; NOTE(review): factor 0.5 presumably
        # converts to per-spin density -- confirm spin convention
        v[offset-n_points:offset] = 0.5 * rho.np[:n_points]
    return v
def compute_isocontour_range(v, npoints):
    """
    Computes threshold for isocontour range

    Parameters
    ----------
    v : numpy array
        Array with scalar values on the grid
    npoints : int
        Total number of points on the grid

    Returns
    -------
    values : list
        Value of positive and negative isocontour
    cumulative_threshold : float
        Fraction of the total weight captured when the walk stopped
    """
    cumulative_threshold = 0.85
    n = int(npoints)
    sum_weight = 0
    # Store the points with their weights and compute the sum of weights.
    # Weight is simply |value| (the original applied np.power(|v|, 1.0),
    # which is a no-op exponent).
    sorted_points = np.zeros((n, 2))
    for i in range(n):
        value = v[i]
        weight = np.abs(value)
        sum_weight += weight
        sorted_points[i] = [weight, value]
    # Sort the points by value, descending
    sorted_points = sorted_points[np.argsort(sorted_points[:, 1])][::-1]
    # Walk down the sorted values accumulating weight until the cumulative
    # fraction passes the threshold; the last value seen on each side of
    # zero determines the positive and negative isocontour bounds.
    cumulative = 0  # renamed from `sum`, which shadowed the builtin
    negative_isocontour = 0.0
    positive_isocontour = 0.0
    for weight, value in sorted_points:
        if value >= 0:
            positive_isocontour = value
        else:
            negative_isocontour = value
        cumulative += weight / sum_weight
        if cumulative > cumulative_threshold:
            break
    values = [positive_isocontour, negative_isocontour]
    return values, cumulative_threshold
def write_cube_file(wfn, O, N, D, nxyz, npoints, v, name, header):
    """Write scalar field *v* to ``./{name}.cube`` in Gaussian cube format.

    Parameters
    ----------
    wfn : psi4.core.Wavefunction-like
        Provides the molecule (atom count / coordinates) for the header.
    O, N, D : sequence
        Grid origin, number of points, and spacing per axis.
    nxyz : int
        Cube-block edge length used when the grid was populated.
    npoints : int
        Total number of grid points.
    v : sequence
        Scalar values in block order (as produced by ``add_density``).
    name : str
        Base name of the output file and the property label.
    header : str
        Extra header text appended after the property label.
    """
    # Reorder the grid from block order back to row-major (x, y, z) order
    v2 = np.zeros_like(v)
    offset = 0
    for istart in range(0, int(N[0] + 1), nxyz):
        ni = int(N[0]) - istart if istart + nxyz > N[0] else nxyz
        for jstart in range(0, int(N[1] + 1), nxyz):
            nj = int(N[1]) - jstart if jstart + nxyz > N[1] else nxyz
            for kstart in range(0, int(N[2] + 1), nxyz):
                nk = int(N[2]) - kstart if kstart + nxyz > N[2] else nxyz
                for i in range(istart, istart + ni):
                    for j in range(jstart, jstart + nj):
                        for k in range(kstart, kstart + nk):
                            index = i * (N[1]) * (N[2]) + j * (N[2]) + k
                            v2[int(index)] = v[offset]
                            offset += 1
    # Context manager guarantees the file is closed even if a write fails
    # (the original used bare open()/close()).
    with open(F"./{name}.cube", "w+") as f:
        f.write("Psi4Numpy Gaussian Cube File. \n")
        f.write(F"Property: {name}")
        f.write(header)
        f.write(F"{wfn.molecule().natom()} {format(O[0], '1.5f')} {format(O[1], '1.5f')} {format(O[2], '1.5f')} \n")
        f.write(F" {int(N[0])} {D[0]} 0.0 0.0 \n")
        f.write(F" {int(N[1])} 0.0 {D[1]} 0.0 \n")
        f.write(F" {int(N[2])} 0.0 0.0 {D[2]} \n")
        for atom in range(wfn.molecule().natom()):
            f.write(F"{wfn.molecule().true_atomic_number(atom)} 0.0 {format(wfn.molecule().x(atom), '8.5f')} {format(wfn.molecule().y(atom), '8.5f')} {format(wfn.molecule().z(atom), '8.5f')} \n")
        # data section: scientific notation, six values per line
        for i in range(int(npoints)):
            f.write(format(v2[i], '1.5e'))
            f.write(" ")
            if i % 6 == 5:
                f.write("\n")
def compute_density(wfn, O, N, D, npoints, points, nxyz, block, matrix, name=None, write_file=False):
v = add_density(npoints, points, block, matrix)
isocontour_range, threshold = compute_isocontour_range(v, npoints)
density_percent = 100.0 * threshold
header = F"""[e/a0^3]. Isocontour range for {density_percent} of the density ({format(isocontour_range[0], '1.5f')},{format(isocontour_range[1],'1.5f')}) \n"""
if write_file is False:
v2 = np.zeros_like(v)
offset = 0
for istart in range(0, int(N[0] + 1), nxyz):
ni = int(N[0]) - istart if istart + nxyz > N[0] else nxyz
for jstart in range(0, int(N[1] + 1), nxyz):
nj = int(N[1]) - jstart if jstart + nxyz > N[1] else nxyz
for kstart in range(0, int(N[2] + 1), nxyz):
nk = int(N[2]) - kstart if kstart + nxyz > N[2] else nxyz
for i in range(istart, istart + ni):
for j in range(jstart, jstart + nj):
for k in range(kstart, kstart + nk):
index = i * (N[1]) * (N[2]) + j * (N[2]) + k
v2[int(index)] = v[offset]
offset += 1
return np.reshape(v2, (int(N[0]), int(N[1]), int(N[2])))
else:
if name is None:
write_cube_file(wfn, O, N, D, nxyz, npoints, | |
# fzi-forschungszentrum-informatik/P3IV
# This file is part of the P3IV Simulator (https://github.com/fzi-forschungszentrum-informatik/P3IV),
# copyright by FZI Forschungszentrum Informatik, licensed under the BSD-3 license (see LICENSE file in main directory)
import numpy as np
import math
import matplotlib.image as mpimg
from matplotlib.transforms import Affine2D
from matplotlib.patches import Polygon, PathPatch
from matplotlib.path import Path
from matplotlib.collections import PatchCollection
from p3iv_utils_polyvision_pyapi.pypolyvision import VisibleArea, checkInside
from p3iv_utils_polyvision.fov_wedge import generateFoVWedge
# number of decimal places kept when rounding transformed coordinates
precision = 6
class Car(object):
    """
    A Car

    Holds the vehicle pose (position, yaw angle) and its field-of-view
    polygons, and delegates visibility computations to pypolyvision.
    """

    def __init__(self, fieldOfViewContour, position, yawAngle):
        """Create a car at *position* with *yawAngle* and the given FoV polygons."""
        self._fieldOfView = fieldOfViewContour
        # BUG FIX: start from a float array. The original np.array([0, 0])
        # had integer dtype, so adding a non-integer position/offset either
        # raised a numpy casting error or silently truncated.
        self._position = np.zeros(2, dtype=float)
        self._yawAngle = 0
        self.updateCarPose(position, yawAngle)

    @property
    def position(self):
        """The position of the Car as 1x2 NumpyArray"""
        return self._position

    # no setter: use updateCarPose

    @property
    def yawAngle(self):
        """The yaw angle of the car"""
        return self._yawAngle

    # no setter: use updateCarPose

    @property
    def fieldOfView(self):
        """The field of view contour"""
        return self._fieldOfView

    def updateCarPose(self, deltaPosition, deltaYawAngle):
        """Shift the pose by the given deltas and transform the FoV accordingly."""
        # non-inplace add keeps the float dtype regardless of the delta dtype
        self._position = self._position + np.asarray(deltaPosition, dtype=float)
        self._yawAngle += deltaYawAngle
        # affine transformation of field of view
        self._fieldOfView = affineTransformationOfPoylgonList(
            self._fieldOfView, deltaYawAngle, deltaPosition, precision - 1
        )

    def getVisibleArea(self, perceptedPolygons):
        """Return [fovs, opaque polygons, visible areas, non-visible areas]."""
        # create visibleArea object for calculations
        visA = VisibleArea(self._position, self._fieldOfView, perceptedPolygons)
        # calculate visible area
        visA.calculateVisibleArea()
        results = [visA.getFieldsOfView(), visA.getOpaquePolygons(), visA.getVisibleAreas(), visA.getNonVisibleAreas()]
        return results

    def getVisibilityBorder(self, perceptedPolygons, centerline):
        """Return the visibility border along *centerline* given the obstacles."""
        # create visibleArea object for calculations
        visA = VisibleArea(self._position, self._fieldOfView, perceptedPolygons)
        # calculate visible area
        return visA.getVisibilityBorder(centerline)
def affineTransformationOfPoylgonList(polylist, angle, offset, precision):
    """Transform every polygon in *polylist* in place and return the list.

    Note: the list object itself is mutated; callers rely on this.
    """
    for idx, poly in enumerate(polylist):
        polylist[idx] = affineTransformationOfPolygon(poly, angle, offset, precision)
    return polylist
def affineTransformationOfPolygon(polygon, angle, offset, precision):
    """Return a new polygon with every vertex rotated, shifted and rounded."""
    # apply the point transformation along axis 1 (each row is one vertex)
    return np.apply_along_axis(
        lambda vertex: affineTransformation(vertex, angle, offset, precision), 1, polygon
    )
def affineTransformation(point, angle, offset, precision):
    """Rotate *point* by *angle* degrees, translate by *offset*, then round."""
    theta = math.radians(angle)
    cos_t = math.cos(theta)
    sin_t = math.sin(theta)
    # standard 2D counter-clockwise rotation matrix
    rotation = np.array([[cos_t, -sin_t], [sin_t, cos_t]])
    transformed = np.dot(rotation, point) + offset
    return np.round(transformed, precision)
def generatePolygonPatchCollection(listOfNumpyPolygons, colorV="blue", alphaV=0.4):
    """Wrap a list of Nx2 vertex arrays into a matplotlib PatchCollection.

    Args:
        listOfNumpyPolygons: list of Nx2 numpy arrays (polygon vertices).
        colorV: fill color applied to all polygons.
        alphaV: alpha (transparency) applied to all polygons.

    Returns:
        matplotlib.collections.PatchCollection with one closed Polygon per input.
    """
    # pass `closed` by keyword: the positional form was deprecated and is
    # rejected by matplotlib >= 3.8
    polygons = [Polygon(vertices, closed=True) for vertices in listOfNumpyPolygons]
    return PatchCollection(polygons, alpha=alphaV, color=colorV)
def main():
    """Show an empty, fixed-scale grid plot (manual smoke test)."""
    # plotting
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    # ax.autoscale_view()
    ax.set_xlim(-15, 15)
    ax.set_ylim(-15, 15)
    ax.set_aspect("equal")
    plt.autoscale(False)
    plt.grid()
    plt.show()
def testFoVWedge():
    """Plot two FoV wedges and print a point-in-polygon check (manual test)."""
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    opening_angle = 90
    visible_range = 4
    fov1 = generateFoVWedge(opening_angle, visible_range, directionAngle=90)
    fov2 = generateFoVWedge(135, 2, directionAngle=270)
    fovs = [fov1, fov2]
    fovPatchCol = generatePolygonPatchCollection(fovs)
    ax.add_collection(fovPatchCol)
    # point (0, 2) should lie inside the upward-facing wedge
    print(checkInside(np.array([0, 2]), list(fov1)))
    # ax.autoscale_view()
    ax.set_xlim(-12, 12)
    ax.set_ylim(-12, 12)
    ax.set_aspect("equal")
    plt.autoscale(False)
    plt.grid()
    plt.show()
def testVisualisation0():
    """Two triangular FoVs with one triangular obstacle (manual test)."""
    origin = np.array([0, 0])
    fov1 = np.array([[0, 0], [-4, 10], [4, 10]])
    fov2 = np.array([[0, 0], [-6, -4], [6, -4]])
    fov = [fov1, fov2]
    obs1 = np.array([[-1, 5], [1, 5], [0, 7]])
    # alternative obstacle shapes kept for manual experimentation:
    # obs1 = np.array([[-3, 5],
    #                  [-0.5, 5],
    #                  [-1, 7]])
    # obs1 = np.array([[-3, 5],
    #                  [-1, 5],
    #                  [-2, 7]])
    obs1 = obs1 + np.array([0, 0])  # no-op shift; edit to move the obstacle
    obs = [obs1]
    # obs = []
    # plot results
    car = Car(fov, origin, 0)
    genericTestPlot(obs, car)
def testVisualisation1():
    """Five FoV wedges, mixed obstacles, rotated car pose (manual test)."""
    # initialization car
    fov1 = generateFoVWedge(40, 10, directionAngle=0)
    fov2 = generateFoVWedge(140, 4, directionAngle=180)
    fov3 = generateFoVWedge(25, 7, directionAngle=-30)
    fov4 = generateFoVWedge(25, 7, directionAngle=30)
    fov5 = generateFoVWedge(40, 7, directionAngle=180)
    fov = [fov1, fov2, fov3, fov4, fov5]
    car = Car(fov, np.array([0, 0]), 0)
    car.updateCarPose(np.array([0, 0]), -90)
    # initialization obstacles
    p1 = np.array([[-1, 5], [1, 5], [0, 8]])
    p2 = np.array([[-5, 5], [-3, 5], [-3, 3], [-5, 3]])
    p3 = np.array([[-2.2, -3.8], [-0.8, -2.2], [-1.5, -6], [-2.5, -4.5], [-3, -3]])
    p4 = np.array(
        [
            [8, 4],
            [8, 2],
            [5, 0],
            [10, 1],
            [9.333333, 0],  # this is the intersection point
            [8, -2],
            [7, -4],
            [11, -1],
            [11, 2],
        ]
    )
    p5 = np.array([[-4, 0], [-6, 0], [-6, -1.5]])
    obs = [p1, p2, p3, p4, p5]
    # plot results
    genericTestPlot(obs, car)
def testVisualisation2Shadowing():
    """Single pentagon FoV with a comb-shaped occluder (manual test)."""
    # initialize car
    fov1 = np.array([[0, 0], [2, 12], [14, 8], [14, -8], [2, -12]])
    fov = [fov1]
    car = Car(fov, np.array([0, 0]), 0)
    car.updateCarPose(np.array([0, 0]), 0)
    # initialize obs
    p1 = np.array(
        [[2, 7], [6, 3], [2, 5], [2, 1], [4, 3], [4, -2], [2, -1], [2, -4], [7, -1], [2, -8], [9, -8], [9, 7]]
    )
    obs = [p1]
    # plot results
    genericTestPlot(obs, car)
def testVisualisation3():
    """Pentagon FoV with a non-convex (U-shaped) obstacle (manual test)."""
    # initialize car
    fov1 = np.array([[0, 0], [2, 12], [14, 8], [14, -8], [2, -12]])
    fov = [fov1]
    car = Car(fov, np.array([0, 0]), 0)
    car.updateCarPose(np.array([0, 0]), 0)
    # initialize obs
    p1 = np.array(
        [
            [2, 3],
            [3, 3],
            [3, 4],
            [3, 6],
            [5, 6],
            [6, 6],
            [6, 3],
            [6, 0],
            [6, -2],
            [4, -2],
            [3, -2],
            [3, 1],
            [2, 1],
            [2, -3],
            [7, -3],
            [7, 7],
            [2, 7],
        ]
    )
    # p1 = affineTransformationOfPolygon(p1,0,np.array([0,-2]),precision)
    obs = [p1]
    # plot results
    genericTestPlot(obs, car)
def testVisualisation4OriginInsideNonConvexPoly():
    """FoV wedge with the car origin inside a non-convex obstacle (manual test)."""
    # alternative multi-wedge FoV kept for manual experimentation:
    # fov1 = generateFoVWedge(40, 10, 0.7, 20, 0)
    # fov2 = generateFoVWedge(140, 4, 0.9, 20, 180)
    # fov3 = generateFoVWedge(25, 7, 0.7, 10, -30)
    # fov4 = generateFoVWedge(25, 7, 0.7, 10, 30)
    # fov5 = generateFoVWedge(40, 7, 0.7, 20, 180)
    # fov = [fov1, fov2, fov3, fov4, fov5]
    fov1 = generateFoVWedge(50, 10, 0.7, 1)
    fov = [fov1]
    car = Car(fov, np.array([0, 0]), 0)
    # initialization obstacles
    p1 = np.array(
        [
            [2, 3],
            [3, 3],
            [3, 4],
            [3, 6],
            [5, 6],
            [6, 6],
            [6, 3],
            [6, 0],
            [6, -2],
            [4, -2],
            [3, -2],
            [3, 1],
            [2, 1],
            [2, -3],
            [7, -3],
            [7, 7],
            [2, 7],
        ]
    )
    # shift the obstacle so it surrounds the origin
    p1 = affineTransformationOfPolygon(p1, 0, np.array([-4, -2]), precision)
    obs = [p1]
    # plot results
    genericTestPlot(obs, car)
def testVisualisation5OriginInsideNonConvexPoly():
    """Origin inside a non-convex obstacle, variant 1 (manual test)."""
    # initialization car
    fov1 = generateFoVWedge(50, 10, 0.7, 1)
    fov = [fov1]
    car = Car(fov, np.array([0, 0]), 0)
    # initialization obstacles
    p1 = np.array([[-4, 0], [-5, 3], [1, 6], [6, 3], [0, -3], [-2, -3], [-2, -2], [-1, -2], [2, 2], [-1, 2]])
    p1 = affineTransformationOfPolygon(p1, 0, np.array([0, 0]), precision)
    obs = [p1]
    # plot results
    genericTestPlot(obs, car)
def testVisualisation6OriginInsideNonConvexPoly():
    """Origin inside a non-convex obstacle, variant 2 (manual test)."""
    # initialization car
    fov1 = generateFoVWedge(50, 10, 0.7, 1)
    fov = [fov1]
    car = Car(fov, np.array([0, 0]), 0)
    # initialization obstacles
    p1 = np.array([[-1, 2], [-4, 0], [-5, 3], [1, 6], [6, 3], [0, -3], [-2, -3], [-4, -1], [-1, -2], [2, 2]])
    p1 = affineTransformationOfPolygon(p1, 0, np.array([0, 0]), precision)
    obs = [p1]
    # plot results
    genericTestPlot(obs, car)
def testVisualisation7OriginInsideObstacle():
    """Obstacle triangle spanning across the car origin (manual test)."""
    origin = np.array([0, 0])
    fov1 = np.array([[0, 0], [-4, 10], [4, 10]])
    fov2 = np.array([[0, 0], [-6, -4], [6, -4]])
    fov = [fov1, fov2]
    obs1 = np.array([[-1, 5], [1, 5], [0, -7]])
    obs = [obs1]
    # plot results
    car = Car(fov, origin, 0)
    genericTestPlot(obs, car)
def testVisualisation8OriginOutsideFov():
    """FoV polygon detached from the car origin (manual test)."""
    origin = np.array([0, 0])
    fov1 = np.array([[0, 3], [-4, 10], [4, 10]])
    # second wedge kept for manual experimentation:
    # fov2 = np.array([[0, 0],
    #                  [-6, -4],
    #                  [6, -4]])
    # fov = [fov1, fov2]
    fov = [fov1]
    obs1 = np.array([[-1, 5], [1, 5], [0, 7]])
    obs = [obs1]
    # plot results
    car = Car(fov, origin, 0)
    genericTestPlot(obs, car)
def testVisualisation9EmptyFov():
    """Degenerate case: car with no field of view at all (manual test)."""
    origin = np.array([0, 0])
    fov = []
    obs1 = np.array([[-1, 5], [1, 5], [0, 7]])
    obs = [obs1]
    # plot results
    car = Car(fov, origin, 0)
    genericTestPlot(obs, car)
def testAnimation1():
    """Animated version of the five-wedge scenario (manual test)."""
    # initialization car
    fov1 = generateFoVWedge(40, 10, directionAngle=0)
    fov2 = generateFoVWedge(140, 4, directionAngle=180)
    fov3 = generateFoVWedge(25, 7, directionAngle=-30)
    fov4 = generateFoVWedge(25, 7, directionAngle=30)
    fov5 = generateFoVWedge(40, 7, directionAngle=180)
    fov = [fov1, fov2, fov3, fov4, fov5]
    car = Car(fov, np.array([0, 0]), 0)
    # initialization obstacles
    p1 = np.array([[-1, 5], [1, 5], [0, 8]])
    p2 = np.array([[-5, 5], [-3, 5], [-3, 3], [-5, 3]])
    p3 = np.array([[-2.2, -3.8], [-0.8, -2.2], [-1.5, -6], [-2.5, -4.5], [-3, -3]])
    p4 = np.array([[8, 4], [8, 2], [5, 0], [10, 1], [8, -2], [7, -4], [11, -1], [11, 2]])
    p5 = np.array([[-4, 0], [-6, 0], [-6, -1.5]])
    obs = [p1, p2, p3, p4, p5]
    genericTestAnimation(obs, car)
def testLine():
# initialization car
fov1 = generateFoVWedge(40, 10, directionAngle=0)
fov2 = generateFoVWedge(140, 4, directionAngle=180)
fov3 = generateFoVWedge(25, 7, directionAngle=-30)
fov4 = generateFoVWedge(25, 7, directionAngle=30)
fov5 = generateFoVWedge(40, 7, directionAngle=180)
fov = [fov1, fov2, fov3, fov4, | |
######################################################
# File: fitch.py #
# Author: <NAME> (<EMAIL>) #
# Project: Fitch Proof Automation with State-Search #
# Final Project for CS221: Artificial Intelligence #
######################################################
import util
import random
########################
# Helper Functions #
########################
# Returns a copy of the sentence with extra outer parentheses removed if they exist.
# The general structure of this algorithm is from Gareth Rees on stackoverflow.com:
# http://stackoverflow.com/questions/4284991/parsing-nested-parentheses-in-python-grab-content-by-level
def stripOuterParens(sentence):
    """Return *sentence* with redundant outermost parentheses removed.

    Repeatedly peels parenthesis layers that wrap the entire sentence, so
    "((p -> q))" becomes "p -> q" while "(a) -> (b)" is left untouched.
    The matching-paren scan follows Gareth Rees' approach:
    http://stackoverflow.com/questions/4284991/parsing-nested-parentheses-in-python-grab-content-by-level
    """
    if sentence == "":
        return sentence
    # Map each '(' index to the index of its matching ')'.
    openers = []
    match = {}
    for idx, ch in enumerate(sentence):
        if ch == '(':
            openers.append(idx)
        elif ch == ')' and openers:
            match[openers.pop()] = idx
    lo, hi = 0, len(sentence)
    for idx in range(hi):
        # Stop peeling as soon as position idx does not open a parenthesis
        # closing exactly at the current end of the span.
        if match.get(idx) != hi - 1:
            return sentence[lo:hi]
        lo, hi = idx + 1, match[idx]
    raise Exception("Execution should never reach here.")
# Returns True if the parentheses in the string are balanced and False otherwise.
def parensBalanced(string):
    """Return True iff every '(' in *string* has a matching ')'.

    A running depth counter replaces the explicit index stack: a dip below
    zero means an unmatched ')', a positive final depth an unmatched '('.
    """
    depth = 0
    for ch in string:
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
            if depth < 0:
                return False
    return depth == 0
# Returns (isImplication, antecedent, consequent) where:
# * isImplication is a boolean specifying whether statement has an outermost implication
# * antecedent is the antecedent of such an implication if it exists
# * consequent is the consequent of such an implication if it exists
def processImplication(sentence):
    """Return (isImplication, antecedent, consequent) for *sentence*.

    Scans for the first " -> " that splits the sentence into two
    parenthesis-balanced halves (implications bind to the left) and returns
    the stripped halves.  Returns (False, None, None) when no such split
    exists.
    """
    imp_index = sentence.find(" -> ")
    while imp_index > 0:
        antecedent = sentence[:imp_index]
        consequent = sentence[(imp_index + 4):]
        # An unbalanced half means this arrow is nested inside a
        # parenthesised sub-formula; advance to the next arrow.
        if not parensBalanced(antecedent) or not parensBalanced(consequent):
            # BUGFIX: the original advanced with "imp_index += 4 + find(...)"
            # even when find() returned -1 (no further arrow), corrupting the
            # index by +3 per pass and potentially looping forever on
            # unbalanced input.  Searching from an absolute offset makes the
            # loop terminate correctly: find() yields -1 and the while
            # condition fails.
            imp_index = sentence.find(" -> ", imp_index + 4)
            continue
        antecedent = stripOuterParens(antecedent.strip())
        consequent = stripOuterParens(consequent.strip())
        return (True, antecedent, consequent)
    return (False, None, None)
################################################
# Fitch Proof Search Problem Formalization #
################################################
# Defines finding a proof in the Fitch system as a search problem
# SearchProblem class from CS221: Artificial Intelligence assignment 3: Text Reconstruction
class FitchProblem(util.SearchProblem):
# @param premises = list [] of statements using the supplied symbolic conventions
# @param goal = a statement to be proved, written using the supplied symbolic convenctions
    def __init__(self, premises, goal, symbolSet, statementSet, connectiveSet):
        """Store the proof-search inputs.

        premises: list of statements assumed true, in the supplied symbolic syntax.
        goal: the statement to be proved.
        symbolSet: propositional constants available for assumptions.
        statementSet: candidate sentences the search may assume or derive.
        connectiveSet: connectives ("&&", "||", "->") enabled for inference.
        """
        self.premises = premises
        self.goal = goal
        self.symbols = symbolSet
        self.statementSet = statementSet
        self.connectiveSet = connectiveSet
# Defines the start state of the search graph given the premises, goal, and symbols
# The start state is a proof consisting only of premises at assumption level 0.
def startState(self):
# A state is defined by a list of tuples, each tuple containing a true statement, its justification, and its proof depth
statements = []
for premise in self.premises:
statements.append((premise, "Premise", 0))
# The full form of a state is tuple([list of statement, justification, depth tuples], sub-proof level).
# Subproof level is initially 0.
return (tuple(statements), 0)
# Defines the end state of the search graph
# The end state is defined to be any proof which contains the goal as a non-premise at sub-proof level 0
def isEnd(self, state):
statements = list(state[0])
if len(statements) == 0: return False
lastStatement = statements[len(statements) - 1]
# For a state to be the end state, must contain the goal and be at base level (not in a subproof)
if lastStatement[0] == self.goal and lastStatement[1] != "Premise" and lastStatement[2] == 0:
return True
return False
# Defines the successor state and costs of the given state, represnting a partial proof
# Successor states are a proof with an added set of lines generated by using one of the Fitch
# rules of inference.
# The proof generator only considers proof steps on symbols contained in the symbol set.
# For the prototype, all paths will have the same cost.
#
# @return a list of possible (action, newState, cost) tuples representing successor states.
# The return type is: list of (string, [(sentence, justification, depth), (sentence, justification, depth), etc.], int) tuples
def succAndCost(self, state):
# Occassionally prints the current state being searched (for testing)
'''
if random.random() > 0.9995:
print state
'''
results = []
allStatements = list(state[0]) # (sentence, justification, depth) tuples
proofDepth = state[1] # the subproof depth of the last statement in the proof
whitespace = ""
for _ in range(proofDepth):
whitespace += " "
# Extract the statements that matter from the list of all statements made in the proof so far
# This includes all statements at level 0 and anything in the scope of the current subproof
# Also makes a list of the sentences alone to help check whether we're being redundent
statements = []
sentences = []
maxDepthAllowed = proofDepth
for i, statement in reversed(list(enumerate(allStatements))):
depth = statement[2]
if depth < maxDepthAllowed:
maxDepthAllowed -= 1
elif depth > maxDepthAllowed:
continue
statements.append(statement)
sentences.append(statement[0])
# Puts all the statements back into the correct ordering
statements.reverse()
# An edge case of sorts: if we already have the answer but as a premise, just reiterate it and move on.
if self.goal in sentences:
succStatements = list(state[0])
succStatements.append((self.goal, "R", state[1]))
succState = (tuple(succStatements), state[1])
results.append((whitespace + "Reiteration: " + self.goal, succState, 1))
return results
# Assumptions
def Acost(depth):
if depth <= 2: return max(1, depth)
return 2**(depth)
# Bias argument allows adjustment of cost if, say, we know the assumption is probably a good idea.
def addAssumption(symbol, bias = 1):
cost = Acost(state[1] + 1) * bias
succStatements = list(state[0])
succStatements.append((symbol, "A", state[1] + 1))
succState = (tuple(succStatements), state[1] + 1)
A_whitespace = " " + whitespace
results.append((A_whitespace + "Assumption: " + symbol, succState, cost))
# Tries to assume the opposite (of a non-implication) or the antecedent (of an implication)
isImplication = False
assumedSomething = False
for sentence in self.statementSet:
if len(allStatements) <= len(self.statementSet) and sentence != self.goal:
if "->" in sentence:
isImplication, antecedent, consequent = processImplication(sentence)
if isImplication:
if sentence not in sentences:
addAssumption(sentence, 0.25)
assumedSomething = True
if antecedent not in sentences:
addAssumption(antecedent, 0.25)
assumedSomething = True
# The following ended up being too specific a strategy for the general problem, so I've omitted it.
'''
if not isImplication:
if ("&&" in sentence or "||" in sentence or "->" in sentence):
addAssumption("~(" + sentence + ")", 0.75)
assumedSomething = True
else:
addAssumption("~" + sentence, 0.75)
assumedSomething = True
'''
# Assumptions of single propositional constants and their negations
for symbol in self.symbols:
cost = 3 if assumedSomething else 1
addAssumption(symbol, cost)
addAssumption("~" + symbol, cost)
# Used for And Introduction and And Elimination
if "&&" in self.connectiveSet:
atoms = set()
conjuncts = set()
# Used for Or Elimination
if "||" in self.connectiveSet:
disjuncts = set()
# Gathers implications -- Used for Negation Introduction (and Or Elimination, if applicable)
phi_to_psi = {} # Dict from string to list(string) representing phi to all psi
phi_to_not_psi = {} # Dict from string to list(string) representing phi to all not psi
# Most of the rules of inference are covered or prepped for within this for loop.
for statement in statements:
if "&&" in self.connectiveSet:
# And Introduction
sentence = statement[0]
atoms.add(sentence)
# And Elimination
sentenceCopy = statement[0]
and_index = sentenceCopy.find(" && ")
while and_index != -1:
conjunct = sentenceCopy[:(and_index)]
# If the parentheses are balanced in the string it is assumed to be a proper conjunct
if parensBalanced(conjunct):
conjuncts.add(conjunct)
sentenceCopy = sentenceCopy[(and_index + 4):]
and_index = sentenceCopy.find(" && ")
# Loop and a half
conjunct = sentenceCopy
if conjunct != "":
if parensBalanced(conjunct):
conjuncts.add(conjunct)
if "||" in self.connectiveSet:
# Or Introduction
sentence = statement[0]
def addDisjunction(disjunction):
# The cost below determines the efficiency of the algorithm to a large degree
cost = 1 if (disjunction == self.goal) else len(disjunction)
succStatements = list(state[0])
succStatements.append((disjunction, "OI", state[1]))
succState = | |
<filename>Model/LowLevelFunctions.py
import json
import mysql.connector
import copy
import ast
import datetime
import Database_credentials as dc
import os
import traceback
import bz2
from Item import Item
import pandas as pd
from threading import Thread
class LowLevelFunctions:
    def __init__(self, resources, map_info=None):
        """Bind the shared resources bundle and prime the zaap cache.

        resources: project resource container (maps, item names, zaaps, ...).
        map_info: optional pre-loaded world-map data; lazily loaded when empty.
        """
        self.resources = resources
        self.map_info = [] if map_info is None else map_info
        # Per-bot discovered-zaap positions, fetched once from the database.
        self.disc_zaaps = self.get_discovered_zaaps()
def load_map_info(self):
corners = [(0, 0), (1, 0), (0, 1), (0, 2), (13, 0), (12, 1), (13, 1), (13, 2), (13, 37), (13, 38), (12, 39),
(13, 39), (0, 37), (0, 38), (1, 38), (0, 39)]
for map in self.resources.full_map_info:
for pos in corners:
map['cells'][pos[1]][pos[0]] = 2
return self.resources.full_map_info
def get_item_iconid(self, item_id):
i = 0
while i < len(self.resources.full_item_names):
if self.resources.full_item_names[i]['id'] == item_id:
return self.resources.full_item_names[i]['iconId']
i += 1
def cell2coord(self, cell):
return cell % 14 + int((cell//14)/2+0.5), (13 - cell % 14 + int((cell//14)/2))
def coord2cell(self, coord):
i = 0
result = self.cell2coord(i)
while result != coord:
i += 1
result = self.cell2coord(i)
return i
def distance_coords(self, coord_1, coord_2):
return ((coord_2[0]-coord_1[0])**2 + (coord_2[1]-coord_1[1])**2)**0.5
def closest_coord(self, coord, coord_list):
closest = coord_list[0], self.distance_coords(coord, coord_list[0])
for coord_close in coord_list:
if self.distance_coords(coord, coord_close) < closest[1]:
closest = coord_close, self.distance_coords(coord, coord_close)
return closest[0]
def distance_cell(self, cell_1, cell_2):
return self.distance_coords(self.cell2coord(cell_1), self.cell2coord(cell_2))
def closest_cell(self, cell, cell_list):
closest = cell_list[0], self.distance_cell(cell, cell_list[0])
for cell_close in cell_list:
if self.distance_cell(cell, cell_close) < closest[1]:
closest = cell_close, self.distance_cell(cell, cell_close)
return closest[0]
def get_neighbour_cells(self, cell):
neighbours = []
for i in range(560):
if self.distance_cell(cell, i) == 1:
neighbours.append(i)
return neighbours[:]
def get_walkable_neighbour_cells(self, cell, map_coords, worldmap):
walkable_neighbours = []
for neighbour in self.get_neighbour_cells(cell):
if self.flatten_map(self.coord_fetch_map('{};{}'.format(map_coords[0], map_coords[1]), worldmap))[neighbour] == 0:
walkable_neighbours.append(neighbour)
return walkable_neighbours[:]
def get_closest_walkable_neighbour_cell(self, target_cell, player_cell, map_coords, worldmap):
walkable_neighbours = self.get_walkable_neighbour_cells(target_cell, map_coords, worldmap)
if walkable_neighbours:
closest = walkable_neighbours[0], 10000
else:
return False
for walkable_neighbour in walkable_neighbours:
if self.distance_cell(walkable_neighbour, player_cell) < closest[1]:
closest = walkable_neighbour, self.distance_cell(walkable_neighbour, player_cell)
if closest[1] < 10000:
return closest[0]
return False
def get_closest_walkable_cell(self, target_cell, map_coords, worldmap):
map_info = self.flatten_map(self.coord_fetch_map('{};{}'.format(map_coords[0], map_coords[1]), worldmap))
closest = None, 2000
for n_tile in range(len(map_info)):
if (0 < self.distance_cell(target_cell, n_tile) < closest[1]) and map_info[n_tile] == 0:
closest = n_tile, self.distance_cell(target_cell, n_tile)
return closest[0]
def coord_fetch_map(self, coord, worldmap):
# print('Fetching : {}'.format(coord))
if not self.map_info:
self.map_info = self.load_map_info()
maps = []
for map in self.map_info:
if map['coord'] == coord and map['worldMap'] == worldmap:
maps.append(map)
if len(maps) == 1 and maps[0] is not None:
return maps[0]['cells']
elif len(maps) > 1:
for map in maps:
if map['hasPriorityOnWorldMap']:
return map['cells']
def flatten_map(self, map):
flattened = []
for line in map:
flattened += line
return flattened
def get_next_clue_pos(self, clue, current_pos, direction):
clue_possible_pos = self.resources.clues[clue.lower()]
direction_vector = {'n': (0, -1), 's': (0, 1), 'w': (-1, 0), 'e': (1, 0)}[direction]
found, i, checking_pos = False, 1, current_pos
while not found and i <= 10:
checking_pos = [checking_pos[j] + direction_vector[j] for j in range(2)]
if checking_pos in clue_possible_pos:
found = checking_pos
i += 1
if found:
return found
else:
raise RuntimeError('Non existing clue : {}, going {} from {}'.format(clue, direction, current_pos))
def add_discovered_zaap(self, bot_name, zaap_pos):
if list(zaap_pos) in self.resources.zaaps:
if bot_name in self.disc_zaaps.keys():
if list(zaap_pos) not in self.disc_zaaps[bot_name]:
self.disc_zaaps[bot_name].append(zaap_pos)
else:
self.disc_zaaps[bot_name] = [zaap_pos]
conn = mysql.connector.connect(host=dc.host, user=dc.user, password=<PASSWORD>,
database=dc.database)
cursor = conn.cursor()
cursor.execute("""UPDATE BotAccounts SET zaaps='{}' WHERE name='{}'""".format(self.disc_zaaps[bot_name], bot_name))
conn.commit()
conn.close()
def get_discovered_zaaps(self, bot_name=None):
conn = mysql.connector.connect(host=dc.host, user=dc.user, password=<PASSWORD>,
database=dc.database)
cursor = conn.cursor()
if bot_name is not None:
cursor.execute("""SELECT zaaps FROM BotAccounts WHERE name='{}'""".format(bot_name))
conn.close()
zaaps = []
for row in cursor:
zaaps = row[0]
if zaaps:
zaaps = ast.literal_eval(zaaps)
else:
zaaps = []
self.disc_zaaps[bot_name] = copy.deepcopy(zaaps)
return zaaps
else:
cursor.execute("""SELECT zaaps, name FROM BotAccounts""")
conn.close()
zaaps = {}
for row in cursor:
if row[0]:
zaaps[row[1]] = ast.literal_eval(row[0])
else:
zaaps[row[1]] = []
return copy.deepcopy(zaaps)
def get_closest_known_zaap(self, bot_name, pos, forbid=[]):
if bot_name in self.disc_zaaps.keys():
disc_zaaps = self.disc_zaaps[bot_name]
else:
return None
closest = None, 100000
for zaap_pos in disc_zaaps:
if self.distance_coords(pos, zaap_pos) < closest[1] and zaap_pos not in forbid:
closest = zaap_pos, self.distance_coords(pos, zaap_pos)
return closest[0]
def get_closest_unknown_zaap(self, bot_name, pos):
disc_zaaps = []
if bot_name in self.disc_zaaps.keys():
disc_zaaps = self.disc_zaaps[bot_name]
else:
self.disc_zaaps[bot_name] = self.get_discovered_zaaps(bot_name)
zaaps = copy.deepcopy(self.resources.zaaps)
for disc_zaap in disc_zaaps:
del zaaps[zaaps.index(disc_zaap)]
closest = False, 100000
for zaap_pos in zaaps:
if self.distance_coords(pos, zaap_pos) < closest[1]:
closest = zaap_pos, self.distance_coords(pos, zaap_pos)
return closest[0]
def format_worn_stuff(self, inventory):
worn_repr = []
for item in inventory.items:
if item[4] != 63:
worn_repr.append([self.get_item_iconid(item[1]), str(item[1]) + '-' + item[0].replace(' ', '-').replace("'", '')])
return str(worn_repr).replace("'", '"')
def get_inventory_id(self, inventory, general_id):
inv_id = 0
for item in inventory:
if item[1] == general_id:
inv_id = item[2]
return inv_id
def get_number_of_item_in_inventory(self, inventory, general_id):
number = 0
for item in inventory:
if item[1] == general_id:
number = item[3]
return number
def get_weight_of_item_in_inventory(self, inventory, general_id):
weight = 0
for item in inventory:
if item[1] == general_id:
weight = item[5]
return weight
def get_map_dd_tool(self, position):
with open('../Utils/ddTools.json', 'r') as f:
tools = json.load(f)
return tools[str(tuple(position))]
def score_dds(self, dd_list):
n_male, n_female, n_repro = 0, 0, 0
for dd in dd_list:
if dd.sex == 'male':
n_male += 1
else:
n_female += 1
for dd in dd_list:
dd.score = 0
if dd.maturity > 0:
dd.score += 1
if dd.maturity == 100:
dd.score += 1
if dd.level == 5:
dd.score += 1
if dd.fecondation_time != -1:
dd.score += 5
if dd.is_fecondation_ready:
dd.score += 5
if 'Reproductrice' in dd.behaviours:
# Heavily favor this trait so that eventually all DDs are repro.
dd.score += 50
if n_female < n_male and dd.sex == 'female':
dd.score += 1
if n_female > n_male and dd.sex == 'male':
dd.score += 1
if dd.name.lower() == 'bot-mobile':
dd.score += 100
def get_bot_mobile(self, dd_list):
bm_id = False
for dd in dd_list:
if dd['name'].lower() == 'bot-mobile':
bm_id = dd['id']
return bm_id
def add_bot_db(self, username, password, name, server):
conn = mysql.connector.connect(host=dc.host, user=dc.user, password=<PASSWORD>,
database=dc.database)
cursor = conn.cursor()
put = (username, password, name, server, '[]', '[]', '[]')
cursor.execute("""SELECT * FROM BotAccounts WHERE username = %s""", (username,))
things = []
for thing in cursor:
things.append(thing)
if not things:
cursor.execute("""INSERT INTO BotAccounts (username, password, name, server, zaaps, stuff, stats) VALUES (%s, %s, %s, %s, %s, %s, %s)""", put)
conn.commit()
conn.close()
def update_db(self, bot_id, server, name, kamas, level, occupation, current_map='OFFLINE', worldmap=1):
try:
conn = mysql.connector.connect(host=dc.host, user=dc.user, password=<PASSWORD>,
database=dc.database)
cursor = conn.cursor()
put = (bot_id, server, name, kamas, level, occupation, str(current_map), worldmap)
cursor.execute("""INSERT INTO Bots (BotId, Server, Name, Kamas, Level, Occupation, Pos, Worldmap) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)""", put)
conn.commit()
conn.close()
except Exception:
print('Could not upload')
with open('../Utils/DatabaseErrorLog.txt', 'a') as f:
f.write('\n\n' + str(datetime.datetime.now()) + '\n')
f.write(traceback.format_exc())
def get_schedule(self, bot_name):
conn = mysql.connector.connect(host=dc.host, user=dc.user, password=<PASSWORD>, database=dc.database)
cursor = conn.cursor()
cursor.execute("""SELECT schedule FROM BotAccounts WHERE name='{}'""".format(bot_name))
schedule = ''
conn.close()
for row in cursor:
schedule = row[0]
if schedule:
schedules = [ast.literal_eval(schedule)]
else:
schedules = self.resources.default_schedule
schedule = []
for schedule_curr in schedules:
if schedule_curr['idx'] == 0:
schedule = schedule_curr['tasks']
if schedule:
return schedule
else:
raise RuntimeError('Error fetching schedule')
def get_mount_situation(self, bot_name):
conn = mysql.connector.connect(host=dc.host, user=dc.user, password=<PASSWORD>,
database=dc.database)
cursor = conn.cursor()
cursor.execute("""SELECT mount FROM BotAccounts WHERE Name='{}'""".format(bot_name))
conn.close()
mount_situation = None
for row in cursor:
mount_situation = row[0] if row[0] else None
return mount_situation
def set_mount_situation(self, bot_name, situation):
conn = mysql.connector.connect(host=dc.host, user=dc.user, password=<PASSWORD>,
database=dc.database)
cursor = conn.cursor()
cursor.execute("""UPDATE BotAccounts SET mount='{}' WHERE name='{}'""".format(situation, bot_name))
conn.commit()
conn.close()
    def log(self, bot, message):
        """Log *message* for *bot* to console, log file and database.

        NOTE(review): the worker thread is join()ed immediately, so this is
        effectively a synchronous call; the only difference from calling
        actual_log directly is that exceptions raised inside are swallowed by
        the thread instead of propagating — confirm that is intentional.
        """
        t = Thread(target=self.actual_log, args=(bot, message))
        t.start()
        t.join()
def actual_log(self, bot, message):
name = bot.credentials['name']
color = bot.interface.color
print(color + message + '\033[0m')
with open('../Utils/BotsLogs/{}.txt'.format(name), 'a') as f:
f.write(str(datetime.datetime.now()) + ' ' + message + '\n')
conn = mysql.connector.connect(host=dc.host, user=dc.user, password=<PASSWORD>,
database=dc.database)
cursor = conn.cursor()
try:
if bot.characteristics is not None:
cursor.execute("""UPDATE BotAccounts SET position='{}', stuff='{}', stats='{}', subLeft='{}' WHERE name='{}'""".format(list(bot.position[0]), self.format_worn_stuff(bot.inventory), str(bot.characteristics).replace("'", "''"), bot.subscribed, name))
else:
cursor.execute("""UPDATE BotAccounts SET position='{}' WHERE name='{}'""".format(list(bot.position[0]), name))
except TypeError as e:
# print(traceback.format_exc())
# print("Not uploading that")
pass
except Exception:
with open('../Utils/DatabaseErrorLog.txt', 'a') as f:
f.write('\n\n' + str(datetime.datetime.now()) + '\n')
f.write(traceback.format_exc())
conn.commit()
conn.close()
def push_log_file(self, file_path, logtype, compress=False):
try:
with open(file_path, 'r') as f:
contents = ''.join(f.readlines())
if contents:
try:
if compress:
contents = bz2.compress(contents)
conn = mysql.connector.connect(host=dc.host, user=dc.user, password=<PASSWORD>,
| |
<filename>morph_tools.py
import logging
import os.path
import numpy as np
from morphing import Morph
import cv2
from nilt_base.NILTlogger import get_logger
logger = get_logger("NILTlogger.morph_tool")
# RGB triples for the named pad/fill colours accepted by the helpers below.
COLOR_LOOKUP = {"black": [0, 0, 0],
                "white": [255, 255, 255],
                "red": [255, 0, 0],
                "green": [0, 255, 0],
                "blue": [0, 0, 255]}
def make_image_rgb(image):
    """Return *image* as an (h, w, 3) RGB array.

    Grayscale inputs of shape (h, w) or (h, w, 1) are replicated across three
    channels; a 3-channel input is returned unchanged.

    Parameters
    ----------
    image : np.ndarray
        Image to make RGB

    Returns
    -------
    np.ndarray
        array with shape (h, w, 3)
    """
    if not 2 <= image.ndim <= 3:
        raise ValueError(f"Number of dimensions should be 2 or 3, not {image.ndim}")
    if image.ndim == 2:
        return np.repeat(image[:, :, np.newaxis], 3, axis=2)
    channels = image.shape[2]
    if channels == 1:
        return np.repeat(image, 3, axis=2)
    if channels == 3:
        return image
    raise ValueError(f"Could not convert image with shape {image.shape}")
def load_image(file):
    """Read the image at *file* and return it as an RGB numpy array.

    Parameters
    ----------
    file : str
        Path to image file

    Returns
    -------
    np.ndarray
    """
    # OpenCV loads BGR; convert to the RGB ordering used throughout this module.
    bgr = cv2.imread(file, cv2.IMREAD_COLOR)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
def save_image(path, array, detect_range=True):
    """Save a numpy array as an RGB image.

    Parameters
    ----------
    path : str
        Path where to save file
    array : np.ndarray
        image (as a numpy array) to save.
    detect_range : bool (optional)
        If True and the data looks like it is in [0, 1], rescale to [0, 255].

    Raises
    ------
    RuntimeError
        If OpenCV reports that the write failed.
    """
    if detect_range:
        if np.max(array) <= 1.0:
            # BUGFIX: rescale a copy — the original "array *= 255" mutated
            # the caller's array in place as a side effect.
            array = array * 255
    array = make_image_rgb(array)
    succes = cv2.imwrite(path, cv2.cvtColor(array.astype(np.uint8), cv2.COLOR_RGB2BGR))
    if not succes:
        raise RuntimeError("Image not saved sucessfully")
    else:
        # Use the module logger (consistent with the rest of this file)
        # instead of the root "logging" module.
        logger.debug(f"Successfully saved image to {path}")
def interpolate_pct(wanted, source, target):
    """Return where *wanted* sits between *source* and *target* as a fraction.

    E.g. interpolate_pct(5, 0, 10) == 0.5.  Values outside [source, target]
    extrapolate to fractions outside [0, 1].

    Raises
    ------
    ValueError
        If source >= target.  (The original only rejected source > target and
        let source == target fall through to an opaque ZeroDivisionError.)
    """
    if source >= target:
        raise ValueError("Source should be smaller than target value")
    return (wanted - source) / (target - source)
def morph(source, target, steps, output_folder, **kwargs):
    """Morph *source* into *target* over *steps* frames and save the results.

    Parameters
    ----------
    source : str
        Path to the source image
    target : str
        Path to the target image
    steps :
        Number of images wanted in the sequence
    output_folder : str
        Folder to save results to
    kwargs
        Keyword arguments passed to Morph class

    Returns
    -------
    list
        List of paths to PNG images generated
    list
        List of paths to Numpy arrays of images generated
    """
    morpher = Morph(output_folder=output_folder, **kwargs)
    src_image = morpher.load_image_file(source)
    trg_image = morpher.load_image_file(target)
    morpher.produce_warp_maps(src_image, trg_image)
    return morpher.use_warp_maps(src_image, trg_image, steps)
def setup_morpher(source, target, output_folder, **kwargs):
    """Prepare and train a Morph instance for the given image pair.

    Parameters
    ----------
    source : str or np.ndarray
        Path to the source image or image as numpy array
    target : str or np.ndarray
        Path to the target image or image as numpy array
    output_folder : str
        Folder to save results to
    kwargs
        Keyword arguments passed to Morph class

    Returns
    -------
    Morph
        Trained class for morphing
    """
    def as_rgb(img):
        # Accept either a file path or an in-memory array.
        return load_image(img) if isinstance(img, str) else make_image_rgb(img)

    src_rgb = as_rgb(source)
    trg_rgb = as_rgb(target)
    # Pad both images onto one common square canvas before training.
    src_rgb, trg_rgb = pad_images_to_same_square(src_rgb, trg_rgb, color="black")
    src_path = os.path.join(output_folder, "source_image_padded.png")
    trg_path = os.path.join(output_folder, "target_image_padded.png")
    save_image(src_path, src_rgb)
    save_image(trg_path, trg_rgb)
    morpher = Morph(output_folder=output_folder, im_sz=src_rgb.shape[0], **kwargs)
    logger.info("Training model, this might take a while")
    morpher.produce_warp_maps(morpher.load_image_file(src_path),
                              morpher.load_image_file(trg_path))
    logger.info("Training Done")
    return morpher
def single_image_morpher(morph_class, morphed_dim, source_dim, target_dim, scale, save_images=True, name=""):
    """Generate one morphed frame at the size interpolated between source and target.

    Parameters
    ----------
    morph_class : morphing.Morph
        A trained instance of the Morph class.
    morphed_dim : tuple
        Tuple (height, width) in um, dimensions of the wanted morphed image
    source_dim : tuple
        Tuple (height, width) in um, dimensions of the original source image
    target_dim : tuple
        Tuple (height, width) in um, dimensions of the original target
    scale : float
        Resolutions of image as: um pr pixel
    save_images : bool or string
        When a string: folder to save the image into.  When truthy: save into
        the morph class's default "single_morphed" folder.  When falsy: skip.
    name : str
        Name to be used for the file along with dimensions

    Returns
    -------
    np.ndarray
        Morphed image
    """
    for t in (morphed_dim, source_dim, target_dim):
        assert isinstance(t, (tuple, list, np.ndarray)), f"Dimensions must be given as 2-tuples, not {t}"
        assert len(t) == 2, f"Dimensions must have length 2, got a dimension of {t}"
    # The interpolation position is driven by height only; width is assumed
    # proportional (see the commented-out width check in earlier revisions).
    height_pct = interpolate_pct(morphed_dim[0], source_dim[0], target_dim[0]) * 100
    morphed_im = morph_class.generate_single_morphed(height_pct)
    crop_im = crop_image_to_size(morphed_im, morphed_dim, scale)
    if save_images:
        if isinstance(save_images, str):
            outdir = save_images
        else:
            outdir = os.path.join(morph_class.output_folder, "single_morphed")
        # makedirs(exist_ok=True) avoids the check-then-mkdir race of the
        # original and also creates missing parent folders.
        os.makedirs(outdir, exist_ok=True)
        if not name:
            name = "single_morph"
        name += f"_{height_pct:.1f}pct_{morphed_dim[0]:.3f}x{morphed_dim[1]:.3f}.png"
        save_image(os.path.join(outdir, name), crop_im, detect_range=False)
    return crop_im
def single_image_morpher_resize(morph_class, morphed_dim, source_dim, target_dim, scale, save_images=True, name=""):
    """Like single_image_morpher, but crop a square then resize to the target
    aspect ratio instead of cropping to it directly.

    Parameters
    ----------
    morph_class : morphing.Morph
        A trained instance of the Morph class.
    morphed_dim : tuple
        Tuple (height, width) in um, dimensions of the wanted morphed image
    source_dim : tuple
        Tuple (height, width) in um, dimensions of the original source image
    target_dim : tuple
        Tuple (height, width) in um, dimensions of the original target
    scale : float
        Resolutions of image as: um pr pixel
    save_images : bool or string
        When a string: folder to save the image into.  When truthy: save into
        the morph class's default "single_morphed" folder.  When falsy: skip.
    name : str
        Name to be used for the file along with dimensions

    Returns
    -------
    np.ndarray
        Morphed image
    """
    for t in (morphed_dim, source_dim, target_dim):
        assert isinstance(t, (tuple, list, np.ndarray)), f"Dimensions must be given as 2-tuples, not {t}"
        assert len(t) == 2, f"Dimensions must have length 2, got a dimension of {t}"
    height_pct = interpolate_pct(morphed_dim[0], source_dim[0], target_dim[0]) * 100
    morphed_im = morph_class.generate_single_morphed(height_pct)
    # Crop a square of side = requested height, then stretch to the requested
    # pixel dimensions (cv2.resize takes (width, height)).
    crop_im = crop_image_to_size(morphed_im, (morphed_dim[0], morphed_dim[0]), scale)
    vpx = int(np.ceil(morphed_dim[0] / scale))
    hpx = int(np.ceil(morphed_dim[1] / scale))
    re_im = cv2.cvtColor(cv2.resize(cv2.cvtColor(crop_im, cv2.COLOR_RGB2BGR), (hpx, vpx)), cv2.COLOR_BGR2RGB)
    if save_images:
        if isinstance(save_images, str):
            outdir = save_images
        else:
            outdir = os.path.join(morph_class.output_folder, "single_morphed")
        # makedirs(exist_ok=True) avoids the check-then-mkdir race of the
        # original and also creates missing parent folders.
        os.makedirs(outdir, exist_ok=True)
        if not name:
            name = "single_morph"
        name += f"_{height_pct:.1f}pct_{morphed_dim[0]}x{morphed_dim[1]}.png"
        save_image(os.path.join(outdir, name), re_im, detect_range=False)
    return re_im
def crop_image_to_size(image, size, scale, pos="cc"):
    """Crop *image* to a physical (height, width) size.

    Parameters
    ----------
    image : np.ndarray
        Image as a numpy array
    size : tuple
        Tuple (height, width) of the area to crop, height and width in um
    scale : float
        Resolutions of image as: um pr pixel
    pos : str
        Relative position of the crop; first character gives vertical
        position ([t]op, [c]enter or [b]ottom), second gives horisontal
        position ([l]eft, [c]enter or [r]ight).

    Returns
    -------
    np.ndarray
        Cropped image
    """
    # BUGFIX: the original isinstance check listed `int` — a typo for `list`.
    assert isinstance(size, (tuple, list, np.ndarray)), f"Argument 'size' be a tuple, not {type(size).__name__}"
    assert len(size) == 2, f"Argument 'size' must have length 2, not {len(size)}"
    vpx = int(np.ceil(size[0] / scale))
    hpx = int(np.ceil(size[1] / scale))
    (vsize, hsize, csize) = image.shape
    # BUGFIX: the original compared tuples "(vpx, hpx) > (vsize, hsize)",
    # which is lexicographic and misses an oversized width whenever the
    # height is small; check each dimension independently.
    if vpx > vsize or hpx > hsize:
        logger.warning(f"At least one dimension of {(vpx, hpx)} is larger than {image.shape}, this dimension"
                       f"will limited to size of image.")
    vpx = min(vpx, image.shape[0])
    hpx = min(hpx, image.shape[1])
    assert isinstance(pos, str), f"Argument 'pos' be a string, not {type(pos).__name__}"
    assert len(pos) == 2, f"Argument 'pos' must have length 2, not {len(pos)}"
    vpos = pos[0].lower()
    hpos = pos[1].lower()
    assert vpos in "tcb", f"First element of 'pos' must be either, 't', 'c' or 'b' not {vpos}"
    # BUGFIX: this message previously said "First element" and printed vpos.
    assert hpos in "lcr", f"Second element of 'pos' must be either, 'l', 'c' or 'r' not {hpos}"
    vtop, vbot = _get_vpos_idx(vsize, vpx, vpos)
    hleft, hright = _get_vpos_idx(hsize, hpx, hpos)
    crop = image[vtop:vbot, hleft:hright, ...]
    return crop
def pad_image_to_square(image, size=None, color="black", pos="cc"):
""" Pad an image to make it a square
Parameters
----------
image : np.ndarray
Image to pad
size : int (optional)
Size to pad to.
color : string or list
Color to pad with either a string "black", "red". Default: "black"
pos : str
Relative vertical position image, first character gives vertical position, second gives horisontal position
Vertical position should be [t]op, [c]enter or [b]ottom.
Horisontal position should be [l]eft, [c]enter or [r]ight
Returns
-------
| |
self.certificate is not None and self.certificate._has_data():
return True
return False
            @staticmethod
            def _meta_info():
                # Generated YDK hook: import the meta table lazily to avoid a
                # circular import with the generated _meta package.
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
                return meta._meta_table['Sam.Devices.Device']['meta_info']
        @property
        def _common_path(self):
            # Absolute XPath of this container in the YANG data tree.
            return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:devices'
        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            # Operational (-oper) model: always state data, never configuration.
            return False
        def _has_data(self):
            # True when any child device entry carries data to encode.
            if not self.is_config():
                return False
            if self.device is not None:
                for child_ref in self.device:
                    if child_ref._has_data():
                        return True
            return False
        @staticmethod
        def _meta_info():
            # Generated YDK hook: lazy import avoids a circular dependency.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
            return meta._meta_table['Sam.Devices']['meta_info']
class Packages(object):
"""
SAM certificate information package
.. attribute:: package
SAM certificate information for a specific package
**type**\: list of :py:class:`Package <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.Packages.Package>`
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
        def __init__(self):
            # YList keeps the ordered list of per-package certificate entries
            # and records its parent/name for path generation.
            self.parent = None
            self.package = YList()
            self.package.parent = self
            self.package.name = 'package'
class Package(object):
"""
SAM certificate information for a specific
package
.. attribute:: package_name <key>
Specify package name
**type**\: str
.. attribute:: certificate_flags
Certificate flags
**type**\: :py:class:`CertificateFlags <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.Packages.Package.CertificateFlags>`
.. attribute:: certificate_index
Certificate index
**type**\: int
**range:** 0..65535
.. attribute:: location
Certificate location
**type**\: str
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.package_name = None
self.certificate_flags = Sam.Packages.Package.CertificateFlags()
self.certificate_flags.parent = self
self.certificate_index = None
self.location = None
class CertificateFlags(object):
"""
Certificate flags
.. attribute:: is_expired
Expired flag
**type**\: bool
.. attribute:: is_revoked
Revoked flag
**type**\: bool
.. attribute:: is_trusted
Trusted flag
**type**\: bool
.. attribute:: is_validated
Validated flag
**type**\: bool
"""
_prefix = 'crypto-sam-oper'
_revision = '2015-01-07'
def __init__(self):
self.parent = None
self.is_expired = None
self.is_revoked = None
self.is_trusted = None
self.is_validated = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-crypto-sam-oper:certificate-flags'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.is_expired is not None:
return True
if self.is_revoked is not None:
return True
if self.is_trusted is not None:
return True
if self.is_validated is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.Packages.Package.CertificateFlags']['meta_info']
@property
def _common_path(self):
if self.package_name is None:
raise YPYModelError('Key property package_name is None')
return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:packages/Cisco-IOS-XR-crypto-sam-oper:package[Cisco-IOS-XR-crypto-sam-oper:package-name = ' + str(self.package_name) + ']'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.package_name is not None:
return True
if self.certificate_flags is not None and self.certificate_flags._has_data():
return True
if self.certificate_index is not None:
return True
if self.location is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.Packages.Package']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:packages'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.package is not None:
for child_ref in self.package:
if child_ref._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
return meta._meta_table['Sam.Packages']['meta_info']
class CertificateRevocations(object):
    """
    Certificate revocation list index table
    information

    .. attribute:: certificate_revocation
        Certificate revocation list index information
        **type**\: list of :py:class:`CertificateRevocation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.CertificateRevocations.CertificateRevocation>`
    """

    # YANG module prefix / revision this generated class corresponds to.
    _prefix = 'crypto-sam-oper'
    _revision = '2015-01-07'

    def __init__(self):
        self.parent = None
        # YANG list of CRL-index entries; children back-reference via .parent.
        self.certificate_revocation = YList()
        self.certificate_revocation.parent = self
        self.certificate_revocation.name = 'certificate_revocation'


    class CertificateRevocation(object):
        """
        Certificate revocation list index information

        .. attribute:: crl_index <key>
            CRL index
            **type**\: int
            **range:** \-2147483648..2147483647

        .. attribute:: certificate_revocation_list_detail
            Certificate revocation list detail information
            **type**\: :py:class:`CertificateRevocationListDetail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.CertificateRevocations.CertificateRevocation.CertificateRevocationListDetail>`
        """

        _prefix = 'crypto-sam-oper'
        _revision = '2015-01-07'

        def __init__(self):
            self.parent = None
            self.crl_index = None  # YANG list key
            # Child container, wired back to this entry via .parent.
            self.certificate_revocation_list_detail = Sam.CertificateRevocations.CertificateRevocation.CertificateRevocationListDetail()
            self.certificate_revocation_list_detail.parent = self


        class CertificateRevocationListDetail(object):
            """
            Certificate revocation list detail information

            .. attribute:: crl_index
                CRL index
                **type**\: int
                **range:** 0..65535

            .. attribute:: issuer
                Issuer name
                **type**\: :py:class:`Issuer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.CertificateRevocations.CertificateRevocation.CertificateRevocationListDetail.Issuer>`

            .. attribute:: updates
                Updated time of CRL is displayed
                **type**\: str
            """

            _prefix = 'crypto-sam-oper'
            _revision = '2015-01-07'

            def __init__(self):
                self.parent = None
                self.crl_index = None
                # Child container, wired back to this node via .parent.
                self.issuer = Sam.CertificateRevocations.CertificateRevocation.CertificateRevocationListDetail.Issuer()
                self.issuer.parent = self
                self.updates = None


            class Issuer(object):
                """
                Issuer name

                .. attribute:: common_name
                    Common name
                    **type**\: str

                .. attribute:: country
                    Country
                    **type**\: str

                .. attribute:: organization
                    Organization
                    **type**\: str
                """

                _prefix = 'crypto-sam-oper'
                _revision = '2015-01-07'

                def __init__(self):
                    self.parent = None
                    self.common_name = None
                    self.country = None
                    self.organization = None

                @property
                def _common_path(self):
                    # Path is derived from the parent node; a detached node
                    # cannot produce one.
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')
                    return self.parent._common_path +'/Cisco-IOS-XR-crypto-sam-oper:issuer'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    # Leaf-by-leaf check: any populated leaf means the node has data.
                    if not self.is_config():
                        return False
                    if self.common_name is not None:
                        return True
                    if self.country is not None:
                        return True
                    if self.organization is not None:
                        return True
                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
                    return meta._meta_table['Sam.CertificateRevocations.CertificateRevocation.CertificateRevocationListDetail.Issuer']['meta_info']

            @property
            def _common_path(self):
                # Path is derived from the parent list entry.
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return self.parent._common_path +'/Cisco-IOS-XR-crypto-sam-oper:certificate-revocation-list-detail'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                if not self.is_config():
                    return False
                if self.crl_index is not None:
                    return True
                if self.issuer is not None and self.issuer._has_data():
                    return True
                if self.updates is not None:
                    return True
                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
                return meta._meta_table['Sam.CertificateRevocations.CertificateRevocation.CertificateRevocationListDetail']['meta_info']

        @property
        def _common_path(self):
            # The list key must be set to build the keyed XPath predicate.
            if self.crl_index is None:
                raise YPYModelError('Key property crl_index is None')
            return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:certificate-revocations/Cisco-IOS-XR-crypto-sam-oper:certificate-revocation[Cisco-IOS-XR-crypto-sam-oper:crl-index = ' + str(self.crl_index) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            if not self.is_config():
                return False
            if self.crl_index is not None:
                return True
            if self.certificate_revocation_list_detail is not None and self.certificate_revocation_list_detail._has_data():
                return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
            return meta._meta_table['Sam.CertificateRevocations.CertificateRevocation']['meta_info']

    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:certificate-revocations'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # True when any child list entry carries data.
        if not self.is_config():
            return False
        if self.certificate_revocation is not None:
            for child_ref in self.certificate_revocation:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
        return meta._meta_table['Sam.CertificateRevocations']['meta_info']
class CertificateRevocationListSummary(object):
    """
    Certificate revocation list summary information

    .. attribute:: crl_index
        CRL index
        **type**\: int
        **range:** 0..65535

    .. attribute:: issuer
        Issuer name
        **type**\: :py:class:`Issuer <ydk.models.cisco_ios_xr.Cisco_IOS_XR_crypto_sam_oper.Sam.CertificateRevocationListSummary.Issuer>`

    .. attribute:: updates
        Updated time of CRL is displayed
        **type**\: str
    """

    # YANG module prefix / revision this generated class corresponds to.
    _prefix = 'crypto-sam-oper'
    _revision = '2015-01-07'

    def __init__(self):
        self.parent = None
        self.crl_index = None
        # Child container, wired back to this node via .parent.
        self.issuer = Sam.CertificateRevocationListSummary.Issuer()
        self.issuer.parent = self
        self.updates = None


    class Issuer(object):
        """
        Issuer name

        .. attribute:: common_name
            Common name
            **type**\: str

        .. attribute:: country
            Country
            **type**\: str

        .. attribute:: organization
            Organization
            **type**\: str
        """

        _prefix = 'crypto-sam-oper'
        _revision = '2015-01-07'

        def __init__(self):
            self.parent = None
            self.common_name = None
            self.country = None
            self.organization = None

        @property
        def _common_path(self):
            # Summary container is a singleton, so the path is absolute
            # (no parent lookup or list key needed).
            return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:certificate-revocation-list-summary/Cisco-IOS-XR-crypto-sam-oper:issuer'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # Leaf-by-leaf check: any populated leaf means the node has data.
            if not self.is_config():
                return False
            if self.common_name is not None:
                return True
            if self.country is not None:
                return True
            if self.organization is not None:
                return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
            return meta._meta_table['Sam.CertificateRevocationListSummary.Issuer']['meta_info']

    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-crypto-sam-oper:sam/Cisco-IOS-XR-crypto-sam-oper:certificate-revocation-list-summary'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        if not self.is_config():
            return False
        if self.crl_index is not None:
            return True
        if self.issuer is not None and self.issuer._has_data():
            return True
        if self.updates is not None:
            return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_crypto_sam_oper as meta
        return meta._meta_table['Sam.CertificateRevocationListSummary']['meta_info']
@property
def _common_path(self):
    """Absolute XPath of the top-level sam container."""
    root = '/Cisco-IOS-XR-crypto-sam-oper:sam'
    return root
def is_config(self):
''' Returns True if this instance represents config data else returns | |
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.10065,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.45472,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 9.4469e-07,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 5.06007e-06,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.170231,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.274576,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.138597,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.583403,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.194694,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.20385,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 9.55956e-07,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00714024,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0516334,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0528064,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0516344,
'Execution Unit/Register Files/Runtime Dynamic': 0.0599467,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.108777,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.286151,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.53757,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.0022238,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.0022238,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00199729,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000806195,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000758568,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00720345,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.019165,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0507642,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 3.22904,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.198317,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.172418,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 5.60426,
'Instruction Fetch Unit/Runtime Dynamic': 0.447868,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.038012,
'L2/Runtime Dynamic': 0.00738265,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.91872,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.811618,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0544038,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0544038,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.17563,
'Load Store Unit/Runtime Dynamic': 1.13432,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.13415,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.268301,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0476104,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0479366,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.20077,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0332361,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.438665,
'Memory Management Unit/Runtime Dynamic': 0.0811727,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 17.0499,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 2.7582e-06,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00768037,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0868373,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming | |
################################################################################
# (c) [2013] The Johns Hopkins University / Applied Physics Laboratory All Rights Reserved.
# Contact the JHU/APL Office of Technology Transfer for any additional rights. www.jhuapl.edu/ott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
""" Solves a membrane detection/classification problem.
This module provides the top-level interface for solving a binary
"membrane vs non-membrane" classification problem for EM data sets
(e.g. [1]) using convolutional neural networks.
The overall approach is based on <NAME>'s paper [2] and the code
is derived from a LeNet example included in the Theano code base for
MNIST classification.
References:
[1] http://brainiac2.mit.edu/isbi_challenge/
[2] Ciresan, Dan, et al. "Deep neural networks segment neuronal membranes
in electron microscopy images." Advances in neural information
processing systems. 2012.
December 2013, mjp
"""
import os, os.path
import sys, time
import socket
import argparse
import numpy
from PIL import Image
import pdb
import theano
import theano.tensor as T
import em_networks as EMN
from em_utils import *
from tiles import *
def load_membrane_data(trainDataFile, trainLabelsFile,
                       tileSize,
                       trainSlices, validSlices,
                       nZeeChannels=0):
    """Loads data set and creates corresponding tile managers.

    Parameters:
      trainDataFile   - .tif or .npz file holding the EM volume X
      trainLabelsFile - .tif file holding the per-pixel label volume Y
      tileSize        - edge length of the square tiles (border mirroring
                        uses half this value)
      trainSlices     - slice indices (first axis) used for training
      validSlices     - slice indices (first axis) used for validation
      nZeeChannels    - extra Z-channel count, passed through to TileManager

    Returns (train TileManager, valid TileManager, (X, Y)).
    """
    # load the volume and the labels
    if trainDataFile.endswith('.tif'):
        X = load_tiff_data(trainDataFile)
        # Assumes raw conference data (i.e. not preprocessed).
        #for ii in range(X.shape[0]):
        # X[ii,:,:] = X[ii,:,:] - numpy.mean(X[ii,:,:])
        #X = X / numpy.max(numpy.abs(X))
        print '[%s]: Warning: no longer zero-meaning and scaling data' % __name__
    elif trainDataFile.endswith('.npz'):
        # assumes volume data is stored as the tensor X and is suitably preprocessed
        X = numpy.load(trainDataFile)['X']
    else:
        raise RuntimeError('unexpected data file extension')
    Y = load_tiff_data(trainLabelsFile)

    # mirror edges (border = half a tile, so tiles centered near the
    # volume boundary remain fully in-bounds)
    # NOTE(review): numpy.floor returns a float here -- presumably
    # mirror_edges_tensor accepts/coerces that; confirm.
    border = numpy.floor(tileSize/2.)
    X = mirror_edges_tensor(X, border)
    Y = mirror_edges_tensor(Y, border)

    # Use 0 and 1 as class labels. This is actually important because
    # the neural network code will use class labels as indices into
    # the outputs of the last network layer.
    #
    # 0 := non-membrane
    # 1 := membrane
    # The remap order matters: 0 -> 1 first, then 255 -> 0.
    Y[Y==0] = 1; Y[Y==255] = 0
    assert(Y.max() == 1)

    # split into train / validation subsets along the slice axis
    X_train = X[trainSlices,:,:]
    Y_train = Y[trainSlices,:,:]
    X_valid = X[validSlices,:,:]
    Y_valid = Y[validSlices,:,:]

    # tile managers will put the images into GPU memory via Theano shared vars.
    train = TileManager(X_train, Y_train, tileSize=tileSize, nZeeChannels=nZeeChannels)
    valid = TileManager(X_valid, Y_valid, tileSize=tileSize, nZeeChannels=nZeeChannels)
    return (train, valid, (X, Y))
def random_image_modifiers(flipProb=.6, rotProb=.6):
    """Draw random flip/rotation directives for augmenting a 2d image.

    With probability flipProb (rotProb) a flip (rotation) is selected;
    its direction is then chosen uniformly via the sign of a second draw.

    Returns (flipDim, rotDir): each is 0 for "no-op", otherwise +/-1.
    """
    # clamp both probabilities into [0, 1]
    flipProb = min(max(flipProb, 0), 1)
    rotProb = min(max(rotProb, 0), 1)

    flipDim = 0
    rotDir = 0
    # Note: the draw order (test, then direction) is part of the contract
    # for anyone replaying a seeded RNG stream.
    if numpy.random.rand() < flipProb:
        flipDim = numpy.sign(numpy.random.rand() - .5)
    if numpy.random.rand() < rotProb:
        rotDir = numpy.sign(numpy.random.rand() - .5)
    return flipDim, rotDir
def train_network(nn, trainMgr, validMgr,
nEpochs=30, learningRate=.001, decay=.995,
maxNumTilesPerEpoch=sys.maxint,
outDir="."):
"""Learns parameters for the given neural network.
"""
p2 = int(numpy.floor(nn.p/2.0))
# compute number of minibatches
nTrainBatches = int(numpy.ceil(trainMgr.batchSize / nn.miniBatchSize))
nValidBatches = int(numpy.ceil(validMgr.batchSize / nn.miniBatchSize))
print '[%s]: # of training batches is %d' % (__name__, nTrainBatches)
# allocate symbolic variables
indexT = T.lscalar() # index to a [mini]batch
learningRateT = T.scalar() # learning rate, theano variable
print '[%s]: initializing Theano...' % __name__
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# functions for the validation data
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
predict_validation_data = theano.function([indexT], nn.layers[-1].p_y_given_x,
givens={
nn.x: validMgr.X_batch_GPU[(indexT*nn.miniBatchSize):(indexT+1)*nn.miniBatchSize]})
#nn.x: validMgr.X_batch_GPU[(indexT*nn.miniBatchSize):(indexT+1)*nn.miniBatchSize],
#nn.y: validMgr.y_batch_int[(indexT*nn.miniBatchSize):(indexT+1)*nn.miniBatchSize]})
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# functions for the training data
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# The cost we minimize during training is the NLL of the model
# Assumes the last layer is the logistic regression layer.
cost = nn.layers[-1].negative_log_likelihood(nn.y)
# create a list of all model parameters to be fit by gradient descent
#params = layer3.params + layer2.params + layer1.params + layer0.params
params = reduce(lambda a,b: a+b, [l.params for l in nn.layers])
# create a list of gradients for all model parameters
grads = T.grad(cost, params)
# train_model is a function that updates the model parameters via
# SGD. Since this model has many parameters, it would be tedious to
# manually create an update rule for each model parameter. We thus
# create the updates list by automatically looping over all
# (params[i],grads[i]) pairs.
updates = []
for param_i, grad_i in zip(params, grads):
updates.append((param_i, param_i - learningRateT * grad_i))
train_model = theano.function([indexT, learningRateT], [cost, nn.layers[-1].p_y_given_x], updates=updates,
givens={
nn.x: trainMgr.X_batch_GPU[(indexT*nn.miniBatchSize):(indexT+1)*nn.miniBatchSize],
nn.y: trainMgr.y_batch_int[(indexT*nn.miniBatchSize):(indexT+1)*nn.miniBatchSize]})
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Do the training
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
startTime = time.clock()
trainTime = 0
validTime = 0
lastChatter = -1
nTilesProcessed = 0
nTilesFlipped = 0
nTilesRotated = 0
print '[%s]: Training network.' % __name__
for epoch in xrange(nEpochs):
print '[%s]: Starting epoch %d / %d (net time: %0.2f m)' % (__name__, epoch, nEpochs, (time.clock()-startTime)/60.)
sys.stdout.flush()
prevParams = EMN.save_network_parameters(nn, None) # params just before learning
predictions = numpy.zeros(trainMgr.y_batch_local.shape)
nErrors = 0
for slices,rows,cols,pct in trainMgr.make_balanced_pixel_generator():
# reset predictions
predictions[:] = -1;
# transform images and udpate GPU memory
flipDim,rotDir = random_image_modifiers()
trainMgr.update_gpu(slices, rows, cols, flipDim=flipDim, rotDir=rotDir)
if flipDim != 0: nTilesFlipped += len(slices)
if rotDir != 0: nTilesRotated += len(slices)
# process all mini-batches
for minibatchIdx in xrange(nTrainBatches):
tic = time.clock()
[costij, probij] = train_model(minibatchIdx, learningRate)
trainTime += time.clock()-tic
predij = numpy.argmax(probij,axis=1)
predictions[(minibatchIdx*nn.miniBatchSize):(minibatchIdx+1)*nn.miniBatchSize] = predij
nTilesProcessed += len(slices)
nErrors = numpy.sum(predictions != trainMgr.y_batch_local)
# periodically report progress (e.g. every 30 min)
netTime = time.clock()-startTime
if numpy.floor(netTime/1800) > lastChatter:
print '[%s]: epoch %d; processed %0.2e tiles (%0.2f %%); net time %0.2f m' % (__name__, epoch, nTilesProcessed, pct, netTime/60.)
lastChatter = numpy.floor(netTime/1800)
sys.stdout.flush()
# check for early epoch termination
if nTilesProcessed >= maxNumTilesPerEpoch:
print '[%s]: epoch %d: quitting early after %d tiles processed (%0.2f %%)' % (__name__, epoch, nTilesProcessed, pct)
break
#----------------------------------------
# update learning rate after each training epoch
#----------------------------------------
if decay < 1:
learningRate *= decay
#----------------------------------------
# save result (even though it may just be an intermediate result)
#----------------------------------------
fn = 'params_epoch%02d' % epoch
newParams = EMN.save_network_parameters(nn, os.path.join(outDir, fn), verbose=False)
# report how much the network parameters changed
keys = newParams.keys(); keys.sort()
for key in keys:
delta = numpy.ndarray.flatten(numpy.abs(newParams[key] - prevParams[key]))
print '[%s]: %s (%d params)\n %0.2e / %0.2e / %0.2e / %0.2e' % (__name__, key, len(delta), numpy.min(delta), numpy.max(delta), numpy.mean(delta), numpy.median(delta))
#----------------------------------------
# validation performance
#----------------------------------------
print '[%s]: validating performance ...' % __name__
Y_hat = numpy.zeros(validMgr.Y_local.shape)
for slices,rows,cols in validMgr.make_all_pixel_generator():
# update tiles on the GPU
validMgr.update_gpu(slices,rows,cols,flipDim=0,rotDir=0)
for ii in range(nValidBatches):
# predictions is a (nTiles x 2) matrix
# grab the second output (y=1)
# (i.e. we store probability of membrane)
tic = time.clock()
pMembrane = predict_validation_data(ii)[:,1]
validTime += time.clock() - tic
# Be careful - on the last iteration, there may be
# less than batchSize tiles remaining.
a = ii*nn.miniBatchSize
b = min((ii+1)*nn.miniBatchSize, len(slices))
if a > len(slices): break
Y_hat[slices[a:b], rows[a:b], cols[a:b]] = pMembrane[0:b-a]
# Validation statistics are based on a simple threshold
# (without any other postprocessing).
#
# note: throw away the border before evaluating
Y_true = validMgr.Y_local[:,p2:-p2,p2:-p2]
Y_hat = Y_hat[:,p2:-p2,p2:-p2]
eval_performance(Y_true, Y_hat, 0.5, verbose=True)
eval_performance(Y_true, Y_hat, 0.7, verbose=True)
# statistics for this epoch
print '[%s]: epoch %d complete!' % (__name__, epoch)
print '[%s]: learning rate: %0.2e' % (__name__, learningRate)
print '[%s]: # errors: %d' % (__name__, nErrors)
print '[%s]: net elapsed time: %0.2f m' % (__name__, ((time.clock() - startTime) / 60.))
print '[%s]: net gpu train time: %0.2f m' % (__name__, (trainTime/60.))
print '[%s]: net validation time: %0.2f m' % (__name__, (validTime/60.))
print '[%s]: processed tiles: %0.2e' % (__name__, nTilesProcessed)
print '[%s]: flipped tiles: %0.2e' % (__name__, nTilesFlipped)
print '[%s]: rotated tiles: %0.2e' % (__name__, nTilesRotated)
endTime = time.clock()
print('[%s]: Optimization complete.' % __name__)
print '[%s]: The code for file "%s" ran for %0.2fm' % (__name__, os.path.split(__file__)[1], ((endTime - startTime) / 60.))
print "[%s]: GPU train time: %0.2fm" | |
P2[0:int(L/2+1)]
P11 = P1[:]
P1[1:-1] = 2*P11[1:-1]
f = freq*np.linspace(0, int(L/2), num=int(L/2)+1,endpoint = True)/ L
plt.figure(figsize=(5,3))
plt.plot(f,P1)
font=15
plt.title('Single-sided amplitude spectrum',fontsize = font)
plt.xlabel('frequncy (Hz)', fontsize = font)
plt.ylabel('|P1(f)|', fontsize = font)
plt.tight_layout()
plt.savefig('FFT_' + str(round_number)+'.png', dpi = 600,bbox_inches='tight')
return (acf_lag, pacf_lag)
def residual_analysis(X, y, y_hat, nlag=None, alpha=0.01, round_number=0):
    """
    This function assesses the residuals (heteroscedasticity and dynamics).
    Heteroscedasticity is tested with the Goldfeld-Quandt, Breusch-Pagan
    and White tests; dynamics (serial correlation) is assessed based on
    the ACF and PACF of the residuals.
    Input:
        X: independent variables of size N x m
        y: dependent variable of size N x 1
        y_hat: fitted dependent variable of size N x 1
        nlag: number of ACF/PACF lags (derived from the sample size if None)
        alpha: significance level for statistical tests
        round_number: tag appended to the saved figure file names
    Output:
        figures, residual analysis
        (int_heteroscedasticity, int_dynamics), whether there is heteroscedasticity and dynamics
    """
    print('=== Residual Analysis ===')
    residual = y-y_hat
    # Choose a lag horizon that scales with the number of samples.
    if nlag is None:
        if y.shape[0] < 40:
            nlag = 10
        elif y.shape[0] > 200:
            nlag = 50
        else:
            nlag = y.shape[0]//4
    '''Basic Residual Plot'''
    fig, ax = plt.subplots(1, 1, figsize=(4, 3))
    plt.plot(y, y_hat, '*')
    sm.qqline(ax=ax, line='45', fmt='k--')
    plt.ylabel('fitted y', fontsize=14)
    plt.xlabel('y', fontsize=14)
    plt.axis('scaled')
    plt.tight_layout()
    plt.title('Real vs Fitted')
    plt.savefig('Fit_plot_' + str(round_number)+'.png', dpi = 600,bbox_inches='tight')
    fontsize = 20
    markersize = 8
    sample_number = np.linspace(1, residual.shape[0], residual.shape[0], endpoint=True)
    fig, axs = plt.subplots(2, 2, figsize=(12,9))
    # Bug fix: the 'normed' kwarg was removed from matplotlib (3.1+);
    # 'density=True' is the equivalent normalized-histogram switch.
    axs[0,0].hist(residual, density=True, facecolor='skyblue', alpha=1, edgecolor='black')
    axs[0,0].axvline(x=0, color='k', linestyle='--',alpha=0.6)
    axs[0,0].set_ylabel('Frequency',fontsize = fontsize)
    axs[0,0].set_xlabel('Residual',fontsize = fontsize)
    axs[0,0].set_title('Residual histogram',fontsize = fontsize)
    axs[0,0].tick_params(labelsize = fontsize-3)
    axs[0,1].plot(sample_number, residual, 'o', color = 'cornflowerblue', markersize = markersize)
    axs[0,1].axhline(y=0, color='k', linestyle='--',alpha=0.6)
    axs[0,1].set_xlabel('Sample number',fontsize = fontsize)
    axs[0,1].set_ylabel('Residual',fontsize = fontsize)
    axs[0,1].set_title('Residual',fontsize = fontsize)
    axs[0,1].tick_params(labelsize = fontsize-3)
    sm.qqplot(residual.squeeze(), stats.t, fit=True,ax=axs[1,0])
    sm.qqline(ax=axs[1,0], line='45', fmt='k--')
    axs[1,0].set_xlabel('Theoretical quantiles',fontsize = fontsize)
    axs[1,0].set_ylabel('Sample quantiles',fontsize = fontsize)
    axs[1,0].set_title('Normal Q-Q plot',fontsize = fontsize)
    axs[1,0].tick_params(labelsize = fontsize-3)
    axs[1,0].get_lines()[0].set_markersize(markersize)
    axs[1,0].get_lines()[0].set_markerfacecolor('cornflowerblue')
    axs[1,1].plot(y_hat, residual, 'o', color = 'cornflowerblue', markersize = markersize)
    axs[1,1].axhline(y=0, color='k', linestyle='--',alpha=0.6)
    axs[1,1].set_xlabel('Fitted response',fontsize = fontsize)
    axs[1,1].set_ylabel('Residual',fontsize = fontsize)
    axs[1,1].set_title('Residual versus fitted response',fontsize = fontsize)
    axs[1,1].tick_params(labelsize = fontsize-3)
    plt.tight_layout()
    plt.savefig('Residual_plot_' + str(round_number)+'.png', dpi = 600,bbox_inches='tight')
    '''Heteroscedasticity'''
    # Goldfeld-Quandt: tests whether the variance is the same in 2 subsamples
    test_GF = sms.het_goldfeldquandt(residual, X)
    # Breusch-Pagan test for heteroscedasticity
    test_BP = sms.het_breuschpagan(residual, np.column_stack((np.ones((y_hat.shape[0],1)), y_hat)))
    # White test for heteroscedasticity
    test_white = sms.het_white(residual, np.column_stack((np.ones((y_hat.shape[0],1)), y_hat)))
    # Flag heteroscedasticity unless all three tests fail to reject.
    int_heteroscedasticity = 1
    if test_GF[1] > alpha and test_BP[-1] > alpha and test_white[-1] > alpha:
        int_heteroscedasticity = 0
    '''Dynamics'''
    # autocorrelation
    fig = plt.figure(figsize=(5,3))
    ax1 = fig.add_subplot(111)
    fig = sm.graphics.tsa.plot_acf(residual, lags=nlag, ax=ax1, alpha= alpha)
    for item in ([ax1.title, ax1.xaxis.label, ax1.yaxis.label] + ax1.get_xticklabels() + ax1.get_yticklabels()):
        item.set_fontsize(14)
    ax1.set_xlabel('Lag')
    plt.tight_layout()
    plt.savefig('ACF_' + str(round_number)+'.png', dpi = 600,bbox_inches='tight')
    # partial autocorrelation
    fig = plt.figure(figsize=(5,3))
    ax2 = fig.add_subplot(111)
    fig = sm.graphics.tsa.plot_pacf(residual, lags=nlag, ax=ax2, alpha= alpha)
    for item in ([ax2.title, ax2.xaxis.label, ax2.yaxis.label] + ax2.get_xticklabels() + ax2.get_yticklabels()):
        item.set_fontsize(14)
    ax2.set_xlabel('Lag')
    plt.tight_layout()
    plt.savefig('PACF_' + str(round_number)+'.png', dpi = 600,bbox_inches='tight')
    # Significant ACF lags via the Ljung-Box Q-statistic, with an
    # alpha/nlag (Bonferroni-style) corrected threshold.
    acf, confint, qstat, acf_pvalues = sm.tsa.stattools.acf(residual, nlags=nlag, qstat=True, alpha=alpha)
    acf_detection = acf_pvalues < (alpha/nlag)
    acf_lag = [i for i, x in enumerate(acf_detection) if x]
    # Significant PACF lags: values outside their confidence interval.
    pacf, confint_pacf = sm.tsa.stattools.pacf(residual, nlags=nlag, alpha=alpha)
    pacf_lag = [i for i, x in enumerate(pacf) if x < confint_pacf[i][0] or x > confint_pacf[i][1]]
    # Any significant (P)ACF lag indicates residual dynamics.
    int_dynamics = 1 if (acf_lag or pacf_lag) else 0
    return (int_heteroscedasticity, int_dynamics)
def nonlinearity_assess_dynamic(X, y, plot, cat=None, alpha = 0.01, difference = 0.4, xticks = None, yticks = None, round_number = 0, lag = 3):
"""
This function assesses the nonlinearity property of the data set for regression purpose
Particularly, the pairwise nonlinear correlaiton between X[:] and y is assessed
Input:
X: independent variables of size N x m
y: dependent variable of size N x 1
plot: flag for plotting
alpha: significance level for quaratic testing
difference: significance level for maximal correlation - linear correlation
Output:
int, whether there is nonlinearity in dataset
"""
#nonlinearity by linear correlation,quadratic test, and maximal correlation
m = np.shape(X)[1]
N = np.shape(X)[0]
if yticks is None:
yticks = ['y']
if xticks is None:
xticks = [r'x$_'+str(i)+'$' for i in range(1,np.shape(X)[1]+1)]
xticks = xticks + yticks
ylabel = ['lag'+str(i+1) for i in range(lag)]
#pre-process data
scaler_x = StandardScaler()
scaler_x.fit(X)
X = scaler_x.transform(X)
scaler_y = StandardScaler()
scaler_y.fit(y)
y=scaler_y.transform(y)
LC = np.zeros((m+1,lag))
QT = np.zeros((m+1,lag))
MC = np.zeros((m+1,lag))
for l in range(0,lag):
for i in range(0,m):
#linear correlation
LC[i,l] = np.corrcoef(X[:-l-1,i],y[l+1:].squeeze())[0,1]
#quaratic test
reg = LinearRegression(fit_intercept=False).fit(X[:-l-1,i].reshape(-1, 1), y[l+1:].reshape(-1, 1))
y_pred = reg.predict(X[:-l-1,i].reshape(-1, 1))
mse1 = np.sum((y[l+1:].reshape(-1, 1)-y_pred)**2)
regq = LinearRegression(fit_intercept=False).fit(np.array([X[:-l-1,i]**2,X[:-l-1,i]]).transpose(), y[l+1:].reshape(-1, 1))
yq_pred = regq.predict(np.array([X[:-l-1,i]**2, X[:-l-1,i]]).transpose())
mse2 = np.sum((y[l+1:].reshape(-1, 1)-yq_pred)**2)
F = (mse1- mse2)/(mse2/(N-2))
p_value = 1 - f.cdf(F, 1, N-2)
QT[i,l] = 0 if p_value < 10*np.finfo(float).eps else p_value
#maximal correlation by ACE pacakge R (acepack)
if cat == None or cat[i] == 0:
MC[i,l] = ace_R.ace_R(X[:-l-1,i].reshape(-1, 1), y[l+1:])
else:
MC[i,l] = ace_R.ace_R(X[:-l-1,i].reshape(-1, 1), y[l+1:], cat=1)
for l in range(0,lag):
#linear correlation
LC[m,l] = np.corrcoef(y[:-l-1].squeeze(),y[l+1:].squeeze())[0,1]
#quaratic test
reg = LinearRegression(fit_intercept=False).fit(y[:-l-1].reshape(-1, 1), y[l+1:].reshape(-1, 1))
y_pred = reg.predict(y[:-l-1].reshape(-1, 1))
mse1 = np.sum((y[l+1:].reshape(-1, 1)-y_pred)**2)
regq = LinearRegression(fit_intercept=False).fit(np.array([y[:-l-1].squeeze()**2,y[:-l-1].squeeze()]).transpose(), y[l+1:].reshape(-1, 1))
yq_pred = regq.predict(np.array([y[:-l-1].squeeze()**2, y[:-l-1].squeeze()]).transpose())
mse2 = np.sum((y[l+1:].reshape(-1, 1)-yq_pred)**2)
F = (mse1- mse2)/(mse2/(N-2))
p_value = 1 - f.cdf(F, 1, N-2)
QT[m,l] = 0 if p_value < 10*np.finfo(float).eps else p_value
#maximal correlation by ACE pacakge R (acepack)
if cat == None or cat[i] == 0:
MC[m,l] = ace_R.ace_R(y[:-l-1].reshape(-1, 1), y[l+1:])
else:
MC[m,l] = ace_R.ace_R(y[:-l-1].reshape(-1, 1), y[l+1:], cat=1)
if plot:
print('=== Nonlinearity test results for lagged data ===')
## plot for linear correlation
cmap = sns.diverging_palette(10,250, as_cmap=True)
plt.figure(figsize=(X.shape[1]+1,lag))
sns.set(font_scale=1.6)
sns.set_style("whitegrid")
ax=sns.heatmap(LC.transpose(),linewidths=0.8,vmin=-1,vmax=1,cmap=cmap,annot=True,\
linecolor="white",annot_kws={"size": 14},xticklabels=xticks,square=True,\
yticklabels=ylabel, cbar_kws={'label': 'linear correlation',"orientation": "horizontal",'ticks' : [-1,0,1]})
loc, labels = plt.yticks()
ax.set_yticklabels(labels, rotation=0)
plt.savefig('linear_correlation_' + str(round_number)+ 'lag'+str(lag)+'.png', dpi = 600,bbox_inches='tight')
## plot quadratic test
plt.figure(figsize=(X.shape[1]+1,lag))
#calcaultate the rejection threhsold: default alpha=0.01 for one test
q_test_threshold = alpha/np.shape(QT)[0]/np.shape(QT)[1]
plot_threshold = math.floor(np.log10(q_test_threshold))
plot_threshold = 10**plot_threshold
#set lower bar
low_value_flags = QT < plot_threshold**2
QT[low_value_flags] = plot_threshold**2
ax=sns.heatmap(QT.transpose(),linewidths=0.8,vmin=plot_threshold**2,vmax=1,cmap="Blues",annot=True, norm=LogNorm(),\
linecolor="white",annot_kws={"size": 14},xticklabels=xticks,square=True,yticklabels=ylabel,\
cbar_kws={'label': 'p-value of quadratic test',"orientation": "horizontal",'ticks' : [plot_threshold**2,plot_threshold,1]})
loc, labels = plt.yticks()
ax.set_yticklabels(labels, rotation=0)
plt.savefig('quaradtic_test_' + str(round_number)+ 'lag'+str(lag)+'.png', dpi = 600,bbox_inches='tight')
## plot maximal correlation
plt.figure(figsize=(X.shape[1]+1,lag))
ax=sns.heatmap(MC.transpose(),linewidths=0.8,vmin=0,vmax=1,cmap="Blues",annot=True,\
linecolor="white",annot_kws={"size": 14},xticklabels=xticks,square=True,yticklabels=ylabel,\
cbar_kws={'label': 'maximal correlation',"orientation": "horizontal",'ticks' : [0,0.5,1]})
loc, labels = plt.yticks()
ax.set_yticklabels(labels, rotation=0)
plt.savefig('maximal_correlation_' + str(round_number)+ 'lag'+str(lag)+'.png', dpi = 600,bbox_inches='tight')
if m >1:
###For quadartic test
Bi,_ = nr.poly_feature(X, degree = 2, interaction = True, power = False)
Bi = Bi[:,X.shape[1]:]
bi_test_result = np.zeros(l+1)
for l in range(lag):
p_values = np.zeros((Bi.shape[1],1))
counter = 0
for i in range(0,m-1):
for j in range(i+1,m):
regl = LinearRegression(fit_intercept=False).fit(np.array([X[:-l-1,i], X[:-l-1,j]]).transpose(), y[l+1:].reshape(-1,1))
yl_pred = regl.predict(np.array([X[:-l-1,i], X[:-l-1,j]]).transpose())
mse1 = np.sum((y[l+1:].reshape(-1,1) - yl_pred)**2)
regi = LinearRegression(fit_intercept=False).fit(np.array([X[:-l-1,i], X[:-l-1,j],Bi[:-l-1, counter]]).transpose(), y[l+1:].reshape(-1,1))
yi_pred = regi.predict(np.array([X[:-l-1,i], X[:-l-1,j], Bi[:-l-1,counter]]).transpose())
mse2 = np.sum((y[l+1:].reshape(-1,1)-yi_pred)**2)
counter += 1
F = (mse1-mse2)/(mse2/(N-2))
p_values[counter-1] = 1-f.cdf(F, 1, N-2)
tri = np.zeros((m-1, m-1))
count = 0
for i in range(1,m):
if i == 1:
tri[-i, -1] = p_values[-i-count:]
else:
tri[-i, -i:] = p_values[-i-count:-count].flatten()
count += i
tri[tri<1e-15] = 0
bi_test_result[l] = sum(p_values < alpha/np.shape(p_values)[0]/(lag+1))
if plot:
mask = np.zeros_like(tri, dtype=np.bool)
mask[np.tril_indices_from(mask, k=-1)] = True
s=17
# Set up the matplotlib figure
sns.set_style("white")
fig, ax = plt.subplots(figsize=(1.5*(m-1),1.5*(m-1)))
sns.set(font_scale=1.3)
plt.tick_params(labelsize=s)
plot_threshold = 0.15
sns.heatmap(tri, cmap="Blues", mask=mask,square=True,vmin=0,vmax=1,linecolor="white", linewidths=0.8, ax=ax,annot=True,cbar_kws={"shrink": .82, 'ticks' : [0, 0.15, 0.5, 1]})
ax.set_xticklabels(xticks[1:])
ax.set_yticklabels(xticks[:-1])
plt.title('p_values for bilinear terms lag ' + str(l))
plt.savefig('f_bilinear_'+ str(round_number)+ 'lag'+str(l)+'.png', dpi = 600,bbox_inches='tight')
bi_test = sum(bi_test_result) > 1
#detemine whether nonlinearity is significant
corr_difference = MC - abs(LC) > difference #default 0.4, for maximal correlation
a= MC>0.92
b | |
<gh_stars>0
""" Module with functionalities for blocking based on a dictionary of records,
where a blocking function must return a dictionary with block identifiers
as keys and values being sets or lists of record identifiers in that block.
"""
# =============================================================================
def noBlocking(rec_dict):
    """Trivial "blocking" that places every record into one single block.
    Parameter Description:
       rec_dict : Dictionary that holds the record identifiers as keys and
                  corresponding list of record values
    Returns a dictionary with one entry, 'all_rec', mapping to the list
    of all record identifiers.
    """
    print("Run 'no' blocking:")
    print(' Number of records to be blocked: '+str(len(rec_dict)))
    print('')
    return {'all_rec': list(rec_dict.keys())}
# -----------------------------------------------------------------------------
def simpleBlocking(rec_dict, blk_attr_list):
    """Build the blocking index data structure (dictionary) that maps each
    blocking key value (BKV) to the list of record identifiers in that
    block.  The BKV of a record is the plain concatenation of its values
    for the selected blocking attributes.
    Parameter Description:
      rec_dict      : Dictionary that holds the record identifiers as keys
                      and corresponding list of record values
      blk_attr_list : List of blocking key attributes to use
    Returns a dictionary with blocking key values as its keys and a list
    of record identifiers as its values (one list for each block).
    Examples:
      Blocking on 'postcode' alone yields keys like '2000', '2600';
      blocking on 'postcode' and 'gender' yields '2000f', '2000m', etc.
    """
    print('Run simple blocking:')
    print(' List of blocking key attributes: '+str(blk_attr_list))
    print(' Number of records to be blocked: '+str(len(rec_dict)))
    print('')

    block_dict = {}  # Maps each generated BKV to its record identifiers

    for rec_id, rec_values in rec_dict.items():
        # Concatenate the selected attribute values into the BKV.
        rec_bkv = ''.join(rec_values[attr] for attr in blk_attr_list)
        # setdefault creates the block the first time its BKV is seen.
        block_dict.setdefault(rec_bkv, []).append(rec_id)

    return block_dict
# -----------------------------------------------------------------------------
def phoneticBlocking(rec_dict, blk_attr_list):
    """Build the blocking index data structure (dictionary) that maps each
    blocking key value (BKV) to the list of record identifiers in that
    block.  The BKV of a record is the concatenation of the Soundex codes
    of its values for the selected blocking attributes.
    Parameter Description:
      rec_dict      : Dictionary that holds the record identifiers as keys
                      and corresponding list of record values
      blk_attr_list : List of blocking key attributes to use
    Returns a dictionary with blocking key values as its keys and a list
    of record identifiers as its values (one list for each block).
    """
    # Soundex digit for each consonant group; characters absent from the
    # table ('aehiouwy' and anything non-alphabetic) are skipped.
    sndx_map = {}
    for letters, digit in [('bfpv', '1'), ('cgjkqsxz', '2'), ('dt', '3'),
                           ('l', '4'), ('mn', '5'), ('r', '6')]:
        for ch in letters:
            sndx_map[ch] = digit

    print('Run phonetic blocking:')
    print(' List of blocking key attributes: '+str(blk_attr_list))
    print(' Number of records to be blocked: '+str(len(rec_dict)))
    print('')

    block_dict = {}  # Maps each generated BKV to its record identifiers

    for rec_id, rec_values in rec_dict.items():
        rec_bkv = ''  # Blocking key value for this record

        for attr in blk_attr_list:
            attr_val = rec_values[attr]
            if attr_val == '':
                rec_bkv += 'z000'  # Often used as Soundex code for empty values
                continue
            attr_val = attr_val.lower()
            sndx_val = attr_val[0]  # The first letter is kept verbatim
            for c in attr_val[1:]:
                digit = sndx_map.get(c)
                # Append the digit unless it would repeat the previous one.
                if digit is not None and sndx_val[-1] != digit:
                    sndx_val += digit
            # Zero-pad / truncate so every code is exactly 4 characters.
            rec_bkv += (sndx_val + '000')[:4]

        # setdefault creates the block the first time its BKV is seen.
        block_dict.setdefault(rec_bkv, []).append(rec_id)

    return block_dict
# -----------------------------------------------------------------------------
def slkBlocking(rec_dict, fam_name_attr_ind, giv_name_attr_ind,
                dob_attr_ind, gender_attr_ind):
    """Build the blocking index data structure (dictionary) to store blocking
    key values (BKV) as keys and the corresponding list of record identifiers.
    This function implements the statistical linkage key (SLK-581)
    blocking approach as used in real-world linkage applications:
    http://www.aihw.gov.au/WorkArea/DownloadAsset.aspx?id=60129551915
    A SLK-581 blocking key is based on the concatenation of:
    - 2nd, 3rd and 5th letters of the family name (missing letters are
      padded with '2'; a fully missing name becomes '999')
    - 2nd and 3rd letters of the given name (padded with '2' / '99')
    - Date of birth, formatted as ddmmyyyy
    - Sex, coded as 1 (m), 2 (f) or 9 (other/missing)
    Parameter Description:
      rec_dict          : Dictionary that holds the record identifiers as
                          keys and corresponding list of record values
      fam_name_attr_ind : The number (index) of the attribute that contains
                          family name (last name)
      giv_name_attr_ind : The number (index) of the attribute that contains
                          given name (first name)
      dob_attr_ind      : The number (index) of the attribute that contains
                          date of birth (expected format: dd/mm/yyyy)
      gender_attr_ind   : The number (index) of the attribute that contains
                          gender (sex)
    This method returns a dictionary with blocking key values as its keys and
    list of record identifiers as its values (one list for each block).
    """
    block_dict = {}  # The dictionary with blocks to be generated and returned

    print('Run SLK-581 blocking:')
    print(' Number of records to be blocked: '+str(len(rec_dict)))
    print('')

    for (rec_id, rec_values) in rec_dict.items():
        rec_bkv = ''  # Initialise the blocking key value for this record

        # Get the family name and remove non letter characters.
        # (Bug fix: the replacements were previously assigned to an unused
        # temporary 'fam_nam' and re-applied to the original string, so the
        # uncleaned name was being indexed.)
        fam_name = rec_values[fam_name_attr_ind]
        fam_name = fam_name.replace('-', '').replace(',', '').replace('_', '')
        if (fam_name == ''):
            rec_bkv += '999'
        elif (len(fam_name) >= 5):
            rec_bkv += (fam_name[1]+fam_name[2]+fam_name[4])
        elif (len(fam_name) >= 3):
            rec_bkv += (fam_name[1]+fam_name[2]+'2')
        elif (len(fam_name) >= 2):
            rec_bkv += (fam_name[1]+'22')
        else:
            # Bug fix: one-letter names previously added nothing, yielding
            # malformed (short) keys; pad all three positions with '2'.
            rec_bkv += '222'

        # Get the given name and remove non letter characters
        giv_name = rec_values[giv_name_attr_ind]
        giv_name = giv_name.replace('-', '').replace(',', '').replace('_', '')
        if (giv_name == ''):
            rec_bkv += '99'
        elif (len(giv_name) >= 3):
            rec_bkv += (giv_name[1]+giv_name[2])
        elif (len(giv_name) >= 2):
            rec_bkv += (giv_name[1]+'2')
        else:
            rec_bkv += '22'  # One-letter given name: pad both positions

        # DoB structure we use: dd/mm/yyyy -> ddmmyyyy with zero padding
        dob_list = rec_values[dob_attr_ind].split('/')
        if (len(dob_list[0]) < 2):
            dob_list[0] = '0' + dob_list[0]  # Add leading zero for days < 10
        if (len(dob_list[1]) < 2):
            dob_list[1] = '0' + dob_list[1]  # Add leading zero for months < 10
        dob = ''.join(dob_list)  # Create: ddmmyyyy
        assert len(dob) == 8, dob
        rec_bkv += dob

        # Encode gender
        gender = rec_values[gender_attr_ind].lower()
        if (gender == 'm'):
            rec_bkv += '1'
        elif (gender == 'f'):
            rec_bkv += '2'
        else:
            rec_bkv += '9'

        # Insert the record into its block (creating the block if needed)
        block_dict.setdefault(rec_bkv, []).append(rec_id)

    return block_dict
# -----------------------------------------------------------------------------
# Extra task: Implement canopy clustering based blocking as described in
# the Data Matching book
# -----------------------------------------------------------------------------
def printBlockStatistics(blockA_dict, blockB_dict):
"""Calculate and print some basic statistics about the generated blocks
"""
print('Statistics of the generated blocks:')
numA_blocks = len(blockA_dict)
numB_blocks = | |
ix not in idx]
m_idx = self._sort_indexer_as_codes(m_idx, group)
if exp == 'after':
names.extend(name)
names.extend([c for c in group])
combines.append(
np.concatenate([vec, self.matrix[:, m_idx]], axis=1))
else:
names.extend([c for c in group])
names.extend(name)
combines.append(
np.concatenate([self.matrix[:, m_idx], vec], axis=1))
else:
names.extend(name)
combines.append(vec)
if axis == 'y':
self._switch_axes()
# re-construct the combined data matrix
combines = np.concatenate(combines, axis=1)
if axis == 'y':
self._switch_axes()
combined_matrix = np.concatenate([self.matrix[:, [0]],
combines], axis=1)
if axis == 'y':
combined_matrix = combined_matrix.swapaxes(1, 2)
self._switch_axes()
# update the sectional information
new_sect_def = list(range(0, combined_matrix.shape[1] - 1))
if axis == 'x':
self.xdef = new_sect_def
self._x_indexers = self._get_x_indexers()
self.comb_x = names
else:
self.ydef = new_sect_def
self._y_indexers = self._get_y_indexers()
self.comb_y = names
self.matrix = combined_matrix
def _slice_vec(self, code, axis='x'):
'''
'''
if axis == 'x':
code_idx = self.xdef.index(code) + 1
else:
code_idx = self.ydef.index(code) + 1
if axis == 'x':
m_slice = self.matrix[:, [code_idx]]
else:
self._switch_axes()
m_slice = self.matrix[:, [code_idx]]
self._switch_axes()
return m_slice
def _grp_vec(self, codes, axis='x'):
netted, idx = self._missingfy(codes=codes, axis=axis,
keep_codes=True, keep_base=True,
indices=True, inplace=False)
if axis == 'y':
netted._switch_axes()
net_vec = np.nansum(netted.matrix[:, netted._x_indexers],
axis=1, keepdims=True)
net_vec /= net_vec
return net_vec, idx
def _logic_vec(self, condition):
"""
Create net vector of qualified rows based on passed condition.
"""
filtered = self.filter(condition=condition, inplace=False)
net_vec = np.nansum(filtered.matrix[:, self._x_indexers], axis=1,
keepdims=True)
net_vec /= net_vec
return net_vec
def _grp_type(self, grp_def):
if isinstance(grp_def, list):
if not isinstance(grp_def[0], (int, float)):
return 'block'
else:
return 'list'
elif isinstance(grp_def, tuple):
return 'logical'
elif isinstance(grp_def, dict):
return 'wildcard'
def _add_unused_codes(self, grp_def_list, axis):
'''
'''
query_codes = self.xdef if axis == 'x' else self.ydef
frame_lookup = {c: [[c], [c], None, False] for c in query_codes}
frame = [[code] for code in query_codes]
for grpdef_idx, grpdef in enumerate(grp_def_list):
for code in grpdef[1]:
if [code] in frame:
if grpdef not in frame:
frame[frame.index([code])] = grpdef
else:
frame[frame.index([code])] = '-'
frame = [code for code in frame if not code == '-']
for code in frame:
if code[0] in list(frame_lookup.keys()):
frame[frame.index([code[0]])] = frame_lookup[code[0]]
return frame
def _organize_grp_def(self, grp_def, method_expand, complete, axis):
"""
Sanitize a combine instruction list (of dicts): names, codes, expands.
"""
organized_def = []
codes_used = []
any_extensions = complete
any_logical = False
if method_expand is None and complete:
method_expand = 'before'
if not self._grp_type(grp_def) == 'block':
grp_def = [{'net': grp_def, 'expand': method_expand}]
for grp in grp_def:
if any(isinstance(val, (tuple, dict)) for val in list(grp.values())):
if complete:
ni_err = ('Logical expr. unsupported when complete=True. '
'Only list-type nets/groups can be completed.')
raise NotImplementedError(ni_err)
if 'expand' in list(grp.keys()):
del grp['expand']
expand = None
logical = True
else:
if 'expand' in list(grp.keys()):
grp = copy.deepcopy(grp)
expand = grp['expand']
if expand is None and complete:
expand = 'before'
del grp['expand']
else:
expand = method_expand
logical = False
organized_def.append([list(grp.keys()), list(grp.values())[0], expand, logical])
if expand:
any_extensions = True
if logical:
any_logical = True
codes_used.extend(list(grp.values())[0])
if not any_logical:
if len(set(codes_used)) != len(codes_used) and any_extensions:
ni_err_extensions = ('Same codes in multiple groups unsupported '
'with expand and/or complete =True.')
raise NotImplementedError(ni_err_extensions)
if complete:
return self._add_unused_codes(organized_def, axis)
else:
return organized_def
def _force_to_nparray(self):
"""
Convert the aggregation result into its numpy array equivalent.
"""
if isinstance(self.result, pd.DataFrame):
self.result = self.result.values
return True
else:
return False
    def _attach_margins(self):
        """
        Force margins back into the current Quantity.result if none are found.

        Returns True when margins were already present, False when at least
        one had to be re-attached (or when the current aggregate is a
        statistic, which never carries margins).

        NOTE(review): a missing y-margin combined with a present x-margin
        still returns True, since the second branch overwrites ``margins`` --
        confirm this reflects the intended contract.
        """
        if not self._res_is_stat():
            values = self.result
            if not self._has_y_margin and not self.y == '@':
                margins = False
                # prepend the row base (without its total cell) as y-margin
                values = np.concatenate([self.rbase[1:, :], values], 1)
            else:
                margins = True
            if not self._has_x_margin:
                margins = False
                # prepend the column base row as x-margin
                values = np.concatenate([self.cbase, values], 0)
            else:
                margins = True
            self.result = values
            return margins
        else:
            return False
    def _organize_expr_def(self, expression, axis):
        """
        Validate a calc() expression triple and map codes to array indices.

        ``expression`` is ``(val1, op, val2)`` where each val is either an
        axis code or a one-element list wrapping a scalar (see ``constant``).
        Returns ``(val1, op, val2, expr_type, idx_c)`` with codes replaced by
        indices into the aggregation result (offset by 1 when a margin
        row/column is present) and ``expr_type`` one of ``'scalar_1'``,
        ``'scalar_2'`` or ``'vectors'``.

        Raises
        ------
        IndexError
            if a referenced code is not part of the requested axis.
        """
        # Prepare expression parts and lookups for indexing the agg. result
        val1, op, val2 = expression[0], expression[1], expression[2]
        if self._res_is_stat():
            idx_c = [self.current_agg]
            offset = 0
        else:
            if axis == 'x':
                idx_c = self.xdef if not self.comb_x else self.comb_x
            else:
                idx_c = self.ydef if not self.comb_y else self.comb_y
            offset = 1
        # Test expression validity and find np.array indices / prepare scalar
        # values of the expression
        idx_err = '"{}" not found in {}-axis.'
        # [1] input is 1. scalar, 2. vector from the agg. result
        if isinstance(val1, list):
            if not val2 in idx_c:
                raise IndexError(idx_err.format(val2, axis))
            val1 = val1[0]
            val2 = idx_c.index(val2) + offset
            expr_type = 'scalar_1'
        # [2] input is 1. vector from the agg. result, 2. scalar
        elif isinstance(val2, list):
            if not val1 in idx_c:
                raise IndexError(idx_err.format(val1, axis))
            val1 = idx_c.index(val1) + offset
            val2 = val2[0]
            expr_type = 'scalar_2'
        # [3] input is two vectors from the agg. result
        elif not any(isinstance(val, list) for val in [val1, val2]):
            if not val1 in idx_c:
                raise IndexError(idx_err.format(val1, axis))
            if not val2 in idx_c:
                raise IndexError(idx_err.format(val2, axis))
            val1 = idx_c.index(val1) + offset
            val2 = idx_c.index(val2) + offset
            expr_type = 'vectors'
        return val1, op, val2, expr_type, idx_c
@staticmethod
def constant(num):
return [num]
    def calc(self, expression, axis='x', result_only=False):
        """
        Compute (simple) aggregation level arithmetics.

        Parameters
        ----------
        expression : dict
            single-entry mapping of the calculation name to a
            ``(val1, op, val2)`` triple (see ``_organize_expr_def``).
        axis : {'x', 'y'}, default 'x'
            axis the expression codes refer to.
        result_only : bool, default False
            if True, keep only the base row and the calculation result
            instead of appending the result to the full aggregation.

        Returns
        -------
        self

        Raises
        ------
        ValueError
            if no aggregation exists yet or ``axis`` is invalid.
        NotImplementedError
            if the current aggregation type cannot serve as calc input.
        """
        unsupported = ['cbase', 'rbase', 'summary', 'x_sum', 'y_sum']
        if self.result is None:
            raise ValueError('No aggregation to base calculation on.')
        elif self.current_agg in unsupported:
            ni_err = 'Aggregation type "{}" not supported.'
            raise NotImplementedError(ni_err.format(self.current_agg))
        elif axis not in ['x', 'y']:
            raise ValueError('Invalid axis parameter: {}'.format(axis))
        # normalize to a plain array (+margins) so indexing below is uniform
        is_df = self._force_to_nparray()
        has_margin = self._attach_margins()
        values = self.result
        expr_name = list(expression.keys())[0]
        if axis == 'x':
            self.calc_x = expr_name
        else:
            self.calc_y = expr_name
            # y-axis calcs operate on the transposed result
            values = values.T
        expr = list(expression.values())[0]
        v1, op, v2, exp_type, index_codes = self._organize_expr_def(expr, axis)
        # ====================================================================
        # TODO: generalize this calculation part so that it can "parse"
        # arbitrary calculation rules given as nested or concatenated
        # operators/codes sequences.
        if exp_type == 'scalar_1':
            val1, val2 = v1, values[[v2], :]
        elif exp_type == 'scalar_2':
            val1, val2 = values[[v1], :], v2
        elif exp_type == 'vectors':
            val1, val2 = values[[v1], :], values[[v2], :]
        calc_res = op(val1, val2)
        # ====================================================================
        if axis == 'y':
            calc_res = calc_res.T
        ap_axis = 0 if axis == 'x' else 1
        if result_only:
            # keep only the base row/col plus the calculation outcome
            if not self._res_is_stat():
                self.result = np.concatenate([self.result[[0], :], calc_res],
                                             ap_axis)
            else:
                self.result = calc_res
        else:
            self.result = np.concatenate([self.result, calc_res], ap_axis)
            if axis == 'x':
                self.calc_x = index_codes + [self.calc_x]
            else:
                self.calc_y = index_codes + [self.calc_y]
        # refresh the cached bases from the (possibly grown) result
        self.cbase = self.result[[0], :]
        if self.type in ['simple', 'nested']:
            self.rbase = self.result[:, [0]]
        else:
            self.rbase = None
        if not self._res_is_stat():
            self.current_agg = 'calc'
            self._organize_margins(has_margin)
        else:
            self.current_agg = 'calc'
        if is_df:
            self.to_df()
        return self
def count(self, axis=None, raw_sum=False, margin=True, as_df=True):
"""
Count entries over all cells or per axis margin.
Parameters
----------
axis : {None, 'x', 'y'}, deafult None
When axis is None, the frequency of all cells from the uni- or
multivariate distribution is presented. If the axis is specified
to be either 'x' or 'y' the margin per axis becomes the resulting
aggregation.
raw_sum : bool, default False
If True will perform a simple summation over the cells given the
axis parameter. This ignores net counting of qualifying answers in
favour of summing over all answers given when considering margins.
margin : bool, deafult True
Controls whether the margins of the aggregation result are shown.
This also applies to margin aggregations themselves, since they
contain a margin in (form of the total number of cases) as well.
as_df : bool, default True
Controls whether the aggregation is transformed into a Quantipy-
multiindexed (following the Question/Values convention)
pandas.DataFrame or will be left in its numpy.array format.
Returns
-------
self
Passes a pandas.DataFrame or numpy.array of cell or margin counts
to the ``result`` property.
"""
if axis is None and raw_sum:
raise ValueError('Cannot calculate raw sum without axis.')
if axis is None:
self.current_agg = 'freq'
elif axis == 'x':
self.current_agg = 'cbase' if not raw_sum else 'x_sum'
elif axis == 'y':
self.current_agg = 'rbase' if not raw_sum else 'y_sum'
if not self.w == '@1':
self.weight()
if not self.is_empty or self._uses_meta:
counts = np.nansum(self.matrix, axis=0)
else:
counts = self._empty_result()
self.cbase = counts[[0], :]
if self.type in ['simple', 'nested']:
self.rbase = counts[:, [0]]
else:
self.rbase = | |
<filename>mm_scripting_util/util.py
import logging
import os
import sys
import numpy as np
import shutil
import platform
import getpass
import traceback
import corner
import matplotlib.pyplot as plt
import matplotlib
import inspect
import enum
import collections
import scipy.stats
import tabulate
import glob
import json
import argparse
import madminer.core
import madminer.lhe
import madminer.sampling
import madminer.utils.interfaces.madminer_hdf5
import subprocess
# torch (pulled in via madminer.ml) is an optional dependency: record its
# availability and keep the traceback so callers can surface a helpful
# error message later instead of failing at import time.
HAS_TORCH = True
TORCH_IMPORT_ERROR = None
try:
    import madminer.ml
except ImportError:
    TORCH_IMPORT_ERROR = traceback.format_exc()
    HAS_TORCH = False
class _mm_base_util:
    """
    base functions and variables used in most other functions.
    'true' helpers, if you will.
    Also, baseline init function for dir/name/path etc.
    """
    # marker used by mixin consumers to detect this base class
    _CONTAINS_BASE_UTIL = True
    # default matplotlib color/linestyle cycles used by plotting helpers
    _DEFAULT_COLOR_CYCLE = plt.rcParams["axes.prop_cycle"].by_key()["color"]
    _DEFAULT_LINESTYLE_CYCLE = list(matplotlib.lines.lineStyles.keys())[1:]
    class error_codes(enum.Enum):
        """
        Error code enum class utility;
        generally helpful for debugging/unit testing.
        Codes are grouped by decade: 0-8 general, 11-18 simulation,
        20-29 training/evaluation.
        """
        # ones: general errors/success
        Success = 0
        Error = 1
        InvalidBackendError = 2
        NoDirectoryError = 3
        InvalidPlatformError = 4
        InvalidInputError = 5
        InvalidTypeError = 6
        InitError = 7
        CaughtExceptionError = 8
        # tens: simulation errors
        NoCardError = 11
        IncorrectCardNumberError = 12
        NoMadminerCardError = 13
        NoScriptError = 14
        IncorrectScriptNumberError = 15
        NoDataFileError = 16
        IncorrectDataFileNumberError = 17
        NoProcessedDataFileError = 18
        # twenties: training errors
        NoAugmentedDataFileError = 20
        IncorrectAugmentedDataFileError = 21
        UnknownTrainingModelError = 22
        NoTrainedModelsError = 23
        ExistingModelError = 24
        MultipleMatchingFilesError = 25
        ExistingEvaluationError = 26
        NoEvaluatedModelError = 27
        TorchImportError = 28
        ExistingAugmentedDataFileError = 29
    def __init__(self, name, path):
        """
        Store the run name/path and pre-build config-file path helpers.

        Parameters
        ----------
        name : str
            sample/run name; the working directory becomes ``<path>/<name>``.
        path : str
            parent directory of the working directory.
        """
        self.HAS_TORCH = HAS_TORCH
        self.TORCH_IMPORT_ERROR = TORCH_IMPORT_ERROR
        self.name = name
        self.path = path
        self.dir = self.path + "/" + self.name
        self.log = logging.getLogger(__name__)
        self.module_path = os.path.dirname(__file__)
        self.default_card_directory = "{}/data/cards_tth".format(self.module_path)
        # lazy builders for the various .mmconfig file paths
        self._main_sample_config = lambda: "{}/main_sample.mmconfig".format(self.dir)
        self._augmentation_config = lambda aug_sample_name: "{}/data/samples/{}/augmented_sample.mmconfig".format(
            self.dir, aug_sample_name
        )
        self._training_config = lambda aug_sample_name, training_name: "{}/models/{}/{}/training_model.mmconfig".format(
            self.dir, aug_sample_name, training_name
        )
        self._evaluation_config = lambda training_name, evaluation_name: "{}/evaluations/{}/{}/evaluation.mmconfig".format(
            self.dir, training_name, evaluation_name
        )
def _check_valid_init(self):
"""
Object member function to check for correct initialization
"""
if os.path.exists(self.dir):
return self.error_codes.Success
self.log.error("Init not successful; directory " + self.dir + "does not exist.")
return self.error_codes.InitError
def _check_valid_madminer_ml(self):
if self.HAS_TORCH:
return self.error_codes.Success
self.log.error("Error - pytorch unable to be imported on this machine. Check error message: ")
for err_line in self.TORCH_IMPORT_ERROR.strip('\n').split('\n'):
self.log.error(' % {}'.format(err_line))
return self.error_codes.TorchImportError
def _dir_size(self, pathname, matching_pattern=""):
"""Description:
Helper function returning number of files in pathname matching the given pattern.
Warns if matching pattern is
Parameters:
pathname
string. sampling directory to search.
matching_pattern
string, default empty. file pattern to check files against.
Returns:
int, number of matching files in directory, or -1 if dir does not exist.
"""
if type(matching_pattern) is not list:
matching_pattern = [matching_pattern]
if os.path.exists(pathname):
dir_elts = len(
[
elt
for elt in os.listdir(pathname)
if all([pattern in elt for pattern in matching_pattern])
]
)
return dir_elts
return -1
def _replace_lines(self, infile, line_numbers, line_strings, outfile=None):
"""
# Description
Helper member function for rewriting specific lines in the listed files. used
mostly to setup .dat backend files, etc.
# Parameters
infile: string
path/filename of input file (file to edit)
line_numbers: list of ints
ordered list of line numbers to change
line_strings: list of strings
ordered list of strings to replace, corresponding to the line numbers
in the line_numbers list
outfile: string
path/filename of file to write to (defaults to infile if left blank)
"""
if outfile is None:
outfile = infile
assert len(line_numbers) == len(line_strings)
with open(infile, "r") as file:
lines = file.readlines()
for i, line_number in enumerate(line_numbers):
assert line_number <= len(lines)
lines[line_number - 1] = line_strings[i]
with open(outfile, "w+") as file:
file.writelines(lines)
def _remove_files(self, path_to_clear, include_folder=False, matching_pattern=""):
"""
# description
utility function to remove files in a folder/the folder itself
"""
if os.path.exists(path_to_clear):
if include_folder and matching_pattern is "":
if platform.system() == "Linux":
if include_folder:
cmd = 'rm -r "{}"'.format(path_to_clear)
else:
cmd = 'rm -r "{}"/*'.format(path_to_clear)
elif platform.system() == "Windows":
if include_folder:
cmd = 'rmdir /s "{}"'.format(path_to_clear)
else:
cmd = 'rmdir /s /q "{}"\\'.format(path_to_clear)
else:
return
os.system(cmd)
self.log.info("Removed directory {}".format(path_to_clear))
else:
matching_files = [
elt for elt in os.listdir(path_to_clear) if matching_pattern in elt
]
for f in matching_files:
os.remove(path_to_clear + "/" + f)
self.log.debug("Removing file {}/{}".format(path_to_clear, f))
self.log.info("Removed files from directory {}".format(path_to_clear))
    def _check_directory(
        self, local_pathname, force, pattern="", mkdir_if_not_existing=True
    ):
        """
        # description
        utility function for path checking/validation of data.
        # parameters
        local_pathname: string
            pathname locally (self.dir/<local_pathname>) to directory to trawl
        force: bool
            force deletion/re-instantiation if files already exist
        pattern: string
            file naming scheme to match when checking for files within the directory
        mkdir_if_not_existing: bool
            create the directory if it is missing
        # raises
        FileExistsError: when matching data already exists and force is False
        """
        dirsize = self._dir_size(
            pathname=self.dir + "/" + local_pathname, matching_pattern=pattern
        )
        if dirsize < 0:
            # directory does not exist yet
            if mkdir_if_not_existing:
                os.mkdir(self.dir + "/" + local_pathname)
            else:
                return
        elif dirsize > 0:
            # directory exists and holds matching files
            self.log.warning(
                'existing data in specified directory "{}"'.format(local_pathname)
            )
            if not force:
                raise FileExistsError(
                    "Directory {} not empty of data".format(local_pathname)
                )
            else:
                self.log.info(
                    'Force flag triggered. Removing data in directory "{}".'.format(
                        local_pathname
                    )
                )
                self._remove_files(
                    self.dir + "/" + local_pathname,
                    include_folder=False,
                    matching_pattern=pattern,
                )
def _search_for_paths(self, pathname, include_module_paths=True):
"""
# description
helper function which searchs for paths both within the object's internal directory,
and the greater module directory. order is specified by search order, but could be changed
in a later update - might be a good idea.
# parameters
pathname: string
name of path to search for. can be a word, path, etc.
include_module_paths: bool
switch for extra searches in module main directory, if files not found locally.
default true.
"""
# search first for exact path
if pathname is None:
return None
if os.path.exists(pathname):
ret = pathname
# then, search for local pathnames (raw and /data/)
elif os.path.exists(self.dir + "/" + pathname):
ret = self.dir + pathname
elif os.path.exists(self.dir + "/data/" + pathname):
ret = self.dir + "/data/" + pathname
# check for terminal cwd paths
elif os.path.exists(os.getcwd() + "/" + pathname):
ret = os.getcwd() + "/" + pathname
# last, check default file database (data/cards/, data/backends/, data/, and raw)
elif include_module_paths and os.path.exists(
self.module_path + "/data/backends/" + pathname
):
ret = self.module_path + "/data/backends/" + pathname
elif include_module_paths and os.path.exists(
self.module_path + "/data/" + pathname
):
ret = self.module_path + "/data/" + pathname
elif include_module_paths and os.path.exists(
self.module_path + "/data/cards/" + pathname
):
self.module_path + "/data/cards/" + pathname
elif include_module_paths and os.path.exists(self.module_path + "/" + pathname):
ret = self.module_path + "/" + pathname
else:
self.log.error("Could not find pathname {}".format(pathname))
return None
return ret
# otherwise this doesn't exist
def _string_find_nth(self, string, substring, n):
"""
# description
helper function, finds position of the nth occurence of a substring in a string
# parameters
string: str
string to search for substrings within
substring: str
substring to find within param <string>
n: int
level of occurence of <substring> in <string> to find
return: int
the index of first letter of the nth occurence of substring in string
"""
parts = string.split(substring, n + 1)
if len(parts) <= n + 1:
return -1
return len(string) - len(parts[-1]) - len(substring)
def _get_var_name(self, var):
"""
# description
admittedly suspicious helper function. gets (with varying success)
the name of a variable
# parameters
var: Type
variable of any type at all
return: list[string]
list of possible names for this variable, by instantiation.
"""
callers_local_vars = inspect.currentframe().f_back.f_locals.items()
return [k for k, v in callers_local_vars if v is var]
def _tabulate_comparison_information(
self, r, pers, observables, benchmarks, threshold=2.0
):
strarr = np.round(np.vstack(r), 2).astype(str)
index = np.vstack(r) >= threshold
strarr[index] = np.asarray(
["\033[1;37;41m" + elt + "\033[0;37;40m" for elt in strarr[index]]
)
columns = ["bin # "] + [
"-\n".join(
[
benchmark[10 * i : 10 * (i + 1)]
for i in range(int(len(benchmark) / 10) + 1)
]
)
for obs in observables
for benchmark in benchmarks
]
tab_output = tabulate.tabulate(
np.vstack(
[
np.vstack([[str(i + 1) for i in range(r.shape[2])], strarr]).T,
np.asarray(
[
np.hstack(
[
["failed"],
np.asarray(
[
"{:.1%}".format(elt)
for elt in np.hstack(pers)
]
),
]
)
]
),
]
),
tablefmt="pipe",
headers=columns,
)
header = " " * (self._string_find_nth(tab_output, "|", 1))
for i, obs in enumerate(observables):
header += "| "
header += obs
header += " " * (
self._string_find_nth(tab_output, "|", len(benchmarks) * (i + 1) + 1)
- len(header)
)
| |
# Sprite classes for platform game
import pygame as pg
from settings import *
from random import choice, randint, uniform
vec = pg.math.Vector2
def gimme_gibs(game, pos, qty):
    """Spawn ``qty`` gib particles at ``pos``, each with a random impulse."""
    for _ in range(qty):
        Gib(game, pos, randint(-GIB_IMPULSE, GIB_IMPULSE))
def gimme_player_gibs(game, pos):
    """Spawn one Player_Gib per configured player-gib image at ``pos``."""
    for image in game.player_gibs_imgs:
        Player_Gib(game, pos, randint(-20, 20), image)
def flip_images(images):
    """Return horizontally mirrored copies of every frame in ``images``.

    Note: the parameter was previously named ``list``, shadowing the builtin;
    all call sites in this file pass it positionally.
    """
    return [pg.transform.flip(frame, True, False) for frame in images]
def resize_to_multiplier(image, mult):
    """Return ``image`` scaled up by the integer factor ``mult``."""
    new_size = (image.get_width() * mult, image.get_height() * mult)
    return pg.transform.scale(image, new_size)
def collide_hit_rect(one, two):
    # collision callback: sprite ``one``'s hit_rect vs sprite ``two``'s rect
    return one.hit_rect.colliderect(two.rect)
def collide_hit_rect_mob(one, two):
    # collision callback using the hit_rect of BOTH sprites (mob vs mob/player)
    return one.hit_rect.colliderect(two.hit_rect)
def collide_with_walls(sprite, group, dir):
    """Axis-separated collision response against a wall group.

    Call once per axis after moving on that axis (``dir`` is 'x' or 'y').
    Repositions ``sprite.pos``, zeroes velocity/acceleration on that axis and
    re-syncs ``sprite.hit_rect``. Mobs reverse direction on x-collisions;
    landing on a TempPlatform starts its destruction timer.
    """
    if dir == 'x':
        hits = pg.sprite.spritecollide(sprite, group, False, collide_hit_rect)
        if hits:
            # push the sprite back out on the side it came from
            if hits[0].rect.centerx > sprite.hit_rect.centerx:
                sprite.pos.x = hits[0].rect.left - sprite.hit_rect.width / 2
            if hits[0].rect.centerx < sprite.hit_rect.centerx:
                sprite.pos.x = hits[0].rect.right + sprite.hit_rect.width / 2
            sprite.vel.x = 0
            sprite.acc.x = 0
            sprite.hit_rect.centerx = sprite.pos.x
            if sprite.__class__.__name__ == 'Mob':
                # mobs patrol: bounce off walls
                sprite.direction *= -1
    if dir == 'y':
        hits = pg.sprite.spritecollide(sprite, group, False, collide_hit_rect)
        if hits:
            if hits[0].rect.centery > sprite.hit_rect.centery:
                # landed on top of a platform
                sprite.pos.y = hits[0].rect.top - 0
                sprite.jumping = False
                if type(hits[0]).__name__ == 'TempPlatform' :
                    hits[0].trigger()
            if hits[0].rect.centery < sprite.hit_rect.centery:
                # bumped head on a ceiling
                sprite.pos.y = hits[0].rect.bottom + sprite.hit_rect.height
            sprite.vel.y = 0
            sprite.acc.y = 0
            sprite.hit_rect.bottom = sprite.pos.y
class Player(pg.sprite.Sprite):
    """Keyboard-controlled player: movement, jumping, shooting and animation."""
    def __init__(self, game, x, y):
        self._layer = PLAYER_LAYER
        self.groups = game.all_sprites
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.image = game.player_img
        self.image = resize_to_multiplier(self.image, CHARACTER_SIZE_MULTIPLIER)
        self.rect = self.image.get_rect()
        # collision box is narrower/shorter than the drawn image
        self.hit_rect = pg.Rect(0,0,(self.rect.width * 0.25), (self.rect.height * 0.75))
        self.image_right = self.image
        self.image_left = pg.transform.flip(self.image, True, False)
        self.load_images()
        self.pos = vec(x,y)
        self.rect.midbottom = self.pos
        self.hit_rect.midbottom = self.pos
        self.vel = vec(0,0)
        self.acc = vec(0,0)
        self.jumping = False
        self.walking = False
        self.gun_dir = 'right'
        # timestamps (ms) for fire-rate limiting and animation pacing
        self.last_shot = 0
        self.last_update = 0
        self.current_frame = 0
    def load_images(self):
        """Cache right-facing frames from the game and derive left-facing ones."""
        self.idle_frames_r = self.game.player_idle_imgs
        self.walk_frames_r = self.game.player_walk_imgs
        self.idle_frames_l = flip_images(self.idle_frames_r)
        self.walk_frames_l = flip_images(self.walk_frames_r)
    def jump_cut(self):
        """Shorten the jump when the jump key is released early."""
        if self.jumping:
            if self.vel.y < -3:
                self.vel.y = -3
    def jump(self):
        """Jump only when standing on a wall (probe one pixel below)."""
        self.hit_rect.bottom += 1
        hits = pg.sprite.spritecollide(self, self.game.walls, False, collide_hit_rect)
        self.hit_rect.bottom -= 1
        if hits and not self.jumping:
            self.jumping = True
            self.vel.y -= PLAYER_JUMP
            self.game.jump_snd.play()
    def shoot(self):
        """Fire a bullet in the current gun direction, rate-limited by GUN_RATE."""
        now = pg.time.get_ticks()
        if now - self.last_shot > GUN_RATE:
            self.last_shot = now
            Bullet(self.game, self.pos, self.gun_dir)
            self.game.shoot_snd.play()
    def get_keys(self):
        """Translate held keys into acceleration, gun direction and shooting."""
        self.acc = vec(0,GRAVITY)
        keys = pg.key.get_pressed()
        if keys[pg.K_LEFT] or keys[pg.K_a]:
            self.acc.x = - PLAYER_ACC
            #self.image = self.image_left
            self.gun_dir = 'left'
        if keys[pg.K_RIGHT] or keys[pg.K_d]:
            self.acc.x = PLAYER_ACC
            #self.image = self.image_right
            self.gun_dir = 'right'
        if keys[pg.K_LCTRL] or keys[pg.K_MODE]: #or keys[pg.K_RALT]
            self.shoot()
    def animate(self):
        """Advance the walk/idle animation, keeping the feet anchored."""
        now = pg.time.get_ticks()
        if abs(self.vel.x) >= 1:
            self.walking = True
        else:
            self.walking = False
        # walk animation:
        if self.walking:
            if now - self.last_update > PLAYER_ANIM_SPEED:
                self.last_update = now
                self.current_frame = (self.current_frame +1) % len(self.walk_frames_r)
                bottom = self.rect.bottom
                if self.gun_dir == 'right':
                    self.image = self.walk_frames_r[self.current_frame]
                else:
                    self.image = self.walk_frames_l[self.current_frame]
                self.rect = self.image.get_rect()
                self.rect.bottom = bottom
        # idle animation:
        if not self.walking:
            if now - self.last_update > PLAYER_ANIM_SPEED:
                self.last_update = now
                self.current_frame = (self.current_frame +1) % len(self.idle_frames_r)
                bottom = self.rect.bottom
                if self.gun_dir == 'right':
                    self.image = self.idle_frames_r[self.current_frame]
                else:
                    self.image = self.idle_frames_l[self.current_frame]
                self.rect = self.image.get_rect()
                self.rect.bottom = bottom
    def update(self):
        """Per-frame physics step: input, animation, motion, wall collisions."""
        self.get_keys()
        self.animate()
        #apply friction
        self.acc.x += self.vel.x * PLAYER_FRICTION
        #laws of motion, acceleration is added to velocity.
        #In the x axis ,if the button is not pressed, not change in velocity (except friction)
        self.vel += self.acc
        #makes player stop in case of very low speed (x)
        if abs(self.vel.x) < 0.1:
            self.vel.x = 0
        #update position, v+1/2Gamma (not squared?) and collisions
        # axes are resolved separately: move on y, collide, then move on x
        self.pos.y += self.vel.y + 0.5 * self.acc.y
        self.hit_rect.bottom = self.pos.y
        collide_with_walls(self, self.game.walls, 'y')
        self.rect.bottom = self.hit_rect.bottom
        self.pos.x += self.vel.x + 0.5 * self.acc.x
        self.hit_rect.centerx = self.pos.x
        collide_with_walls(self, self.game.walls, 'x')
        self.rect.centerx = self.hit_rect.centerx
class Obstacle(pg.sprite.Sprite):
    """Solid (green) wall tile; member of ``game.walls`` only (not drawn via all_sprites)."""

    def __init__(self, game, x, y, w, h):
        self.groups = game.walls
        pg.sprite.Sprite.__init__(self, self.groups)
        surface = pg.Surface((w, h))
        surface.fill(GREEN)
        self.image = surface
        self.rect = surface.get_rect()
        self.rect.topleft = (x, y)
class MobWall(pg.sprite.Sprite):
    """Invisible (yellow) barrier that only mobs collide with; member of ``game.mob_walls``."""

    def __init__(self, game, x, y, w, h):
        self.groups = game.mob_walls
        pg.sprite.Sprite.__init__(self, self.groups)
        surface = pg.Surface((w, h))
        surface.fill(YELLOW)
        self.image = surface
        self.rect = surface.get_rect()
        self.rect.topleft = (x, y)
class TempPlatform(pg.sprite.Sprite):
    """Platform that crumbles shortly after the player lands on it."""
    def __init__(self, game, x, y, w, h):
        self.groups = game.all_sprites, game.walls
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.image = self.game.temp_plat_img
        self.image = resize_to_multiplier(self.image, TILE_SIZE_MULTIPLIER)
        self.rect = self.image.get_rect()
        self.rect.topleft = (x, y)
        self.destruction_imgs = self.game.temp_plat_dest_imgs
        self.touched = False
        self.touched_time = pg.time.get_ticks()
        # solid lifetime once touched (4 animation intervals)
        self.duration = PLAT_DEST__ANIM_SPEED * 4
        self.last_update = 0
        self.current_frame = 0
    def update(self):
        """Advance the crumble animation once triggered; stop being solid at the end."""
        if self.touched:
            now = pg.time.get_ticks()
            if now - self.last_update > PLAT_DEST__ANIM_SPEED:
                if self.current_frame == len(self.destruction_imgs)-1 :
                    # final frame shown: remove the sprite entirely
                    self.kill()
                else:
                    self.last_update = now
                    self.current_frame = (self.current_frame +1) % len(self.destruction_imgs)
                    top = self.rect.midtop
                    self.image = self.destruction_imgs[self.current_frame]
                    self.rect = self.image.get_rect()
                    self.rect.midtop = top
            if pg.time.get_ticks() - self.touched_time > self.duration:
                # stop being solid before the sprite disappears
                self.game.walls.remove(self)
                #self.kill()
    def trigger(self):
        """Start the destruction countdown on first touch only."""
        if not self.touched:
            self.touched_time = pg.time.get_ticks()
            self.touched = True
class Spike(pg.sprite.Sprite):
    """Deadly (red) hazard rectangle; member of ``game.spikes`` only."""

    def __init__(self, game, x, y, w, h):
        self.groups = game.spikes
        pg.sprite.Sprite.__init__(self, self.groups)
        surface = pg.Surface((w, h))
        surface.fill(RED)
        self.image = surface
        self.rect = surface.get_rect()
        self.rect.topleft = (x, y)
class Mob(pg.sprite.Sprite):
    """Patrolling enemy: walks under gravity and reverses direction at walls."""
    def __init__(self, game, x, y):
        self._layer = MOB_LAYER
        self.groups = game.mobs, game.all_sprites
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.image = game.mob_img
        self.image = resize_to_multiplier(self.image, CHARACTER_SIZE_MULTIPLIER)
        self.rect = self.image.get_rect()
        # collision box is narrower/shorter than the drawn image
        self.hit_rect = pg.Rect(0,0,(self.rect.width * 0.25), (self.rect.height * 0.75))
        self.image_left = self.image
        self.image_right = pg.transform.flip(self.image, True, False)
        self.pos = vec(x,y)
        self.rect.midbottom = self.pos
        self.hit_rect.midbottom = self.pos
        self.vel = vec(0,0)
        self.acc = vec(0,0)
        # -1 walks left, +1 walks right (flipped by collide_with_walls)
        self.direction = -1
        self.walk_frames_l = self.game.mob_walk_imgs
        self.walk_frames_r = flip_images(self.walk_frames_l)
        self.last_update = 0
        self.current_frame = 0
    def animate(self):
        """Advance the walk animation, facing the direction of motion."""
        now = pg.time.get_ticks()
        # walk animation:
        if now - self.last_update > MOB_ANIM_SPEED:
            self.last_update = now
            self.current_frame = (self.current_frame +1) % len(self.walk_frames_r)
            bottom = self.rect.bottom
            if self.vel.x > 0:
                self.image = self.walk_frames_r[self.current_frame]
            else:
                self.image = self.walk_frames_l[self.current_frame]
            self.rect = self.image.get_rect()
            self.rect.bottom = bottom
    def update(self):
        """Per-frame physics step mirroring Player.update, driven by ``direction``."""
        self.animate()
        self.acc = vec(0,GRAVITY)
        self.acc.x += MOB_ACC * self.direction
        self.acc.x += self.vel.x * PLAYER_FRICTION
        #laws of motion, acceleration is added to velocity.
        #In the x axis ,if the button is not pressed, not change in velocity (except friction)
        self.vel += self.acc
        #makes player stop in case of very low speed (x)
        if abs(self.vel.x) < 0.1:
            self.vel.x = 0
        #update position, v+1/2Gamma (not squared?) and collisions
        # mobs collide with both mob-only barriers and regular walls
        self.pos.y += self.vel.y + 0.5 * self.acc.y
        self.hit_rect.bottom = self.pos.y
        collide_with_walls(self, self.game.mob_walls, 'y')
        collide_with_walls(self, self.game.walls, 'y')
        self.rect.bottom = self.hit_rect.bottom
        self.pos.x += self.vel.x + 0.5 * self.acc.x
        self.hit_rect.centerx = self.pos.x
        collide_with_walls(self, self.game.mob_walls, 'x')
        collide_with_walls(self, self.game.walls, 'x')
        self.rect.centerx = self.hit_rect.centerx
        #flips image based on velocity
        # if self.vel.x > 0:
        #     self.image = self.image_right
        # if self.vel.x < 0:
        #     self.image = self.image_left
class Bullet(pg.sprite.Sprite):
    """Projectile fired by the player; flies horizontally until it hits a wall or expires."""
    def __init__(self, game, pos, direction):
        self._layer = BULLET_LAYER
        self.groups = game.bullets, game.all_sprites
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.image = game.bullet_img
        self.image = resize_to_multiplier(self.image, CHARACTER_SIZE_MULTIPLIER)
        self.vel = BULLET_SPEED
        if direction == 'left':
            self.image = pg.transform.flip(self.image, True, False)
            self.vel = -BULLET_SPEED
        self.rect = self.image.get_rect()
        self.pos = vec(pos)
        # spawn at the gun muzzle; vel's sign mirrors the x-offset
        self.pos.y = self.pos.y + MUZZLE_OFFSET.y
        self.pos.x = self.pos.x + (self.vel/BULLET_SPEED) * MUZZLE_OFFSET.x
        self.rect.center = self.pos
        self.spawn_time = pg.time.get_ticks()
        MuzzleFlash(self.game, self.pos, direction)
    def update(self):
        """Move horizontally; die on wall contact or after BULLET_LIFETIME ms."""
        self.pos.x += self.vel
        self.rect.center = self.pos
        if pg.sprite.spritecollideany(self, self.game.walls):
            self.kill()
        if pg.time.get_ticks() - self.spawn_time > BULLET_LIFETIME:
            self.kill()
class Teleport(pg.sprite.Sprite):
    """Trigger zone that records a destination; member of ``game.teleports`` only.

    NOTE(review): this class uses EFFECT_LAYER while MuzzleFlash uses
    EFFECTS_LAYER -- confirm both constants exist in settings.
    """

    def __init__(self, game, x, y, w, h, dest):
        self._layer = EFFECT_LAYER
        self.groups = game.teleports
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        surface = pg.Surface((w, h))
        surface.fill(RED)
        self.image = surface
        self.rect = pg.Rect(x, y, w, h)
        # raw coordinates kept alongside the rect (redundant, preserved as-is)
        self.x = x
        self.y = y
        self.rect.x = x
        self.rect.y = y
        self.destination = dest
class MuzzleFlash(pg.sprite.Sprite):
    """Short-lived flash drawn at the gun muzzle when a bullet spawns."""
    def __init__(self, game, pos, direction):
        # NOTE(review): uses EFFECTS_LAYER while Teleport uses EFFECT_LAYER --
        # confirm both constants exist in settings.
        self._layer = EFFECTS_LAYER
        self.groups = game.all_sprites
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.pos = pos
        # random flash size for variety
        size = randint(3,6)
        self.image = resize_to_multiplier(self.game.flash_img, size)
        self.rect = self.image.get_rect()
        if direction == 'left':
            self.image = pg.transform.flip(self.image, True, False)
            self.rect.midright = self.pos
        else:
            self.rect.midleft = self.pos
        self.spawn_time = pg.time.get_ticks()
    def update(self):
        # flash lives for 50 ms
        if pg.time.get_ticks() - self.spawn_time > 50:
            self.kill()
class BloodSplatter(pg.sprite.Sprite):
    """Randomly flipped blood decal that disappears after BLOOD_DURATION ms."""
    def __init__(self, game, pos):
        self._layer = BLOOD_LAYER
        self.groups = game.all_sprites
        pg.sprite.Sprite.__init__(self, self.groups)
        self.game = game
        self.pos = pos
        # random blood image, randomly mirrored on both axes
        self.image = pg.transform.flip(choice(self.game.bloods_imgs),
            choice([True, False]), choice([True, False]))
        self.rect = self.image.get_rect()
        self.rect.center = self.pos
        self.spawn_time = pg.time.get_ticks()
        #self.current_size = 1
        #self.last_update = pg.time.get_ticks()
    def update(self):
        if pg.time.get_ticks() - self.spawn_time > BLOOD_DURATION:
            self.kill()
# Failed attempt to animate blood, small pixles don't look good
# now = pg.time.get_ticks()
# if now - self.last_update > 25:
# center = self.rect.center
# self.last_update = now
# self.image = pg.transform.scale(self.image, (self.image.get_width() * self.current_size,
# self.image.get_height() * self.current_size))
# self.current_size += 1
# self.rect = self.image.get_rect()
# self.rect.center = | |
-
(9554264*mbkin**2*(1 - mckin/mbkin)**10)/(1964655*mckin**2) -
(64688*mbkin*(1 - mckin/mbkin)**10)/(76545*mckin) +
(64688*mckin*(1 - mckin/mbkin)**10)/(76545*mbkin) -
(9554264*mckin**2*(1 - mckin/mbkin)**10)/(1964655*mbkin**2) +
(577664*(1 - mckin/mbkin)**11)/56133 -
(288832*mbkin**2*(1 - mckin/mbkin)**11)/(56133*mckin**2) -
(16850608*mbkin*(1 - mckin/mbkin)**11)/(21611205*mckin) +
(16850608*mckin*(1 - mckin/mbkin)**11)/(21611205*mbkin) -
(288832*mckin**2*(1 - mckin/mbkin)**11)/(56133*mbkin**2) +
(37786880*(1 - mckin/mbkin)**12)/3440151 -
(18893440*mbkin**2*(1 - mckin/mbkin)**12)/(3440151*mckin**2) -
(640768*mbkin*(1 - mckin/mbkin)**12)/(841995*mckin) +
(640768*mckin*(1 - mckin/mbkin)**12)/(841995*mbkin) -
(18893440*mckin**2*(1 - mckin/mbkin)**12)/(3440151*mbkin**2) +
(680271352*(1 - mckin/mbkin)**13)/57972915 -
(340135676*mbkin**2*(1 - mckin/mbkin)**13)/(57972915*mckin**2) -
(168874624*mbkin*(1 - mckin/mbkin)**13)/(223609815*mckin) +
(168874624*mckin*(1 - mckin/mbkin)**13)/(223609815*mbkin) -
(340135676*mckin**2*(1 - mckin/mbkin)**13)/(57972915*mbkin**2) +
(435369184*(1 - mckin/mbkin)**14)/34783749 -
(217684592*mbkin**2*(1 - mckin/mbkin)**14)/(34783749*mckin**2) -
(306181756*mbkin*(1 - mckin/mbkin)**14)/(405810405*mckin) +
(306181756*mckin*(1 - mckin/mbkin)**14)/(405810405*mbkin) -
(217684592*mckin**2*(1 - mckin/mbkin)**14)/(34783749*mbkin**2) +
(34745241056*(1 - mckin/mbkin)**15)/2608781175 -
(17372620528*mbkin**2*(1 - mckin/mbkin)**15)/(2608781175*mckin**2) -
(1972500064*mbkin*(1 - mckin/mbkin)**15)/(2608781175*mckin) +
(1972500064*mckin*(1 - mckin/mbkin)**15)/(2608781175*mbkin) -
(17372620528*mckin**2*(1 - mckin/mbkin)**15)/(2608781175*mbkin**2) +
(1462595593171*(1 - mckin/mbkin)**16)/103481653275 -
(1462595593171*mbkin**2*(1 - mckin/mbkin)**16)/(206963306550*
mckin**2) - (1979593631*mbkin*(1 - mckin/mbkin)**16)/
(2608781175*mckin) + (1979593631*mckin*(1 - mckin/mbkin)**16)/
(2608781175*mbkin) - (1462595593171*mckin**2*(1 - mckin/mbkin)**16)/
(206963306550*mbkin**2) + (17542717714153*(1 - mckin/mbkin)**17)/
1172792070450 - (17542717714153*mbkin**2*(1 - mckin/mbkin)**17)/
(2345584140900*mckin**2) - (1340564003491*mbkin*(1 - mckin/mbkin)**
17)/(1759188105675*mckin) + (1340564003491*mckin*
(1 - mckin/mbkin)**17)/(1759188105675*mbkin) -
(17542717714153*mckin**2*(1 - mckin/mbkin)**17)/(2345584140900*
mbkin**2) + (24672660896281*(1 - mckin/mbkin)**18)/1562707356210 -
(24672660896281*mbkin**2*(1 - mckin/mbkin)**18)/(3125414712420*
mckin**2) - (48474483141979*mbkin*(1 - mckin/mbkin)**18)/
(63330771804300*mckin) + (48474483141979*mckin*(1 - mckin/mbkin)**
18)/(63330771804300*mbkin) - (24672660896281*mckin**2*
(1 - mckin/mbkin)**18)/(3125414712420*mbkin**2) -
(22826272832761*mbkin*(1 - mckin/mbkin)**19)/(29691439767990*mckin) +
(22826272832761*mckin*(1 - mckin/mbkin)**19)/(29691439767990*mbkin) +
(3962*np.pi**2)/81 + (32*mbkin**2*np.pi**2)/(9*mckin**2) +
(1024*mbkin*np.pi**2)/(81*mckin) - (4096*mckin*np.pi**2)/(243*mbkin) -
(20960*mckin**2*np.pi**2)/(243*mbkin**2) - (2048*mckin**3*np.pi**2)/
(81*mbkin**3) + (3056*mckin**4*np.pi**2)/(81*mbkin**4) +
(4096*mckin**5*np.pi**2)/(81*mbkin**5) - (1792*mckin**6*np.pi**2)/
(81*mbkin**6) - (5120*mckin**7*np.pi**2)/(243*mbkin**7) +
(4418*mckin**8*np.pi**2)/(243*mbkin**8) +
(7808*(-17 + (16*mckin**2)/mbkin**2 + (12*mckin**4)/mbkin**4 -
(16*mckin**6)/mbkin**6 + (5*mckin**8)/mbkin**8 -
12*np.log(mckin**2/mbkin**2)))/243 +
(256*mbkin*(-17 + (16*mckin**2)/mbkin**2 + (12*mckin**4)/mbkin**4 -
(16*mckin**6)/mbkin**6 + (5*mckin**8)/mbkin**8 -
12*np.log(mckin**2/mbkin**2)))/(729*mckin) -
(256*np.pi**2*(-17 + (16*mckin**2)/mbkin**2 + (12*mckin**4)/mbkin**4 -
(16*mckin**6)/mbkin**6 + (5*mckin**8)/mbkin**8 -
12*np.log(mckin**2/mbkin**2)))/243 - (30848*np.log(mckin**2/mbkin**2))/27 -
(8192*mckin*np.log(mckin**2/mbkin**2))/(27*mbkin) -
(30464*mckin**2*np.log(mckin**2/mbkin**2))/(27*mbkin**2) +
(8192*mckin**3*np.log(mckin**2/mbkin**2))/(81*mbkin**3) +
(177328*mckin**4*np.log(mckin**2/mbkin**2))/(81*mbkin**4) +
32*np.pi**2*np.log(mckin**2/mbkin**2) + (32*mckin**2*np.pi**2*
np.log(mckin**2/mbkin**2))/mbkin**2 - (56*mckin**4*np.pi**2*
np.log(mckin**2/mbkin**2))/mbkin**4 +
(14480*(1 - (8*mckin**2)/mbkin**2 + (8*mckin**6)/mbkin**6 -
mckin**8/mbkin**8 - (12*mckin**4*np.log(mckin**2/mbkin**2))/mbkin**4))/
81 + (8*mbkin**2*(1 - (8*mckin**2)/mbkin**2 + (8*mckin**6)/mbkin**6 -
mckin**8/mbkin**8 - (12*mckin**4*np.log(mckin**2/mbkin**2))/mbkin**4))/
(9*mckin**2) - (40*np.pi**2*(1 - (8*mckin**2)/mbkin**2 +
(8*mckin**6)/mbkin**6 - mckin**8/mbkin**8 -
(12*mckin**4*np.log(mckin**2/mbkin**2))/mbkin**4))/9 -
(16384*(1 - mckin/mbkin)**5*(np.log(2) + np.log(1 - mckin/mbkin)))/135 +
(8192*mbkin**2*(1 - mckin/mbkin)**5*(np.log(2) + np.log(1 - mckin/mbkin)))/
(135*mckin**2) + (8192*mckin**2*(1 - mckin/mbkin)**5*
(np.log(2) + np.log(1 - mckin/mbkin)))/(135*mbkin**2) +
(32768*(1 - mckin/mbkin)**6*(np.log(2) + np.log(1 - mckin/mbkin)))/405 -
(16384*mbkin**2*(1 - mckin/mbkin)**6*(np.log(2) + np.log(1 - mckin/mbkin)))/
(405*mckin**2) + (8192*mbkin*(1 - mckin/mbkin)**6*
(np.log(2) + np.log(1 - mckin/mbkin)))/(405*mckin) -
(8192*mckin*(1 - mckin/mbkin)**6*(np.log(2) + np.log(1 - mckin/mbkin)))/
(405*mbkin) - (16384*mckin**2*(1 - mckin/mbkin)**6*
(np.log(2) + np.log(1 - mckin/mbkin)))/(405*mbkin**2) -
(16384*(1 - mckin/mbkin)**7*(np.log(2) + np.log(1 - mckin/mbkin)))/945 +
(8192*mbkin**2*(1 - mckin/mbkin)**7*(np.log(2) + np.log(1 - mckin/mbkin)))/
(945*mckin**2) - (32768*mbkin*(1 - mckin/mbkin)**7*
(np.log(2) + np.log(1 - mckin/mbkin)))/(2835*mckin) +
(32768*mckin*(1 - mckin/mbkin)**7*(np.log(2) + np.log(1 - mckin/mbkin)))/
(2835*mbkin) + (8192*mckin**2*(1 - mckin/mbkin)**7*
(np.log(2) + np.log(1 - mckin/mbkin)))/(945*mbkin**2) -
(2048*(1 - mckin/mbkin)**8*(np.log(2) + np.log(1 - mckin/mbkin)))/189 +
(1024*mbkin**2*(1 - mckin/mbkin)**8*(np.log(2) + np.log(1 - mckin/mbkin)))/
(189*mckin**2) + (2048*mbkin*(1 - mckin/mbkin)**8*
(np.log(2) + np.log(1 - mckin/mbkin)))/(945*mckin) -
(2048*mckin*(1 - mckin/mbkin)**8*(np.log(2) + np.log(1 - mckin/mbkin)))/
(945*mbkin) + (1024*mckin**2*(1 - mckin/mbkin)**8*
(np.log(2) + np.log(1 - mckin/mbkin)))/(189*mbkin**2) -
(19456*(1 - mckin/mbkin)**9*(np.log(2) + np.log(1 - mckin/mbkin)))/1701 +
(9728*mbkin**2*(1 - mckin/mbkin)**9*(np.log(2) + np.log(1 - mckin/mbkin)))/
(1701*mckin**2) + (2048*mbkin*(1 - mckin/mbkin)**9*
(np.log(2) + np.log(1 - mckin/mbkin)))/(1701*mckin) -
(2048*mckin*(1 - mckin/mbkin)**9*(np.log(2) + np.log(1 - mckin/mbkin)))/
(1701*mbkin) + (9728*mckin**2*(1 - mckin/mbkin)**9*
(np.log(2) + np.log(1 - mckin/mbkin)))/(1701*mbkin**2) -
(1024*(1 - mckin/mbkin)**10*(np.log(2) + np.log(1 - mckin/mbkin)))/81 +
(512*mbkin**2*(1 - mckin/mbkin)**10*(np.log(2) + np.log(1 - mckin/mbkin)))/
(81*mckin**2) + (9728*mbkin*(1 - mckin/mbkin)**10*
(np.log(2) + np.log(1 - mckin/mbkin)))/(8505*mckin) -
(9728*mckin*(1 - mckin/mbkin)**10*(np.log(2) + np.log(1 - mckin/mbkin)))/
(8505*mbkin) + (512*mckin**2*(1 - mckin/mbkin)**10*
(np.log(2) + np.log(1 - mckin/mbkin)))/(81*mbkin**2) -
(5632*(1 - mckin/mbkin)**11*(np.log(2) + np.log(1 - mckin/mbkin)))/405 +
(2816*mbkin**2*(1 - mckin/mbkin)**11*(np.log(2) + np.log(1 - mckin/mbkin)))/
(405*mckin**2) + (1024*mbkin*(1 - mckin/mbkin)**11*
(np.log(2) + np.log(1 - mckin/mbkin)))/(891*mckin) -
(1024*mckin*(1 - mckin/mbkin)**11*(np.log(2) + np.log(1 - mckin/mbkin)))/
(891*mbkin) + (2816*mckin**2*(1 - mckin/mbkin)**11*
(np.log(2) + np.log(1 - mckin/mbkin)))/(405*mbkin**2) -
(202624*(1 - mckin/mbkin)**12*(np.log(2) + np.log(1 - mckin/mbkin)))/
13365 + (101312*mbkin**2*(1 - mckin/mbkin)**12*
(np.log(2) + np.log(1 - mckin/mbkin)))/(13365*mckin**2) +
(1408*mbkin*(1 - mckin/mbkin)**12*(np.log(2) + np.log(1 - mckin/mbkin)))/
(1215*mckin) - (1408*mckin*(1 - mckin/mbkin)**12*
(np.log(2) + np.log(1 - mckin/mbkin)))/(1215*mbkin) +
(101312*mckin**2*(1 - mckin/mbkin)**12*(np.log(2) +
np.log(1 - mckin/mbkin)))/(13365*mbkin**2) -
(189952*(1 - mckin/mbkin)**13*(np.log(2) + np.log(1 - mckin/mbkin)))/
11583 + (94976*mbkin**2*(1 - mckin/mbkin)**13*
(np.log(2) + np.log(1 - mckin/mbkin)))/(11583*mckin**2) +
(202624*mbkin*(1 - mckin/mbkin)**13*(np.log(2) + np.log(1 - mckin/mbkin)))/
(173745*mckin) - (202624*mckin*(1 - mckin/mbkin)**13*
(np.log(2) + np.log(1 - mckin/mbkin)))/(173745*mbkin) +
(94976*mckin**2*(1 - mckin/mbkin)**13*(np.log(2) + np.log(1 - mckin/mbkin)))/
(11583*mbkin**2) - (1428992*(1 - mckin/mbkin)**14*
(np.log(2) + np.log(1 - mckin/mbkin)))/81081 +
(714496*mbkin**2*(1 - mckin/mbkin)**14*(np.log(2) +
np.log(1 - mckin/mbkin)))/(81081*mckin**2) +
(13568*mbkin*(1 - mckin/mbkin)**14*(np.log(2) + np.log(1 - mckin/mbkin)))/
(11583*mckin) - (13568*mckin*(1 - mckin/mbkin)**14*
(np.log(2) + np.log(1 - mckin/mbkin)))/(11583*mbkin) +
(714496*mckin**2*(1 - mckin/mbkin)**14*(np.log(2) +
np.log(1 - mckin/mbkin)))/(81081*mbkin**2) -
(22912768*(1 - mckin/mbkin)**15*(np.log(2) + np.log(1 - mckin/mbkin)))/
1216215 + (11456384*mbkin**2*(1 - mckin/mbkin)**15*
(np.log(2) + np.log(1 - mckin/mbkin)))/(1216215*mckin**2) +
(1428992*mbkin*(1 - mckin/mbkin)**15*(np.log(2) + np.log(1 - mckin/mbkin)))/
(1216215*mckin) - (1428992*mckin*(1 - mckin/mbkin)**15*
(np.log(2) + np.log(1 - mckin/mbkin)))/(1216215*mbkin) +
(11456384*mckin**2*(1 - mckin/mbkin)**15*(np.log(2) +
np.log(1 - mckin/mbkin)))/(1216215*mbkin**2) -
(2709104*(1 - mckin/mbkin)**16*(np.log(2) + np.log(1 - mckin/mbkin)))/
135135 + (1354552*mbkin**2*(1 - mckin/mbkin)**16*
(np.log(2) + np.log(1 - mckin/mbkin)))/(135135*mckin**2) +
(1432048*mbkin*(1 - mckin/mbkin)**16*(np.log(2) + np.log(1 - mckin/mbkin)))/
(1216215*mckin) - (1432048*mckin*(1 - mckin/mbkin)**16*
(np.log(2) + np.log(1 - mckin/mbkin)))/(1216215*mbkin) +
(1354552*mckin**2*(1 - mckin/mbkin)**16*(np.log(2) +
np.log(1 - mckin/mbkin)))/(135135*mbkin**2) -
(6973984*(1 - mckin/mbkin)**17*(np.log(2) + np.log(1 - mckin/mbkin)))/
328185 + (3486992*mbkin**2*(1 - mckin/mbkin)**17*
(np.log(2) + np.log(1 - mckin/mbkin)))/(328185*mckin**2) +
(2709104*mbkin*(1 - mckin/mbkin)**17*(np.log(2) + np.log(1 - mckin/mbkin)))/
(2297295*mckin) - (2709104*mckin*(1 - mckin/mbkin)**17*
(np.log(2) + np.log(1 - mckin/mbkin)))/(2297295*mbkin) +
(3486992*mckin**2*(1 - mckin/mbkin)**17*(np.log(2) +
np.log(1 - mckin/mbkin)))/(328185*mbkin**2) -
(1205584*(1 - mckin/mbkin)**18*(np.log(2) + np.log(1 - mckin/mbkin)))/
53703 + (602792*mbkin**2*(1 - mckin/mbkin)**18*
(np.log(2) + np.log(1 - mckin/mbkin)))/(53703*mckin**2) +
(3486992*mbkin*(1 - mckin/mbkin)**18*(np.log(2) + np.log(1 - mckin/mbkin)))/
(2953665*mckin) - (3486992*mckin*(1 - mckin/mbkin)**18*
(np.log(2) + np.log(1 - mckin/mbkin)))/(2953665*mbkin) +
(602792*mckin**2*(1 - mckin/mbkin)**18*(np.log(2) +
np.log(1 - mckin/mbkin)))/(53703*mbkin**2) +
(1205584*mbkin*(1 - mckin/mbkin)**19*(np.log(2) + np.log(1 - mckin/mbkin)))/
(1020357*mckin) - (1205584*mckin*(1 - mckin/mbkin)**19*
(np.log(2) + np.log(1 - mckin/mbkin)))/(1020357*mbkin) +
(16384*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/189 -
(8192*mbkin*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(567*mckin) -
(16384*mckin*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/(81*mbkin) +
(16384*mckin**2*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/
(81*mbkin**2) - (16384*mckin**4*(1 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(81*mbkin**4) +
(16384*mckin**5*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/
(81*mbkin**5) - (16384*mckin**6*(1 + 7*np.log(2) +
7*np.log(1 - mckin/mbkin)))/(189*mbkin**6) +
(8192*mckin**7*(1 + 7*np.log(2) + 7*np.log(1 - mckin/mbkin)))/
(567*mbkin**7) - (4096*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/81 +
(4096*mbkin*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/(567*mckin) +
(81920*mckin*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/(567*mbkin) -
(16384*mckin**2*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(81*mbkin**2) + (8192*mckin**3*(1 + 8*np.log(2) +
8*np.log(1 - mckin/mbkin)))/(81*mbkin**3) +
(8192*mckin**4*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(81*mbkin**4) - (16384*mckin**5*(1 + 8*np.log(2) +
8*np.log(1 - mckin/mbkin)))/(81*mbkin**5) +
(81920*mckin**6*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(567*mbkin**6) - (4096*mckin**7*(1 + 8*np.log(2) +
8*np.log(1 - mckin/mbkin)))/(81*mbkin**7) +
(4096*mckin**8*(1 + 8*np.log(2) + 8*np.log(1 - mckin/mbkin)))/
(567*mbkin**8) + (16384*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
1701 - (2048*mbkin*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(1701*mckin) - (2048*mckin*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(63*mbkin) + (32768*mckin**2*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(567*mbkin**2) -
(4096*mckin**3*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(81*mbkin**3) + (4096*mckin**5*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(81*mbkin**5) -
(32768*mckin**6*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(567*mbkin**6) + (2048*mckin**7*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(63*mbkin**7) -
(16384*mckin**8*(1 + 9*np.log(2) + 9*np.log(1 - mckin/mbkin)))/
(1701*mbkin**8) + (2048*mckin**9*(1 + 9*np.log(2) +
9*np.log(1 - mckin/mbkin)))/(1701*mbkin**9) +
(1024*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/189 -
(1024*mbkin*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(1701*mckin) - (5120*mckin*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(243*mbkin) +
(25600*mckin**2*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(567*mbkin**2) - (10240*mckin**3*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(189*mbkin**3) +
(2048*mckin**4*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(81*mbkin**4) + (2048*mckin**5*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(81*mbkin**5) -
(10240*mckin**6*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(189*mbkin**6) + (25600*mckin**7*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(567*mbkin**7) -
(5120*mckin**8*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(243*mbkin**8) + (1024*mckin**9*(1 + 10*np.log(2) +
10*np.log(1 - mckin/mbkin)))/(189*mbkin**9) -
(1024*mckin**10*(1 + 10*np.log(2) + 10*np.log(1 - mckin/mbkin)))/
(1701*mbkin**10) + (97280*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/
18711 - (9728*mbkin*(1 + 11*np.log(2) + 11*np.log(1 - mckin/mbkin)))/
(18711*mckin) | |
# repository: apivideo/api.video-python
import click, json, os, re, csv
import apivideo
from apivideo.apis import VideosApi
from apivideo.apis import UploadTokensApi
from apivideo.apis import LiveStreamsApi
from apivideo.apis import CaptionsApi
from apivideo.apis import PlayerThemesApi
from apivideo.apis import RawStatisticsApi
from apivideo.apis import WebhooksApi
from apivideo.exceptions import ApiAuthException
class ApiKey(click.ParamType):
    """Click parameter type that validates an api.video API key.

    A valid key is exactly 43 characters drawn from [0-9a-zA-Z].
    """
    name = 'api-key'
    def convert(self, value, param, ctx):
        # BUG FIX: the original computed re.match into an unused variable and
        # never checked it, so any 43-character string was accepted. Validate
        # both the length and the character set for real.
        if len(value) != 43 or not re.fullmatch(r'[0-9a-zA-Z]{43}', value):
            self.fail(
                f'{value} is not a 43-character alphanumeric string.',
                param,
                ctx,
            )
        return value
def setClient(api_key):
    """Build an authenticated api.video client from the given API key."""
    return apivideo.AuthenticatedApiClient(api_key)
@click.group()
@click.option(
    '--api-key', '-a', type=ApiKey(),
    help="""Your api.video API key.""",
)
@click.option(
    '--config-file', '-c',
    type=click.Path(),
    default='~/.av_commandline.cfg',
)
@click.pass_context
def main(ctx, api_key, config_file):
    """Resolve the API key (command-line flag first, then the config file)
    and stash it, together with the expanded config path, on the click
    context for the sub-commands."""
    filename = os.path.expanduser(config_file)
    if not api_key and os.path.exists(filename):
        with open(filename) as cfg:
            # strip() guards against a trailing newline added by text editors
            # when the key file was edited by hand.
            api_key = cfg.read().strip()
    ctx.obj = {
        'api_key': api_key,
        'config_file': filename,
    }
@main.command()
@click.pass_context
def config(ctx):
    """
    Store the api key in a file for use with the api.video API.
    """
    # Pre-fill the prompt with whatever key is currently on the context.
    key = click.prompt(
        "Please enter your api.video API key. We'll display what we have on file.",
        default=ctx.obj.get('api_key', '')
    )
    with open(ctx.obj['config_file'], 'w') as cfg:
        cfg.write(key)
# VIDEOS
## list videos
@main.command()
@click.option('--payload', default='{}', help="""Add a JSON dictionary containing all the search features you want to use. The format would be: '{"title":"My Title"}'""")
@click.pass_context
def listvideos(ctx, payload):
    """
    List all videos in your account, or use a filtering option. All choices use snake case. For documentation
    containing camelcase for parameters, change them to snake case. Choices: \n
    * title - string, title of video \n
    * tags - list \n
    * metadata - list of key:value pairs \n
    * description - string \n
    * live_stream_id - ID for the live stream that created the video \n
    * sort_by - string (asc or desc) \n
    * current_page - integer \n
    * page_size - integer \n
    """
    api_key = ctx.obj['api_key']
    # BUG FIX: the default used to be the dict {} which json.loads() rejects
    # with a TypeError; the default must be the JSON *string* '{}'.
    kwargs = json.loads(payload)
    client = setClient(api_key)
    client.connect()
    # VideosApi is already imported at module level; no local re-import needed.
    videos_api = VideosApi(client)
    if kwargs:
        videos = videos_api.list(**kwargs)
    else:
        videos = videos_api.list()
    click.echo(videos)
## upload video
@main.command()
@click.option('--payload', default='{"title":"video upload CLI", "description":"default info"}')
@click.argument('filepath')
@click.pass_context
def uploadvideo(ctx, payload, filepath):
    """
    Upload a video to your account and provide info with a dictionary string. All choices use snake
    case. If you're using documentation showing camel case, convert it to snake case for this tool.
    You must provide the path to the file and a title for the upload. Everything else is optional.
    This command combines creating the video container and uploading the video, so you don't need to have
    the video_id. \n
    * title - string
    * description - string
    * source - string
    * public - boolean
    * panoramic - boolean
    * mp4Support - boolean
    * player_id - string
    * tags - list of strings
    * metadata - list of key:value pairs
    * published_at - date-time
    """
    api_key = ctx.obj['api_key']
    client = setClient(api_key)
    client.connect()
    videos_api = VideosApi(client)
    video_create_payload = json.loads(payload)
    # Create the container for your video and print the response
    response = videos_api.create(video_create_payload)
    print("Video Container", response)
    # Retrieve the video ID, you can upload once to a video ID
    video_id = response["video_id"]
    # Use a context manager so the file handle is closed even if the upload
    # raises (the original leaked the open file).
    with open(filepath, "rb") as file:
        video_response = videos_api.upload(video_id, file)
    print("Video Upload Info", video_response)
## bulk upload
@main.command()
@click.argument('vidfolderpath')
@click.argument('pathtocsv')
@click.pass_context
def bulkdetailupload(ctx, vidfolderpath, pathtocsv):
    """
    Upload in bulk. Provide the path to the folder containing your videos. Provide a path to a .csv file
    with header filename,title,description,public,panoramic,mp4_support,player_id
    For file name, put the complete name of the file as it appears in the folder containing your videos.
    This command will read the file, add all the details for each video and upload them from the specified folder
    using the information contained in the csv.
    """
    api_key = ctx.obj['api_key']
    client = setClient(api_key)
    client.connect()
    videos_api = VideosApi(client)

    def _to_bool(text):
        # Any cell containing "false" (now case-insensitive, so "FALSE" works
        # too) maps to False; everything else — including blanks — maps to
        # True, matching the original behavior.
        return 'false' not in text.lower()

    with open(pathtocsv, mode='r') as readfile:
        reader = csv.DictReader(readfile)
        for row in reader:
            fpath = row.pop('filename')
            # os.path.join avoids doubled separators if the folder path ends in '/'.
            upload = os.path.join(vidfolderpath, fpath)
            for flag in ('public', 'panoramic', 'mp4_support'):
                row[flag] = _to_bool(row[flag])
            # Drop missing/blank fields so the API only receives real values.
            payload = {k: v for k, v in row.items() if v is not None and v != ""}
            response = videos_api.create(payload)
            print("Video container for ", fpath, " created!")
            video_id = response["video_id"]
            # Context manager closes each video file even if an upload fails.
            with open(upload, "rb") as file:
                video_response = videos_api.upload(video_id, file)
            print("Video ", fpath, " uploaded! ", video_response)
## upload thumbnail for a video
@main.command()
@click.argument('videoid')
@click.argument('filepath')
@click.pass_context
def uploadthumb(ctx, videoid, filepath):
    """
    Choose a JPG to upload as your image.
    """
    api_key = ctx.obj['api_key']
    client = setClient(api_key)
    client.connect()
    videos_api = VideosApi(client)
    # Context manager guarantees the image file handle is released
    # (the original leaked the open file).
    with open(filepath, "rb") as file:
        response = videos_api.upload_thumbnail(videoid, file)
    click.echo(response)
## pick thumbnail for a video
@main.command()
@click.argument('videoid')
@click.argument('payload', default='{"timecode":"00:00:01:000"}')
@click.pass_context
def pickthumb(ctx, videoid, payload):
    """
    Pick a thumbnail from a frame in the video timeline.
    """
    client = setClient(ctx.obj['api_key'])
    client.connect()
    videos_api = VideosApi(client)
    # The payload carries the timecode of the frame to use.
    timecode_payload = json.loads(payload)
    click.echo(videos_api.pick_thumbnail(videoid, timecode_payload))
## show a video / get video details
@main.command()
@click.argument('videoid')
@click.pass_context
def getvideo(ctx, videoid):
    """
    Retrieve details for a video using the video ID.
    """
    client = setClient(ctx.obj['api_key'])
    client.connect()
    videos_api = VideosApi(client)
    click.echo(videos_api.get(videoid))
## delete a video
@main.command()
@click.argument('videoid')
@click.pass_context
def deletevideo(ctx, videoid):
    """
    Delete a video using its video ID.
    """
    client = setClient(ctx.obj['api_key'])
    client.connect()
    videos_api = VideosApi(client)
    click.echo(videos_api.delete(videoid))
## bulk delete videos
@main.command()
@click.argument('vidlist')
@click.pass_context
def bulkdelete(ctx, vidlist):
    """
    Delete list of videos presented by ID in a comma separated string.
    """
    # Normalise the list: strip spaces and drop the empty entries that a
    # trailing or doubled comma would otherwise produce (the original would
    # pass an empty-string ID to the API).
    del_list = [v for v in vidlist.replace(" ", "").split(',') if v]
    api_key = ctx.obj['api_key']
    client = setClient(api_key)
    client.connect()
    videos_api = VideosApi(client)
    for item in del_list:
        response = videos_api.delete(item)
        print(response)
## update a video
@main.command()
@click.argument('videoid')
@click.argument('payload')
@click.pass_context
def updatevideo(ctx, videoid, payload):
    """
    Update a video's details using its video ID.
    """
    client = setClient(ctx.obj['api_key'])
    client.connect()
    videos_api = VideosApi(client)
    details = json.loads(payload)
    click.echo(videos_api.update(videoid, details))
## show video status
@main.command()
@click.argument('videoid')
@click.pass_context
def showvideostatus(ctx, videoid):
    """
    Show whether video is ready for playback.
    """
    api_key = ctx.obj['api_key']
    client = setClient(api_key)
    client.connect()
    videos_api = VideosApi(client)
    response = videos_api.get_status(videoid)
    # Use click.echo for consistency with every other command in this CLI
    # (the original used a bare print here).
    click.echo(response)
# DELEGATED VIDEO
## list active tokens
@main.command()
@click.option('--payload', default='{}', help="Dictionary as string containing sorting choices.")
@click.pass_context
def listtokens(ctx, payload):
    """
    List all active delegated tokens. You can sort tokens by including a dictionary
    as a string. If you are reading documentation, instances of camelCase are snake case for this
    tool, so convert them before including them. Available choices are:
    * sort_by - string ('asc' or 'desc')
    * current_page - string
    * page_size - string
    """
    api_key = ctx.obj['api_key']
    client = setClient(api_key)
    client.connect()
    tokens_api = UploadTokensApi(client)
    # BUG FIX: the default used to be the dict {} which json.loads() rejects
    # with a TypeError; the default must be the JSON *string* '{}'.
    kwargs = json.loads(payload)
    if kwargs:
        response = tokens_api.list(**kwargs)
    else:
        response = tokens_api.list()
    click.echo(response)
## generate upload token
@main.command()
@click.argument('ttl')
@click.pass_context
def createtoken(ctx, ttl):
    """
    Create an upload token. Choose ttl in seconds. If one is not selected, the token never
    expires.
    """
    client = setClient(ctx.obj['api_key'])
    client.connect()
    tokens_api = UploadTokensApi(client)
    # CLI arguments arrive as strings; parse to the numeric TTL value.
    parsed_ttl = json.loads(ttl)
    click.echo(tokens_api.create_token(parsed_ttl))
## show upload token
@main.command()
@click.argument('token')
@click.pass_context
def gettoken(ctx, token):
    """
    Get details about a single token using the token's ID.
    """
    client = setClient(ctx.obj['api_key'])
    client.connect()
    tokens_api = UploadTokensApi(client)
    click.echo(tokens_api.get_token(token))
## delete upload token
@main.command()
@click.argument('token')
@click.pass_context
def deletetoken(ctx, token):
    """
    Delete a token.
    """
    client = setClient(ctx.obj['api_key'])
    client.connect()
    tokens_api = UploadTokensApi(client)
    click.echo(tokens_api.delete_token(token))
## upload with upload token
@main.command()
@click.argument('path')
@click.argument('token')
@click.pass_context
def tokenupload(ctx, path, token):
    """
    Upload a video with a token. You must have created a token already.
    You can edit its details afterwards with updatevideo.
    """
    api_key = ctx.obj['api_key']
    client = setClient(api_key)
    client.connect()
    videos_api = VideosApi(client)
    # Close the file deterministically, even if the upload raises
    # (the original leaked the open file).
    with open(path, "rb") as file:
        response = videos_api.upload_with_upload_token(token, file)
    click.echo(response)
# LIVE STREAMS
## list live streams
@main.command()
@click.option('--payload', default={}, help="""Add a JSON dictionary containing all the search features you want to use. The format would be: '{"name":"My Live Stream", "sort_by":"asc"} '""")
@click.pass_context
def liststreams(ctx, payload):
"""
List all the live streams in your account, or use a filtering option. All choices use snake case. For documentation
containing camelcase for parameters, change them to snake case. Choices: \n
* stream_key | |
# -*- coding: utf-8 -*-
# Copyright (c) 2017 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from django.shortcuts import render,render_to_response
from django.http import HttpResponse,HttpResponseRedirect
from django.contrib.auth import authenticate,login ,logout,update_session_auth_hash
from django.conf import settings
from django import forms
from django.template import RequestContext
from django.core.files.storage import FileSystemStorage
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.models import User
from django.utils.datastructures import MultiValueDictKeyError
from django.contrib.auth.decorators import login_required
import jpype
from traceback import format_exc
from json import dumps
from time import time
from urlparse import urljoin
from app.models import UserID
from app.forms import UserRegisterForm,UserProfileForm,InfoForm,OrgInfoForm
def index(request):
    """ View for index
    returns index.html template
    """
    # No dynamic data: render the template with an empty context.
    return render(request, 'app/index.html', {})
def about(request):
    """ View for about
    returns about.html template
    """
    # No dynamic data: render the template with an empty context.
    return render(request, 'app/about.html', {})
def validate(request):
    """ View for validate tool
    returns validate.html template
    """
    # NOTE: this module is Python 2 (see the `except X,ex` syntax below).
    # The view drives the SPDX Java "Verify" tool through an in-process JVM
    # via jpype; every return path detaches the current thread from the JVM
    # first, so be careful to preserve that pairing when editing.
    if request.user.is_authenticated() or settings.ANONYMOUS_LOGIN_ENABLED:
        context_dict={}
        if request.method == 'POST':
            if (jpype.isJVMStarted()==0):
                """ If JVM not already started, start it."""
                # The JVM can only be started once per process; classpath
                # points at the SPDX tools jar.
                classpath = settings.JAR_ABSOLUTE_PATH
                jpype.startJVM(jpype.getDefaultJVMPath(),"-ea","-Djava.class.path=%s"%classpath)
            """ Attach a Thread and start processing the request. """
            jpype.attachThreadToJVM()
            package = jpype.JPackage("org.spdx.tools")
            verifyclass = package.Verify
            ajaxdict=dict()
            try :
                if request.FILES["file"]:
                    """ Saving file to the media directory """
                    myfile = request.FILES['file']
                    # Uploads go under MEDIA_ROOT/<user>/<unix-time>/ so
                    # repeated uploads never collide.
                    folder = str(request.user) + "/" + str(int(time()))
                    fs = FileSystemStorage(location=settings.MEDIA_ROOT +"/"+ folder,
                        base_url=urljoin(settings.MEDIA_URL, folder+'/')
                        )
                    filename = fs.save(myfile.name, myfile)
                    uploaded_file_url = fs.url(filename)
                    """ Call the java function with parameters """
                    retval = verifyclass.verify(str(settings.APP_DIR+uploaded_file_url))
                    if (len(retval) > 0):
                        """ If any warnings are returned """
                        # AJAX callers get a JSON payload; non-AJAX callers
                        # get the rendered template. Both use HTTP 400.
                        if (request.is_ajax()):
                            ajaxdict["type"] = "warning"
                            ajaxdict["data"] = "The following warning(s) were raised: " + str(retval)
                            response = dumps(ajaxdict)
                            jpype.detachThreadFromJVM()
                            return HttpResponse(response,status=400)
                        context_dict["error"] = retval
                        jpype.detachThreadFromJVM()
                        return render(request,
                            'app/validate.html',context_dict,status=400
                            )
                    if (request.is_ajax()):
                        """ Valid SPDX Document """
                        ajaxdict["data"] = "This SPDX Document is valid."
                        response = dumps(ajaxdict)
                        jpype.detachThreadFromJVM()
                        return HttpResponse(response,status=200)
                    jpype.detachThreadFromJVM()
                    return HttpResponse("This SPDX Document is valid.",status=200)
                else :
                    """ If no file uploaded."""
                    if (request.is_ajax()):
                        ajaxdict=dict()
                        ajaxdict["type"] = "error"
                        ajaxdict["data"] = "No file uploaded"
                        response = dumps(ajaxdict)
                        jpype.detachThreadFromJVM()
                        return HttpResponse(response,status=404)
                    context_dict["error"] = "No file uploaded"
                    jpype.detachThreadFromJVM()
                    return render(request,
                        'app/validate.html',context_dict,status=404
                        )
            except jpype.JavaException,ex :
                """ Error raised by verifyclass.verify without exiting the application"""
                if (request.is_ajax()):
                    ajaxdict=dict()
                    ajaxdict["type"] = "error"
                    ajaxdict["data"] = jpype.JavaException.message(ex)
                    response = dumps(ajaxdict)
                    jpype.detachThreadFromJVM()
                    return HttpResponse(response,status=400)
                context_dict["error"] = jpype.JavaException.message(ex)
                jpype.detachThreadFromJVM()
                return render(request,
                    'app/validate.html',context_dict,status=400
                    )
            except MultiValueDictKeyError:
                """ If no files selected"""
                if (request.is_ajax()):
                    ajaxdict=dict()
                    ajaxdict["type"] = "error"
                    ajaxdict["data"] = "No files selected."
                    response = dumps(ajaxdict)
                    jpype.detachThreadFromJVM()
                    return HttpResponse(response,status=404)
                context_dict["error"] = "No files selected."
                jpype.detachThreadFromJVM()
                return render(request,
                    'app/validate.html',context_dict,status=404
                    )
            except :
                """ Other error raised """
                # Deliberate catch-all: the traceback is surfaced to the
                # user rather than crashing the view.
                if (request.is_ajax()):
                    ajaxdict=dict()
                    ajaxdict["type"] = "error"
                    ajaxdict["data"] = format_exc()
                    response = dumps(ajaxdict)
                    jpype.detachThreadFromJVM()
                    return HttpResponse(response,status=400)
                context_dict["error"] = format_exc()
                jpype.detachThreadFromJVM()
                return render(request,
                    'app/validate.html',context_dict,status=400
                    )
        else :
            """ GET,HEAD """
            return render(request,
                'app/validate.html',context_dict
                )
    else :
        return HttpResponseRedirect(settings.LOGIN_URL)
def compare(request):
""" View for compare tool
returns compare.html template
"""
if request.user.is_authenticated() or settings.ANONYMOUS_LOGIN_ENABLED:
context_dict={}
if request.method == 'POST':
if (jpype.isJVMStarted()==0):
""" If JVM not already started, start it, attach a Thread and start processing the request """
classpath =settings.JAR_ABSOLUTE_PATH
jpype.startJVM(jpype.getDefaultJVMPath(),"-ea","-Djava.class.path=%s"%classpath)
""" Attach a Thread and start processing the request """
jpype.attachThreadToJVM()
package = jpype.JPackage("org.spdx.tools")
verifyclass = package.Verify
compareclass = package.CompareMultpleSpdxDocs
ajaxdict = dict()
filelist = list()
errorlist = list()
if 'compare' in request.POST:
""" If files submitted one by one """
try:
if request.FILES["file1"]:
nofile = int(request.POST["nofile"])
rfilename = request.POST["rfilename"]+".xlsx"
folder = str(request.user)+"/"+ str(int(time()))
callfunc = [settings.MEDIA_ROOT+"/"+folder + "/" +rfilename]
erroroccurred = False
warningoccurred = False
fs = FileSystemStorage(location=settings.MEDIA_ROOT +"/"+ folder,
base_url=urljoin(settings.MEDIA_URL, folder+'/')
)
for i in range(1,nofile+1):
""" Check if file selected or not"""
try:
a = 'file'+str(i)
myfile = request.FILES['file'+str(i)]
except MultiValueDictKeyError:
""" If no files selected"""
if (request.is_ajax()):
filelist.append("File " + str(i) + " not selected.")
errorlist.append("Please select a file.")
ajaxdict["type"] = "error"
ajaxdict["files"] = filelist
ajaxdict["errors"] = errorlist
response = dumps(ajaxdict)
jpype.detachThreadFromJVM()
return HttpResponse(response,status=404)
context_dict["error"] = "No files selected."
context_dict["type"] = "error"
jpype.detachThreadFromJVM()
return render(request,
'app/compare.html',context_dict,status=404
)
""" If file exist and uploaded, save it"""
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
callfunc.append(settings.APP_DIR+uploaded_file_url)
try :
"""Call the java function to verify for valid RDF Files."""
retval = verifyclass.verifyRDFFile(settings.APP_DIR+uploaded_file_url)
if (len(retval) > 0):
""" If warnings raised"""
warningoccurred = True
filelist.append(myfile.name)
errorlist.append("The following warning(s) were raised: " +str(retval))
else :
filelist.append(myfile.name)
errorlist.append("No errors found")
except jpype.JavaException,ex :
""" Error raised by verifyclass.verifyRDFFile without exiting the application"""
erroroccurred = True
filelist.append(myfile.name)
errorlist.append(jpype.JavaException.message(ex))
except :
""" Other Exceptions"""
erroroccurred = True
filelist.append(myfile.name)
errorlist.append(format_exc())
if (erroroccurred==False):
"""If no errors in any of the file, call the java function with parameters as list"""
try :
compareclass.onlineFunction(callfunc)
except :
"""Error raised by onlineFunction"""
if (request.is_ajax()):
ajaxdict["type"] = "warning2"
ajaxdict["files"] = filelist
ajaxdict["errors"] = errorlist
ajaxdict["toolerror"] = format_exc()
response = dumps(ajaxdict)
jpype.detachThreadFromJVM()
return HttpResponse(response,status=400)
context_dict["type"] = "warning2"
context_dict["error"]= errorlist
jpype.detachThreadFromJVM()
return render(request,
'app/compare.html',context_dict,status=400
)
if (warningoccurred==False):
"""If no warnings raised """
if (request.is_ajax()):
newajaxdict=dict()
newajaxdict["medialink"] = settings.MEDIA_URL + folder + "/" + rfilename
response = dumps(newajaxdict)
jpype.detachThreadFromJVM()
return HttpResponse(response)
context_dict['Content-Disposition'] = 'attachment; filename="{}"'.format(rfilename)
context_dict["Content-Type"] = "application/vnd.ms-excel"
context_dict["medialink"] = settings.MEDIA_URL + folder + "/" + rfilename
jpype.detachThreadFromJVM()
return render(request,
'app/compare.html',context_dict,status=200
)
#return HttpResponseRedirect(settings.MEDIA_URL+ folder + "/" +rfilename)
else :
if (request.is_ajax()):
ajaxdict["type"] = "warning"
ajaxdict["files"] = filelist
ajaxdict["errors"] = errorlist
ajaxdict["medialink"] = settings.MEDIA_URL + folder + "/" + rfilename
response = dumps(newajaxdict)
jpype.detachThreadFromJVM()
return HttpResponse(response,status=406)
context_dict['Content-Disposition'] = 'attachment; filename="{}"'.format(rfilename)
context_dict["Content-Type"] = "application/vnd.ms-excel"
context_dict["type"] = "warning"
context_dict["medialink"] = settings.MEDIA_URL + folder + "/" + rfilename
jpype.detachThreadFromJVM()
return render(request,
'app/compare.html',context_dict,status=406
)
else :
if (request.is_ajax()):
ajaxdict["type"] = "error"
ajaxdict["files"] = filelist
ajaxdict["errors"] = errorlist
response = dumps(ajaxdict)
jpype.detachThreadFromJVM()
return HttpResponse(response,status=400)
context_dict["error"]= errorlist
context_dict["type"] = "error"
jpype.detachThreadFromJVM()
return render(request,
'app/compare.html',context_dict,status=400
)
else :
context_dict["error"]= "File Not Uploaded"
context_dict["type"] = "error"
jpype.detachThreadFromJVM()
return render(request,
'app/compare.html',context_dict,status=404
)
except MultiValueDictKeyError:
""" If no files uploaded"""
if (request.is_ajax()):
filelist.append("File-1 not selected.")
errorlist.append("Please select a file.")
ajaxdict["type"] = "error"
ajaxdict["files"] = filelist
ajaxdict["errors"] = errorlist
response = dumps(ajaxdict)
jpype.detachThreadFromJVM()
return HttpResponse(response,status=404)
context_dict["error"] = "No files selected."
context_dict["type"] = "error"
jpype.detachThreadFromJVM()
return render(request,
'app/compare.html',context_dict,status=404
)
elif 'compareall' in request.POST:
""" If all files submitted at once"""
try:
if request.FILES["files"]:
rfilename = request.POST["rfilename2"]+".xlsx"
folder = str(request.user)+"/"+ str(int(time()))
callfunc = [settings.MEDIA_ROOT+"/"+folder + "/" +rfilename]
erroroccurred = False
warningoccurred = False
if (len(request.FILES.getlist("files"))<2):
context_dict["error"]= "Please select atleast 2 files"
jpype.detachThreadFromJVM()
return render(request,
'app/compare.html',context_dict
)
"""Loop through the list of files"""
folder = str(request.user) + "/" + str(int(time()))
fs = FileSystemStorage(location=settings.MEDIA_ROOT +"/"+ folder,
base_url=urljoin(settings.MEDIA_URL, folder+'/')
)
for myfile in request.FILES.getlist("files"):
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
callfunc.append(settings.APP_DIR+uploaded_file_url)
try :
"""Call the java function to verify for valid RDF Files."""
retval = verifyclass.verifyRDFFile(settings.APP_DIR+uploaded_file_url)
if (len(retval) > 0):
"""If warnings raised"""
warningoccurred = True
filelist.append(myfile.name)
errorlist.append(str(retval))
else :
filelist.append(myfile.name)
errorlist.append("No errors found")
except jpype.JavaException,ex :
""" Error raised by verifyclass.verifyRDFFile without exiting the application"""
erroroccurred = True
filelist.append(myfile.name)
errorlist.append(jpype.JavaException.message(ex))
except :
""" Other Exceptions"""
erroroccurred = True
filelist.append(myfile.name)
errorlist.append(format_exc())
if (erroroccurred==False):
""" If no errors in any of the file,call the java function with parameters as list"""
try :
compareclass.onlineFunction(callfunc)
except :
"""Error raised by onlineFunction"""
if (request.is_ajax()):
ajaxdict["type"] = "warning2"
ajaxdict["files"] = filelist
ajaxdict["errors"] = errorlist
ajaxdict["toolerror"] = format_exc()
response = dumps(ajaxdict)
jpype.detachThreadFromJVM()
return HttpResponse(response,status=400)
context_dict["type"] = "warning2"
context_dict["error"]= errorlist
jpype.detachThreadFromJVM()
return render(request,
'app/compare.html',context_dict,status=400
)
if (warningoccurred==False):
"""If no warning raised """
if (request.is_ajax()):
newajaxdict=dict()
newajaxdict["medialink"] = settings.MEDIA_URL + folder + "/"+ rfilename
response = dumps(newajaxdict)
jpype.detachThreadFromJVM()
return HttpResponse(response)
context_dict["Content-Type"] = "application/vnd.ms-excel"
context_dict['Content-Disposition'] = 'attachment; filename="{}"'.format(rfilename)
context_dict["medialink"] = settings.MEDIA_URL + folder + "/" + rfilename
jpype.detachThreadFromJVM()
return render(request,
'app/compare.html',context_dict,status=200
)
#return HttpResponseRedirect(settings.MEDIA_URL+ folder + "/"+rfilename)
else :
if (request.is_ajax()):
ajaxdict["type"] = "warning"
ajaxdict["files"] | |
"Special",
"pp": 25,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": {
"chance": 10,
"status": 'brn'
},
"target": "normal",
"type": "Fire"
},
"encore": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 5,
"priority": 0,
"flags": {"protect", "reflectable", "mirror", "authentic"},
"volatileStatus": 'encore',
"secondary": False,
"target": "normal",
"type": "Normal"
},
"endeavor": {
"accuracy": 100,
"basePower": 0,
"category": "Physical",
"pp": 5,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"secondary": False,
"target": "normal",
"type": "Normal"
},
"endure": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 10,
"priority": 4,
"flags": {},
"stallingMove": True,
"volatileStatus": 'endure',
"secondary": False,
"target": "self",
"type": "Normal"
},
"energyball": {
"accuracy": 100,
"basePower": 90,
"category": "Special",
"pp": 10,
"priority": 0,
"flags": {"bullet", "protect", "mirror"},
"secondary": {
"chance": 10,
"boosts": {
"spd": -1
}
},
"target": "normal",
"type": "Grass"
},
"entrainment": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 15,
"priority": 0,
"flags": {"protect", "reflectable", "mirror"},
"secondary": False,
"target": "normal",
"type": "Normal"
},
"eruption": {
"accuracy": 100,
"basePower": 150,
"category": "Special",
"pp": 5,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": False,
"target": "allAdjacentFoes",
"type": "Fire"
},
"explosion": {
"accuracy": 100,
"basePower": 250,
"category": "Physical",
"pp": 5,
"priority": 0,
"flags": {"protect", "mirror"},
"selfdestruct": True,
"secondary": False,
"target": "allAdjacent",
"type": "Normal"
},
"extrasensory": {
"accuracy": 100,
"basePower": 80,
"category": "Special",
"pp": 20,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": {
"chance": 10,
"volatileStatus": 'flinch'
},
"target": "normal",
"type": "Psychic"
},
"extremespeed": {
"accuracy": 100,
"basePower": 80,
"category": "Physical",
"pp": 5,
"priority": 2,
"flags": {"contact", "protect", "mirror"},
"secondary": False,
"target": "normal",
"type": "Normal"
},
"facade": {
"accuracy": 100,
"basePower": 70,
"category": "Physical",
"pp": 20,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"secondary": False,
"target": "normal",
"type": "Normal"
},
"feintattack": {
"accuracy": True,
"basePower": 60,
"category": "Physical",
"pp": 20,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"secondary": False,
"target": "normal",
"type": "Dark"
},
"fairylock": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 10,
"priority": 0,
"flags": {"mirror", "authentic"},
"pseudoWeather": 'fairylock',
"secondary": False,
"target": "all",
"type": "Fairy"
},
"fairywind": {
"accuracy": 100,
"basePower": 40,
"category": "Special",
"pp": 30,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": False,
"target": "normal",
"type": "Fairy"
},
"fakeout": {
"accuracy": 100,
"basePower": 40,
"category": "Physical",
"pp": 10,
"priority": 3,
"flags": {"contact", "protect", "mirror"},
"secondary": {
"chance": 100,
"volatileStatus": 'flinch'
},
"target": "normal",
"type": "Normal"
},
"faketears": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 0,
"flags": {"protect", "reflectable", "mirror"},
"boosts": {
"spd": -2
},
"secondary": False,
"target": "normal",
"type": "Dark"
},
"falseswipe": {
"accuracy": 100,
"basePower": 40,
"category": "Physical",
"pp": 40,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"noFaint": True,
"secondary": False,
"target": "normal",
"type": "Normal"
},
"featherdance": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 15,
"priority": 0,
"flags": {"protect", "reflectable", "mirror"},
"boosts": {
"atk": -2
},
"secondary": False,
"target": "normal",
"type": "Flying"
},
"feint": {
"accuracy": 100,
"basePower": 30,
"category": "Physical",
"pp": 10,
"priority": 2,
"flags": {"mirror"},
"breaksProtect": True,
"secondary": False,
"target": "normal",
"type": "Normal"
},
"fellstinger": {
"accuracy": 100,
"basePower": 30,
"category": "Physical",
"pp": 25,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"secondary": False,
"target": "normal",
"type": "Bug"
},
"fierydance": {
"accuracy": 100,
"basePower": 80,
"category": "Special",
"pp": 10,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": {
"chance": 50,
"self": {
"boosts": {
"spa": 1
}
}
},
"target": "normal",
"type": "Fire"
},
"finalgambit": {
"accuracy": 100,
"basePower": 0,
"category": "Special",
"pp": 5,
"priority": 0,
"flags": {"protect"},
"selfdestruct": True,
"secondary": False,
"target": "normal",
"type": "Fighting"
},
"fireblast": {
"accuracy": 85,
"basePower": 110,
"category": "Special",
"pp": 5,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": {
"chance": 10,
"status": 'brn'
},
"target": "normal",
"type": "Fire"
},
"firefang": {
"accuracy": 95,
"basePower": 65,
"category": "Physical",
"pp": 15,
"priority": 0,
"flags": {"bite", "contact", "protect", "mirror"},
"secondary": [
{
"chance": 10,
"status": 'brn'
}, {
"chance": 10,
"volatileStatus": 'flinch'
}
],
"target": "normal",
"type": "Fire"
},
"firepledge": {
"accuracy": 100,
"basePower": 80,
"category": "Special",
"pp": 10,
"priority": 0,
"flags": {"protect", "mirror", "nonsky"},
"secondary": False,
"target": "normal",
"type": "Fire"
},
"firepunch": {
"accuracy": 100,
"basePower": 75,
"category": "Physical",
"pp": 15,
"priority": 0,
"flags": {"contact", "protect", "mirror", "punch"},
"secondary": {
"chance": 10,
"status": 'brn'
},
"target": "normal",
"type": "Fire"
},
"firespin": {
"accuracy": 85,
"basePower": 35,
"category": "Special",
"pp": 15,
"priority": 0,
"flags": {"protect", "mirror"},
"volatileStatus": 'partiallytrapped',
"secondary": False,
"target": "normal",
"type": "Fire"
},
"fissure": {
"accuracy": 30,
"basePower": 0,
"category": "Physical",
"pp": 5,
"priority": 0,
"flags": {"protect", "mirror", "nonsky"},
"ohko": True,
"secondary": False,
"target": "normal",
"type": "Ground"
},
"flail": {
"accuracy": 100,
"basePower": 0,
"category": "Physical",
"pp": 15,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"secondary": False,
"target": "normal",
"type": "Normal"
},
"flameburst": {
"accuracy": 100,
"basePower": 70,
"category": "Special",
"pp": 15,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": False,
"target": "normal",
"type": "Fire"
},
"flamecharge": {
"accuracy": 100,
"basePower": 50,
"category": "Physical",
"pp": 20,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"secondary": {
"chance": 100,
"self": {
"boosts": {
"spe": 1
}
}
},
"target": "normal",
"type": "Fire"
},
"flamewheel": {
"accuracy": 100,
"basePower": 60,
"category": "Physical",
"pp": 25,
"priority": 0,
"flags": {"contact", "protect", "mirror", "defrost"},
"secondary": {
"chance": 10,
"status": 'brn'
},
"target": "normal",
"type": "Fire"
},
"flamethrower": {
"accuracy": 100,
"basePower": 90,
"category": "Special",
"pp": 15,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": {
"chance": 10,
"status": 'brn'
},
"target": "normal",
"type": "Fire"
},
"flareblitz": {
"accuracy": 100,
"basePower": 120,
"category": "Physical",
"pp": 15,
"priority": 0,
"flags": {"contact", "protect", "mirror", "defrost"},
"recoil": [33, 100],
"secondary": {
"chance": 10,
"status": 'brn'
},
"target": "normal",
"type": "Fire"
},
"flash": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 0,
"flags": {"protect", "reflectable", "mirror"},
"boosts": {
"accuracy": -1
},
"secondary": False,
"target": "normal",
"type": "Normal"
},
"flashcannon": {
"accuracy": 100,
"basePower": 80,
"category": "Special",
"pp": 10,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": {
"chance": 10,
"boosts": {
"spd": -1
}
},
"target": "normal",
"type": "Steel"
},
"flatter": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 15,
"priority": 0,
"flags": {"protect", "reflectable", "mirror"},
"volatileStatus": 'confusion',
"boosts": {
"spa": 1
},
"secondary": False,
"target": "normal",
"type": "Dark"
},
"fling": {
"accuracy": 100,
"basePower": 0,
"category": "Physical",
"pp": 10,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": False,
"target": "normal",
"type": "Dark"
},
"flowershield": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 10,
"priority": 0,
"flags": {"distance"},
"secondary": False,
"target": "all",
"type": "Fairy"
},
"fly": {
"accuracy": 95,
"basePower": 90,
"category": "Physical",
"pp": 15,
"priority": 0,
"flags": {"contact", "charge", "protect", "mirror", "gravity", "distance"},
"secondary": False,
"target": "any",
"type": "Flying"
},
"flyingpress": {
"accuracy": 95,
"basePower": 80,
"category": "Physical",
"pp": 10,
"flags": {"contact", "protect", "mirror", "gravity", "distance", "nonsky"},
"priority": 0,
"secondary": False,
"target": "any",
"type": "Fighting"
},
"focusblast": {
"accuracy": 70,
"basePower": 120,
"category": "Special",
"pp": 5,
"priority": 0,
"flags": {"bullet", "protect", "mirror"},
"secondary": {
"chance": 10,
"boosts": {
"spd": -1
}
},
"target": "normal",
"type": "Fighting"
},
"focusenergy": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 30,
"priority": 0,
"flags": {"snatch"},
"volatileStatus": 'focusenergy',
"secondary": False,
"target": "self",
"type": "Normal"
},
"focuspunch": {
"accuracy": 100,
"basePower": 150,
"category": "Physical",
"pp": 20,
"priority": -3,
"flags": {"contact", "protect", "punch"},
"secondary": False,
"target": "normal",
"type": "Fighting"
},
"followme": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 2,
"flags": {},
"volatileStatus": 'followme',
"secondary": False,
"target": "self",
"type": "Normal"
},
"forcepalm": {
"accuracy": 100,
"basePower": 60,
"category": "Physical",
"pp": 10,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"secondary": {
"chance": 30,
"status": 'par'
},
"target": "normal",
"type": "Fighting"
},
"foresight": {
"accuracy": True,
"basePower": 0,
"category": "Status",
"pp": 40,
"priority": 0,
"flags": {"protect", "reflectable", "mirror", "authentic"},
"volatileStatus": 'foresight',
"secondary": False,
"target": "normal",
"type": "Normal"
},
"forestscurse": {
"accuracy": 100,
"basePower": 0,
"category": "Status",
"pp": 20,
"priority": 0,
"flags": {"protect", "reflectable", "mirror"},
"secondary": False,
"target": "normal",
"type": "Grass"
},
"foulplay": {
"accuracy": 100,
"basePower": 95,
"category": "Physical",
"pp": 15,
"priority": 0,
"flags": {"contact", "protect", "mirror"},
"useTargetOffensive": True,
"secondary": False,
"target": "normal",
"type": "Dark"
},
"freezedry": {
"accuracy": 100,
"basePower": 70,
"category": "Special",
"pp": 20,
"priority": 0,
"flags": {"protect", "mirror"},
"secondary": | |
#!/usr/bin/python
"""
This module has the classes associated with parsing XTAF partitions
To use this class try something like:
from partition import *
xtafpart = Partition('/mnt/data/201010.bin')
"""
import sys
import mmap
import struct
from threading import Lock
from cStringIO import StringIO
# TODO: Optional thread safety
class XTAFFD(object):
    """ A read-only file-like wrapper around a FileObj.

    Exposes the usual read()/seek()/tell() trio so XTAF files can be
    handed to code that expects an open file handle.
    """
    def __init__(self, partition, fileobj):
        self.partition = partition
        self.fileobj = fileobj
        self.pointer = 0  # current read position within the file
    def read(self, length=-1):
        """ Read up to `length` bytes from the current position (-1 reads to EOF) """
        data = self.partition.read_file(fileobj=self.fileobj, size=length, offset=self.pointer)
        self.pointer += len(data)
        return data
    def seek(self, offset, whence=0):
        """ Reposition the read pointer, clamped to [0, file size].

        NOTE(review): whence=2 computes fsize - offset, i.e. a positive
        offset moves backwards from the end -- the opposite sign convention
        from standard file objects (fsize + offset).  Confirm callers rely
        on this before changing it.
        """
        fsize = self.fileobj.fr.fsize
        if whence == 0:
            target = offset
        elif whence == 1:
            target = self.pointer + offset
        elif whence == 2:
            target = fsize - offset
        else:
            # Unknown whence: original code left the pointer untouched
            target = self.pointer
        self.pointer = max(min(target, fsize), 0)
    def tell(self):
        """ Return the current read position """
        return self.pointer
class FileRecord(object):
    """ A directory entry as stored on disk (fields already in host byte order) """
    # All fields are required keyword arguments; order here matches the
    # original assignment order so a missing key raises the same KeyError.
    _FIELDS = ("fnsize", "attribute", "filename", "cluster", "fsize",
               "mtime", "mdate", "ctime", "cdate", "atime", "adate")
    def __init__(self, **kwargs):
        for field in self._FIELDS:
            setattr(self, field, kwargs[field])
    def __str__(self):
        return "XTAF FileRecord: %s" % self.filename
    def isDirectory(self):
        """ Directories are recorded with a file size of zero """
        return self.fsize == 0
class FileObj(object):
    """ Pairs a FileRecord with the list of clusters that hold its data """
    def __init__(self, fr, clusters):
        self.fr = fr
        self.clusters = clusters
    def __str__(self):
        return "XTAF File: {0}".format(self.fr)
    def isDirectory(self):
        """ Plain files are never directories; Directory overrides this """
        return False
class Directory(FileObj):
    """ A FileObj that also holds a mapping of child name -> FileObj """
    def __init__(self, fr, clusters):
        super(Directory, self).__init__(fr, clusters)
        self.files = {}    # children keyed by filename
        self.root = False  # set True only for the partition root directory
    def __str__(self):
        return "%s (Directory)" % super(Directory, self).__str__()
    def isDirectory(self):
        return True
class Partition(object):
"""
Main class representing the partition
The allfiles member has a dictionary of all the files in the partition
The rootfile member contains a directory object that represents the root directory
"""
def __str__(self):
return "XTAF Partition: %s" % self.filename
    def __init__(self, filename, threadsafe=False, precache=False):
        """ Open the image at `filename` and parse enough of the XTAF
        structures to serve file lookups.

        filename   -- path to the partition image (or whole-device image)
        threadsafe -- serialise disk reads with a lock (extra call overhead)
        precache   -- eagerly parse every directory at construction time
        """
        self.filename = filename
        self.threadsafe = threadsafe
        self.SIZE_OF_FAT_ENTRIES = 4
        #TODO: Error checking
        # NOTE(review): the image is later parsed with ord()/struct byte
        # access, so this should probably be mode 'rb'; text mode could
        # corrupt reads on some platforms -- confirm.
        fd = open(filename, 'r') # The 'r' is very important
        if fd.read(4) != 'XTAF':
            # No magic at offset 0: assume a whole-device image and jump to
            # the usual data-partition offset.
            start = 0x130EB0000L # TODO: Improve this detection mechanism
        else:
            start = 0
        fat = start + 0x1000L  # FAT begins 0x1000 bytes after the header
        fd.seek(0, 2)          # seek to end to measure the image size
        end = fd.tell()
        # Presumably rounds the FAT region up to a 0x1000 boundary to find
        # where the root directory starts -- original author's TODO stands.
        rootdir = -(-((end - start) >> 12L) & -0x1000L) + fat #TODO: Understand this better
        size = end - rootdir   # bytes available for data clusters
        fatsize = size >> 14L  # number of 0x4000-byte clusters -> FAT entries
        # This doesn't work because unlike the C version of mmap you can't give it a 64 bit offset
        #fatfd = mmap.mmap(fd.fileno(), fatsize, mmap.PROT_READ, mmap.PROT_READ, offset=fat)
        # So we have to keep the whole FAT in memory during processing
        fd.seek(fat, 0)
        fatdata = fd.read(fatsize * 4)  # 4 bytes per FAT entry
        fd.seek(0, 0)
        # Setup internal variables
        self.root_dir_cluster = 1
        self.start = start
        self.fat = fat
        self.root_dir = rootdir
        self.size = size
        self.fat_num = fatsize
        self.fd = fd
        self.fat_data = fatdata # <- FAT is in BIG ENDIAN
        self.allfiles = {}
        self.lock = Lock()
        #self.rootfile = self.parse_directory()
        self.rootfile = self.init_root_directory(recurse = precache)
    def read_cluster(self, cluster, length=0x4000, offset=0L):
        """ Return up to `length` bytes of cluster number `cluster`, starting
        `offset` bytes into it.  Returns "" if the request would cross the
        cluster boundary or if the disk read fails. """
        if length + offset <= 0x4000: #Sanity check: stay within one cluster
            # Clusters are numbered from 1 and are 0x4000 bytes each
            diskoffset = (cluster - 1 << 14L) + self.root_dir + offset
            # Thread safety is optional because the extra function calls are a large burden
            if self.threadsafe:
                self.lock.acquire()
            try:
                self.fd.seek(diskoffset)
                buf = self.fd.read(length)
            except IOError:
                buf = ""
            # NOTE(review): the release is not in a finally block, so any
            # non-IOError exception above would leave the lock held.
            if self.threadsafe:
                self.lock.release()
            return buf
        else:
            return ""
    #TODO: Refactor into something smaller
    def read_file(self, filename=None, fileobj=None, size=-1, offset=0):
        """ Read `size` bytes starting at byte `offset` from the file given
        either by `filename` or by an already-resolved `fileobj`.

        size = -1 reads the whole file: every cluster for a directory, but
        only fsize bytes (skipping slack space) for a regular file.
        Returns the data as a string ("" for empty files).
        """
        #TODO: Error checking
        if not fileobj:
            fileobj = self.get_file(filename)
        if size == -1:
            if fileobj.isDirectory():
                size = 2**32 # Read the whole directory (all the clusters)
            else:
                size = fileobj.fr.fsize # Read the whole file (skip the slack space)
        if len(fileobj.clusters) == 0: # Initialise cluster list if necessary
            fileobj.clusters = self.get_clusters(fileobj.fr)
            if len(fileobj.clusters) == 0: # Check the return of get_clusters
                print "Reading Empty File"
                return ""
        # Translate the byte offset into a starting cluster plus an
        # intra-cluster offset (clusters are 0x4000 bytes)
        clusters_to_skip = offset // 0x4000
        offset %= 0x4000
        buf = StringIO()
        try:
            # The first cluster may be read from a non-zero offset...
            readlen = min(0x4000, size)
            buf.write(self.read_cluster(fileobj.clusters[clusters_to_skip], readlen, offset))
            size -= readlen
            # ...the remaining clusters are always read from their start
            for cl in fileobj.clusters[clusters_to_skip+1:]:
                if size <= 0:
                    break # If we're finished, stop reading clusters
                readlen = min(0x4000, size)
                buf.write(self.read_cluster(cl, readlen, 0))
                size -= readlen
            return buf.getvalue()
        except IndexError:
            # Asked for an offset beyond the file's cluster list
            print "Read overflow?", len(fileobj.clusters), clusters_to_skip
            return buf.getvalue()
    def get_clusters(self, fr):
        """ Return the list of cluster numbers used by the file described by
        FileRecord `fr`, by following its chain through the in-memory FAT.
        Returns [] for empty files (cluster 0). """
        if fr.cluster == 0:
            print "Empty file"
            return []
        clusters = [fr.cluster]
        cl = 0x0
        cl = fr.cluster
        cldata = ''
        # A masked value of 0xFFFFFFF marks the end of a cluster chain
        while cl & 0xFFFFFFF != 0xFFFFFFF:
            cl_off = cl * self.SIZE_OF_FAT_ENTRIES
            cldata = self.fat_data[cl_off:cl_off + self.SIZE_OF_FAT_ENTRIES]
            if len(cldata) == 4:
                cl = struct.unpack(">I", cldata)[0]  # FAT entries are big-endian
                if cl & 0xFFFFFFF != 0xFFFFFFF:
                    clusters.append(cl)
            else:
                # Truncated read: the chain points outside the FAT.  Deleted
                # files (names prefixed '~') do this routinely, so only warn
                # for live files, then treat it as end-of-chain.
                if fr.filename[0] != '~':
                    print "get_clusters fat offset warning %s %x vs %x, %x" %\
                        (fr.filename, cl_off, len(self.fat_data), len(cldata))
                cl = 0xFFFFFFF
        return clusters
def open_fd(self, filename):
f = self.get_file(filename)
""" Return an XTAFFD object for a file """
if f != None:
return XTAFFD(self, f)
else:
return None
    def parse_file_records(self, data):
        """ Parse raw directory-cluster bytes into a list of FileRecord objects.

        `data` is the byte string of one or more directory clusters; each
        record occupies 64 bytes.  Deleted entries (first byte 0xE5) are kept
        with a '~' prefixed to their name; vacant entries (first byte 0) are
        skipped; a length byte over 42 ends the scan.
        """
        file_records = []
        pos = 0
        while pos + 64 < len(data): # FileRecord struct offsets
            fnlen = data[pos]     # filename length, or the 0xE5/0x00 markers
            flags = data[pos+1]   # attribute byte
            if ord(fnlen) == 0xE5: # Handle deleted files
                name = '~' + data[pos+2:pos+2+42].strip("\xff\x00")
            elif ord(fnlen) > 42: # Technically >42 should be an error condition
                break
            elif ord(fnlen) == 0: # A vacant entry, maybe the end of the directory?
                pos += 64
                continue
            else:
                name = data[pos+2:pos+2+42].strip("\xff\x00") # Ignoring fnlen is a bit wasteful
            # All multi-byte fields are big-endian
            cl = struct.unpack(">I", data[pos+0x2c:pos+0x2c+4])[0]
            size = struct.unpack(">I", data[pos+0x30:pos+0x30+4])[0]
            creation_date = struct.unpack(">H", data[pos+0x34:pos+0x34+2])[0]
            creation_time = struct.unpack(">H", data[pos+0x36:pos+0x36+2])[0]
            access_date = struct.unpack(">H", data[pos+0x38:pos+0x38+2])[0]
            access_time = struct.unpack(">H", data[pos+0x3A:pos+0x3A+2])[0]
            update_date = struct.unpack(">H", data[pos+0x3C:pos+0x3C+2])[0]
            update_time = struct.unpack(">H", data[pos+0x3E:pos+0x3E+2])[0]
            #if not (fnlen == '\xff' and flags == '\xff') and not fnlen == '\x00':
            if (ord(fnlen) < 43 and ord(fnlen) != 0) or (ord(fnlen) == 0xE5):
                file_records.append(FileRecord(fnsize=fnlen, attribute=flags, filename=name, cluster=cl,\
                                    fsize=size, mtime=update_time, mdate=update_date,\
                                    adate=access_date, atime=access_time,\
                                    cdate=creation_date, ctime=creation_time))
            else:
                pass
            pos += 64
        return file_records
def walk(self, path = '/'):
""" A generator that will return every fileobj on a system below path.
This is designed to be used instead of iterating over self.allfiles.
self.allfiles can still be used if the partition is created with precache = True
Using this will eliminate much of the advantage of precache = False.
The only remaining speedup will be the lazy caching of file cluster lists
"""
f = self.get_file(path)
if f == None or not f.isDirectory():
return
files = [f]
while len(files) > 0:
f = files.pop(0)
if f.isDirectory():
if not f.root and len(f.clusters) == 0:
f = self.parse_directory(f)
files = files + f.files.values()
yield f.fullpath
return
def get_file(self, filename):
""" Returns a fileobj from a filename.
Checks allfiles and if it isn't present starts walking the allfiles directory.
Not the same as self.allfiles[filename] anymore. """
if filename in self.allfiles:
currentfile = self.allfiles[filename]
if currentfile.isDirectory() and not currentfile.root and len(currentfile.clusters) == 0:
# If we're asked for a directory, initialise it before returning
currentfile = self.parse_directory(currentfile)
return currentfile # A previously accessed file
else:
return self.walk_for_file(filename)
def walk_for_file(self, filename):
""" Walks the file system parsing directories where necessary looking for a fileobj """
# Parse subdirectories looking for the requested file
file_components = filename[1:].split("/") # Skip first slash
currentfile = self.rootfile
for component in file_components:
#print "f:%s\t c:%s\t" % (filename, component), currentfile, self.rootfile
if currentfile == None:
break
# If this is a directory (that isn't root) and it has no clusters listed, try to initialise it
if currentfile.isDirectory() and not currentfile.root and len(currentfile.clusters) == 0:
currentfile = self.parse_directory(currentfile)
try:
currentfile = currentfile.files[component]
except KeyError:
| |
different LC ages.
"""
fig = p.gcf()
Nax = len(bands)
if Nax > 25 : Nrow, Ncol = 5,6
elif Nax > 20 : Nrow, Ncol = 5,5
elif Nax > 16 : Nrow, Ncol = 4,5
elif Nax > 12 : Nrow, Ncol = 4,4
elif Nax > 9 : Nrow, Ncol = 3,4
elif Nax > 6 : Nrow, Ncol = 3,3
elif Nax > 4 : Nrow, Ncol = 2,3
elif Nax > 3 : Nrow, Ncol = 2,2
elif Nax > 2 : Nrow, Ncol = 1,3
elif Nax > 1 : Nrow, Ncol = 1,2
else: Nrow,Ncol = 1, 1
iax = 0
for band in bands :
iax += 1
ax = fig.add_subplot( Nrow, Ncol, iax )
plot_mag_z( sim, band, mjd=mjd, restbands=restbands, **kwargs )
if band in sndat.keys() :
ax.axhline( sndat[band], color='k',ls='-',lw=2 )
if 'z' in sndat.keys() :
ax.axvline( sndat['z'], color='k',ls='-',lw=2 )
def plot_obscolor_z( sim, bands='WH', mjd='peak', clobber=False, **kwargs ):
""" plot the observed color at the given MJD against redshift.
mjd='peak' is a special case that samples all simulated SNe
at their respective peaks. Otherwise we sample all at the
same MJD, which probably means they are at different LC ages.
"""
z = sim.z
band1 = bands[0]
band2 = bands[1]
if mjd in [ None, 0, 'pk','peak'] :
# read in the peak mags
obsmag1 = sim.__dict__['SIM_PEAKMAG_'+band1]
obsmag2 = sim.__dict__['SIM_PEAKMAG_'+band2]
else :
# sample the photometry for all SNe at the given mjd, with an enormous match window
# so that we always sample the observation nearest to mjd, regardless of
# how far from mjd it actually is.
sim.samplephot( mjd=mjd, tmatch=1000, clobber=clobber )
obsmag1 = sim.__dict__['%s%i'%(band1,int(mjd))]
obsmag2 = sim.__dict__['%s%i'%(band2,int(mjd))]
# limit to observations with legit data
igood = np.where( (obsmag1<99) & (obsmag1>-99) &
(obsmag2<99) & (obsmag2>-99) )[0]
if not len(igood) :
print( "ERROR: no good mags for %s vs z"%(bands))
return( None )
obscolor = obsmag1[igood] - obsmag2[igood]
z = z[igood]
# Plot it
if band1 in BANDCOLOR.keys(): color = BANDCOLOR[band1]
else : color = 'k'
plotdefaults={'ls':' ','mew':0.2,'ms':5,'alpha':0.4, 'mfc':color,'mec':color,
'marker':'o'}
plotargs = dict( plotdefaults.items() + kwargs.items() )
ax = p.gca()
ax.plot( z, obscolor, **plotargs)
ax.text( 0.9,0.9, '%s - %s vs z'%(band1,band2), ha='right', va='top',
color=color, backgroundcolor='w', transform=ax.transAxes)
return( 1 )
def plot_color_z( sim, band1='W', band2='H', mjd='peak',
plotstyle='median', snmags={}, **kwargs ):
"""
plot the colors against redshift: band1-band2 vs z
at the given mjd day or list of days
mjd='peak' is a special case that samples all simulated SNe
at their respective peaks. Otherwise we sample all at the
same MJD, which probably means they are at different LC ages.
plotstyle = 'median' or 'points'
"""
from matplotlib import cm
# For now, assume that all SNe in the sim are of the same type
sntype = SNTYPEDICT[ sim.SNTYPE[0] ]
plotdefaults = {'ls':' ','marker':'o','mew':0.2,'ms':5,'alpha':0.4 }
if sntype in ['II','IIn','IIP','IIL'] :
plotdefaults['mfc'] = 'b'
plotdefaults['mec'] = 'b'
plotdefaults['color'] = 'b'
cmap = cm.Blues
elif sntype in ['Ib','Ic','Ibc'] :
plotdefaults['mfc'] = 'g'
plotdefaults['mec'] = 'g'
plotdefaults['color'] = 'g'
cmap = cm.Greens
elif sntype == 'Ia':
plotdefaults['mfc'] = 'r'
plotdefaults['mec'] = 'r'
plotdefaults['color'] = 'r'
cmap = cm.Reds
plotargs = dict( plotdefaults.items() + kwargs.items() )
if mjd in [ 0, 'pk','peak'] :
# read in the peak mags
mag1 = sim.__dict__['SIM_PEAKMAG_'+band1]
mag2 = sim.__dict__['SIM_PEAKMAG_'+band2]
else :
# sample the light curves at the given obs-frame age (rel. to peak)
sim.samplephot( mjd )
mag1 = sim.__dict__['%s%i'%(band1, int(mjd))]
mag2 = sim.__dict__['%s%i'%(band2, int(mjd))]
# limit to observations with legit data
igood = np.where( (mag1<99) & (mag1>-99) &
(mag2<99) & (mag2>-99) )[0]
if not len(igood) :
print( "ERROR: no good mags for %s-%s vs %s"%(band1,band2,band2))
return( None )
mag1 = mag1[igood]
mag2 = mag2[igood]
color = mag1-mag2
z = sim.z[igood]
ax = p.gca()
if plotstyle == 'points' :
# Plot a point for every simulated SN
if band1 in BANDCOLOR.keys(): color1 = BANDCOLOR[band1]
else : color1 = 'k'
if band2 in BANDCOLOR.keys(): color2 = BANDCOLOR[band2]
else : color2 = 'k'
kwargs['mfc'] = color1
kwargs['mec'] = color2
p.plot( z, color, **kwargs )
elif plotstyle == 'median' :
# Plot a rolling median at each redshift.
# We use the 3-sigma-clipped mean and associated robust sigma
# using astrolib-ported python functions defined below.
# sort the color and z arrays by redshift
zsortidx = z.argsort()
zsorted = z[zsortidx]
colorbyz = color[zsortidx]
# compute the sigma-clipped mean and associated robust sigma
# over bins containing 5% of the simulated SNe
from numpy import array
Nsim = len(sim.z)
Nmed = int(0.05*Nsim)
cmed,cmederr = [],[]
for icolor in range( len(color) ) :
colorsample = colorbyz[ max(0,icolor-Nmed/2) : min(len(colorbyz),max(0,icolor-Nmed/2)+Nmed) ]
mean, sigma = meanclip( colorsample, clipsig=3, maxiter=3, converge_num=0.1 )
cmed.append( mean )
cmederr.append( sigma )
cmed, cmederr = array(cmed),array(cmederr)
ax = p.gca()
fill_between( ax, zsorted, cmed-cmederr, cmed+cmederr, **kwargs )
#p.plot( zsorted, cmed, ls='-', color=kwargs['color'], lw=2 )
if band1 in snmags.keys() and band2 in snmags.keys() and 'z' in snmags.keys() :
sncolor = snmags[band1]-snmags[band2]
snmag = snmags[band2]
snz = snmags['z']
if 'd'+band1 in snmags.keys() and 'd'+band2 in snmags.keys() and 'dz' in snmags.keys():
dsncolor = np.sqrt( snmags['d'+band1]**2 + snmags['d'+band2]**2 )
dsnz = snmags['dz']
p.errorbar( snz, sncolor, dsncolor, dsnz, color='k', marker='o', capsize=0, elinewidth=2, ecolor='k' )
p.plot( snz, snmags[band1]-snmags[band2], color='k', marker='o' )
ax = p.gca()
ax.set_ylabel('%s-%s'%(band1,band2) )
ax.set_xlabel('Redshift')
ax.set_xlim( sim.z.min(), sim.z.max() )
ax.set_ylim( color.min(), color.max() )
return(1)
def multiplot_color_z( sim, mjd='peak', bluebands='GRXIZMH', redbands='XH',
                       tobs=0, snmags={}, **kwargs ):
    """ multi-panel plot showing color-mag diagrams.
    mjd='peak' is a special case that samples all simulated SNe
    at their respective peaks. Otherwise we sample all at the
    same MJD, which probably means they are at different LC ages.

    sim       : simulation object handed through to plot_color_z
    bluebands : band letters used as the blue side of each color
    redbands  : band letters used as the red side of each color
    snmags    : optional dict of observed SN magnitudes (band letters + 'z')
                overplotted as a diamond on each panel.
                NOTE(review): mutable default argument -- benign here because
                it is only read, never mutated, but worth cleaning up.
    """
    fig = p.gcf()
    # First pass: count how many (blue-red) panels will be drawn so a grid
    # shape can be chosen before any axes are added.
    Nax = 0
    # Allow single-character band specs to behave like one-element lists.
    if len(bluebands)==1 : bluebands=[bluebands]
    if len(redbands)==1 : redbands=[redbands]
    for bband in bluebands :
        ibband = BANDORDER.find( bband )
        for rband in redbands :
            irband = BANDORDER.find( rband )
            # only count "blue minus red" combinations (red band must be
            # redder than the blue band in the BANDORDER sequence)
            if irband <= ibband : continue
            Nax += 1
            # NOTE(review): this unconditional break keeps only the FIRST
            # valid red band per blue band (at most one panel per bband).
            # Confirm this is intentional and not a leftover debugging break;
            # the drawing loop below mirrors the same structure.
            break
    # Pick a subplot grid just big enough for Nax panels.
    Nrow = 1
    Ncol = 1
    if Nax > 25 : Nrow, Ncol = 5,6
    elif Nax > 20 : Nrow, Ncol = 5,5
    elif Nax > 16 : Nrow, Ncol = 4,5
    elif Nax > 12 : Nrow, Ncol = 4,4
    elif Nax > 9 : Nrow, Ncol = 3,4
    elif Nax > 6 : Nrow, Ncol = 3,3
    elif Nax > 4 : Nrow, Ncol = 2,3
    elif Nax > 3 : Nrow, Ncol = 2,2
    elif Nax > 2 : Nrow, Ncol = 1,3
    elif Nax > 1 : Nrow, Ncol = 1,2
    else: Nrow,Ncol = 1, 1
    # Second pass: identical iteration order, now actually plotting.
    iax = 0
    for bband in bluebands :
        ibband = BANDORDER.find( bband )
        for rband in redbands :
            irband = BANDORDER.find( rband )
            if irband <= ibband : continue
            iax += 1
            ax = fig.add_subplot( Nrow, Ncol, iax )
            plot_color_z( sim, mjd=mjd, band1=bband, band2=rband, tobs=tobs, **kwargs )
            # Overplot the observed SN color when magnitudes are supplied.
            if bband in snmags.keys() and rband in snmags.keys() :
                p.plot( snmags['z'], snmags[bband]-snmags[rband], marker='D',
                        mec='w', mfc='k',mew=1.5,ms=12 )
            break
def plotSALT2par(sim ) :
""" plot histograms showing the range of light curve
shapes and colors (assumes a SALT2 simulation)"""
fig = p.figure(1)
p.clf()
idet = sim.DUMP['idet']
# Color distribution
ax1 = fig.add_subplot(2,2,1)
c = sim.DUMP['S2c']
cbin, cedge = np.histogram( c, bins=30 )
cdetbin, cdetedge = np.histogram( c[idet], bins=30 )
p.plot( cedge[:-1], cbin, drawstyle='steps-post',color='r', label='simulated')
p.plot( cdetedge[:-1], cdetbin, drawstyle='steps-post',color='g', label='detected')
ax1.set_ylabel('Number of SNe')
ax1.text(0.05,0.95, 'SALT2 Color: c', transform=ax1.transAxes, ha='left',va='top')
# Stretch distribution
ax2 = fig.add_subplot(2,2,2)
x1 = sim.DUMP['S2x1']
x1bin, x1edge = np.histogram( x1, bins=30 )
x1detbin, x1detedge = np.histogram( x1[idet], bins=30 )
p.plot( x1edge[:-1], x1bin, drawstyle='steps-post', color='r', label='sim' )
p.plot( x1detedge[:-1], x1detbin, | |
"""
Created on Jan 09 2021
<NAME> and <NAME>
database analysis from
https://data.gov.il/dataset/covid-19
Israel sities coordinates data
https://data-israeldata.opendata.arcgis.com/
"""
import json
import requests
import sys
import extract_israel_data
from Utils import *
import time
import pandas as pd
import os
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
from plotly.subplots import make_subplots
import datetime
import numpy as np
import warnings
# Module-level plotting configuration.
plt.style.use('default')
warnings.filterwarnings("ignore")
# Global defaults forwarded to every line_statistic_plot() call below:
# log   -> use a logarithmic y axis when truthy
# fix_date -> clamp the x axis to the fixed Mar-2020..Mar-2021 window
line_statistic_plot_log=None
line_statistic_plot_fix_date=False
# data line plot
def line_statistic_plot(db, base, fields, title, ylabel, legend, text, save_name, log=None, fix_date=False):
    """Plot one time-series line per column of ``fields`` against the date
    column ``base`` of DataFrame ``db`` and save the figure as a PNG.

    db        : pandas DataFrame holding the data.
    base      : name of the date column used for the x axis.
    fields    : list of column names, one plotted line each (max 24,
                limited by the Dark24 palette).
    title/ylabel/legend : plot annotations.
    text      : optional sequence of timestamps to mark with vertical lines,
                or None for no markers.
    save_name : filename prefix; the file is written to
                ./<ddmmYYYY>/<save_name><ddmmyy>.png
    log       : truthy -> logarithmic y axis.
    fix_date  : True -> fixed Mar-2020..Mar-2021 x range instead of the
                data's own min/max.
    """
    # BUGFIX: the module header only does `from plotly.subplots import
    # make_subplots`, which does NOT bind the name `plotly`, so referencing
    # plotly.colors below raised NameError.  Import the subpackage here.
    import plotly.colors

    f, ax = plt.subplots(figsize=(18, 6))
    date = pd.to_datetime(db[base])
    colors = plotly.colors.qualitative.Dark24  # 24 distinct line colors
    sum_case = []  # per-field maxima; used to scale the event markers
    for cnt, case in enumerate(fields):
        sum_case.append(db[case].max())
        plt.plot(date, db[case], zorder=1, color=colors[cnt], linewidth=3)
    plt.title(title, fontsize=20)
    plt.ylabel(ylabel, fontsize=16)
    plt.legend(legend, fontsize=14)
    if fix_date:
        datemin = pd.to_datetime('2020-03-01')
        datemax = pd.to_datetime('2021-03-01')
    else:
        datemin = date.min()
        datemax = date.max()
    ax.set_xlim(datemin, datemax)
    ax.grid(True)
    # rotate and align the tick labels so they look better
    locator = mdates.AutoDateLocator()
    formatter = mdates.ConciseDateFormatter(locator)
    ax.fmt_xdata = formatter
    f.autofmt_xdate()
    if log:
        ax.set_yscale('log')
    # Draw a labelled vertical marker for each requested event date.
    if text is not None:
        tline = 0.25*max(sum_case)
        for kk in range(len(text)):
            plt.plot((text[kk], text[kk]), (0, tline), '-k', linewidth=3)
            plt.text(text[kk], 1.1*tline, text[kk].strftime('%d/%m/%y'), horizontalalignment='center', fontweight='bold', fontsize=14)
    save_string = save_name + datemax.strftime('%d%m%y') + '.png'
    f.savefig(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), save_string))
# Begin
# Script entry: everything below runs at import time and writes all output
# (plots, CSVs, log) into a per-day directory named after today's date.
full_data_file = os.path.join(os.getcwd(), time.strftime("%d%m%Y"), time.strftime("%d%m%Y") + '_loaded_files.csv')
if os.path.exists(full_data_file):
    # Today's dataset index was already downloaded -- reuse it.
    files_db = pd.read_csv(full_data_file, encoding="ISO-8859-8")
    first_plt = False
else:
    os.makedirs(os.path.join(os.getcwd(), time.strftime("%d%m%Y")), exist_ok=True)
    # Extract Data from Israel Dataset COVID-19
    files_db = extract_israel_data.extract_israel_data()
    first_plt = True
# Print LOG to file
# Mirror stdout into a log file; MyWriter comes from `from Utils import *`
# (presumably a tee-like writer -- confirm in Utils).
stdoutOrigin = sys.stdout
fout = open(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), 'israel_status_log.txt'), 'a')
sys.stdout = MyWriter(sys.stdout, fout)
# Optional event markers passed to every plot; None disables them.
text = None
# text = pd.date_range('2020-04-01', '2021-04-01', freq="MS")
# Isolation
# Plot total and daily counts of people entering isolation.
isolated = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('isolation').values.argmax()])
###################################################################################################################
# NOTE(review): `id` shadows the Python builtin (pattern repeated in every
# section below); harmless because the builtin is never used in this script.
id = files_db.current_file_name.str.find('isolation').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
base = 'date'
isolated[base] = pd.to_datetime(isolated[base])
isolated = isolated.sort_values([base])
# Censored counts appear as the string '15>' ("fewer than 15"); replace them
# with 15 so the column casts to int.  Columns where this fails are skipped.
# NOTE(review): `contains(...) != False` also matches NaN rows, coercing them
# to 15 as well -- confirm that is intended.  Other sections use '<15'.
for key in isolated.keys():
    try:
        isolated.loc[isolated[key].str.contains('15>') != False, key] = 15
        isolated[key] = isolated[key].astype(int)
    except:
        pass
# Cumulative totals by isolation reason.
iso1 = isolated.new_contact_with_confirmed.astype(int).sum()
iso2 = isolated.new_from_abroad.astype(int).sum()
title = 'Israel (data from ' + isolated[base].max().strftime('%d/%m/%y') + ') - isolated persons, total ' + str(iso1+iso2) + ', now ' +\
    str(isolated.isolated_today_contact_with_confirmed.iloc[-1] + isolated.isolated_today_abroad.iloc[-1])
ylabel = 'Number of individuals'
legend = ('Isolated due to contact with confirmed, total ' + str(iso1), 'Isolated due to arrived from abroad, total ' + str(iso2))
save_name = 'israelIsolatedPersons_'
fields = ['isolated_today_contact_with_confirmed', 'isolated_today_abroad']
# plot Isolated Total
line_statistic_plot(isolated, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot isolated daily
fields = ['new_contact_with_confirmed', 'new_from_abroad']
save_name = 'israelIsolatedPersons_Daily_'
title = 'Israel (data from ' + isolated[base].max().strftime('%d/%m/%y') + ') - Daily isolated persons, total ' + str(iso1+iso2) + ', now ' +\
    str(isolated.isolated_today_contact_with_confirmed.iloc[-1] + isolated.isolated_today_abroad.iloc[-1])
line_statistic_plot(isolated, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# Free the DataFrame before loading the next dataset.
del isolated
###################################################################################################################
# Medical Staff
# Plot confirmed cases and isolation counts among healthcare workers.
coronaMediaclStaffD = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('medical_staff').values.argmax()])
###################################################################################################################
id = files_db.current_file_name.str.find('medical_staff').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
base = 'Date'
coronaMediaclStaffD[base] = pd.to_datetime(coronaMediaclStaffD[base])
coronaMediaclStaffD = coronaMediaclStaffD.sort_values([base])
# Same censored-count cleanup as above; this dataset uses the '<15' spelling.
for key in coronaMediaclStaffD.keys():
    try:
        coronaMediaclStaffD.loc[coronaMediaclStaffD[key].str.contains('<15') != False, key] = 15
        coronaMediaclStaffD[key] = coronaMediaclStaffD[key].astype(int)
    except:
        pass
ylabel = 'Number of individuals'
title = 'Israel - medical staff confirmed (data from ' + coronaMediaclStaffD[base].max().strftime('%d/%m/%y') + ')'
save_name = 'coronaMediaclStaffConfirmed_'
fields = ['confirmed_cases_physicians', 'confirmed_cases_nurses', 'confirmed_cases_other_healthcare_workers']
legend = ['Confirmed physicians', 'Confirmed nurses', 'Confirmed other healthcare workers']
# plot coronaMediaclStaffConfirmed Total
line_statistic_plot(coronaMediaclStaffD, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot coronaMediaclStaffIsolated daily
title = 'Israel - medical staff in isolation (data from ' + coronaMediaclStaffD[base].max().strftime('%d/%m/%y') + ')'
fields = ['isolated_physicians', 'isolated_nurses', 'isolated_other_healthcare_workers']
legend = ['Isolated physicians', 'Isolated nurses', 'Isolated other healthcare workers']
save_name = 'coronaMediaclStaffIsolated_'
line_statistic_plot(coronaMediaclStaffD, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
del coronaMediaclStaffD
###################################################################################################################
# Hospitalization
# Plot patient condition, mean age and share of women among hospitalized
# patients.  Column names in this dataset are Hebrew.
hospitalization = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('hospitalization').values.argmax()])
###################################################################################################################
id = files_db.current_file_name.str.find('hospitalization').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
base = 'תאריך'
hospitalization[base] = pd.to_datetime(hospitalization[base])
hospitalization = hospitalization.sort_values([base])
# Censored-count cleanup; this dataset mixes both '15>' and '<15' spellings.
for key in hospitalization.keys():
    try:
        hospitalization.loc[hospitalization[key].str.contains('15>') != False, key] = 15
        hospitalization.loc[hospitalization[key].str.contains('<15') != False, key] = 15
        hospitalization[key] = hospitalization[key].astype(int)
    except:
        pass
ylabel = 'Number of individuals [persons]'
title = 'Israel - Critical conditions (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
save_name = 'israelHospitalized_'
fields = ['מונשמים', 'חולים קשה', 'מאושפזים']
legend = ('Ventilated patients', 'Seriously ill', 'Hospitalized')
# plot israelHospitalized Total
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
title = 'Israel - Critical conditions mean Age division (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
save_name = 'israelHospitalizedInAge_'
fields = ['גיל ממוצע מונשמים', 'גיל ממוצע חולים קשה', 'גיל ממוצע מאושפזים']
legend = ('Ventilated patients', 'Seriously ill', 'Hospitalized')
# plot israelHospitalizeInAgeTotal
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
title = 'Israel - Critical conditions percentage of Women (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
save_name = 'israelHospitalizedInWomens_'
fields = ['אחוז נשים מונשמות', 'אחוז נשים חולות קשה', 'אחוז נשים מאושפזות']
legend = ('Ventilated patients', 'Seriously ill', 'Hospitalized')
# plot israelHospitalizeInAgeTotal
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot israel Ill
title = 'Israel - ill conditions (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
fields = ['חולים קל', 'חולים בינוני', 'חולים קשה']
legend = ('Light ill', 'Mild ill', 'Seriously ill')
save_name = 'illConditions_'
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot israel mean Age Ill
title = 'Israel - ill conditions mean Age division (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
fields = ['גיל ממוצע חולים קל', 'גיל ממוצע חולים בינוני', 'גיל ממוצע חולים קשה']
legend = ('Light ill', 'Mild ill', 'Seriously ill')
save_name = 'illConditionsInAge_'
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot israel Women Percentage Ill
title = 'Israel - ill conditions percentage of Women (data from ' + hospitalization[base].max().strftime('%d/%m/%y') + ')'
fields = ['אחוז נשים חולות קל', 'אחוז נשים חולות בינוני', 'אחוז נשים חולות קשה']
legend = ('Light ill', 'Middle ill', 'Seriously ill')
save_name = 'illConditionsInWomens_'
line_statistic_plot(hospitalization, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
del hospitalization
###################################################################################################################
# Recovered
# Histogram of time from positive test to recovery, annotated with the mean
# and median.
# NOTE(review): pandas removed the `encoding` parameter from read_excel
# (deprecated in 0.24, removed in 1.0) -- on a modern pandas this line raises
# TypeError.  Confirm the pinned pandas version or drop the argument.
recovered = pd.read_excel(files_db.current_file_path[files_db.current_file_name.str.find('recovered').values.argmax()], encoding="ISO-8859-8")
###################################################################################################################
id = files_db.current_file_name.str.find('recovered').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
recoveredMeanTime = recovered.days_between_pos_and_recovery.mean()
recoveredMedianTime = recovered.days_between_pos_and_recovery.median()
# int(x*100)/100 truncates to 2 decimal places for display.
print('Recovered Mean Time: ' + str(int(recoveredMeanTime*100)/100) + ' days')
print('Recovered Median Time: ' + str(int(recoveredMedianTime*100)/100) + ' days')
# One histogram bin per day of recovery time.
NN = int(recovered.days_between_pos_and_recovery.max())
hh = np.histogram(recovered.days_between_pos_and_recovery, bins=np.arange(NN+1))
f, ax = plt.subplots(figsize=(15, 6))
plt.plot(hh[1][1:], hh[0], linewidth=3)
# ax.set_yscale('log')
plt.plot([recoveredMedianTime, recoveredMedianTime], [0, hh[0].max()], 'k--')
plt.text(recoveredMedianTime, hh[0].max(), ' Recovered Median Time: ' + str(int(recoveredMedianTime*100)/100) + ' days')
plt.plot([recoveredMeanTime, recoveredMeanTime], [0, hh[0][int(recoveredMeanTime)]], 'k--')
plt.text(recoveredMeanTime, hh[0][int(recoveredMeanTime)], ' Recovered Mean Time: ' + str(int(recoveredMeanTime*100)/100) + ' days')
plt.grid()
plt.xlabel('Time to recovered [days]', fontsize=16)
plt.ylabel('Number of individuals [persons]', fontsize=16)
# The dataset timestamp may not parse as a date; fall back to the raw string.
# Note: `data_from` is reused by the Deceased section below.
try:
    data_from = pd.to_datetime(str(files_db.last_update[id]))
    plt.title('Israel - Time to recovered. Num of persons ' + str(int(hh[0].sum())) + ' (data from ' + data_from.strftime('%d/%m/%y') + ')', fontsize=16)
except:
    plt.title('Israel - Time to recovered. Num of persons ' + str(int(hh[0].sum())) + ' (data from ' + str(files_db.last_update[id]) + ')', fontsize=16)
save_string = 'israelRecovered_' + str(files_db.last_update[id]) + '.png'
f.savefig(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), save_string))
del recovered
###################################################################################################################
# Deceased
# Histogram of time from positive test to death, annotated with the mean
# and median, mirroring the Recovered section above.
deceased = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('deceased').values.argmax()], encoding='latin-1')
###################################################################################################################
id = files_db.current_file_name.str.find('deceased').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
deceasedMeanTime = deceased.Time_between_positive_and_death.mean()
deceasedMedianTime = deceased.Time_between_positive_and_death.median()
print('Deceased Mean Time: ' + str(int(deceasedMeanTime*100)/100) + ' days')
print('Deceased Median Time: ' + str(int(deceasedMedianTime*100)/100) + ' days')
NN = int(deceased.Time_between_positive_and_death.max())
hh = np.histogram(deceased.Time_between_positive_and_death, bins=np.arange(NN+1))
f, ax = plt.subplots(figsize=(15, 6))
plt.plot(hh[1][1:], hh[0], linewidth=3)
plt.plot([deceasedMedianTime, deceasedMedianTime], [0, hh[0].max()], 'k--')
plt.text(deceasedMedianTime, hh[0].max(), ' Deceased Median Time: ' + str(int(deceasedMedianTime*100)/100) + ' days')
plt.plot([deceasedMeanTime, deceasedMeanTime], [0, hh[0][int(deceasedMeanTime)]], 'k--')
plt.text(deceasedMeanTime, hh[0][int(deceasedMeanTime)], ' Deceased Mean Time: ' + str(int(deceasedMeanTime*100)/100) + ' days')
plt.grid()
plt.xlabel('Time to deceased [days]', fontsize=16)
plt.ylabel('Number of individuals [persons]', fontsize=16)
# NOTE(review): `data_from` here is the value set in the Recovered section,
# i.e. the recovered dataset's timestamp, not this dataset's; if it was never
# assigned the NameError is swallowed by the except branch.  Confirm intent.
try:
    plt.title('Israel - Time to deceased. Num of persons ' + str(int(hh[0].sum())) + '. Num of Ventilated ' +
              str(int(deceased.Ventilated.sum())) + ' (data from ' + data_from.strftime('%d/%m/%y') + ')', fontsize=16)
except:
    plt.title('Israel - Time to deceased. Num of persons ' + str(int(hh[0].sum())) + '. Num of Ventilated ' +
              str(int(deceased.Ventilated.sum())) + ' (data from ' + str(files_db.last_update[id]) + ')', fontsize=16)
save_string = 'israelDeceased_' + str(files_db.last_update[id]) + '.png'
f.savefig(os.path.join(os.getcwd(), time.strftime("%d%m%Y"), save_string))
del deceased
###################################################################################################################
plt.close('all')
# Lab Test
# Break down test results by outcome and by first-test vs. repeat-test,
# per result date.  Category labels in this dataset are Hebrew
# (e.g. "positive"/"negative"), prefixed below to keep first/repeat apart.
lab_tests = pd.read_csv(files_db.current_file_path[files_db.current_file_name.str.find('lab_tests').values.argmax()])
###################################################################################################################
id = files_db.current_file_name.str.find('lab_tests').values.argmax()
print([files_db.last_update[id], files_db.current_file_name[id], files_db.name[id]])
base = 'result_date'
# lab_tests.loc[lab_tests['result_date'].isna() != False, 'result_date'] = lab_tests.loc[lab_tests['result_date'].isna() != False, 'test_date']
# Drop rows with no result date (alternative above would copy the test date).
lab_tests = lab_tests[lab_tests['result_date'].isna() != True]
N = len(lab_tests.corona_result)
lab_tests[base] = pd.to_datetime(lab_tests[base])
lab_tests = lab_tests.sort_values([base])
possible_results = lab_tests.corona_result.unique()
# First tests: pivot to one column per result category, one row per date.
FirstTest = lab_tests.loc[lab_tests['is_first_Test'].str.contains('Yes') != False, ['result_date', 'corona_result']].reset_index()
first_grouped = FirstTest.groupby(['result_date', 'corona_result'], as_index=False).count()
first = first_grouped.set_index(['result_date', 'corona_result']).unstack().fillna(0).astype(int).add_prefix('ראשון ')
del FirstTest, first_grouped
first_positive = first.xs("ראשון חיובי", level="corona_result", axis=1).values.squeeze()
first_negative = first.xs("ראשון שלילי", level="corona_result", axis=1).values.squeeze()
all_first = first.sum(axis=1).values.squeeze()
# "Other" = any result that is neither positive nor negative (e.g. invalid).
other_first = all_first - first_negative - first_positive
# Repeat tests: same pivot for rows that are not first tests.
NotFirstTest = lab_tests.loc[lab_tests['is_first_Test'].str.contains('Yes') != True, ['result_date', 'corona_result']].reset_index()
not_first_grouped = NotFirstTest.groupby(['result_date', 'corona_result'], as_index=False).count()
not_first = not_first_grouped.set_index(['result_date', 'corona_result']).unstack().fillna(0).astype(int).add_prefix('לא ראשון ')
del NotFirstTest, not_first_grouped
not_first_positive = not_first.xs("לא ראשון חיובי", level="corona_result", axis=1).values.squeeze()
not_first_negative = not_first.xs("לא ראשון שלילי", level="corona_result", axis=1).values.squeeze()
all_not_first = not_first.sum(axis=1).values.squeeze()
other_not_first = all_not_first - not_first_positive - not_first_negative
full_lab_data = pd.concat([first.squeeze(), not_first.squeeze()], axis=1, sort=False)
# Saving full data
full_lab_data.to_csv(os.path.join(os.getcwd(), time.strftime("%d%m%Y"),
                                  time.strftime("%d%m%Y") + 'complete_laboratory_data.csv'), encoding="windows-1255")
# Assemble a flat English-named DataFrame for plotting.
dateList = pd.DataFrame(lab_tests[base].unique(), columns=['Date'])
fields = ['PositiveFirst', 'NegativeFirst', 'OtherFirst', 'PositiveNotFirst', 'NegativeNotFirst', 'OtherNotFirst']
lab_data = pd.concat([dateList, pd.DataFrame(first_positive, columns=[fields[0]]),
                      pd.DataFrame(first_negative, columns=[fields[1]]),
                      pd.DataFrame(other_first, columns=[fields[2]]),
                      pd.DataFrame(not_first_positive, columns=[fields[3]]),
                      pd.DataFrame(not_first_negative, columns=[fields[4]]),
                      pd.DataFrame(other_not_first, columns=[fields[5]])],
                     axis=1, sort=False)
title = 'Israel ' + dateList.Date.max().strftime('%d/%m/%y') + ' - count of first test per person. Total tests performed ' + str(int(N))
ylabel = 'Number of individuals'
save_name = 'israelTestPerformed_'
base = 'Date'
legend = ['Positive First test, total ' + str(int(lab_data.PositiveFirst.sum())),
          'Negative First test, total ' + str(int(lab_data.NegativeFirst.sum())),
          'Other First test, total ' + str(int(lab_data.OtherFirst.sum())),
          'Positive not a First test, total ' + str(int(lab_data.PositiveNotFirst.sum())),
          'Negative not a First test, total ' + str(int(lab_data.NegativeNotFirst.sum())),
          'Other not a First test, total ' + str(int(lab_data.OtherNotFirst.sum())), ]
# plot Test Performed Total
line_statistic_plot(lab_data, base, fields, title, ylabel, legend, text, save_name,line_statistic_plot_log,line_statistic_plot_fix_date)
# plot Test | |
# -*- coding: utf-8 -*-
#===============================================================================
# returns the maximum possible size of a payload
def getPayloadSizeMax(message_size_max):
    """Return the largest payload (in bytes) that fits into a message of
    `message_size_max` bytes, after subtracting the fixed message fields."""
    message_overhead = (
        8     # NetworkID
        + 1   # ParentCount
        + 32  # Parent
        + 4   # PayloadLength
        + 8   # Nonce
    )
    return message_size_max - message_overhead
#===============================================================================
# returns the maximum possible size of an output
def getOutputSizeMax(transaction_size_max, inputs=1):
    """Return the largest output size (in bytes) that fits into a transaction
    of `transaction_size_max` bytes, given `inputs` UTXO inputs.  Each input
    also contributes one signature unlock block."""
    fixed_overhead = (
        4    # Payload Type
        + 1  # Transaction Type
        + 2  # Inputs Count
        + 2  # Outputs Count
        + 4  # Payload Length
        + 2  # Unlock Blocks Count
    )
    utxo_input = 1 + 32 + 2         # Input Type + Transaction ID + Transaction Output Index
    unlock_block = 1 + 1 + 32 + 64  # Unlock Type - Signature Type + Public key + Signature
    return transaction_size_max - fixed_overhead - inputs * (utxo_input + unlock_block)
#===============================================================================
class Output_VBytes(object):
#---------------------------------------------------------------------------
    def __init__(self, name, name_plot, max_byte_size, weight_key, weight_data, metadata_length_max, plot_row_index=0, plot_column_index=0):
        """Track the byte size and weighted "v-byte" cost of one output type.

        name / name_plot     : labels for reports and plots.
        max_byte_size        : hard cap on the serialized output size.
        weight_key           : v-byte weight applied to key-like fields.
        weight_data          : v-byte weight applied to data fields.
        metadata_length_max  : optional cap on variable-length metadata
                               fields (None = no cap).
        plot_row_index / plot_column_index : subplot placement.
        """
        self.name = name
        self.name_plot = name_plot
        self.max_byte_size = max_byte_size # maximum available bytes
        self.weight_key = weight_key
        self.weight_data = weight_data
        self.metadata_length_max = metadata_length_max
        self.plot_row_index = plot_row_index
        self.plot_column_index = plot_column_index
        self.byte_size_max = 0 # currently used bytes max
        self.v_bytes_min = 0 # running weighted minimum v-byte total
        self.v_bytes_max = 0 # running weighted maximum v-byte total
        self.plot_y_values = []
#---------------------------------------------------------------------------
    def summary(self):
        """Print the accumulated byte and v-byte totals for this output type."""
        print("\nName: %s\n\tbytes_max: %6d\n\tv_byte_min: %6d\n\tv_byte_max: %6d" % (self.name, self.byte_size_max, self.v_bytes_min, self.v_bytes_max))
#---------------------------------------------------------------------------
    def addField(self, field_byte_size_min, field_byte_size_max, weight):
        """Account for one serialized field: grow the byte counter and the
        weighted v-byte min/max totals; raise if the output would exceed
        max_byte_size.  Note: byte_size_max is incremented BEFORE the limit
        check, so on failure the counter already includes the offending field
        (the error message reports that over-limit value).
        """
        self.byte_size_max += field_byte_size_max
        if self.byte_size_max > self.max_byte_size:
            raise Exception("Output too big: %s, Current: %d, Max: %d" % (self.name, self.byte_size_max, self.max_byte_size))
        self.v_bytes_min += field_byte_size_min*weight
        self.v_bytes_max += field_byte_size_max*weight
#---------------------------------------------------------------------------
    def byteSizeMax(self):
        """Return the worst-case bytes accounted for so far."""
        return self.byte_size_max
#---------------------------------------------------------------------------
    def bytesRemaining(self):
        """Return how many bytes are still available before max_byte_size is reached."""
        return self.max_byte_size - self.byte_size_max
#---------------------------------------------------------------------------
    def vBytesMin(self):
        """Return the accumulated weighted minimum v-byte total."""
        return self.v_bytes_min
#---------------------------------------------------------------------------
    def vBytesMax(self):
        """Return the accumulated weighted maximum v-byte total."""
        return self.v_bytes_max
#---------------------------------------------------------------------------
    def totalOutputsPossible(self, db_size_bytes_max):
        """Return how many outputs of the current worst-case size fit into a
        database of `db_size_bytes_max` bytes (integer division)."""
        return int(db_size_bytes_max / self.byte_size_max)
#################
# Output Fields #
#################
#---------------------------------------------------------------------------
    def addField_OutputID(self):
        """Add the Output ID (Transaction ID + output index), key-weighted."""
        self.addField(field_byte_size_min=32+2, field_byte_size_max=32+2, weight=self.weight_key) # Output ID (Transaction ID + Transaction Output Index)
#---------------------------------------------------------------------------
    def addField_OutputMetadataOffsets(self):
        """Add the bookkeeping metadata stored with every output (data weight)."""
        self.addField(field_byte_size_min=32, field_byte_size_max=32, weight=self.weight_data) # MessageID Included
        self.addField(field_byte_size_min=4, field_byte_size_max=4, weight=self.weight_data) # Milestone Index Booked
        self.addField(field_byte_size_min=4, field_byte_size_max=4, weight=self.weight_data) # Milestone Timestamp Booked
#---------------------------------------------------------------------------
    def addField_OutputType(self):
        """Add the 1-byte Output Type discriminator (data weight)."""
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data) # Output Type
#---------------------------------------------------------------------------
    def addField_IotaAmount(self):
        """Add the 8-byte IOTA amount field (data weight)."""
        self.addField(field_byte_size_min=8, field_byte_size_max=8, weight=self.weight_data) # Amount
#---------------------------------------------------------------------------
    def addField_NativeTokens(self, native_token_count):
        """Add the native-token list: a 1-byte count plus one (TokenID+Amount)
        entry per token (data weight)."""
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data) # Native Tokens Count
        for i in range(native_token_count):
            self.addField(field_byte_size_min=38+32, field_byte_size_max=38+32, weight=self.weight_data) # Native Tokens (TokenID+Amount)
#---------------------------------------------------------------------------
    def addField_AliasID(self):
        """Add the 20-byte Alias ID field (data weight)."""
        self.addField(field_byte_size_min=20, field_byte_size_max=20, weight=self.weight_data) # Alias ID
#---------------------------------------------------------------------------
    def addField_NFTID(self):
        """Add the 20-byte NFT ID field (data weight)."""
        self.addField(field_byte_size_min=20, field_byte_size_max=20, weight=self.weight_data) # NFT ID
#---------------------------------------------------------------------------
    def addField_StateIndex(self):
        """Add the 4-byte State Index field (data weight)."""
        self.addField(field_byte_size_min=4, field_byte_size_max=4, weight=self.weight_data) # State Index
#---------------------------------------------------------------------------
def addField_StateMetadata(self, max_data_length):
self.addField(field_byte_size_min=2, field_byte_size_max=2, weight=self.weight_data) # State Metadata Length
if max_data_length == None:
max_data_length = 0 # zero for now, the remaining space will be taken into account in the metadata block
if (self.metadata_length_max != None) and (max_data_length > self.metadata_length_max):
max_data_length = self.metadata_length_max
self.addField(field_byte_size_min=0, field_byte_size_max=max_data_length, weight=self.weight_data)
#---------------------------------------------------------------------------
def addField_ImmutableMetadata(self, max_data_length):
self.addField(field_byte_size_min=2, field_byte_size_max=2, weight=self.weight_data) # Immutable Metadata Length
if max_data_length == None:
max_data_length = 0 # zero for now, the remaining space will be taken into account in the metadata block
if (self.metadata_length_max != None) and (max_data_length > self.metadata_length_max):
max_data_length = self.metadata_length_max
self.addField(field_byte_size_min=0, field_byte_size_max=max_data_length, weight=self.weight_data) # Immutable Metadata
#---------------------------------------------------------------------------
def addField_MetadataBlock(self, max_data_length):
self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data) # Block Type
self.addField(field_byte_size_min=2, field_byte_size_max=2, weight=self.weight_data) # Metadata Data Length
if max_data_length == None:
max_data_length = self.bytesRemaining() # we can just use the remaining size here since every other dynamic field has the same weight
if (self.metadata_length_max != None) and (max_data_length > self.metadata_length_max):
max_data_length = self.metadata_length_max
self.addField(field_byte_size_min=1, field_byte_size_max=max_data_length, weight=self.weight_data) # Metadata Data
#---------------------------------------------------------------------------
    def addField_FoundryCounter(self):
        """Add the 4-byte Foundry Counter field (data weight)."""
        self.addField(field_byte_size_min=4, field_byte_size_max=4, weight=self.weight_data) # Foundry Counter
#---------------------------------------------------------------------------
    def addField_SerialNumber(self):
        """Add the 4-byte Serial Number field (data weight)."""
        self.addField(field_byte_size_min=4, field_byte_size_max=4, weight=self.weight_data) # Serial Number
#---------------------------------------------------------------------------
    def addField_TokenTag(self):
        """Add the 12-byte Token Tag field (data weight)."""
        self.addField(field_byte_size_min=12, field_byte_size_max=12, weight=self.weight_data) # Token Tag
#---------------------------------------------------------------------------
    def addField_CirculatingSupply(self):
        """Add the 32-byte Circulating Supply field (data weight)."""
        self.addField(field_byte_size_min=32, field_byte_size_max=32, weight=self.weight_data) # Circulating Supply
#---------------------------------------------------------------------------
    def addField_MaximumSupply(self):
        """Add the 32-byte Maximum Supply field (data weight)."""
        self.addField(field_byte_size_min=32, field_byte_size_max=32, weight=self.weight_data) # Maximum Supply
#---------------------------------------------------------------------------
    def addField_TokenScheme(self):
        """Add the 1-byte Token Scheme discriminator (data weight)."""
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data) # Token Scheme
#---------------------------------------------------------------------------
    def addField_UnlockConditionsCount(self):
        """Add the 1-byte Unlock Conditions count field (data weight)."""
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data) # Unlock Conditions Count
#---------------------------------------------------------------------------
    def addField_AddressUnlockCondition(self):
        """Add an Address unlock condition: type + address type + address,
        where the address is 20 bytes (Alias/NFT) up to 32 bytes (Ed25519)."""
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data) # Unlock Condition Type
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data) # Address Type
        self.addField(field_byte_size_min=20, field_byte_size_max=32, weight=self.weight_data) # Address (Alias Address, NFT Address, Ed25519 Address)
#---------------------------------------------------------------------------
    def addField_AddressUnlockCondition_AliasOnly(self):
        """Add an Address unlock condition restricted to a 20-byte Alias address."""
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data) # Unlock Condition Type
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data) # Address Type
        self.addField(field_byte_size_min=20, field_byte_size_max=20, weight=self.weight_data) # Address (Alias Address)
#---------------------------------------------------------------------------
    def addField_StateControllerAddressUnlockCondition(self):
        """Append a State Controller Address Unlock Condition (type, address type, 20-32 byte address)."""
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Unlock Condition Type
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Address Type
        self.addField(field_byte_size_min=20, field_byte_size_max=32, weight=self.weight_data)  # Address (Alias Address, NFT Address, Ed25519 Address)
#---------------------------------------------------------------------------
    def addField_GovernorAddressUnlockCondition(self):
        """Append a Governor Address Unlock Condition (type, address type, 20-32 byte address)."""
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Unlock Condition Type
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Address Type
        self.addField(field_byte_size_min=20, field_byte_size_max=32, weight=self.weight_data)  # Address (Alias Address, NFT Address, Ed25519 Address)
#---------------------------------------------------------------------------
    def addField_DustDepositReturnUnlockCondition(self):
        """Append a Dust Deposit Return Unlock Condition: type, return address
        (type byte + 20-32 byte address) and an 8-byte return amount."""
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Unlock Condition Type
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Return Address Type
        self.addField(field_byte_size_min=20, field_byte_size_max=32, weight=self.weight_data)  # Return Address (Alias Address, NFT Address, Ed25519 Address)
        self.addField(field_byte_size_min=8, field_byte_size_max=8, weight=self.weight_data)  # Return Amount
#---------------------------------------------------------------------------
    def addField_TimelockUnlockCondition(self):
        """Append a Timelock Unlock Condition: type byte, 4-byte milestone index, 4-byte Unix time."""
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Unlock Condition Type
        self.addField(field_byte_size_min=4, field_byte_size_max=4, weight=self.weight_data)  # Milestone Index
        self.addField(field_byte_size_min=4, field_byte_size_max=4, weight=self.weight_data)  # Unix Time
#---------------------------------------------------------------------------
    def addField_ExpirationUnlockCondition(self):
        """Append an Expiration Unlock Condition: type, address (type byte +
        20-32 byte address), 4-byte milestone index and 4-byte Unix time."""
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Unlock Condition Type
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Address Type
        self.addField(field_byte_size_min=20, field_byte_size_max=32, weight=self.weight_data)  # Address (Alias Address, NFT Address, Ed25519 Address)
        self.addField(field_byte_size_min=4, field_byte_size_max=4, weight=self.weight_data)  # Milestone Index
        self.addField(field_byte_size_min=4, field_byte_size_max=4, weight=self.weight_data)  # Unix Time
#---------------------------------------------------------------------------
    def addField_FeatureBlocksCount(self):
        """Append the 1-byte Feature Blocks Count field (data weight)."""
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Blocks Count
#---------------------------------------------------------------------------
    def addField_SenderBlock(self):
        """Append a Sender feature block: block type, address type, 20-32 byte address."""
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Block Type
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Address Type
        self.addField(field_byte_size_min=20, field_byte_size_max=32, weight=self.weight_data)  # Address (Alias Address, NFT Address, Ed25519 Address)
#---------------------------------------------------------------------------
    def addField_IssuerBlock(self):
        """Append an Issuer feature block: block type, address type, 20-32 byte address."""
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Block Type
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Address Type
        self.addField(field_byte_size_min=20, field_byte_size_max=32, weight=self.weight_data)  # Address (Alias Address, NFT Address, Ed25519 Address)
#---------------------------------------------------------------------------
    def addField_TagBlock(self):
        """Append a Tag feature block: block type, 1-byte tag length, 1-255 byte tag."""
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Block Type
        self.addField(field_byte_size_min=1, field_byte_size_max=1, weight=self.weight_data)  # Tag Length
        self.addField(field_byte_size_min=1, field_byte_size_max=255, weight=self.weight_data)  # Tag
#===============================================================================
def getVBytes_SingleByte():
    """Return an Output_VBytes that models a single raw byte.

    Both weights are 1.0 and the only field is exactly one byte wide,
    so the result serves as the unit reference for vbyte costs.
    """
    single_byte = Output_VBytes(name="byte", name_plot="byte", max_byte_size=1, weight_key=1.0, weight_data=1.0)
    single_byte.addField(field_byte_size_min=1, field_byte_size_max=1, weight=1.0)
    return single_byte
#===============================================================================
def getVBytes_SigLockedSingleOutput(weight_key,
                                    weight_data,
                                    additional_name,
                                    output_size_max):
    """Build the vbyte field layout of a SigLockedSingleOutput.

    Parameters:
        weight_key      -- vbyte weight applied to key fields
        weight_data     -- vbyte weight applied to data fields
        additional_name -- optional suffix appended to the display names, or None
        output_size_max -- maximum serialized size of the output in bytes
    Returns an Output_VBytes describing output ID, output type, a 1-byte
    address type plus a 32-byte Ed25519 address, and the IOTA amount.
    """
    name = "SigLockedSingleOutput"
    name_plot = name
    if additional_name is not None:  # idiomatic identity test (was `!= None`)
        name = "%s (%s)" % (name, additional_name)
        name_plot = "%s\n(%s)" % (name_plot, additional_name)
    vbytes = Output_VBytes(name=name,
                           name_plot=name_plot,
                           max_byte_size=output_size_max,
                           weight_key=weight_key,
                           weight_data=weight_data,
                           metadata_length_max=None)
    vbytes.addField_OutputID()
    vbytes.addField_OutputType()
    vbytes.addField(field_byte_size_min=1, field_byte_size_max=1, weight=weight_data)  # Address Type
    vbytes.addField(field_byte_size_min=32, field_byte_size_max=32, weight=weight_data)  # Address (Ed25519 Address)
    vbytes.addField_IotaAmount()
    return vbytes
#===============================================================================
def getVBytes_ExtendedOutput(weight_key,
                             weight_data,
                             additional_name,
                             output_size_max,
                             metadata_length_max,
                             native_token_count,
                             dust_deposit_return_unlock_condition,
                             timelock_unlock_condition,
                             expiration_unlock_condition,
                             sender_block,
                             tag_block,
                             metadata_block,
                             metadata_length):
    """Build the vbyte field layout of an ExtendedOutput.

    Boolean flags select which optional unlock conditions
    (dust-deposit-return, timelock, expiration) and feature blocks
    (sender, tag, metadata) are included; the mandatory address unlock
    condition is always present. Returns an Output_VBytes.
    """
    name = "ExtendedOutput"
    name_plot = name
    if additional_name is not None:  # idiomatic identity test (was `!= None`)
        name = "%s (%s)" % (name, additional_name)
        name_plot = "%s\n(%s)" % (name_plot, additional_name)
    vbytes = Output_VBytes(name=name,
                           name_plot=name_plot,
                           max_byte_size=output_size_max,
                           weight_key=weight_key,
                           weight_data=weight_data,
                           metadata_length_max=metadata_length_max)
    vbytes.addField_OutputID()
    vbytes.addField_OutputMetadataOffsets()
    vbytes.addField_OutputType()
    vbytes.addField_IotaAmount()
    vbytes.addField_NativeTokens(native_token_count)
    # Unlock conditions
    vbytes.addField_UnlockConditionsCount()
    vbytes.addField_AddressUnlockCondition()
    if dust_deposit_return_unlock_condition:
        vbytes.addField_DustDepositReturnUnlockCondition()
    if timelock_unlock_condition:
        vbytes.addField_TimelockUnlockCondition()
    if expiration_unlock_condition:
        vbytes.addField_ExpirationUnlockCondition()
    # Feature blocks
    vbytes.addField_FeatureBlocksCount()
    if sender_block:
        vbytes.addField_SenderBlock()
    if tag_block:
        vbytes.addField_TagBlock()
    if metadata_block:
        vbytes.addField_MetadataBlock(max_data_length=metadata_length)
    return vbytes
#===============================================================================
def getVBytes_AliasOutput(weight_key,
                          weight_data,
                          additional_name,
                          output_size_max,
                          metadata_length_max,
                          native_token_count,
                          state_metadata_length,
                          governor_address_unlock_condition,
                          sender_block,
                          issuer_block,
                          metadata_block,
                          metadata_length):
    """Build the vbyte field layout of an AliasOutput.

    Always includes alias ID, state index, state metadata, foundry counter
    and the state-controller unlock condition; the governor unlock
    condition and the sender/issuer/metadata feature blocks are added when
    the corresponding flags are truthy. Returns an Output_VBytes.
    """
    name = "AliasOutput"
    name_plot = name
    if additional_name is not None:  # idiomatic identity test (was `!= None`)
        name = "%s (%s)" % (name, additional_name)
        name_plot = "%s\n(%s)" % (name_plot, additional_name)
    vbytes = Output_VBytes(name=name,
                           name_plot=name_plot,
                           max_byte_size=output_size_max,
                           weight_key=weight_key,
                           weight_data=weight_data,
                           metadata_length_max=metadata_length_max)
    vbytes.addField_OutputID()
    vbytes.addField_OutputMetadataOffsets()
    vbytes.addField_OutputType()
    vbytes.addField_IotaAmount()
    vbytes.addField_NativeTokens(native_token_count)
    vbytes.addField_AliasID()
    vbytes.addField_StateIndex()
    vbytes.addField_StateMetadata(max_data_length=state_metadata_length)
    vbytes.addField_FoundryCounter()
    # Unlock conditions
    vbytes.addField_UnlockConditionsCount()
    vbytes.addField_StateControllerAddressUnlockCondition()
    if governor_address_unlock_condition:
        vbytes.addField_GovernorAddressUnlockCondition()
    # Feature blocks
    vbytes.addField_FeatureBlocksCount()
    if sender_block:
        vbytes.addField_SenderBlock()
    if issuer_block:
        vbytes.addField_IssuerBlock()
    if metadata_block:
        vbytes.addField_MetadataBlock(max_data_length=metadata_length)
    return vbytes
#===============================================================================
def getVBytes_FoundryOutput(weight_key,
                            weight_data,
                            additional_name,
                            output_size_max,
                            metadata_length_max,
                            native_token_count,
                            metadata_block,
                            metadata_length):
    """Build the vbyte field layout of a FoundryOutput.

    Includes serial number, token tag, circulating/maximum supply and
    token scheme; the address unlock condition is restricted to an Alias
    address. An optional metadata feature block is appended when
    `metadata_block` is truthy. Returns an Output_VBytes.
    """
    name = "FoundryOutput"
    name_plot = name
    if additional_name is not None:  # idiomatic identity test (was `!= None`)
        name = "%s (%s)" % (name, additional_name)
        name_plot = "%s\n(%s)" % (name_plot, additional_name)
    vbytes = Output_VBytes(name=name,
                           name_plot=name_plot,
                           max_byte_size=output_size_max,
                           weight_key=weight_key,
                           weight_data=weight_data,
                           metadata_length_max=metadata_length_max)
    vbytes.addField_OutputID()
    vbytes.addField_OutputMetadataOffsets()
    vbytes.addField_OutputType()
    vbytes.addField_IotaAmount()
    vbytes.addField_NativeTokens(native_token_count)
    vbytes.addField_SerialNumber()
    vbytes.addField_TokenTag()
    vbytes.addField_CirculatingSupply()
    vbytes.addField_MaximumSupply()
    vbytes.addField_TokenScheme()
    # Unlock conditions
    vbytes.addField_UnlockConditionsCount()
    vbytes.addField_AddressUnlockCondition_AliasOnly()
    # Feature blocks
    vbytes.addField_FeatureBlocksCount()
    if metadata_block:
        vbytes.addField_MetadataBlock(max_data_length=metadata_length)
    return vbytes
#===============================================================================
def getVBytes_NFTOutput(weight_key,
weight_data,
additional_name,
| |
import numpy as np
import cv2
import glob
import math
from sklearn.linear_model import LinearRegression
from scipy.cluster.hierarchy import ward, fclusterdata
from scipy.spatial.distance import pdist
import copy
import matplotlib
from matplotlib import pyplot as plt
def cal_curvature_2nd(x, coefs):
    """Radius of curvature of y = c0*x^2 + c1*x + c2 at *x*.

    Standard formula R = (1 + y'^2)^(3/2) / |y''| with y' = 2*c0*x + c1
    and y'' = 2*c0. Works for scalars and numpy arrays alike.
    """
    first_deriv = 2 * coefs[0] * x + coefs[1]
    return (1 + first_deriv ** 2) ** 1.5 / (2 * abs(coefs[0]))
def measure_curvature_pixels(ploty, left_fit, right_fit):
    """Radii of curvature (in pixels) of both lane polynomials.

    Evaluated at the largest y value, i.e. at the bottom of the image.
    Returns (left_radius, right_radius).
    """
    y_bottom = np.max(ploty)  # bottom of the image = maximum y
    left_radius = cal_curvature_2nd(y_bottom, left_fit)
    right_radius = cal_curvature_2nd(y_bottom, right_fit)
    return left_radius, right_radius
def fit_polynomial(binary_warped):
    """Fit second-order lane-line polynomials to a warped binary image.

    Nonzero pixels are split at the vertical midline into left/right sets,
    and x = f(y) quadratics are fitted in both pixel and meter units.
    Returns (annotated RGB image, ploty scaled to meters as int32,
    left fit in meters, right fit in meters, direction flag: 0 when the
    left side has more pixels than the right, else 1).
    """
    # Conversions from pixel space to meters (US-lane assumptions).
    ym_per_pix = 30 / 720  # meters per pixel in y dimension
    xm_per_pix = 3.7 / 700  # meters per pixel in x dimension
    # Split the nonzero pixels at the image's vertical midline.
    x_size = binary_warped.shape[1]
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    leftx = nonzerox[nonzerox < x_size/2]
    lefty = nonzeroy[nonzerox < x_size/2]
    rightx = nonzerox[nonzerox > x_size/2]
    righty = nonzeroy[nonzerox > x_size/2]
    # Fit a second order polynomial to each side, in pixels and meters.
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    left_fit_m = np.polyfit(lefty * ym_per_pix, leftx * xm_per_pix, 2)
    right_fit_m = np.polyfit(righty * ym_per_pix, rightx * xm_per_pix, 2)
    # Renamed from `dir` to avoid shadowing the builtin.
    direction = 1
    if len(leftx) > len(rightx):
        direction = 0
    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0])
    try:
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    except TypeError:
        # Avoids an error if `left_fit`/`right_fit` are still None or incorrect
        print('The function failed to fit a line!')
        left_fitx = 1*ploty**2 + 1*ploty
        right_fitx = 1*ploty**2 + 1*ploty
    ## Visualization ##
    # Color the left lane pixels red and the right lane pixels blue.
    output_img = cv2.cvtColor(binary_warped, cv2.COLOR_GRAY2RGB)
    output_img[lefty, leftx] = [255, 0, 0]
    output_img[righty, rightx] = [0, 0, 255]
    # Draw the fitted polynomials as yellow polylines.
    ploty = np.reshape(ploty, (-1, 1))
    left_fitx = np.reshape(left_fitx, (-1, 1))
    right_fitx = np.reshape(right_fitx, (-1, 1))
    cv2.polylines(output_img, np.int32([np.hstack((left_fitx, ploty))]), False, (0, 255, 255), 3)
    cv2.polylines(output_img, np.int32([np.hstack((right_fitx, ploty))]), False, (0, 255, 255), 3)
    return output_img, np.int32(ploty * ym_per_pix), left_fit_m, right_fit_m, direction
def fit_line(pt1, pt2):
    """Fit the line y = a*x + b through two points.

    Points are (x, y) pairs; coordinates are coerced to float. A vertical
    segment (x1 == x2) is handled by nudging the denominator by 1e-6
    instead of relying on a bare `except:` (the original caught every
    exception type, not just the division by zero). Returns (a, b).
    """
    x1, y1, x2, y2 = float(pt1[0]), float(pt1[1]), float(pt2[0]), float(pt2[1])
    dx = x1 - x2
    if dx == 0.0:
        dx += 0.000001  # avoid ZeroDivisionError for vertical segments
    a = (y1 - y2) / dx
    b = y1 - a * x1
    return a, b
def line_dist(p1, p2):
    """Distance metric between two segments given as 4-vectors, used by
    fclusterdata in draw_lines.

    Compares the midpoints of the two coordinate pairs of each segment.
    """
    # a1/b1/a2/b2 are assigned but unused (leftover from the commented-out
    # slope/intercept metric below).
    a1, b1 = p1[0], p1[1]
    a2, b2 = p2[0], p2[1]
    # NOTE(review): `(p1[2] + p1[2])/2` duplicates p1[2] while the matching
    # term uses p2[3] -- presumably `(p1[2] + p1[3])/2` vs `(p2[2] + p2[3])/2`
    # was intended. Changing it would alter the clustering; confirm first.
    return ((p1[0] + p1[1])/2 - (p2[0] + p2[1])/2)**2 + ((p1[2] + p1[2])/2 - (p2[3] + p2[3])/2)**2
    # return np.abs(b1 - b2) * 10000 + 500 * ((a1/a2)**2 + (a2/a1)**2 - 2)
def region_of_interest(img, vertices):
    """
    Applies an image mask.
    Keeps only the region of *img* inside the polygon formed from
    *vertices* (a numpy array of integer points); everything else is
    set to black. Works for both single- and multi-channel images.
    """
    mask = np.zeros_like(img)
    # Fill color must match the channel count of the input image.
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255
    # Paint the polygon onto the mask, then keep only masked pixels.
    cv2.fillPoly(mask, vertices, fill_color)
    return cv2.bitwise_and(img, mask)
def draw_lines(img, lines, color=[255, 0, 0], thickness=2, cluster=True):
    """
    Draw averaged left/right lane lines onto *img* (mutated in place).

    Hough segments are split into left/right candidates by slope sign and
    image half, optionally clustered (keeping the largest cluster per
    side), then each side is fitted with a linear regression and drawn as
    one thick white line. Returns the four corner points of the lane
    quadrilateral, or False if clustering fails.

    NOTE(review): `color`/`thickness` defaults are unused by the current
    body, and `color` is a mutable default argument.
    """
    Y_l, Y_r, x_l, x_r = [], [], [], []
    left_coefs = []
    right_coefs = []
    # Slopes and Region thresholding: keep only steep segments, assigning
    # negative-slope ones on the left half to the left lane and
    # positive-slope ones on the right half to the right lane.
    # (fit_line is called with (y, x) pairs, so `a` is dx/dy.)
    for i in range(len(lines)):
        for x1, y1, x2, y2 in lines[i]:
            a, b = fit_line((y1, x1), (y2, x2))
            # n_copies = int(math.pow(math.sqrt(math.pow((y2 - y1), 2) + math.pow((x2 - x1), 2)) / 20., 2))
            # cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), thickness)
            if abs(a) > 0.5:
                if a < 0 and max(x1, x2) < img.shape[1] / 2:
                    Y_l.append([[y1], [y2]])
                    x_l.append([[x1], [x2]])
                    left_coefs.append([a, b])
                    # cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 5)
                elif a > 0 and min(x1, x2) > img.shape[1] / 2:
                    Y_r.append([[y1], [y2]])
                    x_r.append([[x1], [x2]])
                    right_coefs.append([a, b])
                    # cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 5)
    Y_l, Y_r, x_l, x_r = np.array(Y_l), np.array(Y_r), np.array(x_l), np.array(x_r)
    n_lostLines = 0
    # y range of the drawn lane lines: from 65% of the image height down
    # to the bottom edge.
    y1 = int(img.shape[0] * 13/ 20.0)
    y2 = img.shape[0]
    # Clutering and Linear regression
    if cluster:
        # Clustering: keep only the most populated cluster on each side
        # to reject outlier segments.
        try:
            left_clusts = fclusterdata(np.squeeze(np.hstack((x_l, Y_l)), axis=2), t=2, criterion='maxclust', metric=line_dist)
            right_clusts = fclusterdata(np.squeeze(np.hstack((x_r, Y_r)), axis=2), t=2, criterion='maxclust', metric=line_dist)
            left_idxs = (left_clusts == np.argmax(np.bincount(left_clusts)))
            right_idxs = (right_clusts == np.argmax(np.bincount(right_clusts)))
            new_Y_l, new_Y_r, new_x_l, new_x_r = Y_l[left_idxs], Y_r[right_idxs], x_l[left_idxs], x_r[right_idxs]
            new_Y_l, new_Y_r, new_x_l, new_x_r = np.reshape(new_Y_l, (-1, 1)), np.reshape(new_Y_r, (-1, 1)), np.reshape(new_x_l, (-1, 1)), np.reshape(new_x_r, (-1, 1))
        except:
            # Clustering failed (e.g. a side collected no segments):
            # dump diagnostics and signal failure to the caller.
            print(x_r.shape, x_r)
            print(Y_r.shape, Y_r)
            print(np.hstack((x_r, Y_r)).shape, np.hstack((x_r, Y_r)))
            return False
        # Linear regression: predict x at the two chosen y values and draw.
        # NOTE(review): `predict(y1)` passes a bare scalar -- modern
        # scikit-learn requires a 2-D array like [[y1]]; this presumably
        # relies on an old sklearn version. Confirm the pinned version.
        try:
            new_clf_l = LinearRegression().fit(new_Y_l, new_x_l)
            new_x1_l = int(new_clf_l.predict(y1))
            new_x2_l = int(new_clf_l.predict(y2))
            cv2.line(img, (new_x1_l, y1), (new_x2_l, y2), (255, 255, 255), 30)
        except:
            n_lostLines += 1
        try:
            new_clf_r = LinearRegression().fit(new_Y_r, new_x_r)
            new_x1_r = int(new_clf_r.predict(y1))
            new_x2_r = int(new_clf_r.predict(y2))
            cv2.line(img, (new_x1_r, y1), (new_x2_r, y2), (255, 255, 255), 30)
        except:
            n_lostLines += 1
        # NOTE(review): if either regression above failed, the names below
        # are unbound and this raises UnboundLocalError.
        return [[new_x1_l, y1], [new_x1_r, y1], [new_x2_r, y2], [new_x2_l, y2]]
    else:
        Y_l, Y_r, x_l, x_r = np.reshape(Y_l, (-1, 1)), np.reshape(Y_r, (-1, 1)), np.reshape(x_l, (-1, 1)), np.reshape(x_r, (
            -1, 1))
        # Linear regression (no clustering): same fit/draw as above.
        try:
            clf_l = LinearRegression().fit(Y_l, x_l)
            x1_l = int(clf_l.predict(y1))
            x2_l = int(clf_l.predict(y2))
            cv2.line(img, (x1_l, y1), (x2_l, y2), (255, 255, 255), 30)
        except:
            n_lostLines += 1
        try:
            clf_r = LinearRegression().fit(Y_r, x_r)
            x1_r = int(clf_r.predict(y1))
            x2_r = int(clf_r.predict(y2))
            cv2.line(img, (x1_r, y1), (x2_r, y2), (255, 255, 255), 30)
        except:
            n_lostLines += 1
        # NOTE(review): same UnboundLocalError risk as the cluster branch.
        return [[x1_l, y1], [x1_r, y1], [x2_r, y2], [x2_l, y2]]
def redraw_lines(img, lines, low_slope_threshold=0.5, high_slope_threshold=0.6):
    """Erase near-horizontal Hough segments from *img* by painting them black.

    Segments with |slope| below `low_slope_threshold` are overdrawn in
    black; `high_slope_threshold` is currently unused (kept for interface
    compatibility). Vertical segments (x1 == x2) have infinite slope and
    are skipped -- the original raised ZeroDivisionError on them.
    Returns the mutated image.
    """
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                continue  # vertical: infinite slope, never below the threshold
            _slope = abs(float((y2 - y1)) / (x2 - x1))
            if _slope < low_slope_threshold:
                cv2.line(img, (x1, y1), (x2, y2), (0, 0, 0), 1)
    return img
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
"""
`img` should be the output of a Canny transform.
Returns an image with hough lines drawn.
"""
# Get original lines
lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len,
maxLineGap=max_line_gap)
# for t in range(2):
# img = redraw_lines(img, lines)
# lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len,
# maxLineGap=max_line_gap)
line_img= np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
pts = draw_lines(line_img, lines, cluster=False)
# return line_img, pts
line_img = cv2.cvtColor(line_img, cv2.COLOR_RGB2GRAY)
if pts == False:
return img, pts
# Get lines from masked image
mask_img = np.zeros_like(img)
mask_img[(img == 1) & ((line_img == 1) | (line_img == 255))] = 255
new_lines = cv2.HoughLinesP(mask_img, rho, theta, threshold, np.array([]), minLineLength=min_line_len,
maxLineGap=max_line_gap)
new_line_img= np.zeros((mask_img.shape[0], img.shape[1], 3), dtype=np.uint8)
new_pts = draw_lines(new_line_img, | |
# -*- coding: utf-8 -*-
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Run all tests with:
$ python -m unittest test
Run minidom parser tests with:
$ python -m unittest test.MinidomTests
Run lxml parser tests with:
$ python -m unittest test.LxmlTests
Run single test with:
$ python -m unittest test.LxmlTests.test_method
"""
from __future__ import print_function
import pdb
import logging as mod_logging
import os as mod_os
import unittest as mod_unittest
import time as mod_time
import copy as mod_copy
import datetime as mod_datetime
import random as mod_random
import math as mod_math
import sys as mod_sys
import gpxpy as mod_gpxpy
import gpxpy.gpx as mod_gpx
import gpxpy.parser as mod_parser
import gpxpy.geo as mod_geo
from gpxpy.utils import make_str
# Interpreter version string "X.Y.Z"; its first character selects py2/py3
# file-open behavior in AbstractTests.parse below.
PYTHON_VERSION = mod_sys.version.split(' ')[0]
# Verbose logging for all test runs.
mod_logging.basicConfig(level=mod_logging.DEBUG,
                        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
def equals(object1, object2, ignore=None):
    """ Testing purposes only """
    # Recursive structural comparison of two objects of the same class,
    # skipping attribute names listed in `ignore` and private names.
    if not object1 and not object2:
        return True
    if not object1 or not object2:
        print('Not obj2')
        return False
    if not object1.__class__ == object2.__class__:
        print('Not obj1')
        return False
    attributes = []
    for attr in dir(object1):
        if not ignore or not attr in ignore:
            # NOTE(review): this tests whether object1 ITSELF is callable,
            # not the attribute -- presumably
            # `not callable(getattr(object1, attr))` was intended; confirm.
            if not hasattr(object1, '__call__') and not attr.startswith('_'):
                if not attr in attributes:
                    attributes.append(attr)
    for attr in attributes:
        attr1 = getattr(object1, attr)
        attr2 = getattr(object2, attr)
        # NOTE(review): returning True on the FIRST equal attribute
        # short-circuits comparison of all remaining attributes -- a
        # `continue` looks intended. Fixing it would make the recursive
        # comparison of builtin attrs loop, so confirm before changing.
        if attr1 == attr2:
            return True
        if not attr1 and not attr2:
            return True
        if not attr1 or not attr2:
            print('Object differs in attribute %s (%s - %s)' % (attr, attr1, attr2))
            return False
        if not equals(attr1, attr2):
            print('Object differs in attribute %s (%s - %s)' % (attr, attr1, attr2))
            # Returns None (falsy) here vs False above -- inconsistent
            # but equivalent in boolean context.
            return None
    return True
def cca(number1, number2):
    """Rough "circa equal" check used by the tests."""
    # NOTE(review): `1 - number1 / number2 < 0.999` is true whenever the
    # ratio exceeds 0.001, which is almost always -- presumably
    # `abs(1 - number1 / number2) < 0.001` was intended. Confirm against
    # callers before changing.
    return 1 - number1 / number2 < 0.999
# TODO: Track segment speed in point test
class AbstractTests:
"""
Add tests here.
Tests will be run twice (once with Lxml and once with Minidom Parser).
If you run 'make test' then all tests will be run with python2 and python3
To be even more sure that everything works as expected -- try...
python -m unittest test.MinidomTests
...with python-lxml and without python-lxml installed.
"""
    def get_parser_type(self):
        """Abstract hook: subclasses return the parser backend to test with."""
        raise Exception('Implement this in subclasses')
    def parse(self, file, encoding=None):
        """Parse test_files/<file> with this suite's parser; print the
        parser error and return the (possibly falsy) gpx object."""
        if PYTHON_VERSION[0] == '3':
            f = open('test_files/%s' % file, encoding=encoding)
        else:
            # py2 open() has no `encoding` keyword
            f = open('test_files/%s' % file)
        parser = mod_parser.GPXParser(f, parser=self.get_parser_type())
        gpx = parser.parse()
        f.close()
        if not gpx:
            print('Parser error: %s' % parser.get_error())
        return gpx
    def reparse(self, gpx):
        """Serialize *gpx* to XML and parse it back (round-trip helper)."""
        xml = gpx.to_xml()
        parser = mod_parser.GPXParser(xml, parser=self.get_parser_type())
        gpx = parser.parse()
        if not gpx:
            print('Parser error while reparsing: %s' % parser.get_error())
        return gpx
    def test_parse_with_all_parser_types(self):
        """Top-level parse() works with the default and 'minidom' backends."""
        self.assertTrue(mod_gpxpy.parse(open('test_files/cerknicko-jezero.gpx')))
        self.assertTrue(mod_gpxpy.parse(open('test_files/cerknicko-jezero.gpx'), parser='minidom'))
        #self.assertTrue(mod_gpxpy.parse(open('test_files/cerknicko-jezero.gpx'), parser='lxml'))
    def test_simple_parse_function(self):
        """Parsing a known-good file must not raise."""
        # Must not throw any exception:
        mod_gpxpy.parse(open('test_files/korita-zbevnica.gpx'), parser=self.get_parser_type())
    def test_simple_parse_function_invalid_xml(self):
        """Malformed XML raises GPXXMLSyntaxException with a chained cause."""
        try:
            mod_gpxpy.parse('<gpx></gpx', parser=self.get_parser_type())
            self.fail()
        except mod_gpx.GPXException as e:
            # Error text differs between expat and lxml backends.
            self.assertTrue(('unclosed token: line 1, column 5' in str(e)) or ('expected \'>\'' in str(e)))
            self.assertTrue(isinstance(e, mod_gpx.GPXXMLSyntaxException))
            self.assertTrue(e.__cause__)
            try:
                # more checks if lxml:
                import lxml.etree as mod_etree
                import xml.parsers.expat as mod_expat
                self.assertTrue(isinstance(e.__cause__, mod_etree.XMLSyntaxError)
                                or isinstance(e.__cause__, mod_expat.ExpatError))
            except:
                pass
    def test_creator_field(self):
        """The gpx `creator` attribute is read from the file."""
        gpx = self.parse('cerknicko-jezero.gpx')
        self.assertEquals(gpx.creator, "GPSBabel - http://www.gpsbabel.org")
    def test_no_creator_field(self):
        """`creator` is None when the attribute is missing from the file."""
        gpx = self.parse('cerknicko-jezero-no-creator.gpx')
        self.assertEquals(gpx.creator, None)
    def test_to_xml_creator(self):
        """`creator` survives serialization and a reparse round-trip."""
        gpx = self.parse('cerknicko-jezero.gpx')
        xml = gpx.to_xml()
        self.assertTrue('creator="GPSBabel - http://www.gpsbabel.org"' in xml)
        gpx2 = self.reparse(gpx)
        self.assertEquals(gpx2.creator, "GPSBabel - http://www.gpsbabel.org")
    def test_waypoints_equality_after_reparse(self):
        """Waypoints, routes and tracks compare equal after a round-trip
        (uses the module-level `equals` helper)."""
        gpx = self.parse('cerknicko-jezero.gpx')
        gpx2 = self.reparse(gpx)
        self.assertTrue(equals(gpx.waypoints, gpx2.waypoints))
        self.assertTrue(equals(gpx.routes, gpx2.routes))
        self.assertTrue(equals(gpx.tracks, gpx2.tracks))
        self.assertTrue(equals(gpx, gpx2))
    def test_waypoint_time(self):
        """Waypoint timestamps parse into datetime objects."""
        gpx = self.parse('cerknicko-jezero.gpx')
        self.assertTrue(gpx.waypoints[0].time)
        self.assertTrue(isinstance(gpx.waypoints[0].time, mod_datetime.datetime))
    def test_add_elevation(self):
        """add_elevation shifts existing elevations and leaves None alone."""
        gpx = mod_gpx.GPX()
        gpx.tracks.append(mod_gpx.GPXTrack())
        gpx.tracks[0].segments.append(mod_gpx.GPXTrackSegment())
        gpx.tracks[0].segments[0].points.append(mod_gpx.GPXTrackPoint(latitude=12, longitude=13, elevation=100))
        gpx.tracks[0].segments[0].points.append(mod_gpx.GPXTrackPoint(latitude=12, longitude=13))
        gpx.add_elevation(10)
        self.assertEqual(gpx.tracks[0].segments[0].points[0].elevation, 110)
        self.assertEqual(gpx.tracks[0].segments[0].points[1].elevation, None)
        gpx.add_elevation(-20)
        self.assertEqual(gpx.tracks[0].segments[0].points[0].elevation, 90)
        self.assertEqual(gpx.tracks[0].segments[0].points[1].elevation, None)
    def test_get_duration(self):
        """get_duration is 0 for single-point/timeless segments and sums
        timed segments correctly."""
        gpx = mod_gpx.GPX()
        gpx.tracks.append(mod_gpx.GPXTrack())
        gpx.tracks[0].segments.append(mod_gpx.GPXTrackSegment())
        gpx.tracks[0].segments[0].points.append(mod_gpx.GPXTrackPoint(latitude=12, longitude=13,
                                                                      time=mod_datetime.datetime(2013, 1, 1, 12, 30)))
        self.assertEqual(gpx.get_duration(), 0)
        gpx.tracks[0].segments.append(mod_gpx.GPXTrackSegment())
        gpx.tracks[0].segments[1].points.append(mod_gpx.GPXTrackPoint(latitude=12, longitude=13))
        self.assertEqual(gpx.get_duration(), 0)
        gpx.tracks[0].segments.append(mod_gpx.GPXTrackSegment())
        gpx.tracks[0].segments[2].points.append(mod_gpx.GPXTrackPoint(latitude=12, longitude=13,
                                                                      time=mod_datetime.datetime(2013, 1, 1, 12, 30)))
        gpx.tracks[0].segments[2].points.append(mod_gpx.GPXTrackPoint(latitude=12, longitude=13,
                                                                      time=mod_datetime.datetime(2013, 1, 1, 12, 31)))
        self.assertEqual(gpx.get_duration(), 60)
    def test_remove_elevation(self):
        """remove_elevation clears elevations everywhere and drops <ele> from XML."""
        gpx = self.parse('cerknicko-jezero.gpx')
        for point, track_no, segment_no, point_no in gpx.walk():
            self.assertTrue(point.elevation is not None)
        gpx.remove_elevation(tracks=True, waypoints=True, routes=True)
        for point, track_no, segment_no, point_no in gpx.walk():
            self.assertTrue(point.elevation is None)
        xml = gpx.to_xml()
        self.assertFalse('<ele>' in xml)
    def test_remove_time(self):
        """remove_time clears every point's timestamp."""
        gpx = self.parse('cerknicko-jezero.gpx')
        for point, track_no, segment_no, point_no in gpx.walk():
            self.assertTrue(point.time is not None)
        gpx.remove_time()
        for point, track_no, segment_no, point_no in gpx.walk():
            self.assertTrue(point.time is None)
    def test_has_times_false(self):
        """has_times() is False for a file without timestamps."""
        gpx = self.parse('cerknicko-without-times.gpx')
        self.assertFalse(gpx.has_times())
    def test_has_times(self):
        """Per-track has_times(): empty tracks count as True."""
        gpx = self.parse('korita-zbevnica.gpx')
        self.assertTrue(len(gpx.tracks) == 4)
        # Empty -- True
        self.assertTrue(gpx.tracks[0].has_times())
        # Not times ...
        self.assertTrue(not gpx.tracks[1].has_times())
        # Times OK
        self.assertTrue(gpx.tracks[2].has_times())
        self.assertTrue(gpx.tracks[3].has_times())
    def test_unicode(self):
        """Non-ASCII waypoint names parse correctly from a UTF-8 file."""
        gpx = self.parse('unicode.gpx', encoding='utf-8')
        name = gpx.waypoints[0].name
        self.assertTrue(make_str(name) == 'šđčćž')
    def test_nearest_location_1(self):
        """get_nearest_location returns the track point closest to a query
        location, both on-track and far away."""
        gpx = self.parse('korita-zbevnica.gpx')
        location = mod_geo.Location(45.451058791, 14.027903696)
        nearest_location, track_no, track_segment_no, track_point_no = gpx.get_nearest_location(location)
        point = gpx.tracks[track_no].segments[track_segment_no].points[track_point_no]
        self.assertTrue(point.distance_2d(location) < 0.001)
        self.assertTrue(point.distance_2d(nearest_location) < 0.001)
        location = mod_geo.Location(1, 1)
        nearest_location, track_no, track_segment_no, track_point_no = gpx.get_nearest_location(location)
        point = gpx.tracks[track_no].segments[track_segment_no].points[track_point_no]
        self.assertTrue(point.distance_2d(nearest_location) < 0.001)
        location = mod_geo.Location(50, 50)
        nearest_location, track_no, track_segment_no, track_point_no = gpx.get_nearest_location(location)
        point = gpx.tracks[track_no].segments[track_segment_no].points[track_point_no]
        self.assertTrue(point.distance_2d(nearest_location) < 0.001)
    def test_long_timestamps(self):
        """Timestamps with fractional seconds (e.g. ...52.2073437Z) must parse
        without raising; no explicit assertions."""
        # Check if timestamps in format: 1901-12-13T20:45:52.2073437Z work
        gpx = self.parse('Mojstrovka.gpx')
        # %Y-%m-%dT%H:%M:%SZ'
    def test_reduce_gpx_file(self):
        """reduce_points shrinks both the point count and the reparse time."""
        f = open('test_files/Mojstrovka.gpx')
        parser = mod_parser.GPXParser(f, parser=self.get_parser_type())
        gpx = parser.parse()
        f.close()
        max_reduced_points_no = 200
        started = mod_time.time()
        # NOTE(review): re-parses via the same parser instance after the
        # file was closed -- presumably the parser caches the content;
        # confirm against GPXParser's implementation.
        gpx = parser.parse()
        points_original = gpx.get_track_points_no()
        time_original = mod_time.time() - started
        gpx.reduce_points(max_reduced_points_no)
        points_reduced = gpx.get_track_points_no()
        result = gpx.to_xml()
        if mod_sys.version_info[0] != 3:
            result = result.encode('utf-8')
        started = mod_time.time()
        parser = mod_parser.GPXParser(result, parser=self.get_parser_type())
        parser.parse()
        time_reduced = mod_time.time() - started
        print(time_original)
        print(points_original)
        print(time_reduced)
        print(points_reduced)
        # Timing-based assertion -- may be flaky on loaded machines.
        self.assertTrue(time_reduced < time_original)
        self.assertTrue(points_reduced < points_original)
        self.assertTrue(points_reduced < max_reduced_points_no)
    def test_smooth_without_removing_extreemes_preserves_point_count(self):
        """Vertical-only smoothing keeps the number of points unchanged."""
        gpx = self.parse('first_and_last_elevation.gpx')
        l = len(list(gpx.walk()))
        gpx.smooth(vertical=True, horizontal=False)
        self.assertEquals(l, len(list(gpx.walk())))
    def test_smooth_without_removing_extreemes_preserves_point_count_2(self):
        """Horizontal-only smoothing keeps the number of points unchanged."""
        gpx = self.parse('first_and_last_elevation.gpx')
        l = len(list(gpx.walk()))
        gpx.smooth(vertical=False, horizontal=True)
        self.assertEquals(l, len(list(gpx.walk())))
    def test_smooth_without_removing_extreemes_preserves_point_count_3(self):
        """Combined vertical+horizontal smoothing keeps the point count."""
        gpx = self.parse('first_and_last_elevation.gpx')
        l = len(list(gpx.walk()))
        gpx.smooth(vertical=True, horizontal=True)
        self.assertEquals(l, len(list(gpx.walk())))
    def test_clone_and_smooth(self):
        """clone() is deep: reducing/smoothing the clone shortens it but
        leaves the original's lengths untouched."""
        f = open('test_files/cerknicko-jezero.gpx')
        parser = mod_parser.GPXParser(f, parser=self.get_parser_type())
        gpx = parser.parse()
        f.close()
        original_2d = gpx.length_2d()
        original_3d = gpx.length_3d()
        cloned_gpx = gpx.clone()
        self.assertTrue(hash(gpx) == hash(cloned_gpx))
        cloned_gpx.reduce_points(2000, min_distance=10)
        cloned_gpx.smooth(vertical=True, horizontal=True)
        cloned_gpx.smooth(vertical=True, horizontal=False)
        print('2d:', gpx.length_2d())
        print('2d cloned and smoothed:', cloned_gpx.length_2d())
        print('3d:', gpx.length_3d())
        print('3d cloned and smoothed:', cloned_gpx.length_3d())
        self.assertTrue(gpx.length_3d() == original_3d)
        self.assertTrue(gpx.length_2d() == original_2d)
        self.assertTrue(gpx.length_3d() > cloned_gpx.length_3d())
        self.assertTrue(gpx.length_2d() > cloned_gpx.length_2d())
    def test_reduce_by_min_distance(self):
        """reduce_points(min_distance=10) raises the minimum inter-point
        distance above 10 meters."""
        gpx = mod_gpxpy.parse(open('test_files/cerknicko-jezero.gpx'), parser=self.get_parser_type())
        min_distance_before_reduce = 1000000
        for point, track_no, segment_no, point_no in gpx.walk():
            if point_no > 0:
                previous_point = gpx.tracks[track_no].segments[segment_no].points[point_no - 1]
                print(point.distance_3d(previous_point))
                if point.distance_3d(previous_point) < min_distance_before_reduce:
                    min_distance_before_reduce = point.distance_3d(previous_point)
        gpx.reduce_points(min_distance=10)
        min_distance_after_reduce = 1000000
        for point, track_no, segment_no, point_no in gpx.walk():
            if point_no > 0:
                previous_point = gpx.tracks[track_no].segments[segment_no].points[point_no - 1]
                # `d` is computed but unused; the comparison below repeats it.
                d = point.distance_3d(previous_point)
                if point.distance_3d(previous_point) < min_distance_after_reduce:
                    min_distance_after_reduce = point.distance_3d(previous_point)
        self.assertTrue(min_distance_before_reduce < min_distance_after_reduce)
        self.assertTrue(min_distance_before_reduce < 10)
        self.assertTrue(10 < min_distance_after_reduce)
    def test_moving_stopped_times(self):
        """get_moving_data splits the track into moving/stopped portions
        with plausible proportions of the total length."""
        f = open('test_files/cerknicko-jezero.gpx')
        parser = mod_parser.GPXParser(f, parser=self.get_parser_type())
        gpx = parser.parse()
        f.close()
        print(gpx.get_track_points_no())
        #gpx.reduce_points(1000, min_distance=5)
        print(gpx.get_track_points_no())
        length = gpx.length_3d()
        print('Distance: %s' % length)
        gpx.reduce_points(2000, min_distance=10)
        gpx.smooth(vertical=True, horizontal=True)
        gpx.smooth(vertical=True, horizontal=False)
        moving_time, stopped_time, moving_distance, stopped_distance, max_speed = gpx.get_moving_data(stopped_speed_threshold=0.1)
        print('-----')
        print('Length: %s' % length)
        print('Moving time: %s (%smin)' % (moving_time, moving_time / 60.))
        print('Stopped time: %s (%smin)' % (stopped_time, stopped_time / 60.))
        print('Moving distance: %s' % moving_distance)
        print('Stopped distance: %s' % stopped_distance)
        print('Max speed: %sm/s' % max_speed)
        print('-----')
        # TODO: More tests and checks
        self.assertTrue(moving_distance < length)
        print('Dakle:', moving_distance, length)
        self.assertTrue(moving_distance > 0.75 * length)
        self.assertTrue(stopped_distance < 0.1 * length)
def test_split_on_impossible_index(self):
f = open('test_files/cerknicko-jezero.gpx')
parser = mod_parser.GPXParser(f, parser=self.get_parser_type())
gpx = parser.parse()
f.close()
track = gpx.tracks[0]
before = len(track.segments)
track.split(1000, 10)
after = len(track.segments)
self.assertTrue(before == after)
def test_split(self):
f = open('test_files/cerknicko-jezero.gpx')
parser = mod_parser.GPXParser(f, parser=self.get_parser_type())
gpx = parser.parse()
f.close()
track = gpx.tracks[1]
track_points_no = track.get_points_no()
before = len(track.segments)
track.split(0, 10)
after = len(track.segments)
self.assertTrue(before + 1 == after)
print('Points in first (splitted) part:', len(track.segments[0].points))
# From 0 to 10th point == 11 points:
self.assertTrue(len(track.segments[0].points) == 11)
self.assertTrue(len(track.segments[0].points) + len(track.segments[1].points) == track_points_no)
# Now split the second track
track.split(1, 20)
self.assertTrue(len(track.segments[1].points) == 21)
self.assertTrue(len(track.segments[0].points) + len(track.segments[1].points) + len(track.segments[2].points) == track_points_no)
def test_split_and_join(self):
f = open('test_files/cerknicko-jezero.gpx')
parser = mod_parser.GPXParser(f, parser=self.get_parser_type())
gpx = parser.parse()
f.close()
track = gpx.tracks[1]
original_track = track.clone()
track.split(0, 10)
track.split(1, 20)
self.assertTrue(len(track.segments) == 3)
track.join(1)
self.assertTrue(len(track.segments) == 2)
track.join(0)
self.assertTrue(len(track.segments) == 1)
# Check that this splitted and joined track is | |
"""
The base classes for inverse problem solving.
See :mod:`geoist.inversion` for examples, regularization, and more.
This module defines base classes that are used by the rest of the
``inversion`` package:
* :class:`~geoist.inversion.base.MultiObjective`: A "container" class that
emulates a sum of different objective (goal) functions (like
:class:`~geoist.inversion.misfit.Misfit` or some form of
:mod:`~geoist.inversion.regularization`). When two of those classes are
added they generate a ``MultiObjective`` object.
* :class:`~geoist.inversion.base.OperatorMixin`: A mix-in class that defines
the operators ``+`` and ``*`` (by a scalar). Used to give these properties to
``Misfit`` and the regularizing functions. Adding results in a
``MultiObjective``. Multiplying sets the ``regul_param`` of the class (like a
scalar weight factor).
* :class:`~geoist.inversion.base.OptimizerMixin`: A mix-in class that defines
the ``fit`` and ``config`` methods for optimizing a ``Misfit`` or
``MultiObjective`` and fitting the model to the data.
* :class:`~geoist.inversion.base.CachedMethod`: A class that wraps a method
and caches the returned value. When the same argument (an array) is passed
twice in a row, the class returns the cached value instead of recomputing.
* :class:`~geoist.inversion.base.CachedMethodPermanent`: Like
``CachedMethod`` but always returns the cached value, regardless of the
input. Effectively calculates only the first time the method is called.
Useful for caching the Jacobian matrix in a linear problem.
----
"""
from six import with_metaclass
#from future.builtins import super, object, isinstance, zip, map
import hashlib
import copy
from abc import ABCMeta, abstractmethod
import numpy as np
from . import optimization
class OperatorMixin(object):
    """
    Adds the ``+`` and ``*`` operators to goal function classes.

    Not meant to be instantiated on its own: inherit from it to equip the
    subclass with the two operators.  Used by
    :class:`~geoist.inversion.base.Misfit` and the regularization classes
    in :mod:`geoist.inversion.regularization`.

    .. note::

        ``A + B`` builds a :class:`~geoist.inversion.base.MultiObjective`
        holding copies of ``A`` and ``B``.

    .. note::

        ``scalar*A`` returns a copy of ``A`` whose ``regul_param``
        attribute is set to ``scalar``.

    """

    @property
    def regul_param(self):
        """
        The regularization parameter (scale factor) for the objective
        function.  Defaults to 1.
        """
        return getattr(self, '_regularizing_parameter', 1)

    @regul_param.setter
    def regul_param(self, value):
        """
        Set the regularizing parameter and clear any cached ``hessian``,
        ``gradient``, or ``value`` results (they depend on the old scale).
        """
        self._regularizing_parameter = value
        for attrname in ('hessian', 'gradient', 'value'):
            if not hasattr(self, attrname):
                continue
            candidate = getattr(self, attrname)
            if isinstance(candidate, (CachedMethodPermanent, CachedMethod)):
                candidate.hard_reset()

    def copy(self, deep=False):
        """
        Return a copy of this object (a deep copy when ``deep=True``).
        """
        return copy.deepcopy(self) if deep else copy.copy(self)

    def __add__(self, other):
        """
        Add two objective functions, producing a MultiObjective.
        """
        assert self.nparams == other.nparams, \
            "Can't add goals with different number of parameters:" \
            + ' {}, {}'.format(self.nparams, other.nparams)
        # Operate on copies: if we returned self, 'a = b + c' would make
        # a and b reference the same object.
        return MultiObjective(self.copy(), other.copy())

    def __mul__(self, other):
        """
        Multiply by a scalar: returns a copy with ``regul_param`` scaled.
        """
        if not isinstance(other, (int, float)):
            raise TypeError('Can only multiply a Objective by a float or int')
        # Work on a copy: if we returned self, 'a = 10*b' would make
        # a and b reference the same object.
        scaled = self.copy()
        scaled.regul_param = scaled.regul_param * other
        return scaled

    def __rmul__(self, other):
        # Scalar-on-the-left multiplication delegates to __mul__.
        return self.__mul__(other)
class OptimizerMixin(with_metaclass(ABCMeta)):
    """
    Defines ``fit`` and ``config`` methods plus all the optimization methods.

    This class is not meant to be used on its own. Use it as a parent to give
    the child class the methods it implements.

    Used in :class:`~geoist.inversion.base.Misfit` and
    :class:`geoist.inversion.base.MultiObjective`.

    The :meth:`~geoist.inversion.base.OptimizerMixin.config` method is used
    to configure the optimization method that will be used.

    The :meth:`~geoist.inversion.base.OptimizerMixin.fit` method runs the
    optimization method configured and stores the computed parameter vector in
    the ``p_`` attribute.

    Some stats about the optimization process are stored in the ``stats_``
    attribute as a dictionary.

    The minimum requirement for a class to inherit from ``OptimizerMixin`` is
    that it must define at least a
    :meth:`~geoist.inversion.base.OptimizerMixin.value` method.
    """

    def config(self, method, **kwargs):
        """
        Configure the optimization method and its parameters.

        This sets the method used by
        :meth:`~geoist.inversion.base.Objective.fit` and the keyword
        arguments that are passed to it.

        Parameters:

        * method : string
            The optimization method. One of: ``'linear'``, ``'newton'``,
            ``'levmarq'``, ``'steepest'``, ``'acor'``

        Other keyword arguments that can be passed are the ones allowed by each
        method.

        Some methods have required arguments:

        * *newton*, *levmarq* and *steepest* require the ``initial`` argument
          (an initial estimate for the gradient descent)
        * *acor* requires the ``bounds`` argument (min/max values for the
          search space)

        See the corresponding docstrings for more information:

        * :meth:`~geoist.inversion.optimization.linear`
        * :meth:`~geoist.inversion.optimization.newton`
        * :meth:`~geoist.inversion.optimization.levmarq`
        * :meth:`~geoist.inversion.optimization.steepest`
        * :meth:`~geoist.inversion.optimization.acor`

        """
        # Deep-copy so later mutation of the caller's dict cannot silently
        # change self.fit_args (and vice versa).
        kwargs = copy.deepcopy(kwargs)
        assert method in ['linear', 'newton', 'levmarq', 'steepest', 'acor'], \
            "Invalid optimization method '{}'".format(method)
        if method in ['newton', 'levmarq', 'steepest']:
            assert 'initial' in kwargs, \
                "Missing required *initial* argument for '{}'".format(method)
        if method == 'acor':
            assert 'bounds' in kwargs, \
                "Missing required *bounds* argument for '{}'".format(method)
        if method == 'acor' and 'nparams' not in kwargs:
            # acor needs to know the dimensionality of the search space.
            kwargs['nparams'] = self.nparams
        self.fit_method = method
        self.fit_args = kwargs
        return self

    def fit(self):
        """
        Solve for the parameter vector that minimizes this objective function.

        Uses the optimization method and parameters defined using the
        :meth:`~geoist.inversion.base.OptimizerMixin.config` method.

        The estimated parameter vector can be accessed through the
        ``p_`` attribute. A (possibly) formatted version (converted to a more
        manageable type) of the estimate can be accessed through the property
        ``estimate_``.
        """
        not_configured = (getattr(self, 'fit_method', None) is None or
                          getattr(self, 'fit_args', None) is None)
        if not_configured:
            # Sensible defaults: exact solve for linear problems,
            # Levenberg-Marquardt starting from a vector of ones otherwise.
            if self.islinear:
                self.config('linear')
            else:
                self.config('levmarq', initial=np.ones(self.nparams))
        optimizer = getattr(optimization, self.fit_method)
        # Make the generators from the optimization function; each method
        # takes a different set of callables/arrays.
        if self.fit_method == 'linear':
            solver = optimizer(self.hessian(None), self.gradient(None),
                               **self.fit_args)
        elif self.fit_method in ['newton', 'levmarq']:
            solver = optimizer(self.hessian, self.gradient, self.value,
                               **self.fit_args)
        elif self.fit_method == 'steepest':
            solver = optimizer(self.gradient, self.value, **self.fit_args)
        elif self.fit_method == 'acor':
            solver = optimizer(self.value, **self.fit_args)
        # Run the optimizer to the end, keeping only the final iterate.
        for i, p, stats in solver:
            continue
        self.p_ = p
        self.stats_ = stats
        return self

    def fmt_estimate(self, p):
        """
        Called when accessing the property ``estimate_``.

        Use this to convert the parameter vector (p) to a more useful form,
        like a geometric object, etc.

        Parameters:

        * p : 1d-array
            The parameter vector.

        Returns:

        * formatted
            Pretty much anything you want.

        """
        return p

    @property
    def estimate_(self):
        """
        A nicely formatted version of the estimate.

        If the class implements a `fmt_estimate` method, this will return its
        results. This can be used to convert the parameter vector to a more
        useful form, like a :mod:`geoist.mesher` object.
        """
        # Robustness fix: getattr avoids a bare AttributeError when fit()
        # was never run, so the helpful assertion message below is actually
        # shown to the user.
        assert getattr(self, 'p_', None) is not None, \
            "No estimate found. Run 'fit' first."
        return self.fmt_estimate(self.p_)
class MultiObjective(OptimizerMixin, OperatorMixin):
"""
An objective (goal) function with more than one component.
This class is a linear combination of other goal functions (like
:class:`~geoist.inversion.misfit.Misfit` and regularization classes).
It is automatically created by adding two goal functions that have the
:class:`~geoist.inversion.base.OperatorMixin` as a base class.
Alternatively, you can create a ``MultiObjetive`` by passing the other
goals function instances as arguments to the constructor.
The ``MultiObjetive`` behaves like any other goal function object. It has
``fit`` and ``config`` methods and can be added and multiplied by a scalar
with the same effects.
Indexing a ``MultiObjetive`` will iterate over the component goal
functions.
Examples:
To show how this class is generated and works, let's create a simple class
that subclasses ``OperatorMixin``.
>>> class MyGoal(OperatorMixin):
... def __init__(self, name, nparams, islinear):
... self.name = name
... self.islinear = islinear
... self.nparams = nparams
... def value(self, p):
... return 1
... def gradient(self, p):
... return 2
... def hessian(self, p):
... return 3
>>> a = MyGoal('A', 10, True)
>>> b = MyGoal('B', 10, True)
>>> c = a + b
>>> type(c)
<class 'geoist.inversion.base.MultiObjective'>
>>> c.size
2
>>> c.nparams
10
>>> c.islinear
True
>>> c[0].name
'A'
>>> c[1].name
'B'
Asking for the value, gradient, and Hessian of the ``MultiObjective`` will
give me the sum of both components.
>>> c.value(None)
2
>>> c.gradient(None)
4
>>> c.hessian(None)
6
Multiplying the ``MultiObjective`` by a scalar will set the | |
return FAILURE
if self.kib is None:
return WARNINGS # not sure how 'du' could fail, but whatever
return SUCCESS
def getText(self, cmd, results):
if self.kib is not None:
return ["treesize", "%d KiB" % self.kib]
return ["treesize", "unknown"]
class SetProperty(ShellCommand):
    """
    Run a shell command and store its output as build properties.

    Exactly one of two modes must be configured:

    * ``property``: store the command's whole stdout under that name;
      ``strip`` controls whether surrounding whitespace is removed.
    * ``extract_fn``: a callable ``f(rc, stdout, stderr) -> dict`` whose
      returned mapping is stored as properties.
    """
    name = "setproperty"
    renderables = [ 'property' ]

    def __init__(self, property=None, extract_fn=None, strip=True, **kwargs):
        self.property = property
        self.extract_fn = extract_fn
        self.strip = strip

        assert (property is not None) ^ (extract_fn is not None), \
            "Exactly one of property and extract_fn must be set"

        ShellCommand.__init__(self, **kwargs)

        self.addFactoryArguments(property=self.property)
        self.addFactoryArguments(extract_fn=self.extract_fn)
        self.addFactoryArguments(strip=self.strip)

        # Remembered for createSummary()/getText() reporting.
        self.property_changes = {}

    def commandComplete(self, cmd):
        if self.property:
            # Single-property mode: the whole stdout is the value.
            result = cmd.logs['stdio'].getText()
            if self.strip:
                result = result.strip()
            propname = self.property
            self.setProperty(propname, result, "SetProperty Step")
            self.property_changes[propname] = result
        else:
            # extract_fn mode: hand the callable the return code and the
            # separated stdout/stderr streams.
            log = cmd.logs['stdio']
            new_props = self.extract_fn(cmd.rc,
                    ''.join(log.getChunks([STDOUT], onlyText=True)),
                    ''.join(log.getChunks([STDERR], onlyText=True)))
            for k, v in new_props.items():
                self.setProperty(k, v, "SetProperty Step")
            self.property_changes = new_props

    def createSummary(self, log):
        props_set = ["%s: %r" % (k, v)
                     for k, v in self.property_changes.items()]
        self.addCompleteLog('property changes', "\n".join(props_set))

    def getText(self, cmd, results):
        if self.property_changes:
            # Bug fix: dict.keys() is a view on Python 3 and cannot be
            # concatenated to a list; list() works on both 2 and 3.
            return [ "set props:" ] + list(self.property_changes.keys())
        else:
            return [ "no change" ]
class Configure(ShellCommand):
    """Step that runs the conventional ``./configure`` command.

    A failure halts and flunks the build.
    """
    name = "configure"
    haltOnFailure = 1
    flunkOnFailure = 1
    description = ["configuring"]
    descriptionDone = ["configure"]
    command = ["./configure"]
class StringFileWriter(pb.Referenceable):
    """
    A remote FileWriter that accumulates received data in memory.

    Lets a file uploaded from the slave be post-processed inline on the
    master instead of being written to disk; the collected text is read
    from the public ``buffer`` attribute.
    """

    def __init__(self):
        # Public: callers read the accumulated text from here.
        self.buffer = ""

    def remote_write(self, data):
        # Append each chunk received from the slave to the buffer.
        self.buffer = self.buffer + data

    def remote_close(self):
        # Nothing to release for an in-memory buffer.
        pass
class SilentRemoteCommand(RemoteCommand):
    """
    Remote command subclass used to run an internal file upload command on the
    slave. We do not need any progress updates from such command, so override
    remoteUpdate() with an empty method.
    """
    def remoteUpdate(self, update):
        # Intentionally discard all progress updates from the slave.
        pass
class WarningCountingShellCommand(ShellCommand):
    """
    A shell command step that scans its output for compiler-style warnings.

    Lines matching ``warningPattern`` are counted and collected into a
    separate 'warnings' log; the build-wide 'warnings-count' property and
    statistic are updated.  Individual warnings can be ignored via
    :meth:`addSuppression` or via a ``suppressionFile`` uploaded from the
    slave before the command runs.
    """
    renderables = [ 'suppressionFile' ]

    warnCount = 0
    warningPattern = '.*warning[: ].*'
    # The defaults work for GNU Make.
    directoryEnterPattern = "make.*: Entering directory [\"`'](.*)['`\"]"
    directoryLeavePattern = "make.*: Leaving directory"
    suppressionFile = None

    # Matches blank lines and '#' comment lines in a suppression file.
    commentEmptyLineRe = re.compile(r"^\s*(\#.*)?$")
    # Matches 'FILE-RE : WARN-RE [: START[-END]]' suppression entries.
    suppressionLineRe = re.compile(r"^\s*(.+?)\s*:\s*(.+?)\s*(?:[:]\s*([0-9]+)(?:-([0-9]+))?\s*)?$")

    def __init__(self, workdir=None,
                 warningPattern=None, warningExtractor=None, maxWarnCount=None,
                 directoryEnterPattern=None, directoryLeavePattern=None,
                 suppressionFile=None, **kwargs):
        self.workdir = workdir
        # See if we've been given a regular expression to use to match
        # warnings. If not, use a default that assumes any line with "warning"
        # present is a warning. This may lead to false positives in some cases.
        if warningPattern:
            self.warningPattern = warningPattern
        if directoryEnterPattern:
            self.directoryEnterPattern = directoryEnterPattern
        if directoryLeavePattern:
            self.directoryLeavePattern = directoryLeavePattern
        if suppressionFile:
            self.suppressionFile = suppressionFile
        if warningExtractor:
            self.warningExtractor = warningExtractor
        else:
            # Default extractor: treat the whole matching line as the warning.
            self.warningExtractor = WarningCountingShellCommand.warnExtractWholeLine
        self.maxWarnCount = maxWarnCount

        # And upcall to let the base class do its work
        ShellCommand.__init__(self, workdir=workdir, **kwargs)

        self.addFactoryArguments(warningPattern=warningPattern,
                                 directoryEnterPattern=directoryEnterPattern,
                                 directoryLeavePattern=directoryLeavePattern,
                                 warningExtractor=warningExtractor,
                                 maxWarnCount=maxWarnCount,
                                 suppressionFile=suppressionFile)
        self.suppressions = []
        # Tracks make's Entering/Leaving directory messages so warnings can
        # be attributed to a full path.
        self.directoryStack = []

    def setDefaultWorkdir(self, workdir):
        # Only adopt the build's default if no explicit workdir was given.
        if self.workdir is None:
            self.workdir = workdir
        ShellCommand.setDefaultWorkdir(self, workdir)

    def addSuppression(self, suppressionList):
        """
        This method can be used to add patters of warnings that should
        not be counted.

        It takes a single argument, a list of patterns.

        Each pattern is a 4-tuple (FILE-RE, WARN-RE, START, END).

        FILE-RE is a regular expression (string or compiled regexp), or None.
        If None, the pattern matches all files, else only files matching the
        regexp. If directoryEnterPattern is specified in the class constructor,
        matching is against the full path name, eg. src/main.c.

        WARN-RE is similarly a regular expression matched against the
        text of the warning, or None to match all warnings.

        START and END form an inclusive line number range to match against. If
        START is None, there is no lower bound, similarly if END is none there
        is no upper bound."""
        for fileRe, warnRe, start, end in suppressionList:
            # Accept either pre-compiled regexps or plain pattern strings.
            if fileRe != None and isinstance(fileRe, str):
                fileRe = re.compile(fileRe)
            if warnRe != None and isinstance(warnRe, str):
                warnRe = re.compile(warnRe)
            self.suppressions.append((fileRe, warnRe, start, end))

    def warnExtractWholeLine(self, line, match):
        """
        Extract warning text as the whole line.
        No file names or line numbers."""
        return (None, None, line)

    def warnExtractFromRegexpGroups(self, line, match):
        """
        Extract file name, line number, and warning text as groups (1,2,3)
        of warningPattern match."""
        file = match.group(1)
        lineNo = match.group(2)
        if lineNo != None:
            lineNo = int(lineNo)
        text = match.group(3)
        return (file, lineNo, text)

    def maybeAddWarning(self, warnings, line, match):
        # Append `line` to `warnings` and bump warnCount, unless one of the
        # registered suppressions matches the extracted (file, line, text).
        if self.suppressions:
            (file, lineNo, text) = self.warningExtractor(self, line, match)
            # Qualify the file with the directory currently being built, as
            # tracked from make's Entering/Leaving directory messages.
            if file != None and file != "" and self.directoryStack:
                currentDirectory = self.directoryStack[-1]
                if currentDirectory != None and currentDirectory != "":
                    file = "%s/%s" % (currentDirectory, file)

            # Skip adding the warning if any suppression matches.
            for fileRe, warnRe, start, end in self.suppressions:
                if ( (file == None or fileRe == None or fileRe.search(file)) and
                     (warnRe == None or warnRe.search(text)) and
                     ((start == None and end == None) or
                      (lineNo != None and start <= lineNo and end >= lineNo)) ):
                    return

        warnings.append(line)
        self.warnCount += 1

    def start(self):
        # With no suppression file configured, just run the command.
        if self.suppressionFile == None:
            return ShellCommand.start(self)

        # Otherwise first upload the suppression file from the slave into an
        # in-memory buffer; the command itself starts from uploadDone().
        version = self.slaveVersion("uploadFile")
        if not version:
            m = "Slave is too old, does not know about uploadFile"
            raise BuildSlaveTooOldError(m)

        self.myFileWriter = StringFileWriter()

        args = {
            'slavesrc': self.suppressionFile,
            'workdir': self.workdir,
            'writer': self.myFileWriter,
            'maxsize': None,
            'blocksize': 32*1024,
            }
        cmd = SilentRemoteCommand('uploadFile', args)
        d = self.runCommand(cmd)
        d.addCallback(self.uploadDone)
        d.addErrback(self.failed)

    def uploadDone(self, dummy):
        # Parse the uploaded suppression file, register its entries, and
        # then actually start the shell command.
        lines = self.myFileWriter.buffer.split("\n")
        del(self.myFileWriter)

        list = []  # NOTE(review): shadows the builtin 'list'
        for line in lines:
            if self.commentEmptyLineRe.match(line):
                continue
            match = self.suppressionLineRe.match(line)
            if (match):
                file, test, start, end = match.groups()
                if (end != None):
                    end = int(end)
                if (start != None):
                    start = int(start)
                    # A lone line number suppresses just that one line.
                    if end == None:
                        end = start
                list.append((file, test, start, end))

        self.addSuppression(list)
        return ShellCommand.start(self)

    def createSummary(self, log):
        """
        Match log lines against warningPattern.

        Warnings are collected into another log for this step, and the
        build-wide 'warnings-count' is updated."""

        self.warnCount = 0

        # Now compile a regular expression from whichever warning pattern we're
        # using
        if not self.warningPattern:
            return

        wre = self.warningPattern
        if isinstance(wre, str):
            wre = re.compile(wre)

        directoryEnterRe = self.directoryEnterPattern
        if directoryEnterRe != None and isinstance(directoryEnterRe, str):
            directoryEnterRe = re.compile(directoryEnterRe)

        directoryLeaveRe = self.directoryLeavePattern
        if directoryLeaveRe != None and isinstance(directoryLeaveRe, str):
            directoryLeaveRe = re.compile(directoryLeaveRe)

        # Check if each line in the output from this command matched our
        # warnings regular expressions. If did, bump the warnings count and
        # add the line to the collection of lines with warnings
        warnings = []
        # TODO: use log.readlines(), except we need to decide about stdout vs
        # stderr
        for line in log.getText().split("\n"):
            if directoryEnterRe:
                match = directoryEnterRe.search(line)
                if match:
                    self.directoryStack.append(match.group(1))
            if (directoryLeaveRe and
                self.directoryStack and
                directoryLeaveRe.search(line)):
                self.directoryStack.pop()

            match = wre.match(line)
            if match:
                self.maybeAddWarning(warnings, line, match)

        # If there were any warnings, make the log if lines with warnings
        # available
        if self.warnCount:
            self.addCompleteLog("warnings (%d)" % self.warnCount,
                                "\n".join(warnings) + "\n")

        warnings_stat = self.step_status.getStatistic('warnings', 0)
        self.step_status.setStatistic('warnings', warnings_stat + self.warnCount)

        try:
            old_count = self.getProperty("warnings-count")
        except KeyError:
            old_count = 0
        self.setProperty("warnings-count", old_count + self.warnCount, "WarningCountingShellCommand")

    def evaluateCommand(self, cmd):
        # Fail on a non-zero exit code or when the warning ceiling is
        # exceeded; otherwise report WARNINGS if any warnings were seen.
        if ( cmd.rc != 0 or
             ( self.maxWarnCount != None and self.warnCount > self.maxWarnCount ) ):
            return FAILURE
        if self.warnCount:
            return WARNINGS
        return SUCCESS
class Compile(WarningCountingShellCommand):
    """Step that runs ``make all`` and counts compiler warnings."""
    name = "compile"
    haltOnFailure = 1
    flunkOnFailure = 1
    description = ["compiling"]
    descriptionDone = ["compile"]
    command = ["make", "all"]

    OFFprogressMetrics = ('output',)
    # things to track: number of files compiled, number of directories
    # traversed (assuming 'make' is being used)

    def createSummary(self, log):
        # TODO: grep for the characteristic GCC error lines and
        # assemble them into a pair of buffers
        WarningCountingShellCommand.createSummary(self, log)
class Test(WarningCountingShellCommand):
name = "test"
warnOnFailure = 1
description = ["testing"]
descriptionDone = ["test"]
command = ["make", "test"]
def setTestResults(self, total=0, failed=0, passed=0, warnings=0):
"""
Called by subclasses to set the relevant statistics; this actually
adds to any statistics already present
"""
total += self.step_status.getStatistic('tests-total', 0)
self.step_status.setStatistic('tests-total', total)
failed += self.step_status.getStatistic('tests-failed', 0)
self.step_status.setStatistic('tests-failed', failed)
warnings += self.step_status.getStatistic('tests-warnings', 0)
self.step_status.setStatistic('tests-warnings', warnings)
passed += self.step_status.getStatistic('tests-passed', 0)
self.step_status.setStatistic('tests-passed', passed)
def describe(self, done=False):
description = WarningCountingShellCommand.describe(self, done)
if done:
if self.step_status.hasStatistic('tests-total'):
total = self.step_status.getStatistic("tests-total", 0)
failed = self.step_status.getStatistic("tests-failed", 0)
passed = self.step_status.getStatistic("tests-passed", 0)
warnings = self.step_status.getStatistic("tests-warnings", 0)
if not total:
total = failed + passed + warnings
if total:
description.append('%d tests' % total)
if passed:
description.append('%d passed' % passed)
if warnings:
description.append('%d warnings' % warnings)
| |
'E': 'Signals', 'F': 'Other', 'O': 'Other'}),
'G': ('Turning versus same direction',{'A': 'Rear of left turning vehicle', 'B': 'Left turn side swipe', 'C': 'Stopped or turning from left side', 'D': 'Near centre line', 'E': 'Overtaking vehicle', 'F': 'Two turning', 'O': 'Other'}),
'H': ('Crossing (no turns)',{'A': 'Right angle (70 to 110 degress)', 'O': 'Other'}),
'J': ('Crossing (vehicle turning)',{'A': 'Right turn right side', 'B': 'Opposing right turns', 'C': 'Two turning', 'O': 'Other'}),
'K': ('Merging',{'A': 'Left turn in', 'B': 'Opposing right turns', 'C': 'Two turning', 'O': 'Other'}),
'L': ('Right turn against',{'A': 'Stopped waiting to turn', 'B': 'Making turn', 'O': 'Other'}),
'M': ('Manoeuvring',{'A': 'Parking or leaving', 'B': 'U turn', 'C': 'U turn', 'D': 'Driveway manoeuvre', 'E': 'Entering or leaving from opposite side', 'F': 'Enetering or leaving from same side', 'G': 'Reversing along road', 'O': 'Other'}),
'N': ('Pedestrians crossing road',{'A': 'Left side', 'B': 'Right side', 'C': 'Left turn left side', 'D': 'Right turn right side', 'E': 'Left turn right side', 'F': 'Right turn left side', 'G': 'Manoeuvring vehicle', 'O': 'Other'}),
'P': ('Pedestrians other',{'A': 'Walking with traffic', 'B': 'Walking facing traffic', 'C': 'Walking on footpath', 'D': 'Child playing (including tricycle)', 'E': 'Attending to vehicle', 'F': 'Entering or leaving vehicle', 'O': 'Other'}),
'Q': ('Miscellaneous',{'A': 'Fell while boarding or alighting', 'B': 'Fell from moving vehicle', 'C': 'Train', 'D': 'Parked vehicle ran away', 'E': 'Equestrian', 'F': 'Fell inside vehicle', 'G': 'Trailer or load', 'O': 'Other'})}
try:
return (decoder[self.mvmt[0]][0], decoder[self.mvmt[0]][1][self.mvmt[1]])
except KeyError:
return None
def getKeyVehicle(self, decode=False):
'''Returns the key vehicle code (or the decoded value), which is one part
of self.vehicles'''
if self.vehicles != None:
code = self.vehicles[0]
if not decode:
return code
else:
try:
decoder = {'C': 'car',
'V': 'van/ute',
'X': 'taxi/taxi van',
'B': 'bus',
'L': 'school bus',
'4': 'SUV/4X4',
'T': 'truck',
'M': 'motorcycle',
'P': 'moped',
'S': 'bicycle',
'K': 'skateboard/in-line skater/etc.',
'O': 'other/unknown',
'E': 'pedestrian'}
except KeyError:
return None
return decoder[code]
else:
return None
def getKeyVehicleMovement(self, decode=False):
'''Returns the key vehicle movement (or the decoded value), which is the
second part of self.vehicles'''
if self.vehicles != None:
code = self.vehicles[1:]
if not decode:
return code
else:
try:
decoder = {'N': 'North', 'S': 'South', 'E': 'East', 'W': 'West', '1': 'on the first street', '2': 'on the second street'}
try:
return '%s %s' % (decoder[code[0]], decoder[code[1]])
except IndexError:
return None
except KeyError:
return None
def getSecondaryVehicles(self, decode=False):
'''Returns the secondary vehicle type codes (or the decoded values)
as a list of strings'''
if len(self.vehicles) > 3:
# Other vehicles were involved
# Get a list of the other vehicle codes
vehicles = self.vehicles[3:]
if not decode:
return [v for v in vehicles]
else:
try:
decoder = {'C': 'car',
'V': 'van/ute',
'X': 'taxi/taxi van',
'B': 'bus',
'L': 'school bus',
'4': 'SUV/4X4',
'T': 'truck',
'M': 'motorcycle',
'P': 'moped',
'S': 'bicycle',
'E': 'pedestrian',
'K': 'skateboard/in-line skater/etc.',
'Q': 'equestrian',
'H': 'wheeled pedestrian (wheelchairs, etc.)',
'O': 'other/unknown'}
return [decoder[v] for v in vehicles]
except KeyError:
return None
else:
# There were no other vehicles
return None
def getObjectsStruck(self, decode=False):
'''Returns the objects struck as a list, or their decoded value, also
as a list.
During a crash the vehicle(s) involved may strike objects either in the
roadway or on the roadside. Since the same vehicle might not have
struck all the objects involved, each object is linked to the vehicle
that hit it, but this is not shown on the listing.
The coded crash listings show only the first three objects struck. The
same object type may appear twice but only if it has been struck by
different vehicles.
Note:
If one vehicle strikes the same object type more than once (i.e. 2
parked cars) then only the first is coded.
'''
if self.objects_struck == None:
return None
decoder = {'A': 'driven or accompanied animals, i.e. under control',
'B': 'bridge abutment, handrail or approach, includes tunnels',
'C': 'upright cliff or bank, retaining walls',
'D': 'debris, boulder or object dropped from vehicle',
'E': 'over edge of bank',
'F': 'fence, letterbox, hoarding etc.',
'G': 'guard or guide rail (including median barriers)',
'H': 'house or building',
'I': 'traffic island or median strip',
'J': 'public furniture, eg phone boxes, bus shelters, signal controllers, etc.',
'K': 'kerb, when directly contributing to incident',
'L': 'landslide, washout or floodwater',
'M': 'parked motor vehicle',
'N': 'train',
'P': 'utility pole, includes lighting columns',
'Q': 'broken down vehicle, workmen\'s vehicle, taxis picking up, etc.',
'R': 'roadwork signs or drums, holes and excavations, etc',
'S': 'traffic signs or signal bollards',
'T': 'trees, shrubbery of a substantial nature',
'V': 'ditch',
'W': 'wild animal, strays, or out of control animals',
'X': 'other',
'Y': 'objects thrown at or dropped onto vehicles',
'Z': 'into water, river or sea'}
try:
return [decoder[o] for o in self.objects_struck]
except KeyError:
return None
def get_crashroad(self):
if self.crash_intsn == 'I':
# The crash happened at an intersection
crashroad = self.crash_road + ' at ' + self.side_road
else:
if self.side_road != None:
# Not stated as occuring at a side road, but one still provided
crashroad = self.crash_road + ' near ' + self.side_road
else:
# Only one road provided
crashroad = self.crash_road
return crashroad
def decodeLight(self):
'''Takes self.light (a list of strings) and applies a decoder to it,
returning a list of strings that are human-readable.'''
decoder1 = {'B': 'Bright sun',
'O': 'Overcast',
'T': 'Twilight',
'D': 'Dark',
' ': None}
decoder2 = {'O': 'street lights on',
'F': 'street lights off',
'N': 'No street lights present',
' ': None}
return [decoder1[self.light[0]], decoder2[self.light[1]]]
def decodeWeather(self):
'''Takes self.wthr_a (a list of strings) and applies a decoder to it,
returning a list of strings that are human-readable.'''
decoder1 = {'F': 'Fine',
'M': 'Mist/fog',
'L': 'Light rain',
'H': 'Heavy rain',
'S': 'Snow',
' ': None}
decoder2 = {'F': 'Frost',
'S': 'Strong wind',
' ': None}
try:
return [decoder1[self.wthr_a[0]], decoder2[self.wthr_a[1]]]
except KeyError:
return None
    def weatherIcon(self):
        '''Takes self.wthr_a (a list of strings) and applies a decoder to it,
        return a list of strings that represent paths to PNG icons that represent
        the weather.

        Returns an HTML fragment of up to two <img> tags (empty string when
        no weather data is present).

        NOTE(review): assumes self.wthr_a has exactly two entries -- a
        shorter value raises IndexError below; only >2 is rejected.  Confirm
        against the data loader.
        '''
        if self.light[0] in ['T', 'D']:
            # If not daytime
            light = 'Night'
        else:
            light = 'Day'
        # Primary weather code -> [icon file, alt text]; Fine and Mist need
        # the day/night variant, so they map to a nested dict keyed by light.
        decoder1 = {'F': {'Night': ['weather-moon-icon.svg','Clear Night'], 'Day': ['weather-sun-icon.svg','Clear Day']},
                    'M': {'Night': ['Fog-Night-icon.svg','Night Fog'], 'Day': ['Fog-Day-icon.svg','Day Fog']},
                    'L': ['weather-little-rain-icon.svg','Light Rain'],
                    'H': ['weather-downpour-icon.svg','Heavy Rain'],
                    'S': ['weather-snow-icon.svg','Snow'],
                    ' ': None}
        # Secondary weather code -> [icon file, alt text].
        decoder2 = {'F': ['weather-frost-icon.svg','Frost'],
                    'S': ['weather-wind-icon.svg','Strong Winds'],
                    ' ': None}
        if len(self.wthr_a) > 2:
            raise Exception # More than 2 weather indicators are not permitted
        w1 = self.wthr_a[0]
        if w1 != ' ':
            # Get the appropriate icon
            if w1 in ['F','M']:
                # Also need the light parameter
                icon = decoder1[w1][light]
            else:
                icon = decoder1[w1]
            icon1 = icon[0]
            alt1 = icon[1]
        else:
            icon1 = None
            alt1 = None
        w2 = self.wthr_a[1]
        if w2 != ' ':
            # Get the appropriate secondary icon
            icon = decoder2[w2]
            icon2 = icon[0]
            alt2 = icon[1]
        else:
            icon2 = None
            alt2 = None
        ret = ''
        # NOTE(review): h, w and hspace appear unused below -- presumably
        # left over from an earlier <img> layout; confirm before removing.
        h,w = 30,30
        hspace = 5
        base = './icons'
        title1 = alt1
        title2 = alt2
        if icon1 == None and icon2 == None:
            # No weather data at all
            return ''
        if icon1 != None:
            ret += '<img src="%s/%s" title="%s">' % (base,icon1,title1)
        if icon2 != None:
            ret += '<img src="%s/%s" title="%s">' % (base,icon2,title2)
        return ret
def decodeJunction(self):
'''Takes self.junc_type (a single-character string) and applies a decoder to
it, returning a human-readable string.
Note:
When one of the vehicles involved is attempting to enter or leave a
driveway at an intersection location, the driveway code takes
precedence.'''
if self.junc_type == None:
return None
decoder = {'D': 'Driveway',
'R': 'Roundabout',
'X': 'Crossroads',
'T': 'T intersection',
'Y': 'Y intersection',
'M': 'Multi-leg intersection'}
try:
return decoder[self.junc_type]
except KeyError:
return None
def projectedpt(self, target=pyproj.Proj(init='epsg:3728')):
'''Takes the original NZTM point coordinates, and transforms them into
a `target` pyproj.Proj() projected coordinate system, returning the
crash location as | |
+ m.b134 - m.b192 <= 0)
# Machine-generated model constraints (do not edit by hand): each line links a
# pair of binary variables (b_i, b_j) to an auxiliary pair variable b_ij via
# -b_i + b_j - b_ij <= 0, enumerated over all ordered pairs in each variable
# group (b127..b140, then b141..b156).
m.c3285 = Constraint(expr= - m.b127 + m.b135 - m.b193 <= 0)
m.c3286 = Constraint(expr= - m.b127 + m.b136 - m.b194 <= 0)
m.c3287 = Constraint(expr= - m.b127 + m.b137 - m.b195 <= 0)
m.c3288 = Constraint(expr= - m.b127 + m.b138 - m.b196 <= 0)
m.c3289 = Constraint(expr= - m.b127 + m.b139 - m.b197 <= 0)
m.c3290 = Constraint(expr= - m.b127 + m.b140 - m.b198 <= 0)
m.c3291 = Constraint(expr= - m.b128 + m.b129 - m.b199 <= 0)
m.c3292 = Constraint(expr= - m.b128 + m.b130 - m.b200 <= 0)
m.c3293 = Constraint(expr= - m.b128 + m.b131 - m.b201 <= 0)
m.c3294 = Constraint(expr= - m.b128 + m.b132 - m.b202 <= 0)
m.c3295 = Constraint(expr= - m.b128 + m.b133 - m.b203 <= 0)
m.c3296 = Constraint(expr= - m.b128 + m.b134 - m.b204 <= 0)
m.c3297 = Constraint(expr= - m.b128 + m.b135 - m.b205 <= 0)
m.c3298 = Constraint(expr= - m.b128 + m.b136 - m.b206 <= 0)
m.c3299 = Constraint(expr= - m.b128 + m.b137 - m.b207 <= 0)
m.c3300 = Constraint(expr= - m.b128 + m.b138 - m.b208 <= 0)
m.c3301 = Constraint(expr= - m.b128 + m.b139 - m.b209 <= 0)
m.c3302 = Constraint(expr= - m.b128 + m.b140 - m.b210 <= 0)
m.c3303 = Constraint(expr= - m.b129 + m.b130 - m.b211 <= 0)
m.c3304 = Constraint(expr= - m.b129 + m.b131 - m.b212 <= 0)
m.c3305 = Constraint(expr= - m.b129 + m.b132 - m.b213 <= 0)
m.c3306 = Constraint(expr= - m.b129 + m.b133 - m.b214 <= 0)
m.c3307 = Constraint(expr= - m.b129 + m.b134 - m.b215 <= 0)
m.c3308 = Constraint(expr= - m.b129 + m.b135 - m.b216 <= 0)
m.c3309 = Constraint(expr= - m.b129 + m.b136 - m.b217 <= 0)
m.c3310 = Constraint(expr= - m.b129 + m.b137 - m.b218 <= 0)
m.c3311 = Constraint(expr= - m.b129 + m.b138 - m.b219 <= 0)
m.c3312 = Constraint(expr= - m.b129 + m.b139 - m.b220 <= 0)
m.c3313 = Constraint(expr= - m.b129 + m.b140 - m.b221 <= 0)
m.c3314 = Constraint(expr= - m.b130 + m.b131 - m.b222 <= 0)
m.c3315 = Constraint(expr= - m.b130 + m.b132 - m.b223 <= 0)
m.c3316 = Constraint(expr= - m.b130 + m.b133 - m.b224 <= 0)
m.c3317 = Constraint(expr= - m.b130 + m.b134 - m.b225 <= 0)
m.c3318 = Constraint(expr= - m.b130 + m.b135 - m.b226 <= 0)
m.c3319 = Constraint(expr= - m.b130 + m.b136 - m.b227 <= 0)
m.c3320 = Constraint(expr= - m.b130 + m.b137 - m.b228 <= 0)
m.c3321 = Constraint(expr= - m.b130 + m.b138 - m.b229 <= 0)
m.c3322 = Constraint(expr= - m.b130 + m.b139 - m.b230 <= 0)
m.c3323 = Constraint(expr= - m.b130 + m.b140 - m.b231 <= 0)
m.c3324 = Constraint(expr= - m.b131 + m.b132 - m.b232 <= 0)
m.c3325 = Constraint(expr= - m.b131 + m.b133 - m.b233 <= 0)
m.c3326 = Constraint(expr= - m.b131 + m.b134 - m.b234 <= 0)
m.c3327 = Constraint(expr= - m.b131 + m.b135 - m.b235 <= 0)
m.c3328 = Constraint(expr= - m.b131 + m.b136 - m.b236 <= 0)
m.c3329 = Constraint(expr= - m.b131 + m.b137 - m.b237 <= 0)
m.c3330 = Constraint(expr= - m.b131 + m.b138 - m.b238 <= 0)
m.c3331 = Constraint(expr= - m.b131 + m.b139 - m.b239 <= 0)
m.c3332 = Constraint(expr= - m.b131 + m.b140 - m.b240 <= 0)
m.c3333 = Constraint(expr= - m.b132 + m.b133 - m.b241 <= 0)
m.c3334 = Constraint(expr= - m.b132 + m.b134 - m.b242 <= 0)
m.c3335 = Constraint(expr= - m.b132 + m.b135 - m.b243 <= 0)
m.c3336 = Constraint(expr= - m.b132 + m.b136 - m.b244 <= 0)
m.c3337 = Constraint(expr= - m.b132 + m.b137 - m.b245 <= 0)
m.c3338 = Constraint(expr= - m.b132 + m.b138 - m.b246 <= 0)
m.c3339 = Constraint(expr= - m.b132 + m.b139 - m.b247 <= 0)
m.c3340 = Constraint(expr= - m.b132 + m.b140 - m.b248 <= 0)
m.c3341 = Constraint(expr= - m.b133 + m.b134 - m.b249 <= 0)
m.c3342 = Constraint(expr= - m.b133 + m.b135 - m.b250 <= 0)
m.c3343 = Constraint(expr= - m.b133 + m.b136 - m.b251 <= 0)
m.c3344 = Constraint(expr= - m.b133 + m.b137 - m.b252 <= 0)
m.c3345 = Constraint(expr= - m.b133 + m.b138 - m.b253 <= 0)
m.c3346 = Constraint(expr= - m.b133 + m.b139 - m.b254 <= 0)
m.c3347 = Constraint(expr= - m.b133 + m.b140 - m.b255 <= 0)
m.c3348 = Constraint(expr= - m.b134 + m.b135 - m.b256 <= 0)
m.c3349 = Constraint(expr= - m.b134 + m.b136 - m.b257 <= 0)
m.c3350 = Constraint(expr= - m.b134 + m.b137 - m.b258 <= 0)
m.c3351 = Constraint(expr= - m.b134 + m.b138 - m.b259 <= 0)
m.c3352 = Constraint(expr= - m.b134 + m.b139 - m.b260 <= 0)
m.c3353 = Constraint(expr= - m.b134 + m.b140 - m.b261 <= 0)
m.c3354 = Constraint(expr= - m.b135 + m.b136 - m.b262 <= 0)
m.c3355 = Constraint(expr= - m.b135 + m.b137 - m.b263 <= 0)
m.c3356 = Constraint(expr= - m.b135 + m.b138 - m.b264 <= 0)
m.c3357 = Constraint(expr= - m.b135 + m.b139 - m.b265 <= 0)
m.c3358 = Constraint(expr= - m.b135 + m.b140 - m.b266 <= 0)
m.c3359 = Constraint(expr= - m.b136 + m.b137 - m.b267 <= 0)
m.c3360 = Constraint(expr= - m.b136 + m.b138 - m.b268 <= 0)
m.c3361 = Constraint(expr= - m.b136 + m.b139 - m.b269 <= 0)
m.c3362 = Constraint(expr= - m.b136 + m.b140 - m.b270 <= 0)
m.c3363 = Constraint(expr= - m.b137 + m.b138 - m.b271 <= 0)
m.c3364 = Constraint(expr= - m.b137 + m.b139 - m.b272 <= 0)
m.c3365 = Constraint(expr= - m.b137 + m.b140 - m.b273 <= 0)
m.c3366 = Constraint(expr= - m.b138 + m.b139 - m.b274 <= 0)
m.c3367 = Constraint(expr= - m.b138 + m.b140 - m.b275 <= 0)
m.c3368 = Constraint(expr= - m.b139 + m.b140 - m.b276 <= 0)
m.c3369 = Constraint(expr= - m.b141 + m.b142 - m.b157 <= 0)
m.c3370 = Constraint(expr= - m.b141 + m.b143 - m.b158 <= 0)
m.c3371 = Constraint(expr= - m.b141 + m.b144 - m.b159 <= 0)
m.c3372 = Constraint(expr= - m.b141 + m.b145 - m.b160 <= 0)
m.c3373 = Constraint(expr= - m.b141 + m.b146 - m.b161 <= 0)
m.c3374 = Constraint(expr= - m.b141 + m.b147 - m.b162 <= 0)
m.c3375 = Constraint(expr= - m.b141 + m.b148 - m.b163 <= 0)
m.c3376 = Constraint(expr= - m.b141 + m.b149 - m.b164 <= 0)
m.c3377 = Constraint(expr= - m.b141 + m.b150 - m.b165 <= 0)
m.c3378 = Constraint(expr= - m.b141 + m.b151 - m.b166 <= 0)
m.c3379 = Constraint(expr= - m.b141 + m.b152 - m.b167 <= 0)
m.c3380 = Constraint(expr= - m.b141 + m.b153 - m.b168 <= 0)
m.c3381 = Constraint(expr= - m.b141 + m.b154 - m.b169 <= 0)
m.c3382 = Constraint(expr= - m.b141 + m.b155 - m.b170 <= 0)
m.c3383 = Constraint(expr= - m.b141 + m.b156 - m.b171 <= 0)
m.c3384 = Constraint(expr= - m.b142 + m.b143 - m.b172 <= 0)
m.c3385 = Constraint(expr= - m.b142 + m.b144 - m.b173 <= 0)
m.c3386 = Constraint(expr= - m.b142 + m.b145 - m.b174 <= 0)
m.c3387 = Constraint(expr= - m.b142 + m.b146 - m.b175 <= 0)
m.c3388 = Constraint(expr= - m.b142 + m.b147 - m.b176 <= 0)
m.c3389 = Constraint(expr= - m.b142 + m.b148 - m.b177 <= 0)
m.c3390 = Constraint(expr= - m.b142 + m.b149 - m.b178 <= 0)
m.c3391 = Constraint(expr= - m.b142 + m.b150 - m.b179 <= 0)
m.c3392 = Constraint(expr= - m.b142 + m.b151 - m.b180 <= 0)
m.c3393 = Constraint(expr= - m.b142 + m.b152 - m.b181 <= 0)
m.c3394 = Constraint(expr= - m.b142 + m.b153 - m.b182 <= 0)
m.c3395 = Constraint(expr= - m.b142 + m.b154 - m.b183 <= 0)
m.c3396 = Constraint(expr= - m.b142 + m.b155 - m.b184 <= 0)
m.c3397 = Constraint(expr= - m.b142 + m.b156 - m.b185 <= 0)
m.c3398 = Constraint(expr= - m.b143 + m.b144 - m.b186 <= 0)
m.c3399 = Constraint(expr= - m.b143 + m.b145 - m.b187 <= 0)
m.c3400 = Constraint(expr= - m.b143 + m.b146 - m.b188 <= 0)
m.c3401 = Constraint(expr= - m.b143 + m.b147 - m.b189 <= 0)
m.c3402 = Constraint(expr= - m.b143 + m.b148 - m.b190 <= 0)
m.c3403 = Constraint(expr= - m.b143 + m.b149 - m.b191 <= 0)
m.c3404 = Constraint(expr= - m.b143 + m.b150 - m.b192 <= 0)
m.c3405 = Constraint(expr= - m.b143 + m.b151 - m.b193 <= 0)
m.c3406 = Constraint(expr= - m.b143 + m.b152 - m.b194 <= 0)
m.c3407 = Constraint(expr= - m.b143 + m.b153 - m.b195 <= 0)
m.c3408 = Constraint(expr= - m.b143 + m.b154 - m.b196 <= 0)
m.c3409 = Constraint(expr= - m.b143 + m.b155 - m.b197 <= 0)
m.c3410 = Constraint(expr= - m.b143 + m.b156 - m.b198 <= 0)
m.c3411 = Constraint(expr= - m.b144 + m.b145 - m.b199 <= 0)
m.c3412 = Constraint(expr= - m.b144 | |
<gh_stars>10-100
"""legacy_starting_point
Revision ID: 8995c5200c07
Revises:
Create Date: 2020-08-30 06:48:48.958520+00:00
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.schema import DropTable
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = None
branch_labels = None
depends_on = None
@compiles(DropTable, "postgresql")
def _compile_drop_table(element, compiler, **kw):
    """Append ``CASCADE`` to PostgreSQL ``DROP TABLE`` statements.

    Registered through SQLAlchemy's ``@compiles`` hook so that dependent
    objects are dropped along with the table.
    https://stackoverflow.com/a/38679457
    """
    ddl = compiler.visit_drop_table(element)
    return ddl + " CASCADE"
def upgrade():
# Creates all tables and indexes to match the legacy database schema (only necessary if
# building the API from scratch; the live site will migrate data and schema over from MySQL)
op.create_table(
"user",
sa.Column(
"id",
sa.BIGINT(),
server_default=sa.text("nextval('user_id_seq'::regclass)"),
autoincrement=True,
nullable=False,
),
sa.Column("email", sa.VARCHAR(length=254), autoincrement=False, nullable=False),
sa.Column("badge", sa.VARCHAR(length=8), autoincrement=False, nullable=False),
sa.Column(
"username", sa.VARCHAR(length=42), autoincrement=False, nullable=False
),
sa.Column(
"password", sa.VARCHAR(length=255), autoincrement=False, nullable=False
),
sa.Column(
"created",
postgresql.TIMESTAMP(timezone=True),
autoincrement=False,
nullable=True,
),
sa.Column(
"modified",
postgresql.TIMESTAMP(timezone=True),
autoincrement=False,
nullable=True,
),
sa.Column(
"reset_uuid", sa.VARCHAR(length=36), autoincrement=False, nullable=True
),
sa.Column(
"newsletter_opt_in", sa.BOOLEAN(), autoincrement=False, nullable=False
),
sa.Column("description", sa.TEXT(), autoincrement=False, nullable=True),
sa.Column(
"is_admin",
sa.BOOLEAN(),
server_default=sa.text("false"),
autoincrement=False,
nullable=False,
),
sa.Column(
"exclude_subscriptions",
sa.BOOLEAN(),
server_default=sa.text("false"),
autoincrement=False,
nullable=False,
),
sa.Column(
"is_banned",
sa.BOOLEAN(),
server_default=sa.text("false"),
autoincrement=False,
nullable=False,
),
sa.Column("moderation_notes", sa.TEXT(), autoincrement=False, nullable=True),
sa.Column(
"email_subscriptions", sa.BOOLEAN(), autoincrement=False, nullable=False
),
sa.Column("colorize_icons", sa.BOOLEAN(), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint("id", name="idx_17073_primary"),
postgresql_ignore_search_path=False,
)
op.create_index("idx_17073_ix_user_reset_uuid", "user", ["reset_uuid"], unique=True)
op.create_index(
"idx_17073_ix_user_email_subscriptions",
"user",
["email_subscriptions"],
unique=False,
)
op.create_index("idx_17073_ix_user_email", "user", ["email"], unique=True)
op.create_index("idx_17073_ix_user_badge", "user", ["badge"], unique=True)
op.create_table(
"releases",
sa.Column(
"id",
sa.BIGINT(),
server_default=sa.text("nextval('releases_id_seq'::regclass)"),
autoincrement=True,
nullable=False,
),
sa.Column("name", sa.VARCHAR(length=60), autoincrement=False, nullable=False),
sa.Column(
"is_phg",
sa.BOOLEAN(),
server_default=sa.text("false"),
autoincrement=False,
nullable=False,
),
sa.Column(
"is_promo",
sa.BOOLEAN(),
server_default=sa.text("false"),
autoincrement=False,
nullable=False,
),
sa.Column(
"designer_name", sa.VARCHAR(length=100), autoincrement=False, nullable=True
),
sa.Column(
"designer_url", sa.VARCHAR(length=255), autoincrement=False, nullable=True
),
sa.Column("is_retiring", sa.BOOLEAN(), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint("id", name="idx_17031_primary"),
postgresql_ignore_search_path=False,
)
op.create_index("idx_17031_name", "releases", ["name"], unique=True)
op.create_table(
"card",
sa.Column(
"id",
sa.BIGINT(),
server_default=sa.text("nextval('card_id_seq'::regclass)"),
autoincrement=True,
nullable=False,
),
sa.Column("name", sa.VARCHAR(length=30), autoincrement=False, nullable=False),
sa.Column("stub", sa.VARCHAR(length=30), autoincrement=False, nullable=False),
sa.Column("json", sa.TEXT(), autoincrement=False, nullable=True),
sa.Column(
"release_id",
sa.BIGINT(),
server_default=sa.text("'0'::bigint"),
autoincrement=False,
nullable=False,
),
sa.Column(
"card_type", sa.VARCHAR(length=25), autoincrement=False, nullable=False
),
sa.Column("cost_weight", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column(
"dice_flags",
sa.BIGINT(),
server_default=sa.text("'0'::bigint"),
autoincrement=False,
nullable=False,
),
sa.Column(
"phoenixborn", sa.VARCHAR(length=25), autoincrement=False, nullable=True
),
sa.Column("copies", sa.SMALLINT(), autoincrement=False, nullable=True),
sa.Column(
"is_summon_spell",
sa.BOOLEAN(),
server_default=sa.text("false"),
autoincrement=False,
nullable=False,
),
sa.Column("text", sa.TEXT(), autoincrement=False, nullable=True),
sa.Column(
"alt_dice_flags",
sa.BIGINT(),
server_default=sa.text("'0'::bigint"),
autoincrement=False,
nullable=False,
),
sa.Column("entity_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column(
"version",
sa.BIGINT(),
server_default=sa.text("'1'::bigint"),
autoincrement=False,
nullable=False,
),
sa.Column(
"artist_name", sa.VARCHAR(length=100), autoincrement=False, nullable=True
),
sa.Column(
"artist_url", sa.VARCHAR(length=255), autoincrement=False, nullable=True
),
sa.ForeignKeyConstraint(
["release_id"],
["releases.id"],
name="card_release_ibfk_1",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.PrimaryKeyConstraint("id", name="idx_16961_primary"),
postgresql_ignore_search_path=False,
)
op.create_index("idx_16961_ix_card_text", "card", ["name", "text"], unique=False)
op.create_index("idx_16961_ix_card_stub", "card", ["stub"], unique=True)
op.create_index(
"idx_16961_ix_card_release_id", "card", ["release_id"], unique=False
)
op.create_index(
"idx_16961_ix_card_phoenixborn", "card", ["phoenixborn"], unique=False
)
op.create_index("idx_16961_ix_card_name", "card", ["name"], unique=True)
op.create_index("idx_16961_ix_card_entity_id", "card", ["entity_id"], unique=True)
op.create_index(
"idx_16961_ix_card_dice_flags", "card", ["dice_flags"], unique=False
)
op.create_index(
"idx_16961_ix_card_cost_weight", "card", ["cost_weight"], unique=False
)
op.create_index("idx_16961_ix_card_card_type", "card", ["card_type"], unique=False)
op.create_index(
"idx_16961_ix_card_alt_dice_flags", "card", ["alt_dice_flags"], unique=False
)
op.create_table(
"comment",
sa.Column("id", sa.BIGINT(), autoincrement=True, nullable=False),
sa.Column("entity_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("user_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("source_entity_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column(
"source_type", sa.VARCHAR(length=16), autoincrement=False, nullable=True
),
sa.Column("source_version", sa.BIGINT(), autoincrement=False, nullable=True),
sa.Column("text", sa.TEXT(), autoincrement=False, nullable=True),
sa.Column("order", sa.BIGINT(), autoincrement=False, nullable=True),
sa.Column(
"is_deleted",
sa.BOOLEAN(),
server_default=sa.text("false"),
autoincrement=False,
nullable=False,
),
sa.Column(
"is_moderated",
sa.BOOLEAN(),
server_default=sa.text("false"),
autoincrement=False,
nullable=False,
),
sa.Column("original_text", sa.TEXT(), autoincrement=False, nullable=True),
sa.Column("moderation_notes", sa.TEXT(), autoincrement=False, nullable=True),
sa.Column(
"created",
postgresql.TIMESTAMP(timezone=True),
autoincrement=False,
nullable=True,
),
sa.Column(
"modified",
postgresql.TIMESTAMP(timezone=True),
autoincrement=False,
nullable=True,
),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
name="comment_ibfk_1",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.PrimaryKeyConstraint("id", name="idx_16978_primary"),
)
op.create_index("idx_16978_user_id", "comment", ["user_id"], unique=False)
op.create_index(
"idx_16978_ix_comment_source_entity_id",
"comment",
["source_entity_id"],
unique=False,
)
op.create_index("idx_16978_ix_comment_order", "comment", ["order"], unique=False)
op.create_index(
"idx_16978_ix_comment_is_deleted", "comment", ["is_deleted"], unique=False
)
op.create_index(
"idx_16978_ix_comment_entity_id", "comment", ["entity_id"], unique=True
)
op.create_index(
"idx_16978_ix_comment_created", "comment", ["created"], unique=False
)
op.create_table(
"section",
sa.Column(
"id",
sa.BIGINT(),
server_default=sa.text("nextval('section_id_seq'::regclass)"),
autoincrement=True,
nullable=False,
),
sa.Column("entity_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("title", sa.VARCHAR(length=255), autoincrement=False, nullable=False),
sa.Column("stub", sa.VARCHAR(length=255), autoincrement=False, nullable=False),
sa.Column("description", sa.TEXT(), autoincrement=False, nullable=True),
sa.Column(
"is_restricted",
sa.BOOLEAN(),
server_default=sa.text("false"),
autoincrement=False,
nullable=False,
),
sa.PrimaryKeyConstraint("id", name="idx_17039_primary"),
postgresql_ignore_search_path=False,
)
op.create_index("idx_17039_ix_section_stub", "section", ["stub"], unique=True)
op.create_index(
"idx_17039_ix_section_is_restricted", "section", ["is_restricted"], unique=False
)
op.create_index(
"idx_17039_ix_section_entity_id", "section", ["entity_id"], unique=True
)
op.create_table(
"card_conjuration",
sa.Column("card_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("conjuration_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(
["card_id"],
["card.id"],
name="card_conjuration_ibfk_1",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.ForeignKeyConstraint(
["conjuration_id"],
["card.id"],
name="card_conjuration_ibfk_2",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.PrimaryKeyConstraint("card_id", "conjuration_id", name="idx_16973_primary"),
)
op.create_index(
"idx_16973_conjuration_id", "card_conjuration", ["conjuration_id"], unique=False
)
op.create_table(
"ashes500_revision",
sa.Column(
"id",
sa.BIGINT(),
server_default=sa.text("nextval('ashes500_revision_id_seq'::regclass)"),
autoincrement=True,
nullable=False,
),
sa.Column("entity_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("description", sa.TEXT(), autoincrement=False, nullable=True),
sa.Column(
"created",
postgresql.TIMESTAMP(timezone=True),
autoincrement=False,
nullable=True,
),
sa.PrimaryKeyConstraint("id", name="idx_16946_primary"),
postgresql_ignore_search_path=False,
)
op.create_index(
"idx_16946_ix_ashes500_revision_entity_id",
"ashes500_revision",
["entity_id"],
unique=False,
)
op.create_index(
"idx_16946_ix_ashes500_revision_created",
"ashes500_revision",
["created"],
unique=False,
)
op.create_table(
"deck",
sa.Column("id", sa.BIGINT(), autoincrement=True, nullable=False),
sa.Column("title", sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column("description", sa.TEXT(), autoincrement=False, nullable=True),
sa.Column(
"is_public",
sa.BOOLEAN(),
server_default=sa.text("false"),
autoincrement=False,
nullable=False,
),
sa.Column(
"created",
postgresql.TIMESTAMP(timezone=True),
autoincrement=False,
nullable=True,
),
sa.Column(
"modified",
postgresql.TIMESTAMP(timezone=True),
autoincrement=False,
nullable=True,
),
sa.Column("user_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("phoenixborn_id", sa.BIGINT(), autoincrement=False, nullable=True),
sa.Column("is_snapshot", sa.BOOLEAN(), autoincrement=False, nullable=False),
sa.Column("source_id", sa.BIGINT(), autoincrement=False, nullable=True),
sa.Column(
"is_preconstructed",
sa.BOOLEAN(),
server_default=sa.text("false"),
autoincrement=False,
nullable=False,
),
sa.Column("entity_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column(
"ashes_500_revision_id", sa.BIGINT(), autoincrement=False, nullable=True
),
sa.Column("ashes_500_score", sa.BIGINT(), autoincrement=False, nullable=True),
sa.Column(
"preconstructed_release", sa.BIGINT(), autoincrement=False, nullable=True
),
sa.ForeignKeyConstraint(
["ashes_500_revision_id"],
["ashes500_revision.id"],
name="fk_ashes_500_revision_id",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.ForeignKeyConstraint(
["phoenixborn_id"],
["card.id"],
name="deck_ibfk_1",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.ForeignKeyConstraint(
["source_id"],
["deck.id"],
name="deck_ibfk_3",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
name="deck_ibfk_2",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.PrimaryKeyConstraint("id", name="idx_16989_primary"),
)
op.create_index("idx_16989_ix_deck_user_id", "deck", ["user_id"], unique=False)
op.create_index("idx_16989_ix_deck_title", "deck", ["title"], unique=False)
op.create_index("idx_16989_ix_deck_source_id", "deck", ["source_id"], unique=False)
op.create_index(
"idx_16989_ix_deck_preconstructed_release",
"deck",
["preconstructed_release"],
unique=False,
)
op.create_index(
"idx_16989_ix_deck_phoenixborn_id", "deck", ["phoenixborn_id"], unique=False
)
op.create_index("idx_16989_ix_deck_modified", "deck", ["modified"], unique=False)
op.create_index(
"idx_16989_ix_deck_is_snapshot", "deck", ["is_snapshot"], unique=False
)
op.create_index("idx_16989_ix_deck_is_public", "deck", ["is_public"], unique=False)
op.create_index(
"idx_16989_ix_deck_is_preconstructed",
"deck",
["is_preconstructed"],
unique=False,
)
op.create_index("idx_16989_ix_deck_entity_id", "deck", ["entity_id"], unique=True)
op.create_index("idx_16989_ix_deck_created", "deck", ["created"], unique=False)
op.create_index(
"idx_16989_fk_ashes_500_revision_id",
"deck",
["ashes_500_revision_id"],
unique=False,
)
op.create_table(
"deck_selected_card",
sa.Column("deck_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("card_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("is_first_five", sa.BOOLEAN(), autoincrement=False, nullable=False),
sa.Column("is_paid_effect", sa.BOOLEAN(), autoincrement=False, nullable=False),
sa.Column(
"tutor_card_id",
sa.BIGINT(),
server_default=sa.text("'0'::bigint"),
autoincrement=False,
nullable=False,
),
sa.ForeignKeyConstraint(
["card_id"],
["card.id"],
name="deck_selected_card_ibfk_1",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.ForeignKeyConstraint(
["deck_id"],
["deck.id"],
name="deck_selected_card_ibfk_2",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.PrimaryKeyConstraint(
"deck_id", "card_id", "tutor_card_id", name="idx_17004_primary"
),
)
op.create_index(
"idx_17004_deck_selected_card_ibfk_1",
"deck_selected_card",
["card_id"],
unique=False,
)
op.create_table(
"deck_card",
sa.Column("deck_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("card_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("count", sa.SMALLINT(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(
["card_id"],
["card.id"],
name="deck_card_ibfk_1",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.ForeignKeyConstraint(
["deck_id"],
["deck.id"],
name="deck_card_ibfk_2",
onupdate="RESTRICT",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("deck_id", "card_id", name="idx_16998_primary"),
)
op.create_index("idx_16998_card_id", "deck_card", ["card_id"], unique=False)
op.create_table(
"invite",
sa.Column("uuid", sa.VARCHAR(length=36), autoincrement=False, nullable=False),
sa.Column("email", sa.VARCHAR(length=254), autoincrement=False, nullable=False),
sa.Column("requests", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column(
"requested",
postgresql.TIMESTAMP(timezone=True),
autoincrement=False,
nullable=True,
),
sa.PrimaryKeyConstraint("uuid", name="idx_17008_primary"),
)
op.create_index("idx_17008_ix_invite_email", "invite", ["email"], unique=True)
op.create_table(
"stream",
sa.Column("id", sa.BIGINT(), autoincrement=True, nullable=False),
sa.Column("entity_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column(
"entity_type", sa.VARCHAR(length=16), autoincrement=False, nullable=True
),
sa.Column("source_entity_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column(
"posted",
postgresql.TIMESTAMP(timezone=True),
autoincrement=False,
nullable=True,
),
sa.PrimaryKeyConstraint("id", name="idx_17058_primary"),
)
op.create_index(
"idx_17058_ix_stream_souce_entity_id",
"stream",
["source_entity_id"],
unique=False,
)
op.create_index("idx_17058_ix_stream_posted", "stream", ["posted"], unique=False)
op.create_index(
"idx_17058_ix_stream_entity_type", "stream", ["entity_type"], unique=False
)
op.create_index(
"idx_17058_ix_stream_entity_id", "stream", ["entity_id"], unique=True
)
op.create_table(
"user_release",
sa.Column("user_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("release_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(
["release_id"],
["releases.id"],
name="user_release_ibfk_1",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
name="user_release_ibfk_2",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.PrimaryKeyConstraint("user_id", "release_id", name="idx_17083_primary"),
)
op.create_index(
"idx_17083_release_id", "user_release", ["release_id"], unique=False
)
op.create_index(
"idx_17083_ix_user_release_user_id", "user_release", ["user_id"], unique=False
)
op.create_table(
"subscription",
sa.Column("user_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("source_entity_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column(
"last_seen_entity_id", sa.BIGINT(), autoincrement=False, nullable=True
),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
name="subscription_ibfk_1",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.PrimaryKeyConstraint(
"user_id", "source_entity_id", name="idx_17068_primary"
),
)
op.create_index(
"idx_17068_ix_subscription_last_seen_entity_id",
"subscription",
["last_seen_entity_id"],
unique=False,
)
op.create_table(
"deck_die",
sa.Column("deck_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("die_flag", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("count", sa.SMALLINT(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(
["deck_id"],
["deck.id"],
name="deck_die_ibfk_1",
onupdate="RESTRICT",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("deck_id", "die_flag", name="idx_17001_primary"),
)
op.create_table(
"streamable",
sa.Column("entity_id", sa.BIGINT(), autoincrement=True, nullable=False),
sa.PrimaryKeyConstraint("entity_id", name="idx_17064_primary"),
)
op.create_table(
"ashes500_value",
sa.Column("id", sa.BIGINT(), autoincrement=True, nullable=False),
sa.Column("card_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("revision_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("combo_card_id", sa.BIGINT(), autoincrement=False, nullable=True),
sa.Column("qty_1", sa.SMALLINT(), autoincrement=False, nullable=False),
sa.Column("qty_2", sa.SMALLINT(), autoincrement=False, nullable=True),
sa.Column("qty_3", sa.SMALLINT(), autoincrement=False, nullable=True),
sa.Column(
"combo_card_type", sa.VARCHAR(length=25), autoincrement=False, nullable=True
),
sa.ForeignKeyConstraint(
["card_id"],
["card.id"],
name="ashes500_value_ibfk_1",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.ForeignKeyConstraint(
["combo_card_id"],
["card.id"],
name="ashes500_value_ibfk_2",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.ForeignKeyConstraint(
["revision_id"],
["ashes500_revision.id"],
name="ashes500_value_ibfk_3",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.PrimaryKeyConstraint("id", name="idx_16955_primary"),
)
op.create_index(
"idx_16955_ix_ashes500_value_revision_id",
"ashes500_value",
["revision_id"],
unique=False,
)
op.create_index(
"idx_16955_ix_ashes500_value_card_id",
"ashes500_value",
["card_id"],
unique=False,
)
op.create_index(
"idx_16955_combo_card_id", "ashes500_value", ["combo_card_id"], unique=False
)
op.create_table(
"post",
sa.Column("id", sa.BIGINT(), autoincrement=True, nullable=False),
sa.Column("entity_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("user_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("section_id", sa.BIGINT(), autoincrement=False, nullable=False),
sa.Column("title", sa.VARCHAR(length=255), autoincrement=False, nullable=False),
sa.Column("text", sa.TEXT(), autoincrement=False, nullable=True),
sa.Column("pin_teaser", sa.TEXT(), autoincrement=False, nullable=True),
sa.Column(
"is_pinned",
sa.BOOLEAN(),
server_default=sa.text("false"),
autoincrement=False,
nullable=False,
),
sa.Column(
"is_deleted",
sa.BOOLEAN(),
server_default=sa.text("false"),
autoincrement=False,
nullable=False,
),
sa.Column(
"is_moderated",
sa.BOOLEAN(),
server_default=sa.text("false"),
autoincrement=False,
nullable=False,
),
sa.Column(
"original_title", sa.VARCHAR(length=255), autoincrement=False, nullable=True
),
sa.Column("original_text", sa.TEXT(), autoincrement=False, nullable=True),
sa.Column("moderation_notes", sa.TEXT(), autoincrement=False, nullable=True),
sa.Column(
"created",
postgresql.TIMESTAMP(timezone=True),
autoincrement=False,
nullable=True,
),
sa.Column(
"modified",
postgresql.TIMESTAMP(timezone=True),
autoincrement=False,
nullable=True,
),
sa.ForeignKeyConstraint(
["section_id"],
["section.id"],
name="post_ibfk_1",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
name="post_ibfk_2",
onupdate="RESTRICT",
ondelete="RESTRICT",
),
sa.PrimaryKeyConstraint("id", name="idx_17019_primary"),
)
op.create_index("idx_17019_user_id", "post", ["user_id"], unique=False)
op.create_index(
"idx_17019_ix_post_section_id", "post", ["section_id"], unique=False
)
op.create_index("idx_17019_ix_post_is_pinned", "post", ["is_pinned"], unique=False)
op.create_index(
"idx_17019_ix_post_is_deleted", "post", ["is_deleted"], unique=False
)
op.create_index("idx_17019_ix_post_entity_id", | |
dist <= sight_range:
self.once_in_sight_range_opponent_team_2[i][
t_id] = True
if 1 < action__ < 6:
if move_in_sight_not_counted:
self.move_in_sight_range_team2[i] += 1
move_in_sight_not_counted = False
x_diff = new_pos_team_2[i][0] - t_unit.pos.x
y_diff = new_pos_team_2[i][1] - t_unit.pos.y
if action__ == 2:
# north
if y_diff < 0:
self.move_toward_in_sight_range_team2[
i][t_id] += 1
else:
self.move_away_in_sight_range_team2[i][
t_id] += 1
if action__ == 3:
# south
if y_diff > 0:
self.move_toward_in_sight_range_team2[
i][t_id] += 1
else:
self.move_away_in_sight_range_team2[i][
t_id] += 1
if action__ == 4:
# east
if x_diff < 0:
self.move_toward_in_sight_range_team2[
i][t_id] += 1
else:
self.move_away_in_sight_range_team2[i][
t_id] += 1
if action__ == 5:
# west
if x_diff > 0:
self.move_toward_in_sight_range_team2[
i][t_id] += 1
else:
self.move_away_in_sight_range_team2[i][
t_id] += 1
for i in range(self.n_agents):
self.distance_traveled_team_1[i] += self.distance(
self.previous_team_1_pos[i][0],
self.previous_team_1_pos[i][1],
new_pos_team_1[i][0],
new_pos_team_1[i][1])
self.previous_team_1_pos[i][0] = new_pos_team_1[i][0]
self.previous_team_1_pos[i][1] = new_pos_team_1[i][1]
for i in range(self.n_enemies):
self.distance_traveled_team_2[i] += self.distance(
self.previous_team_2_pos[i][0],
self.previous_team_2_pos[i][1],
new_pos_team_2[i][0],
new_pos_team_2[i][1])
self.previous_team_2_pos[i][0] = new_pos_team_2[i][0]
self.previous_team_2_pos[i][1] = new_pos_team_2[i][1]
self.last_action = np.eye(self.n_actions)[np.array(actions)]
# Collect individual actions
sc_actions_team_1 = []
sc_actions_team_2 = []
if self.debug:
logging.debug("Actions".center(60, "-"))
try:
for a_id, action in enumerate(actions):
agent_action = self.get_agent_action(a_id, action)
if agent_action:
if a_id < self.n_agents:
sc_actions_team_1.append(agent_action)
else:
sc_actions_team_2.append(agent_action)
except AssertionError as err:
self._episode_count += 1
self.action_error += 1
self.reset()
return [0 for _ in actions], True, {"battle_won_team_1": False,
"battle_won_team_2": False,
"env_error": True}
req_actions_p1 = sc_pb.RequestAction(
actions=sc_actions_team_1)
req_actions_p2 = sc_pb.RequestAction(
actions=sc_actions_team_2)
req_actions_all = [req_actions_p1, req_actions_p2]
try:
for idx_, (controller, req_actions) \
in enumerate(zip(self._controller, req_actions_all)):
controller.actions(req_actions)
# Make step in SC2, i.e. apply actions
if self._step_mul is not None:
for _ in range(self._step_mul):
for c in self._controller:
c.step()
# Observe here so that we know if the episode is over.
for idx_, c in enumerate(self._controller):
self._obs[idx_] = c.observe()
except (protocol.ProtocolError, protocol.ConnectionError):
self.full_restart()
return [0 for _ in actions], True, {"battle_won_team_1": False,
"battle_won_team_2": False,
"env_error": True}
self._total_steps += 1
self._episode_steps += 1
# Update units
game_end_code = self.update_units()
terminated = False
reward = self.reward_battle()
info = {"battle_won_team_1": False,
"battle_won_team_2": False}
if game_end_code is not None:
# Battle is over
terminated = True
self.battles_game += 1
if self.log_more_stats:
center_x = self.map_x / 2
center_y = self.map_y / 2
pos_team_1 = []
pos_team_2 = []
for i in range(self.n_agents):
unit = self.get_unit_by_id(i)
pos_team_1.append(((
unit.pos.x - center_x) / self.max_distance_x,
(
unit.pos.y - center_y) / self.max_distance_y))
for i in range(self.n_enemies):
unit = self.get_unit_by_id(self.n_agents + i)
pos_team_2.append(((
unit.pos.x - center_x) / self.max_distance_x,
(
unit.pos.y - center_y) / self.max_distance_y))
if game_end_code == 1 and not self.win_counted:
self.win_counted = True
self.battles_won_team_1 += 1
info["battle_won_team_1"] = True
if not self.reward_sparse:
reward[0] += self.reward_win
reward[1] += self.reward_defeat
else:
reward[0] = 1
reward[1] = -1
if self.log_more_stats:
# Records remaining health
for i in range(self.n_agents):
unit = self.get_unit_by_id(i)
info["win_health_team_1_agent_" + str(
i)] = unit.health / unit.health_max
info["win_position_x_team_1_agent_" + str(
i)] = pos_team_1[i][0]
info["win_position_y_team_1_agent_" + str(
i)] = pos_team_1[i][1]
info["win_distance_traveled_team_1_agent_" + str(
i)] = self.distance_traveled_team_1[i]
info["win_attack_actions_team_1_agent_" + str(
i)] = self.attack_actions_team_1[i]
info["win_move_actions_team_1_agent_" + str(
i)] = self.move_actions_team_1[i]
info["win_stop_actions_team_1_agent_" + str(
i)] = self.stop_actions_team_1[i]
info[
"win_once_in_shoot_range_opponent_1_team_1_agent_" + str(
i)] = \
self.once_in_shoot_range_opponent_team_1[i][0]
info[
"win_once_in_shoot_range_opponent_2_team_1_agent_" + str(
i)] = \
self.once_in_shoot_range_opponent_team_1[i][1]
info[
"win_once_in_shoot_range_opponent_3_team_1_agent_" + str(
i)] = \
self.once_in_shoot_range_opponent_team_1[i][2]
info[
"win_once_in_sight_range_opponent_1_team_1_agent_" + str(
i)] = \
self.once_in_sight_range_opponent_team_1[i][0]
info[
"win_once_in_sight_range_opponent_2_team_1_agent_" + str(
i)] = \
self.once_in_sight_range_opponent_team_1[i][1]
info[
"win_once_in_sight_range_opponent_3_team_1_agent_" + str(
i)] = \
self.once_in_sight_range_opponent_team_1[i][2]
info[
"win_move_in_sight_range_team_1_agent_" + str(i)] = \
self.move_in_sight_range_team1[i]
info[
"win_move_toward_in_sight_range_1_team_1_agent_" + str(
i)] = self.move_toward_in_sight_range_team1[i][
0]
info[
"win_move_toward_in_sight_range_2_team_1_agent_" + str(
i)] = self.move_toward_in_sight_range_team1[i][
1]
info[
"win_move_toward_in_sight_range_3_team_1_agent_" + str(
i)] = self.move_toward_in_sight_range_team1[i][
2]
info[
"win_move_away_in_sight_range_1_team_1_agent_" + str(
i)] = self.move_away_in_sight_range_team1[i][0]
info[
"win_move_away_in_sight_range_2_team_1_agent_" + str(
i)] = self.move_away_in_sight_range_team1[i][1]
info[
"win_move_away_in_sight_range_3_team_1_agent_" + str(
i)] = self.move_away_in_sight_range_team1[i][2]
info[
"win_move_in_shoot_range_team_1_agent_" + str(i)] = \
self.move_in_shoot_range_team1[i]
info[
"win_move_toward_in_shoot_range_1_team_1_agent_" + str(
i)] = self.move_toward_in_shoot_range_team1[i][
0]
info[
"win_move_toward_in_shoot_range_2_team_1_agent_" + str(
i)] = self.move_toward_in_shoot_range_team1[i][
1]
info[
"win_move_toward_in_shoot_range_3_team_1_agent_" + str(
i)] = self.move_toward_in_shoot_range_team1[i][
2]
info[
"win_move_away_in_shoot_range_1_team_1_agent_" + str(
i)] = self.move_away_in_shoot_range_team1[i][0]
info[
"win_move_away_in_shoot_range_2_team_1_agent_" + str(
i)] = self.move_away_in_shoot_range_team1[i][1]
info[
"win_move_away_in_shoot_range_3_team_1_agent_" + str(
i)] = self.move_away_in_shoot_range_team1[i][2]
for i in range(self.n_enemies):
info["loss_position_x_team_2_agent_" + str(
i)] = pos_team_2[i][0]
info["loss_position_y_team_2_agent_" + str(
i)] = pos_team_2[i][1]
info["loss_distance_traveled_team_2_agent_" + str(
i)] = self.distance_traveled_team_2[i]
info["loss_attack_actions_team_2_agent_" + str(
i)] = self.attack_actions_team_2[i]
info["loss_move_actions_team_2_agent_" + str(
i)] = self.move_actions_team_2[i]
info["loss_stop_actions_team_2_agent_" + str(
i)] = self.stop_actions_team_2[i]
info[
"loss_once_in_shoot_range_opponent_team_2_agent_" + str(
i)] = self.once_in_shoot_range_opponent_team_2[
i]
info[
"loss_once_in_sight_range_opponent_team_2_agent_" + str(
i)] = self.once_in_sight_range_opponent_team_2[
i]
info[
"loss_once_in_shoot_range_opponent_1_team_2_agent_" + str(
i)] = \
self.once_in_shoot_range_opponent_team_2[i][0]
info[
"loss_once_in_shoot_range_opponent_2_team_2_agent_" + str(
i)] = \
self.once_in_shoot_range_opponent_team_2[i][1]
info[
"loss_once_in_shoot_range_opponent_3_team_2_agent_" + str(
i)] = \
self.once_in_shoot_range_opponent_team_2[i][2]
info[
"loss_once_in_sight_range_opponent_1_team_2_agent_" + str(
i)] = \
self.once_in_sight_range_opponent_team_2[i][0]
info[
"loss_once_in_sight_range_opponent_2_team_2_agent_" + str(
i)] = \
self.once_in_sight_range_opponent_team_2[i][1]
info[
"loss_once_in_sight_range_opponent_3_team_2_agent_" + str(
i)] = \
self.once_in_sight_range_opponent_team_2[i][2]
info["loss_move_in_sight_range_team_2_agent_" + str(
i)] = self.move_in_sight_range_team2[i]
info[
"loss_move_toward_in_sight_range_1_team_2_agent_" + str(
i)] = self.move_toward_in_sight_range_team2[i][
0]
info[
"loss_move_toward_in_sight_range_2_team_2_agent_" + str(
i)] = self.move_toward_in_sight_range_team2[i][
1]
info[
"loss_move_toward_in_sight_range_3_team_2_agent_" + str(
i)] = self.move_toward_in_sight_range_team2[i][
2]
info[
"loss_move_away_in_sight_range_1_team_2_agent_" + str(
i)] = self.move_away_in_sight_range_team2[i][0]
info[
"loss_move_away_in_sight_range_2_team_2_agent_" + str(
i)] = self.move_away_in_sight_range_team2[i][1]
info[
"loss_move_away_in_sight_range_3_team_2_agent_" + str(
i)] = self.move_away_in_sight_range_team2[i][2]
info["loss_move_in_shoot_range_team_2_agent_" + str(
i)] = self.move_in_shoot_range_team2[i]
info[
"loss_move_toward_in_shoot_range_1_team_2_agent_" + str(
i)] = self.move_toward_in_shoot_range_team2[i][
0]
info[
"loss_move_toward_in_shoot_range_2_team_2_agent_" + str(
i)] = self.move_toward_in_shoot_range_team2[i][
1]
info[
"loss_move_toward_in_shoot_range_3_team_2_agent_" + str(
i)] = self.move_toward_in_shoot_range_team2[i][
2]
info[
"loss_move_away_in_shoot_range_1_team_2_agent_" + str(
i)] = self.move_away_in_shoot_range_team2[i][0]
info[
"loss_move_away_in_shoot_range_2_team_2_agent_" + str(
i)] = self.move_away_in_shoot_range_team2[i][1]
info[
"loss_move_away_in_shoot_range_3_team_2_agent_" + str(
i)] = self.move_away_in_shoot_range_team2[i][2]
elif game_end_code == -1 and not self.defeat_counted:
self.defeat_counted = True
self.battles_won_team_2 += 1
info["battle_won_team_2"] = True
if not self.reward_sparse:
reward[0] += self.reward_defeat
reward[1] += self.reward_win
else:
reward[0] = -1
reward[1] = 1
if self.log_more_stats:
for i in range(self.n_enemies):
unit = self.get_unit_by_id(self.n_agents + i)
info["win_health_team_2_agent_" + str(
i)] = unit.health / unit.health_max
info["win_position_x_team_2_agent_" + str(
i)] = pos_team_2[i][0]
info["win_position_y_team_2_agent_" + str(
i)] = pos_team_2[i][1]
info["win_distance_traveled_team_2_agent_" + str(
i)] = self.distance_traveled_team_2[i]
info["win_attack_actions_team_2_agent_" + str(
i)] = self.attack_actions_team_2[i]
info["win_move_actions_team_2_agent_" + str(
i)] = self.move_actions_team_2[i]
info["win_stop_actions_team_2_agent_" + str(
i)] = self.stop_actions_team_2[i]
info[
"win_once_in_shoot_range_opponent_team_2_agent_" + str(
i)] = self.once_in_shoot_range_opponent_team_2[
i]
info[
"win_once_in_sight_range_opponent_team_2_agent_" + str(
i)] = self.once_in_sight_range_opponent_team_2[
i]
info[
"win_once_in_shoot_range_opponent_1_team_2_agent_" + str(
i)] = \
self.once_in_shoot_range_opponent_team_2[i][0]
info[
"win_once_in_shoot_range_opponent_2_team_2_agent_" + str(
i)] = \
self.once_in_shoot_range_opponent_team_2[i][1]
info[
"win_once_in_shoot_range_opponent_3_team_2_agent_" + str(
i)] = \
self.once_in_shoot_range_opponent_team_2[i][2]
info[
"win_once_in_sight_range_opponent_1_team_2_agent_" + str(
i)] = \
self.once_in_sight_range_opponent_team_2[i][0]
info[
"win_once_in_sight_range_opponent_2_team_2_agent_" + str(
i)] = \
self.once_in_sight_range_opponent_team_2[i][1]
info[
"win_once_in_sight_range_opponent_3_team_2_agent_" + str(
i)] = \
self.once_in_sight_range_opponent_team_2[i][2]
info[
"win_move_in_sight_range_team_2_agent_" + str(i)] = \
self.move_in_sight_range_team2[i]
info[
"win_move_toward_in_sight_range_1_team_2_agent_" + str(
i)] = self.move_toward_in_sight_range_team2[i][
0]
info[
"win_move_toward_in_sight_range_2_team_2_agent_" + str(
i)] = self.move_toward_in_sight_range_team2[i][
1]
info[
"win_move_toward_in_sight_range_3_team_2_agent_" + str(
i)] = self.move_toward_in_sight_range_team2[i][
2]
info[
"win_move_away_in_sight_range_1_team_2_agent_" + str(
i)] = self.move_away_in_sight_range_team2[i][0]
info[
"win_move_away_in_sight_range_2_team_2_agent_" + str(
i)] = self.move_away_in_sight_range_team2[i][1]
info[
"win_move_away_in_sight_range_3_team_2_agent_" + str(
i)] = self.move_away_in_sight_range_team2[i][2]
info[
"win_move_in_shoot_range_team_2_agent_" + str(i)] = \
self.move_in_shoot_range_team2[i]
info[
"win_move_toward_in_shoot_range_1_team_2_agent_" + str(
i)] = self.move_toward_in_shoot_range_team2[i][
0]
info[
"win_move_toward_in_shoot_range_2_team_2_agent_" + str(
i)] = self.move_toward_in_shoot_range_team2[i][
1]
info[
"win_move_toward_in_shoot_range_3_team_2_agent_" + str(
i)] = self.move_toward_in_shoot_range_team2[i][
2]
info[
"win_move_away_in_shoot_range_1_team_2_agent_" + str(
i)] = self.move_away_in_shoot_range_team2[i][0]
info[
"win_move_away_in_shoot_range_2_team_2_agent_" + str(
i)] = self.move_away_in_shoot_range_team2[i][1]
info[
"win_move_away_in_shoot_range_3_team_2_agent_" + str(
i)] = self.move_away_in_shoot_range_team2[i][2]
for i in range(self.n_agents):
info["loss_position_x_team_1_agent_" + str(
i)] = pos_team_1[i][0]
info["loss_position_y_team_1_agent_" + str(
i)] = pos_team_1[i][1]
info["loss_distance_traveled_team_1_agent_" + str(
i)] = self.distance_traveled_team_1[i]
info["loss_attack_actions_team_1_agent_" + str(
i)] = self.attack_actions_team_1[i]
info["loss_move_actions_team_1_agent_" + str(
i)] = self.move_actions_team_1[i]
info["loss_stop_actions_team_1_agent_" + str(
i)] = self.stop_actions_team_1[i]
info[
"loss_once_in_shoot_range_opponent_1_team_1_agent_" + str(
i)] = \
self.once_in_shoot_range_opponent_team_1[i][0]
info[
"loss_once_in_shoot_range_opponent_2_team_1_agent_" + str(
i)] = \
self.once_in_shoot_range_opponent_team_1[i][1]
info[
"loss_once_in_shoot_range_opponent_3_team_1_agent_" + str(
i)] = \
self.once_in_shoot_range_opponent_team_1[i][2]
info[
"loss_once_in_sight_range_opponent_1_team_1_agent_" + str(
i)] = \
self.once_in_sight_range_opponent_team_1[i][0]
info[
"loss_once_in_sight_range_opponent_2_team_1_agent_" + str(
i)] = \
self.once_in_sight_range_opponent_team_1[i][1]
info[
"loss_once_in_sight_range_opponent_3_team_1_agent_" + str(
i)] = \
self.once_in_sight_range_opponent_team_1[i][2]
info["loss_move_in_sight_range_team_1_agent_" + str(
i)] = self.move_in_sight_range_team1[i]
info[
"loss_move_toward_in_sight_range_1_team_1_agent_" + str(
i)] = self.move_toward_in_sight_range_team1[i][
0]
info[
"loss_move_toward_in_sight_range_2_team_1_agent_" + str(
i)] = self.move_toward_in_sight_range_team1[i][
1]
info[
"loss_move_toward_in_sight_range_3_team_1_agent_" + str(
i)] = self.move_toward_in_sight_range_team1[i][
2]
info[
"loss_move_away_in_sight_range_1_team_1_agent_" + str(
i)] = self.move_away_in_sight_range_team1[i][0]
info[
"loss_move_away_in_sight_range_2_team_1_agent_" + str(
i)] = self.move_away_in_sight_range_team1[i][1]
info[
"loss_move_away_in_sight_range_3_team_1_agent_" + str(
i)] = self.move_away_in_sight_range_team1[i][2]
info["loss_move_in_shoot_range_team_1_agent_" + str(
i)] = self.move_in_shoot_range_team1[i]
info[
"loss_move_toward_in_shoot_range_1_team_1_agent_" + str(
i)] = self.move_toward_in_shoot_range_team1[i][
0]
info[
"loss_move_toward_in_shoot_range_2_team_1_agent_" + str(
i)] = self.move_toward_in_shoot_range_team1[i][
1]
info[
"loss_move_toward_in_shoot_range_3_team_1_agent_" + str(
i)] = self.move_toward_in_shoot_range_team1[i][
2]
info[
"loss_move_away_in_shoot_range_1_team_1_agent_" + str(
i)] = self.move_away_in_shoot_range_team1[i][0]
info[
"loss_move_away_in_shoot_range_2_team_1_agent_" + str(
i)] = self.move_away_in_shoot_range_team1[i][1]
info[
"loss_move_away_in_shoot_range_3_team_1_agent_" + str(
i)] = self.move_away_in_shoot_range_team1[i][2]
elif self._episode_steps >= self.episode_limit:
# Episode limit reached
terminated | |
not in all_authors:
if not etal_json:
all_authors[author_group_type] = []
if etal_json:
all_authors[author_group_type] = etal_json
else:
all_authors[author_group_type].append(author_json)
return all_authors
def references_json_authors(ref_authors, ref_content):
    """Build the authors portion of a reference JSON (kept separate for testability)."""
    all_authors = references_authors(ref_authors)
    if all_authors == {}:
        return ref_content
    ref_type = ref_content.get("type")
    # Map each reference type to the author group keys it may carry;
    # the key order determines the insertion order in ref_content
    plain_author_keys = ["authors", "authorsEtAl"]
    book_author_keys = ["authors", "authorsEtAl", "editors", "editorsEtAl"]
    data_author_keys = [
        "authors",
        "authorsEtAl",
        "compilers",
        "compilersEtAl",
        "curators",
        "curatorsEtAl",
    ]
    patent_author_keys = ["inventors", "inventorsEtAl", "assignees", "assigneesEtAl"]
    author_key_map = {
        "conference-proceeding": plain_author_keys,
        "journal": plain_author_keys,
        "other": plain_author_keys,
        "periodical": plain_author_keys,
        "preprint": plain_author_keys,
        "report": plain_author_keys,
        "web": plain_author_keys,
        "book": book_author_keys,
        "book-chapter": book_author_keys,
        "data": data_author_keys,
        "software": data_author_keys,
        "patent": patent_author_keys,
    }
    if ref_type in author_key_map:
        for author_key in author_key_map[ref_type]:
            utils.set_if_value(ref_content, author_key, all_authors.get(author_key))
    elif ref_type == "clinical-trial":
        # Always set as authors, once, then record which group was used
        # via the authorsType value
        for group in ["authors", "collaborators", "sponsors"]:
            if "authorsType" not in ref_content and all_authors.get(group):
                utils.set_if_value(ref_content, "authors", all_authors.get(group))
                utils.set_if_value(
                    ref_content,
                    "authorsEtAl",
                    all_authors.get(group + "EtAl"),
                )
                ref_content["authorsType"] = group
    elif ref_type == "thesis":
        # Thesis takes a single author value rather than a list
        if all_authors.get("authors") and len(all_authors.get("authors")) > 0:
            ref_content["author"] = all_authors.get("authors")[0]
    return ref_content
def references_json(soup, html_flag=True):
    """Build the list of reference JSON data from the article XML soup.

    :param soup: BeautifulSoup object of the article XML
    :param html_flag: when True, convert XML markup in title and edition
        values to HTML
    :returns: list with one entry per ref tag, each an OrderedDict of
        reference JSON data, or None when the rewrite step deleted it
    """

    # Configure the XML to HTML conversion preference for shorthand use below
    def convert(xml_string):
        return xml_to_html(html_flag, xml_string)

    references_json = []
    for ref in refs(soup):
        ref_content = OrderedDict()
        # type
        if ref.get("publication-type") == "book" and (
            "chapter-title" in ref or "full_article_title" in ref
        ):
            utils.set_if_value(ref_content, "type", "book-chapter")
        elif ref.get("publication-type") == "confproc":
            utils.set_if_value(ref_content, "type", "conference-proceeding")
        elif ref.get("publication-type") == "clinicaltrial":
            utils.set_if_value(ref_content, "type", "clinical-trial")
        elif ref.get("publication-type") == "webpage":
            utils.set_if_value(ref_content, "type", "web")
        else:
            utils.set_if_value(ref_content, "type", ref.get("publication-type"))

        utils.set_if_value(ref_content, "id", ref.get("id"))

        # date, preferring an explicit iso-8601 date when no discriminator
        (year_date, discriminator, year_in_press) = references_date(ref.get("year"))
        if not discriminator:
            utils.set_if_value(ref_content, "date", ref.get("year-iso-8601-date"))
        if "date" not in ref_content:
            utils.set_if_value(ref_content, "date", year_date)
            utils.set_if_value(ref_content, "discriminator", discriminator)

        # accessed
        if ref.get("publication-type") in ["web", "webpage"] and ref.get(
            "iso-8601-date"
        ):
            utils.set_if_value(ref_content, "accessed", ref.get("iso-8601-date"))
            # Set the date to the year tag value if accessed is set and there is a year
            utils.set_if_value(ref_content, "date", year_date)
            utils.set_if_value(ref_content, "discriminator", discriminator)

        # authors and etal
        if ref.get("authors"):
            ref_content = references_json_authors(ref.get("authors"), ref_content)

        # titles, the key used depends on the publication type
        if ref.get("publication-type") in [
            "journal",
            "confproc",
            "preprint",
            "periodical",
        ]:
            utils.set_if_value(
                ref_content, "articleTitle", ref.get("full_article_title")
            )
        elif ref.get("publication-type") in ["thesis", "clinicaltrial", "other"]:
            utils.set_if_value(ref_content, "title", ref.get("full_article_title"))
        elif ref.get("publication-type") in ["book"]:
            utils.set_if_value(ref_content, "bookTitle", ref.get("source"))
            if "bookTitle" not in ref_content:
                utils.set_if_value(
                    ref_content, "bookTitle", ref.get("full_article_title")
                )
        elif ref.get("publication-type") in ["software", "data"]:
            utils.set_if_value(ref_content, "title", ref.get("data-title"))
            if "title" not in ref_content:
                utils.set_if_value(ref_content, "title", ref.get("source"))
        elif ref.get("publication-type") in ["patent", "web", "webpage"]:
            utils.set_if_value(ref_content, "title", ref.get("full_article_title"))
            if "title" not in ref_content:
                utils.set_if_value(ref_content, "title", ref.get("comment"))
            if "title" not in ref_content:
                utils.set_if_value(ref_content, "title", ref.get("uri"))
        # Finally try to extract from source if a title is not found
        if (
            "title" not in ref_content
            and "articleTitle" not in ref_content
            and "bookTitle" not in ref_content
        ):
            utils.set_if_value(ref_content, "title", ref.get("source"))

        # conference
        if ref.get("conf-name"):
            utils.set_if_value(
                ref_content,
                "conference",
                references_publisher(ref.get("conf-name"), None),
            )

        # source
        if ref.get("publication-type") == "journal":
            utils.set_if_value(ref_content, "journal", ref.get("source"))
        elif ref.get("publication-type") == "periodical":
            utils.set_if_value(ref_content, "periodical", ref.get("source"))
        elif ref.get("publication-type") in ["web", "webpage"]:
            utils.set_if_value(ref_content, "website", ref.get("source"))
        elif ref.get("publication-type") in ["patent"]:
            utils.set_if_value(ref_content, "patentType", ref.get("source"))
        elif ref.get("publication-type") not in ["book"]:
            utils.set_if_value(ref_content, "source", ref.get("source"))

        # patent details
        utils.set_if_value(ref_content, "number", ref.get("patent"))
        utils.set_if_value(ref_content, "country", ref.get("country"))

        # publisher
        if ref.get("publisher_name"):
            utils.set_if_value(
                ref_content,
                "publisher",
                references_publisher(
                    ref.get("publisher_name"), ref.get("publisher_loc")
                ),
            )
        elif ref.get("publication-type") in ["software"] and ref.get("source"):
            utils.set_if_value(
                ref_content,
                "publisher",
                references_publisher(ref.get("source"), ref.get("publisher_loc")),
            )

        # volume
        utils.set_if_value(ref_content, "volume", ref.get("volume"))

        # edition, recorded as version for software references
        if ref.get("publication-type") in ["software"]:
            utils.set_if_value(ref_content, "version", ref.get("version"))
            if "version" not in ref_content:
                utils.set_if_value(ref_content, "version", ref.get("edition"))
        else:
            utils.set_if_value(ref_content, "edition", ref.get("edition"))

        # chapter-title
        utils.set_if_value(ref_content, "chapterTitle", ref.get("chapter-title"))
        # Use .get() so references with no publication-type (and hence no
        # "type" key set above) do not raise a KeyError here
        if (
            ref_content.get("type") == "book-chapter"
            and "chapterTitle" not in ref_content
        ):
            utils.set_if_value(
                ref_content, "chapterTitle", ref.get("full_article_title")
            )

        # pages
        if ref.get("elocation-id"):
            ref_content["pages"] = ref.get("elocation-id")
        elif ref.get("fpage") and not re.match(r"^[A-Za-z0-9\.]+$", ref.get("fpage")):
            # Use range as string value
            ref_content["pages"] = references_pages_range(
                ref.get("fpage"), ref.get("lpage")
            )
        elif ref.get("lpage") and not re.match(r"^[A-Za-z0-9\.]+$", ref.get("lpage")):
            # Use range as string value
            ref_content["pages"] = references_pages_range(
                ref.get("fpage"), ref.get("lpage")
            )
        elif ref.get("fpage") and not ref.get("lpage"):
            ref_content["pages"] = references_pages_range(
                ref.get("fpage"), ref.get("lpage")
            )
        elif ref.get("fpage") and ref.get("lpage"):
            ref_content["pages"] = OrderedDict()
            ref_content["pages"]["first"] = ref.get("fpage").strip()
            if ref.get("lpage"):
                ref_content["pages"]["last"] = ref.get("lpage").strip()
            ref_content["pages"]["range"] = references_pages_range(
                ref.get("fpage"), ref.get("lpage")
            )
        elif ref.get("comment"):
            if "in press" in ref.get("comment").lower().strip():
                ref_content["pages"] = "In press"
        elif year_in_press:
            # in press may have been taken from the year field
            ref_content["pages"] = "In press"

        # Special, to retain some comment tag values, convert to type other
        if ref.get("comment"):
            if ref_content.get("pages") and ref_content.get("pages") == "In press":
                # Do not convert
                pass
            else:
                ref_content["type"] = "other"

        # dataId
        if ref.get("publication-type") in ["data"]:
            utils.set_if_value(ref_content, "dataId", ref.get("accession"))

        # doi
        if ref.get("publication-type") not in ["web", "webpage"]:
            utils.set_if_value(ref_content, "doi", ref.get("doi"))

        # pmid
        utils.set_if_value(
            ref_content, "pmid", utils.coerce_to_int(ref.get("pmid"), None)
        )

        # isbn
        utils.set_if_value(ref_content, "isbn", ref.get("isbn"))

        # uri
        utils.set_if_value(ref_content, "uri", ref.get("uri"))
        # take the uri_text value if no uri yet
        if "uri" not in ref_content:
            utils.set_if_value(ref_content, "uri", ref.get("uri_text"))
        # next option is to set the uri from the doi value
        if "uri" not in ref_content and ref.get("publication-type") in [
            "confproc",
            "data",
            "web",
            "webpage",
            "preprint",
            "report",
        ]:
            if ref.get("doi"):
                # Convert doi to uri
                ref_content["uri"] = "https://doi.org/" + ref.get("doi")

        # Convert to HTML
        for index in ["title", "articleTitle", "chapterTitle", "bookTitle", "edition"]:
            utils.set_if_value(ref_content, index, convert(ref_content.get(index)))

        # Rewrite references data with support to delete a reference too
        ref_content_rewritten = elifetools.json_rewrite.rewrite_json(
            "references_json", soup, [ref_content]
        )
        if ref_content_rewritten and len(ref_content_rewritten) > 0:
            ref_content = ref_content_rewritten[0]
        elif len(ref_content_rewritten) == 0:
            ref_content = None

        # Now can convert to type unknown if applicable
        if ref_content:
            ref_content = convert_references_json(ref_content, soup)

        references_json.append(ref_content)

    return references_json
def convert_references_json(ref_content, soup=None):
    """Check for references that will not pass schema validation, fix or
    convert them to type unknown."""
    ref_type = ref_content.get("type")
    # Pairs of (reference types, key those types must contain); a reference
    # of a listed type missing its key cannot pass schema validation
    required_values = [
        (["book-chapter"], "editors"),
        (["journal"], "articleTitle"),
        (["book-chapter"], "pages"),
        (["journal"], "journal"),
        (["book", "book-chapter", "report", "thesis", "software"], "publisher"),
        (["book"], "bookTitle"),
        (["data"], "source"),
        (["conference-proceeding"], "conference"),
    ]
    incomplete = ref_type == "other" or any(
        ref_type in ref_types and required_key not in ref_content
        for ref_types, required_key in required_values
    )
    if incomplete:
        ref_content = references_json_to_unknown(ref_content, soup)
    return ref_content
def references_json_to_unknown(ref_content, soup=None):
    """Convert a reference into a minimal reference of type unknown.

    Keeps only the keys the unknown type supports, and falls back to a
    compiled details string when no title value can be found.
    """
    unknown = OrderedDict()
    unknown["type"] = "unknown"
    utils.set_if_value(unknown, "id", ref_content.get("id"))
    utils.set_if_value(unknown, "date", ref_content.get("date"))
    utils.set_if_value(unknown, "authors", ref_content.get("authors"))
    # A single author value (e.g. from a thesis) becomes a one-item list
    if not unknown.get("authors") and ref_content.get("author"):
        unknown["authors"] = [ref_content.get("author")]
    utils.set_if_value(
        unknown, "authorsEtAl", ref_content.get("authorsEtAl")
    )
    # compile details first for use later in title as a default
    details = references_json_unknown_details(ref_content, soup)
    # title: first available of title, bookTitle, articleTitle, details
    for candidate in (
        ref_content.get("title"),
        ref_content.get("bookTitle"),
        ref_content.get("articleTitle"),
        details,
    ):
        if "title" in unknown:
            break
        utils.set_if_value(unknown, "title", candidate)
    # add details
    utils.set_if_value(unknown, "details", details)
    utils.set_if_value(unknown, "uri", ref_content.get("uri"))
    return unknown
def references_json_unknown_details(ref_content, soup=None):
    """Extract the detail value for a reference of type unknown."""
    details = ""
    # Pages values contribute first, preferring a preformatted range
    if "pages" in ref_content:
        pages = ref_content["pages"]
        if "range" in pages:
            details += pages["range"]
        else:
            details += pages
    if soup and "id" in ref_content:
        # Attempt to find the XML element by id, and convert it to details
        ref_tag = utils.first(soup.select("ref#" + ref_content["id"]))
        if ref_tag:
            # Remove tags whose content is already part of the unknown
            # reference by now
            for tag_name in (
                "person-group",
                "year",
                "article-title",
                "elocation-id",
                "fpage",
                "lpage",
            ):
                ref_tag = utils.remove_tag_from_tag(ref_tag, tag_name)
            # Add the remaining tag content comma separated
            for tag in utils.first(raw_parser.element_citation(ref_tag)):
                tag_text = utils.node_text(tag)
                if tag_text is not None:
                    if details:
                        details += ", "
                    details += tag_text
    # An empty details string is reported as no details at all
    return details if details != "" else None
def ethics_json(soup):
    """Collect paragraph content items from the "ethics-information" fn-group."""
    ethics_fn_group = utils.first(raw_parser.fn_group(soup, "ethics-information"))
    # Part one, render every fn tag of the ethics section to body blocks
    rendered_blocks = []
    if ethics_fn_group:
        fn_tags = raw_parser.fn(ethics_fn_group)
        if fn_tags:
            for fn_tag in fn_tags:
                rendered_blocks += body_block_content_render(fn_tag)
    # Part two, keep only the paragraph content items from those blocks
    ethics = []
    for block in rendered_blocks:
        for content_item in block.get("content") or []:
            if content_item.get("type") == "paragraph":
                ethics.append(content_item)
    return ethics
def | |
"""Main FLOWER simulation logic"""
import collections
import logging
import warnings
from typing import List
import matplotlib.pyplot as plt
import numpy as np
from wsnsims.core import segment
from wsnsims.core.cluster import closest_nodes
from wsnsims.core.comparisons import much_greater_than
from wsnsims.core.environment import Environment
from wsnsims.flower import flower_runner
from wsnsims.flower import grid
from wsnsims.flower.cluster import FlowerCluster
from wsnsims.flower.cluster import FlowerHub
from wsnsims.flower.cluster import FlowerVirtualCluster
from wsnsims.flower.cluster import FlowerVirtualHub
from wsnsims.flower.energy import FLOWEREnergyModel
from wsnsims.tocs.cluster import combine_clusters
logger = logging.getLogger(__name__)
warnings.filterwarnings('error')
class FlowerError(Exception):
    """Exception type raised by the FLOWER simulation module."""
    pass
class FLOWER(object):
def __init__(self, environment):
"""
:param environment:
:type environment: core.environment.Environment
"""
self.env = environment
locs = np.random.rand(self.env.segment_count, 2) * self.env.grid_height
self.segments = [segment.Segment(loc) for loc in locs]
self.grid = grid.Grid(self.segments, self.env)
self.cells = list(self.grid.cells())
segment_centroid = np.mean(locs, axis=0)
logger.debug("Centroid located at %s", segment_centroid)
self.damaged = self.grid.closest_cell(segment_centroid)
self.energy_model = FLOWEREnergyModel(self, self.env)
self.virtual_clusters = list() # type: List[FlowerVirtualCluster]
self.clusters = list() # type: List[FlowerCluster]
# Create a virtual cell to represent the center of the damaged area
virtual_center_cell = self.damaged
self.virtual_hub = FlowerVirtualHub(self.env)
self.virtual_hub.add(virtual_center_cell)
self.virtual_hub.cluster_id = self.env.mdc_count - 1
self.hub = FlowerHub(self.env)
self.hub.add(virtual_center_cell)
self.hub.cluster_id = self.env.mdc_count - 1
self.em_is_large = False
self.ec_is_large = False
def show_state(self):
fig = plt.figure()
ax = fig.add_subplot(111)
# Show the location of all segments
segment_points = [seg.location.nd for seg in self.segments]
segment_points = np.array(segment_points)
ax.plot(segment_points[:, 0], segment_points[:, 1], 'bo')
# Show the location of all cells
cell_points = [c.location.nd for c in self.cells]
cell_points = np.array(cell_points)
ax.plot(cell_points[:, 0], cell_points[:, 1], 'rx')
# Illustrate the communication distance from each cell
for cell_point in cell_points:
circle = plt.Circle((cell_point[0], cell_point[1]),
radius=self.env.comms_range, alpha=0.1)
ax.add_patch(circle)
# Annotate the cells for easier debugging
for cell in self.cells:
xy = cell.location.nd
xy_text = xy + 1.
ax.annotate(cell, xy=xy, xytext=xy_text)
# Draw lines between each cell the clusters to illustrate the cluster
# formations.
for cluster in self.clusters + [self.hub]:
route = cluster.tour
cps = route.points
ax.plot(cps[:, 0], cps[:, 1], 'go')
ax.plot(cps[route.vertices, 0], cps[route.vertices, 1], 'g--',
lw=2)
for cluster in [self.hub]:
route = cluster.tour
cps = route.points
ax.plot(cps[:, 0], cps[:, 1], 'ro')
ax.plot(cps[route.vertices, 0], cps[route.vertices, 1], 'r--',
lw=2)
# Annotate the clusters for easier debugging
for cluster in self.clusters + [self.hub]:
xy = cluster.location.nd
xy_text = xy + 1.
ax.annotate(cluster, xy=xy, xytext=xy_text)
plt.show()
def find_cells(self):
for cell in self.grid.cells():
# Calculate the cell's proximity as it's cell distance from
# the center of the "damaged area."
cell.proximity = grid.cell_distance(cell, self.damaged)
# Calculate the number of one-hop segments within range of each cell
for cell in self.grid.cells():
segments = set()
for nbr in cell.neighbors:
segments = set.union(segments, nbr.segments)
segments = segments.difference(cell.segments)
cell.single_hop_count = len(segments)
# First, we need to filter the "families" for the set coverage. We
# start by filtering for access, then 1-hop count, then proximity.
families = [c for c in self.grid.cells() if c.segments]
# Group by segments covered, this also has the effect of filtering by
# access.
family_map = collections.defaultdict(list)
for cell in families:
family_map[tuple(cell.segments)].append(cell)
# Filter by 1-hop
for segments, cells in family_map.items():
if len(cells) == 1:
continue
best_1_hop = 0
best_cells = list()
for cell in cells:
if cell.single_hop_count > best_1_hop:
best_1_hop = cell.single_hop_count
best_cells.clear()
best_cells.append(cell)
elif cell.single_hop_count == best_1_hop:
best_cells.append(cell)
family_map[segments] = best_cells
# Filter by proximity
for segments, cells in family_map.items():
if len(cells) == 1:
continue
best_proximity = np.inf
best_cells = list()
for cell in cells:
if cell.proximity < best_proximity:
best_proximity = cell.proximity
best_cells.clear()
best_cells.append(cell)
assert best_cells
family_map[segments] = best_cells
families = list()
for _, cells in family_map.items():
families.extend(cells)
# Calculate the set cover over the segments
cover = list()
uncovered = set(self.segments)
while uncovered:
selected = max(families, key=lambda s: len(
uncovered.intersection(set(s.segments))))
uncovered -= set(selected.segments)
cover.append(selected)
# Initialized!!
cell_cover = list(cover)
logger.debug("Length of cover: %d", len(cell_cover))
assert self.env.mdc_count < len(cell_cover)
# Remove duplication among the cells
cell_cover.sort(key=lambda c: len(c.segments), reverse=True)
covered_segments = list()
for cell in cell_cover:
segments = list(cell.segments)
for seg in segments:
if seg in covered_segments:
# This segment is already served by another cell
cell.segments.remove(seg)
else:
covered_segments.append(seg)
segment_count = 0
for cell in cell_cover:
segment_count += len(cell.segments)
assert segment_count == len(self.segments)
# For future lookups, set a reference from each segment to its cell
for cell in cell_cover:
for seg in cell.segments:
seg.cell = cell
# Sort the cells by ID to ensure consistency across runs.
self.cells = sorted(cell_cover, key=lambda c: c.cell_id)
@staticmethod
def _polar_angle(point, origin):
vector = point - origin
angle = np.arctan2(vector[1], vector[0])
return angle
def polar_sort(self, clusters):
"""
:param clusters:
:type clusters: list(FlowerCluster)
:return:
"""
points = [c.location.nd for c in clusters]
origin = self.damaged.location.nd
polar_angles = [self._polar_angle(p, origin) for p in points]
indexes = np.argsort(polar_angles)
sorted_clusters = np.array(clusters)[indexes]
return list(sorted_clusters)
def create_virtual_clusters(self):
for cell in self.cells:
c = FlowerVirtualCluster(self.env)
c.add(cell)
self.virtual_clusters.append(c)
# Combine the clusters until we have MDC_COUNT - 1 non-central, virtual
# clusters
while len(self.virtual_clusters) >= self.env.mdc_count:
self.virtual_clusters = combine_clusters(self.virtual_clusters,
self.virtual_hub)
# FLOWER has some dependencies on the order of cluster IDs, so we need
# to sort and re-label each virtual cluster.
sorted_clusters = self.polar_sort(self.virtual_clusters)
for i, vc in enumerate(sorted_clusters):
vc.cluster_id = i
def greedy_expansion(self):
# First round (initial cell setup and energy calculation)
for c in self.cells:
c.cluster_id = -1
for vc in self.virtual_clusters:
c = FlowerCluster(self.env)
c.cluster_id = vc.cluster_id
c.anchor = self.damaged
closest_cell, _ = closest_nodes(vc, self.hub)
c.add(closest_cell)
self.clusters.append(c)
assert self.energy_model.total_movement_energy(self.hub) == 0.
# Rounds 2 through N
r = 1
while any(not c.completed for c in self.clusters):
r += 1
# Determine the minimum-cost cluster by first filtering out all
# non-completed clusters. Then find the the cluster with the lowest
# total cost.
candidates = self.clusters + [self.hub]
candidates = [c for c in candidates if not c.completed]
c_least = min(candidates,
key=lambda x: self.total_cluster_energy(x))
# In general, only consider cells that have not already been added
# to a cluster. There is an exception to this when expanding the
# hub cluster.
cells = [c for c in self.cells if c.cluster_id == -1]
# If there are no more cells to assign, then we mark this cluster
# as "completed"
if not cells:
c_least.completed = True
logger.debug("All cells assigned. Marking %s as completed",
c_least)
continue
if c_least == self.hub:
# This logic handles the case where the hub cluster is has the
# fewest energy requirements. Either the cluster will be moved
# (initialization) or it will be grown.
#
# If the hub cluster is still in its original location at the
# center of the damaged area, we need to move it to an actual
# cell. If the hub has already been moved, then we expand it by
# finding the cell nearest to the center of the damaged area,
# and that itself hasn't already been added to the hub cluster.
if c_least.cells == [self.damaged]:
# Find the nearest cell to the center of the damaged area
# and move the hub to it. This is equivalent to finding the
# cell with the lowest proximity.
best_cell = min(cells, key=lambda x: x.proximity)
# As the hub only currently has the virtual center cell in
# it, we can just "move" the hub to the nearest real cell
# by replacing the virtual cell with it.
self.hub.remove(self.damaged)
self.hub.add(best_cell)
# Just for proper bookkeeping, reset the virtual cell's ID
# to NOT_CLUSTERED
self.damaged.cluster_id = -1
self.damaged.virtual_cluster_id = -1
logger.debug("ROUND %d: Moved %s to %s", r, self.hub,
best_cell)
else:
# Find the set of cells that are not already in the hub
# cluster
available_cells = list(set(cells) - set(self.hub.cells))
# Out of those cells, find the one that is closest to the
# damaged area
best_cell, _ = closest_nodes(available_cells,
[self.hub.recent])
# Add that cell to the hub cluster
self.hub.add(best_cell)
logger.debug("ROUND %d: Added %s to %s", r, best_cell,
self.hub)
self.update_anchors()
else:
# In this case, the cluster with the lowest energy requirements
# is one of the non-hub clusters.
best_cell = None
# Find the VC that corresponds to the current cluster
vci = next(vc for vc in self.virtual_clusters if
vc.cluster_id == c_least.cluster_id)
# Get a list | |
0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f,
0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0,
0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f,
0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0,
0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4, 0x0f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4f, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xe4,
0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, | |
#-----------------------------------------------------------------------------
# Name: Reports
# Purpose:
# Author: <NAME>
# Created: 1/26/2018
# License: MIT License
#-----------------------------------------------------------------------------
"""Reports is a module dedicated to generating reports after data collection or analysis. It contains models for
basic html reports and the checkstandard reporting process.
Examples
--------
#!python
Requirements
------------
+ [sys](https://docs.python.org/3/library/sys.html)
+ [os](https://docs.python.org/3/library/os.html)
+ [re](https://docs.python.org/3/library/re.html)
+ [math](https://docs.python.org/3/library/math.html)
+ [pyMez](https://github.com/aricsanders/pyMez)
Help
---------------
<a href="./index.html">`pyMez.Code.Analysis`</a>
<div>
<a href="../../../pyMez_Documentation.html">Documentation Home</a> |
<a href="../../index.html">API Documentation Home</a> |
<a href="../../../Examples/html/Examples_Home.html">Examples Home</a> |
<a href="../../../Reference_Index.html">Index</a>
</div>
"""
#-----------------------------------------------------------------------------
# Standard Imports
import os
import sys
import math
import re
#-----------------------------------------------------------------------------
# Third Party Imports
# Make the package root importable when pyMez is not installed as a package.
sys.path.append(os.path.join(os.path.dirname( __file__ ), '..','..'))
# Each dependency is imported with a clear failure message.  The original
# failure is chained onto the re-raised ImportError (instead of being hidden
# by a bare `except:`) so the root cause stays visible in the traceback.
try:
    from Code.Analysis.SParameter import *
except Exception as error:
    print("Code.Analysis.SParameter did not import correctly")
    raise ImportError("Code.Analysis.SParameter did not import correctly") from error
try:
    from Code.Analysis.Uncertainty import *
except Exception as error:
    print("Code.Analysis.Uncertainty did not import correctly")
    raise ImportError("Code.Analysis.Uncertainty did not import correctly") from error
try:
    from Code.DataHandlers.NISTModels import *
except Exception as error:
    print("Code.DataHandlers.NISTModels did not import correctly")
    raise ImportError("Code.DataHandlers.NISTModels did not import correctly") from error
try:
    from Code.DataHandlers.XMLModels import *
except Exception as error:
    print("Code.DataHandlers.XMLModels did not import correctly")
    raise ImportError("Code.DataHandlers.XMLModels did not import correctly") from error
try:
    from Code.DataHandlers.HTMLModels import *
except Exception as error:
    print("Code.DataHandlers.HTMLModels did not import correctly")
    raise ImportError("Code.DataHandlers.HTMLModels did not import correctly") from error
try:
    from Code.DataHandlers.GraphModels import *
except Exception as error:
    print("Code.DataHandlers.GraphModels did not import correctly")
    raise ImportError("Code.DataHandlers.GraphModels did not import correctly") from error
#-----------------------------------------------------------------------------
# Module Constants
# Locations of the converted check-standard and DUT databases (csv format).
TWO_PORT_NR_CHKSTD_CSV=r"C:\Share\Converted_Check_Standard\Two_Port_NR_Check_Standard.csv"
COMBINED_ONE_PORT_CHKSTD_CSV=r"C:\Share\Converted_Check_Standard\Combined_One_Port_Check_Standard.csv"
COMBINED_TWO_PORT_CHKSTD_CSV=r"C:\Share\Converted_Check_Standard\Combined_Two_Port_Check_Standard.csv"
COMBINED_POWER_CHKSTD_CSV=r"C:\Share\Converted_Check_Standard\Combined_Power_Check_Standard.csv"
ONE_PORT_CALREP_CSV=r"C:\Share\Converted_DUT\One_Port_DUT.csv"
TWO_PORT_CALREP_CSV=r"C:\Share\Converted_DUT\Two_Port_DUT.csv"
POWER_3TERM_CALREP_CSV=r"C:\Share\Converted_DUT\Power_3Term_DUT.csv"
POWER_4TERM_CALREP_CSV=r"C:\Share\Converted_DUT\Power_4Term_DUT.csv"
# Javascript show/hide helper paired with the buttons emitted by
# HTMLReport.add_toggle.
DEFAULT_TOGGLE_SCRIPT="""<script type="text/javascript">
function toggleId(id,$link){
$node = document.getElementById(id);
if (!$node)
return;
if (!$node.style.display || $node.style.display == 'none') {
$node.style.display = 'block';
$link.value = '-';
} else {
$node.style.display = 'none';
$link.value = '+';
}
}
</script>"""
# CSS for the toggle buttons.
# BUG FIX: the closing tag was previously misspelled "</stlye>", emitting
# malformed HTML into every report head.
DEFAULT_TOGGLE_STYLE="""<style>
.toggleButton {
background-color: white;
border: 2px solid black;
border-radius: 8px;
color:red;
}
.toggleButton:hover {
box-shadow: 0 12px 16px 0 rgba(0,0,0,0.24), 0 17px 50px 0 rgba(0,0,0,0.19);
}
</style>"""
# Pandas column dtypes for the one-port check-standard database.
ONE_PORT_DTYPE={'Frequency':'float',
                'Direction':'str',
                'Connect':'str',
                'System_Id':'str',
                'System_Letter':'str',
                'Connector_Type_Calibration':'str',
                'Connector_Type_Measurement':'str',
                'Measurement_Type':'str',
                'Measurement_Date':'str',
                'Measurement_Time':'str',
                'Program_Used':'str',
                'Program_Revision':'str',
                'Operator':'str',
                'Calibration_Name':'str',
                'Calibration_Date':'str',
                'Port_Used':'int',
                'Number_Connects':'str',
                'Number_Repeats':'str',
                'Nbs':'str',
                'Number_Frequencies':'str',
                'Start_Frequency':'float',
                'Device_Description':'str',
                'Device_Id':'str',
                'Measurement_Timestamp':'str',
                }
#-----------------------------------------------------------------------------
# Module Functions
#-----------------------------------------------------------------------------
# Module Classes
class HTMLReport(HTMLBase):
    """Base class for generated HTML reports.

    Adds helpers on top of HTMLBase for javascript show/hide toggles,
    embedding images (via ImageGraph), and file download links.
    """

    def add_toggle_script(self, script=DEFAULT_TOGGLE_SCRIPT):
        """Adds a javascript template toggle script to the body of the HTML"""
        self.append_to_body(script)

    def add_toggle_style(self, style=DEFAULT_TOGGLE_STYLE):
        """Adds a css to format the javascript template, should be done once"""
        self.append_to_head(style)

    def add_toggle(self, tag_id=None):
        """Adds a toggle button that toggles the element with id tag_id. This can be used many times """
        toggle = '<input type="button" class="toggleButton" value="+" onclick="toggleId(\'{0}\',this)">'.format(tag_id)
        self.append_to_body(toggle)

    def embedd_image(self, image, image_mode="MatplotlibFigure", **options):
        """Embedds an image in the report. image_mode can be MatplotlibFigure (a reference to the figure class),
        Image (the PIL class),
        Base64 (a string of the values),
        Png, Jpg, Bmp Tiff(the file name),
        or a Ndarray of the image values"""
        # might change this to self.ImageGraph and use it elsewhere
        image_graph = ImageGraph()
        image_graph.set_state(image_mode, image)
        image_graph.move_to_node("EmbeddedHtml")
        self.append_to_body(image_graph.data)

    def embedd_image_figure(self, image, image_mode="MatplotlibFigure", figure_id="image", caption="", style="",
                            **options):
        """Embedds an image in the report inside a <figure id=figure_id> tag with an
        optional caption and inline style. See embedd_image for the supported
        image_mode values."""
        # might change this to self.ImageGraph and use it elsewhere
        image_graph = ImageGraph()
        image_graph.set_state(image_mode, image)
        image_graph.move_to_node("EmbeddedHtml")
        self.append_to_body("<figure id='{0}' style='{3}'>{1}<figcaption>{2}</figcaption></figure>".format(figure_id,
                                                                                                           image_graph.data,
                                                                                                           caption,
                                                                                                           style))

    def add_download_link(self, content_string, text="Download File", suggested_name="test.txt",
                          mime_type="text/plain"):
        """Adds a download link to the report"""
        self.append_to_body(String_to_DownloadLink(content_string, text=text,
                                                   suggested_name=suggested_name,
                                                   mime_type=mime_type))

    def clear(self):
        """Clears all content in the HTML"""
        # BUG FIX: Element.getchildren() was removed in Python 3.9; iterate
        # over a snapshot of the element's children instead while removing.
        for child in list(self.root):
            self.root.remove(child)
class CheckStandardReport(HTMLReport):
"""Class that creates a report based on a calibrated measurement of a checkstandard. Input can be a file path to
any of the ascii data
types returned by the modified measlp program or a multiconnect mulitdirectional set of
measurements in magnitude / angle format.
The locations of the
CheckStandard data bases in csv format and the directory of the results files are required.
The report is composed of:
1. A plot of the raw file
2. A plot of the file with calrep style errors
3. A plot comparing the file with calrep style errors to the old results database
4. A plot comparing the difference of the file to the old results database
5. A plot comparing the file with calrep style errors to the mean of the new database with outliers excluded
6. A history plot of the check standard for the current measurement and the last n measurements (default is 5)
7. A complete history plot of the check standard
8. A set of download links in text and the formats set in options
If no file is specified and a checkstandard_name is, then only history and means of that checkstandard are shown in the
report"""
def __init__(self, file_path=None, **options):
"""Initializes the CheckStandardReport Class"""
defaults = {"Device_Id": "CTN112",
"results_directory": r'C:\Share\resfiles',
"one_port_csv": COMBINED_ONE_PORT_CHKSTD_CSV,
"two_port_csv": COMBINED_TWO_PORT_CHKSTD_CSV,
"two_port_nr_csv": TWO_PORT_NR_CHKSTD_CSV,
"power_csv": COMBINED_POWER_CHKSTD_CSV,
"outlier_removal": True,
"last_n": 5,
"download_formats": ["Csv"],
"conversion_options":{
"nodes": ['CsvFile', 'ExcelFile'],
"extensions": [ 'csv', 'xlsx'],
"mime_types": ['text/plain',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet']}
}
self.options = {}
for key, value in defaults.items():
self.options[key] = value
for key, value in options.items():
self.options[key] = value
self.conversion_defaults = {"base_name": None,
"nodes": ['XmlFile', 'CsvFile', 'ExcelFile', 'OdsFile', 'MatFile', 'HtmlFile',
'JsonFile'],
"extensions": ['xml', 'csv', 'xlsx', 'ods', 'mat', 'html', 'json'],
"mime_types": ['application/xml', 'text/plain',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'application/vnd.oasis.opendocument.spreadsheet',
'application/x-matlab-data', 'text/html', 'application/json']}
# html_options={}
if self.options["conversion_options"] is None:
self.conversion_options=self.conversion_defaults
else:
self.conversion_options=self.options["conversion_options"]
HTMLReport.__init__(self, None, **self.options)
self.plots = []
self.plot_ids = []
self.plot_titles = []
self.plot_captions = []
# set up dtypes for pandas
one_port_dtype = ONE_PORT_DTYPE
# this reads the NISTModels constant
if COMBINE_S11_S22:
one_port_dtype["arg"] = 'float'
one_port_dtype["mag"] = 'float'
else:
one_port_dtype["argS11"] = 'float'
one_port_dtype["magS11"] = 'float'
one_port_dtype["argS22"] = 'float'
one_port_dtype["magS22"] = 'float'
# create a history dictionary.
# print("{0} is {1}".format("self.options",self.options))
self.history_dict = {'1-port': pandas.read_csv(self.options["one_port_csv"], dtype=one_port_dtype),
'2-port': pandas.read_csv(self.options["two_port_csv"]),
'2-portNR': pandas.read_csv(self.options["two_port_nr_csv"]),
'power': pandas.read_csv(self.options["power_csv"])}
if file_path is None:
# plot the results file
self.build_checkstandard_report()
else:
self.build_comparison_report(file_path)
    def build_checkstandard_report(self):
        """Builds the history-only report for the options Device_Id.

        Clears any existing document content, infers the measurement type
        from the device id, loads the device's history from the matching
        database in ``self.history_dict``, then assembles history plots,
        a summary paragraph and download links into the HTML body.
        """
        # No raw file is involved in this report variant.
        self.raw_measurement=None
        self.calrep_measurement=None
        # Reset document content and plot bookkeeping before rebuilding.
        self.clear()
        self.plots = []
        self.plot_ids = []
        self.plot_captions = []
        self.plot_titles = []
        # Assumes the third-from-last character of Device_Id encodes the
        # measurement type (e.g. "CTN112" -> "1") -- TODO confirm this
        # naming convention holds for all device ids.
        measurement_type = self.options["Device_Id"][-3]
        if re.match("1", measurement_type):
            self.options["Measurement_Type"] = "1-port"
        elif re.match("2", measurement_type):
            self.options["Measurement_Type"] = "2-port"
        elif re.match("p", measurement_type, re.IGNORECASE):
            self.options["Measurement_Type"] = "power"
        print(("{0} is {1}".format("measurement_type",measurement_type)))
        # Historical results file for this device.
        self.results_file = ResultFileModel(os.path.join(self.options["results_directory"], self.options["Device_Id"]))
        # Options forwarded to mean_from_history; None means "no filter".
        options = {"Device_Id": self.options["Device_Id"], "System_Id": None, "Measurement_Timestamp": None,
                   "Connector_Type_Measurement": None,
                   "Measurement_Date": None, "Measurement_Time": None, "outlier_removal": False}
        # Choose the history database key and column layout for the type.
        # NOTE(review): if none of these branches match, history_key is
        # unbound and the lookup below raises -- presumably unreachable for
        # valid device ids.
        if re.search('2-port', self.options["Measurement_Type"], re.IGNORECASE):
            history_key = '2-port'
            options["column_names"] = ['Frequency', 'magS11', 'argS11', 'magS21', 'argS21', 'magS22', 'argS22']
        elif re.search('1-port', self.options["Measurement_Type"], re.IGNORECASE):
            history_key = '1-port'
            if COMBINE_S11_S22:
                options["column_names"] = ['Frequency', 'magS11', 'argS11']
            else:
                options["column_names"] = ['Frequency', 'magS11', 'argS11', 'magS22', 'argS22']
        elif re.search('Dry Cal|Thermistor|power', self.options["Measurement_Type"], re.IGNORECASE):
            history_key = 'power'
            options["column_names"] = ['Frequency', 'magS11', 'argS11', 'Efficiency']
        # print history[history_key][:5]
        # print history_key
        database = self.history_dict[history_key]
        # All database rows for this particular device.
        self.device_history = database[database["Device_Id"] == self.options["Device_Id"]]
        if self.options["outlier_removal"]:
            self.outlier_removal()
        self.mean_frame = mean_from_history(self.device_history, **options)
        # Plot 1: the complete measurement history of the device.
        self.plots.append(plot_checkstandard_history(self.device_history))
        self.plot_ids.append("completeHistory")
        self.plot_titles.append("The Complete History of {0}".format(self.options["Device_Id"]))
        self.plot_captions.append("""Figure {0}. Every measurement of {1} currently
        in the database.""".format(len(self.plots), self.options["Device_Id"]))
        # Plot 2: the last `last_n` measurements against the historical
        # database and the new-database mean.
        self.plots.append(plot_checkstandard_history(self.device_history,
                                                     min_num=len(self.get_measurement_dates()) - self.options[
                                                         "last_n"] - 1,
                                                     max_num=len(self.get_measurement_dates()) - 1,
                                                     extra_plots=[self.results_file,
                                                                  self.mean_frame],
                                                     extra_plot_labels=["Historical Database", "Mean of New Database"],
                                                     extra_plot_formats=["r--", "k^"]))
        self.plot_ids.append("partialHistory")
        self.plot_titles.append("""The last {0} measurements of {1}
        compared with the historical database and mean. """.format(self.options["last_n"], self.options["Device_Id"]))
        self.plot_captions.append("""Figure {0}. Last {1} measurements of {2}
        compared with historical database and mean""".format(len(self.plots),
                                                             self.options["last_n"], self.options["Device_Id"]))
        # Wire up the javascript/css for collapsible sections.
        self.add_toggle_support()
        summary_text = """
        This device has been measured {0} times from {1} to {2}""".format(len(self.get_measurement_dates()),
                                                                          min(self.get_measurement_dates()),
                                                                          max(self.get_measurement_dates()))
        self.add_report_heading()
        self.append_to_body({"tag": "p", "text": summary_text})
        # Hidden-by-default table of download links for the data behind the
        # plots, in the formats configured in conversion_options.
        download_options={"mime_types":self.conversion_options["mime_types"],
                          "download_formats":self.conversion_options["nodes"],
                          "download_extensions":self.conversion_options["extensions"],
                          "clear_before": False,
                          "download_files": [self.results_file,
                                             self.mean_frame, self.device_history],
                          "download_files_input_format": ["AsciiDataTable", "DataFrame",
                                                          "DataFrame"],
                          "download_files_base_names": ["Historical_Database.txt",
                                                        "Mean_Database.txt",
                                                        "Device_History.txt"],
                          "style": "display:none;border:1;"}
        self.add_download_table(**download_options)
        self.add_all_plots()
def build_comparison_report(self, raw_file_path=None):
"""Builds the report for a raw file comparison, requires a raw_file_path to process"""
self.clear()
self.plots = []
self.plot_ids = []
self.plot_captions = []
self.plot_titles = []
self.raw_measurement_model = sparameter_power_type(raw_file_path)
self.raw_measurement = globals()[self.raw_measurement_model](raw_file_path)
# print("{0} is {1}".format("self.raw_measurement.column_names",self.raw_measurement.column_names))
table = self.raw_measurement
self.options["Device_Id"] = table.metadata["Device_Id"]
self.plots.append(self.raw_measurement.show())
self.plot_ids.append("rawMeasurement")
self.plot_titles.append("Raw Measurement of {0}".format(self.options["Device_Id"]))
self.plot_captions.append("""Figure {0}. Raw measurement of {1}. The measurement of check standard {1}
in a calibrated mode.""".format(len(self.plots), self.options["Device_Id"]))
self.calrep_measurement = calrep(self.raw_measurement)
self.plots.append(plot_calrep(self.calrep_measurement))
self.plot_ids.append("clarepMeasurement")
self.plot_titles.append("Plot of {0} with uncertainty".format(self.options["Device_Id"]))
self.plot_captions.append("""Figure {0}. Measurement of {1}. The measurement of check standard {1}
with nist total uncertainty.""".format(len(self.plots), self.options["Device_Id"]))
self.plots.append(plot_calrep_uncertainty(self.calrep_measurement))
self.plot_ids.append("clarepUncert")
self.plot_titles.append("Plot Uncertainty Components".format(self.options["Device_Id"]))
self.plot_captions.append("""Figure {0}. Uncertainty Components.
The uncertainty in measurement of check standard {1}
.""".format(len(self.plots), self.options["Device_Id"]))
try:
self.results_file = ResultFileModel(os.path.join(self.options["results_directory"],
self.calrep_measurement.metadata["Device_Id"]))
except:
self.results_file = None
options = {"Device_Id": table.metadata["Device_Id"], "System_Id": table.metadata["System_Id"],
"Measurement_Timestamp": None,
"Connector_Type_Measurement": table.metadata["Connector_Type_Measurement"],
"Measurement_Date": None, "Measurement_Time": None, "outlier_removal": False}
if re.search('2-port',
table.metadata["Measurement_Type"],
re.IGNORECASE) and not re.search('2-portNR',
table.metadata["Measurement_Type"],
re.IGNORECASE):
history_key = '2-port'
options["column_names"] = ['Frequency', 'magS11', 'argS11', 'magS21', 'argS21', 'magS22', 'argS22']
elif re.search('2-portNR', table.metadata["Measurement_Type"], re.IGNORECASE):
history_key = '2-portNR'
options["column_names"] = ['Frequency', 'magS11', 'argS11', 'magS12', 'argS12', 'magS21', 'argS21',
'magS22', 'argS22']
elif re.search('1-port', table.metadata["Measurement_Type"], re.IGNORECASE):
history_key = '1-port'
if COMBINE_S11_S22:
options["column_names"] = ['Frequency', 'magS11', | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
## @file ostap/fitting/pdf_ops.py
# PDF multiplication via operators: easy wrappers for class RooProdPdf
# @see RooProdPdf
# @see ostap.fitting.basic.PDF
# @see ostap.fitting.fit2d.PDF2
# @see ostap.fitting.fit3d.PDF3
# @author <NAME> <EMAIL>
# @date 2019-01-28
# =============================================================================
"""Add PDF multiplication via operators
"""
# =============================================================================
__version__ = "$Revision:"
__author__ = "<NAME> <EMAIL>"
__date__ = "2019-01-28"
__all__ = ()
# =============================================================================
import ROOT
import ostap.fitting.basic
# =============================================================================
from ostap.logger.logger import getLogger
# Module-level logger: when executed as a script use the canonical module
# name, otherwise inherit the importing package's name.
logger = getLogger('ostap.fitting.pdf_ops' if __name__ == '__main__' else __name__)
# =============================================================================
# =============================================================================
def _prod_(pdf1, pdf2):
    """Build the raw ``RooProdPdf`` representing the product of two
    wrapped PDFs, naming it after both factors."""
    prod_name = 'Product_%s_%s' % (pdf1.name, pdf2.name)
    prod_title = 'Product:(%s)x(%s)' % (pdf1.name, pdf2.name)
    return ROOT.RooProdPdf(prod_name, prod_title, pdf1.pdf, pdf2.pdf)
# =============================================================================
def _in_ ( a , *others ) :
for b in others :
if a is b : return True
return False
# =============================================================================
## Product of two PDFs :
# @code
# pdf1 = ...
# pdf2 = ...
# pdf = pdf1 * pdf2
# @endcode
#
# Supported argument types and signatures
# - PDF3 ( x , y , z ) * PDF3 ( x , y , z ) -> PDF3 ( x , y , z )
# - PDF3 ( x , y , z ) * PDF2 ( x , y ) -> PDF3 ( x , y , z )
# - PDF3 ( x , y , z ) * PDF2 ( x , z ) -> PDF3 ( x , y , z )
# - PDF3 ( x , y , z ) * PDF2 ( y , z ) -> PDF3 ( x , y , z )
# - PDF3 ( x , y , z ) * PDF ( x ) -> PDF3 ( x , y , z )
# - PDF3 ( x , y , z ) * PDF ( y ) -> PDF3 ( x , y , z )
# - PDF3 ( x , y , z ) * PDF ( z ) -> PDF3 ( x , y , z )
# - PDF2 ( x , y ) * PDF3 ( ... ) -> process as PDF3 (...) * PDF2 ( x , y )
# - PDF2 ( x , y ) * PDF2 ( x , y ) -> PDF2 ( x , y )
# - PDF2 ( x , y ) * PDF ( x ) -> PDF2 ( x , y )
# - PDF2 ( x , y ) * PDF ( y ) -> PDF2 ( x , y )
# - PDF ( x ) * PDF3 ( ... ) -> process as PDF3 (...) * PDF ( x )
# - PDF ( x ) * PDF2 ( ... ) -> process as PDF2 (...) * PDF ( x )
# - PDF ( x ) * PDF ( x ) -> PDF ( x )
# - PDF ( x ) * PDF ( y ) -> PDF2 ( x , y )
# Other argument types and signatures are not supported
# @see ostap.fitting.basic.PDF
# @see ostap.fitting.fit2d.PDF2
# @see ostap.fitting.fit3d.PDF3
# @see ostap.fitting.modifiers.Product1D
# @see ostap.fitting.fit2d.Model2D
def pdf_product ( pdf1 , pdf2 ) :
""" Product of two PDFs :
- see ostap.fitting.basic.PDF
- see ostap.fitting.fit2d.PDF2
- ostap.fitting.fit3d.PDF3
- ostap.fitting.modifiers.Product1D
- ostap.fitting.fit2d.Model2D
Supported argument types and signatures:
- PDF3 ( x , y , z ) * PDF3 ( x , y , z ) -> PDF3 ( x , y , z )
- PDF3 ( x , y , z ) * PDF2 ( x , y ) -> PDF3 ( x , y , z )
- PDF3 ( x , y , z ) * PDF2 ( x , z ) -> PDF3 ( x , y , z )
- PDF3 ( x , y , z ) * PDF2 ( y , z ) -> PDF3 ( x , y , z )
- PDF3 ( x , y , z ) * PDF ( x ) -> PDF3 ( x , y , z )
- PDF3 ( x , y , z ) * PDF ( y ) -> PDF3 ( x , y , z )
- PDF3 ( x , y , z ) * PDF ( z ) -> PDF3 ( x , y , z )
- PDF2 ( x , y ) * PDF3 ( ... ) -> process as PDF3 (...) * PDF2 ( x , y )
- PDF2 ( x , y ) * PDF2 ( x , y ) -> PDF2 ( x , y )
- PDF2 ( x , y ) * PDF ( x ) -> PDF2 ( x , y )
- PDF2 ( x , y ) * PDF ( y ) -> PDF2 ( x , y )
- PDF ( x ) * PDF3 ( ... ) -> process as PDF3 (...) * PDF ( x )
- PDF ( x ) * PDF2 ( ... ) -> process as PDF2 (...) * PDF ( x )
- PDF ( x ) * PDF ( x ) -> PDF ( x )
- PDF ( x ) * PDF ( y ) -> PDF2 ( x , y )
>>> pdf1 = ...
>>> pdf2 = ...
>>> pdf = pdf1 * pdf2
"""
import ostap.fitting.basic as _1D
import ostap.fitting.fit2d as _2D
import ostap.fitting.fit3d as _3D
## 1D * ...
if isinstance ( pdf1 , _3D.PDF3 ) :
x1 = pdf1.xvar
y1 = pdf1.yvar
z1 = pdf1.zvar
v1 = x1 , y1 , z1
if isinstance ( pdf2 , _3D.PDF3 ) :
x2 = pdf2.xvar
y2 = pdf2.yvar
z2 = pdf2.zvar
if _in_ ( x2 , *v1 ) and _in_ ( y2 , *v1 ) and _in_ ( z2 , *v1 ) :
return _3D.Generic3D_pdf ( _prod_ ( pdf1 , pdf2 ) , *v1 )
elif isinstance ( pdf2 , _2D.PDF2 ) :
x2 = pdf2.xvar
y2 = pdf2.yvar
if _in_ ( x2 , *v1 ) and _in_ ( y2 , *v1 ) :
return _3D.Generic3D_pdf ( _prod_ ( pdf1 , pdf2 ) , *v1 )
elif isinstance ( pdf2 , _1D.PDF ) :
x2 = pdf2.xvar
if _in_ ( x2 , *v1 ) :
return _3D.Generic3D_pdf ( _prod_ ( pdf1 , pdf2 ) , *v1 )
return NotImplemented
elif isinstance ( pdf1 , _2D.PDF2 ) :
x1 = pdf1.xvar
y1 = pdf1.yvar
v1 = x1 , y1
if isinstance ( pdf2 , _3D.PDF3 ) : return pdf_product ( pdf2 , pdf1 )
elif isinstance ( pdf2 , _2D.PDF2 ) :
x2 = pdf2.xvar
y2 = pdf2.yvar
if _in_ ( x2 , *v1 ) and _in_ ( y2 , *v1 ) :
return _2D.Generic2D_pdf ( _prod_ ( pdf1 , pdf2 ) , *v1 )
elif x1 is x2 :
return _3D.Generic3D_pdf ( _prod_ ( pdf1 , pdf2 ) , x1 , y1 , y2 )
elif x1 is y2 :
return _3D.Generic3D_pdf ( _prod_ ( pdf1 , pdf2 ) , x1 , y1 , x2 )
elif y1 is x2 :
return _3D.Generic3D_pdf ( _prod_ ( pdf1 , pdf2 ) , x1 , y1 , y2 )
elif y1 is y2 :
return _3D.Generic3D_pdf ( _prod_ ( pdf1 , pdf2 ) , x1 , y1 , x2 )
elif isinstance ( pdf2 , _1D.PDF ) :
x2 = pdf2.yvar
if _in_ ( x2 , *v1 ) :
return _2D.Generic2D_pdf ( _prod_ ( pdf1 , pdf2 ) , *v1 )
return _3D.Generic3D_pdf ( _prod_ ( pdf1 , pdf2 ) , x1 , y1 | |
if hold_arr is not None:
tomos_exp[tkey][lkey] = hold_arr
for npi in range(tomos_np[tkey][lkey]):
lists_exp[lkey].append(hold_arr)
if ana_global:
print('\t\t\t\t\t-Simulating univariate second order metrics...')
hold_arr_1, hold_arr_2 = ltomo.simulate_uni_2nd_order(p_nsims, ModelCSRV, part_vtp,
ana_rg_v, thick=ana_shell_thick_v, border=ana_border,
conv_iter=ana_conv_iter, max_iter=ana_max_iter, out_sep=2,
npr=ana_npr, npr_model=ana_npr_model, tmp_folder=tmp_sim_folder,
verbose=pt_sim_v)
for npi in range(tomos_np[tkey][lkey]):
if (hold_arr_1 is not None) and (hold_arr_2 is not None):
for arr_1, arr_2 in zip(hold_arr_1, hold_arr_2):
tomos_sim[tkey][lkey].append((arr_1, arr_2))
lists_sim[lkey][tkey].append((arr_1, arr_2))
else:
print('\t\t\t\t\t-Simulating univariate second order metrics...')
hold_arr = ltomo.simulate_uni_2nd_order(p_nsims, ModelCSRV, part_vtp,
ana_rg_v, thick=ana_shell_thick_v, border=ana_border,
conv_iter=ana_conv_iter, max_iter=ana_max_iter, out_sep=0,
npr=ana_npr, npr_model=ana_npr_model, tmp_folder=tmp_sim_folder,
verbose=pt_sim_v)
if hold_arr is not None:
tomos_sim[tkey][lkey] = hold_arr
for npi in range(tomos_np[tkey][lkey]):
for arr in hold_arr:
lists_sim[lkey].append(arr)
if ana_global:
print('\tGlobal computations by tomos...')
hold_tomos_exp, hold_tomos_sim = copy.deepcopy(tomos_exp), copy.deepcopy(tomos_sim)
del tomos_exp
del tomos_sim
tomos_exp, tomos_sim = dict(), dict()
for tkey in hold_tomos_exp.keys():
tomos_exp[tkey], tomos_sim[tkey] = dict(), dict()
dens = tomos_den[tkey]
for lkey, mat in zip(iter(hold_tomos_exp[tkey].keys()), iter(hold_tomos_exp[tkey].values())):
arr_1, arr_2 = mat[0], mat[1]
if ana_shell_thick is None:
gl_arr = ana_rg * (np.cbrt((1. / dens[lkey]) * (arr_1.sum(axis=0) / arr_2.sum(axis=0))) - 1.)
else:
gl_arr = (1. / dens[lkey]) * (arr_1.sum(axis=0) / arr_2.sum(axis=0)) - 1.
tomos_exp[tkey][lkey] = gl_arr
for lkey, mat in zip(iter(hold_tomos_sim[tkey].keys()), iter(hold_tomos_sim[tkey].values())):
for n_sim in range(p_nsims):
mat = hold_tomos_sim[tkey][lkey]
arr_1, arr_2 = mat[n_sim][0], mat[n_sim][1]
if ana_shell_thick is None:
gl_arr = ana_rg * (np.cbrt((1. / dens[lkey]) * (arr_1.sum(axis=0) / arr_2.sum(axis=0))) - 1.)
else:
gl_arr = (1. / dens[lkey]) * (arr_1.sum(axis=0) / arr_2.sum(axis=0)) - 1.
try:
tomos_sim[tkey][lkey].append(gl_arr)
except KeyError:
tomos_sim[tkey][lkey] = list()
tomos_sim[tkey][lkey].append(gl_arr)
print('\tGlobal computations by lists...')
hold_lists_exp, hold_lists_sim = copy.deepcopy(lists_exp), copy.deepcopy(lists_sim)
del lists_exp
del lists_sim
lists_exp, lists_sim = dict(), dict()
for lkey in hold_lists_exp.keys():
lists_exp[lkey], lists_sim[lkey] = list(), list()
dens, mat = lists_gden[lkey], hold_lists_exp[lkey]
arr_1, arr_2 = list(), list()
for hold_mat in mat:
for hold_mat_1, hold_mat_2 in zip(hold_mat[0], hold_mat[1]):
arr_1.append(hold_mat_1)
arr_2.append(hold_mat_2)
arr_1, arr_2 = arr_1, arr_2 = np.asarray(arr_1), np.asarray(arr_2)
if ana_shell_thick is None:
gl_arr = ana_rg * (np.cbrt((1. / dens) * (arr_1.sum(axis=0) / arr_2.sum(axis=0))) - 1.)
else:
gl_arr = (1. / dens) * (arr_1.sum(axis=0) / arr_2.sum(axis=0)) - 1.
lists_exp[lkey] = gl_arr
for lkey in hold_lists_sim.keys():
dens = lists_gden[lkey]
for n_sim in range(p_nsims):
arr_1, arr_2 = list(), list()
for mat in hold_lists_sim[lkey].values():
for hold_mat_1, hold_mat_2 in zip(mat[n_sim][0], mat[n_sim][1]):
arr_1.append(hold_mat_1)
arr_2.append(hold_mat_2)
arr_1, arr_2 = np.asarray(arr_1), np.asarray(arr_2)
if ana_shell_thick is None:
gl_arr = ana_rg * (np.cbrt((1. / dens) * (arr_1.sum(axis=0) / arr_2.sum(axis=0))) - 1.)
else:
gl_arr = (1. / dens) * (arr_1.sum(axis=0) / arr_2.sum(axis=0)) - 1.
try:
lists_sim[lkey].append(gl_arr)
except KeyError:
lists_sim[lkey] = list()
lists_sim[lkey].append(gl_arr)
out_wspace = out_dir + '/' + out_stem + '_wspace.pkl'
print('\tPickling computation workspace in: ' + out_wspace)
wspace = (lists_count, tomos_count,
lists_hash, tomos_hash,
tomos_np, tomos_den, tomos_exp, tomos_sim,
lists_np, lists_den, lists_gden, lists_exp, lists_sim, lists_color,
vesicles, vols)
with open(out_wspace, "wb") as fl:
pickle.dump(wspace, fl)
fl.close()
else:
print('\tLoading the workspace: ' + in_wspace)
with open(in_wspace, 'r') as pkl:
wspace = pickle.load(pkl)
lists_count, tomos_count = wspace[0], wspace[1]
lists_hash, tomos_hash = wspace[2], wspace[3]
tomos_np, tomos_den, tomos_exp, tomos_sim = wspace[4], wspace[5], wspace[6], wspace[7]
lists_np, lists_den, lists_gden, lists_exp, lists_sim, lists_color = wspace[8], wspace[9], wspace[10], wspace[11], wspace[12], wspace[13]
vesicles, vols = wspace[14], wspace[15]
print('\tPrinting lists hash: ')
for id, lkey in zip(iter(lists_hash.keys()), iter(lists_hash.values())):
print('\t\t-[' + str(id) + '] -> [' + lkey + ']')
print('\tPrinting tomograms hash: ')
for tkey, val in zip(iter(tomos_hash.keys()), iter(tomos_hash.values())):
print('\t\t-[' + tkey + '] -> [' + str(val) + ']')
# Assign each particle list a colour, spreading the list ids evenly
# across the configured colormap.
n_lists = len(lists_hash)
for list_id, list_key in lists_hash.items():
    lists_color[list_key] = pt_cmap(1. * list_id / n_lists)
print('\tTOMOGRAMS PLOTTING LOOP: ')
# Per-tomogram figures are written under <out_stem_dir>/tomos/<short name>/
out_tomos_dir = out_stem_dir + '/tomos'
out_tomos_dir = out_stem_dir + '/tomos'
os.makedirs(out_tomos_dir)
print('\t\t-Plotting the number of particles...')
# One bar chart per tomogram: particle count per class (list).
for tkey, ltomo in zip(iter(tomos_np.keys()), iter(tomos_np.values())):
    tkey_short = os.path.splitext(os.path.split(tkey)[1])[0]
    plt.figure()
    plt.title('Num. particles for ' + tkey_short)
    plt.ylabel('Num. particles')
    plt.xlabel('Classes')
    for lkey, nparts in zip(iter(ltomo.keys()), iter(ltomo.values())):
        # Hard-wired class-name -> bar-slot mapping.
        # NOTE(review): a key outside these six leaves i_lkey unbound
        # (NameError at plt.bar below) -- confirm only these class names
        # can occur in tomos_np.
        if lkey == 'PST':
            i_lkey = 0
        elif lkey == 'PST_A':
            i_lkey = 1
        elif lkey == 'PST_B':
            i_lkey = 2
        elif lkey == 'PST_C':
            i_lkey = 3
        elif lkey == 'AMPAR':
            i_lkey = 4
        elif lkey == 'NMDAR':
            i_lkey = 5
        plt.bar(i_lkey, nparts, width=0.75, color=lists_color[lkey], label=lkey)
    plt.xticks(BAR_WIDTH + np.arange(n_lists), np.arange(n_lists))
    plt.legend(loc=1)
    plt.tight_layout()
    if fig_fmt is None:
        # Interactive mode: block until the window is closed.
        plt.show(block=True)
    else:
        hold_dir = out_tomos_dir + '/' + tkey_short
        if not os.path.exists(hold_dir):
            os.makedirs(hold_dir)
        plt.savefig(hold_dir + '/np.png')
    plt.close()
print('\t\t-Plotting densities...')
# Same layout as above, but plotting density (particles / volume) per class.
for tkey, ltomo in zip(iter(tomos_den.keys()), iter(tomos_den.values())):
    tkey_short = os.path.splitext(os.path.split(tkey)[1])[0]
    plt.figure()
    plt.title('Density for ' + tkey_short)
    plt.ylabel('Density (np/vol)')
    plt.xlabel('Classes')
    for lkey, den in zip(iter(ltomo.keys()), iter(ltomo.values())):
        # Same fixed class -> bar-slot mapping as the particle-count plot.
        if lkey == 'PST':
            i_lkey = 0
        elif lkey == 'PST_A':
            i_lkey = 1
        elif lkey == 'PST_B':
            i_lkey = 2
        elif lkey == 'PST_C':
            i_lkey = 3
        elif lkey == 'AMPAR':
            i_lkey = 4
        elif lkey == 'NMDAR':
            i_lkey = 5
        plt.bar(i_lkey, den, width=0.75, color=lists_color[lkey], label=lkey)
    plt.xticks(BAR_WIDTH + np.arange(n_lists), np.arange(n_lists))
    plt.legend(loc=4)
    plt.tight_layout()
    if fig_fmt is None:
        plt.show(block=True)
    else:
        hold_dir = out_tomos_dir + '/' + tkey_short
        if not os.path.exists(hold_dir):
            os.makedirs(hold_dir)
        plt.savefig(hold_dir + '/den.png')
    plt.close()
print('\t\t-Plotting densities by tethered vesicles...')
plt.figure()
# plt.title('Colocalization respect ' + key_ref)
plt.ylabel('Number of particles')
plt.xlabel('Number of tethered vesicles')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
# Collect, for every tomogram with at least one particle in any class,
# the number of tethered vesicles (x-axis) and the per-class particle
# counts (y-axis); the l_wnum_* lists hold uniform regression weights
# (one 1. per kept tomogram).
l_ves, l_num_pst, l_num_pst_a, l_num_pst_b, l_num_pst_c = list(), list(), list(), list(), list()
l_wnum_pst, l_wnum_pst_a, l_wnum_pst_b, l_wnum_pst_c = list(), list(), list(), list()
l_num_ampar, l_num_nmdar, l_wnum_ampar, l_wnum_nmdar = list(), list(), list(), list()
p_max_np = 0
for tkey in tomos_den.keys():
    tkey_short = os.path.splitext(os.path.split(tkey)[1])[0]
    # Keep only tomograms that actually contain particles.
    if np.asarray(list(tomos_np[tkey].values())).sum() > 0:
        l_ves.append(vesicles[tkey_short])
        l_num_pst.append(tomos_np[tkey]['PST'])
        l_wnum_pst.append(1.)
        l_num_pst_a.append(tomos_np[tkey]['PST_A'])
        l_wnum_pst_a.append(1.)
        l_num_pst_b.append(tomos_np[tkey]['PST_B'])
        l_wnum_pst_b.append(1.)
        l_num_pst_c.append(tomos_np[tkey]['PST_C'])
        l_wnum_pst_c.append(1.)
        l_num_ampar.append(tomos_np[tkey]['AMPAR'])
        l_wnum_ampar.append(1.)
        l_num_nmdar.append(tomos_np[tkey]['NMDAR'])
        l_wnum_nmdar.append(1.)
        # Track the largest PST count for the y-axis limit used later.
        if l_num_pst[-1] > p_max_np:
            p_max_np = l_num_pst[-1]
# Convert the sample lists to arrays: sklearn's LinearRegression expects
# 2-D (n_samples, 1) X/y columns, while sample weights stay 1-D.
# FIX: ``np.float`` was a deprecated alias of the builtin ``float`` and
# was removed in NumPy 1.24; ``float`` gives the identical float64 dtype.
l_ves = np.asarray(l_ves, dtype=float).reshape(-1, 1)
l_num_pst = np.asarray(l_num_pst, dtype=float).reshape(-1, 1)
l_num_pst_a = np.asarray(l_num_pst_a, dtype=float).reshape(-1, 1)
l_num_pst_b = np.asarray(l_num_pst_b, dtype=float).reshape(-1, 1)
l_num_pst_c = np.asarray(l_num_pst_c, dtype=float).reshape(-1, 1)
l_num_ampar = np.asarray(l_num_ampar, dtype=float).reshape(-1, 1)
l_num_nmdar = np.asarray(l_num_nmdar, dtype=float).reshape(-1, 1)
l_wnum_pst = np.asarray(l_wnum_pst, dtype=float)
l_wnum_pst_a = np.asarray(l_wnum_pst_a, dtype=float)
l_wnum_pst_b = np.asarray(l_wnum_pst_b, dtype=float)
l_wnum_pst_c = np.asarray(l_wnum_pst_c, dtype=float)
l_wnum_ampar = np.asarray(l_wnum_ampar, dtype=float)
l_wnum_nmdar = np.asarray(l_wnum_nmdar, dtype=float)
# Normalise every weight vector to sum to 1.
l_wnum_pst /= l_wnum_pst.sum()
l_wnum_pst_a /= l_wnum_pst_a.sum()
l_wnum_pst_b /= l_wnum_pst_b.sum()
l_wnum_pst_c /= l_wnum_pst_c.sum()
l_wnum_ampar /= l_wnum_ampar.sum()
l_wnum_nmdar /= l_wnum_nmdar.sum()
# Weighted least-squares fit of particle count vs tethered-vesicle count,
# one independent regressor per class.
regr_pst = linear_model.LinearRegression()
regr_pst_a = linear_model.LinearRegression()
regr_pst_b = linear_model.LinearRegression()
regr_pst_c = linear_model.LinearRegression()
regr_ampar = linear_model.LinearRegression()
regr_nmdar = linear_model.LinearRegression()
regr_pst.fit(l_ves, l_num_pst, sample_weight=l_wnum_pst)
regr_pst_a.fit(l_ves, l_num_pst_a, sample_weight=l_wnum_pst_a)
regr_pst_b.fit(l_ves, l_num_pst_b, sample_weight=l_wnum_pst_b)
regr_pst_c.fit(l_ves, l_num_pst_c, sample_weight=l_wnum_pst_c)
regr_ampar.fit(l_ves, l_num_ampar, sample_weight=l_wnum_ampar)
regr_nmdar.fit(l_ves, l_num_nmdar, sample_weight=l_wnum_nmdar)
# Fitted regression lines evaluated at the measured x positions.
l_num_pst_r = regr_pst.predict(l_ves)
l_num_pst_a_r = regr_pst_a.predict(l_ves)
l_num_pst_b_r = regr_pst_b.predict(l_ves)
l_num_pst_c_r = regr_pst_c.predict(l_ves)
l_num_ampar_r = regr_ampar.predict(l_ves)
l_num_nmdar_r = regr_nmdar.predict(l_ves)
# Scatter of the PST classes plus their regression lines (AMPAR/NMDAR
# get a separate figure below).
plt.plot(l_ves, l_num_pst, color='k', marker='s', markersize=10, label='PST', linestyle='')
plt.plot(l_ves, l_num_pst_a, color='r', marker='*', markersize=10, label='PST_A', linestyle='')
plt.plot(l_ves, l_num_pst_b, color='m', marker='^', markersize=10, label='PST_B', linestyle='')
plt.plot(l_ves, l_num_pst_c, color='maroon', marker='o', markersize=10, label='PST_C', linestyle='')
# plt.plot(l_ves, l_num_ampar, color='red', marker='s', markersize=10, label='AMPAR', linestyle='')
# plt.plot(l_ves, l_num_nmdar, color='lightsalmon', marker='*', markersize=10, label='NMDAR', linestyle='')
plt.plot(l_ves, l_num_pst_r, color='k', label='PST-LR', linestyle='-', linewidth=2.0)
plt.plot(l_ves, l_num_pst_a_r, color='r', label='PST_A-LR', linestyle='-', linewidth=2.0)
plt.plot(l_ves, l_num_pst_b_r, color='m', label='PST_B-LR', linestyle='--', linewidth=2.0)
plt.plot(l_ves, l_num_pst_c_r, color='maroon', label='PST_C-LR', linestyle='-.', linewidth=2.0)
# plt.plot(l_ves, l_num_ampar_r, color='r', label='AMPAR-LR', linestyle='-', linewidth=2.0)
# plt.plot(l_ves, l_num_nmdar_r, color='lightsalmon', label='NMDAR-LR', linestyle='-', linewidth=2.0)
plt.xlim((-3., l_ves.max()*1.1))
plt.ylim((0, p_max_np*1.1))
plt.xticks((0, 2, 4, 6, 8, 10, 12))
plt.legend(loc=3)
plt.tight_layout()
if fig_fmt is None:
    plt.show(block=True)
else:
    plt.savefig(out_tomos_dir + '/den_by_ntet.png', dpi=600)
plt.close()
# Goodness of fit per class.
r2_pst = r2_score(l_num_pst, l_num_pst_r)
r2_pst_a = r2_score(l_num_pst_a, l_num_pst_a_r)
r2_pst_b = r2_score(l_num_pst_b, l_num_pst_b_r)
r2_pst_c = r2_score(l_num_pst_c, l_num_pst_c_r)
r2_ampar = r2_score(l_num_ampar, l_num_ampar_r)
r2_nmdar = r2_score(l_num_nmdar, l_num_nmdar_r)
print('\t\t\t+Linear regression:')
print('\t\t\t\t-Coefficient of determination PST: ' + str(r2_pst))
print('\t\t\t\t-Coefficient of determination PST_A: ' + str(r2_pst_a))
print('\t\t\t\t-Coefficient of determination PST_B: ' + str(r2_pst_b))
print('\t\t\t\t-Coefficient of determination PST_C: ' + str(r2_pst_c))
print('\t\t\t\t-Coefficient of determination AMPAR: ' + str(r2_ampar))
print('\t\t\t\t-Coefficient of determination NMDAR: ' + str(r2_nmdar))
# Pearson correlation between measured counts and fitted values.
# NOTE(review): pearsonr is fed (n, 1) column arrays and the outputs are
# indexed with [0]; recent SciPy releases require 1-D inputs here --
# confirm the SciPy version in use accepts 2-D columns.
[pc_pst, pcv_pst] = sp.stats.pearsonr(l_num_pst, l_num_pst_r)
pc_pst, pcv_pst = pc_pst[0], pcv_pst[0]
[pc_pst_a, pcv_pst_a] = sp.stats.pearsonr(l_num_pst_a, l_num_pst_a_r)
pc_pst_a, pcv_pst_a = pc_pst_a[0], pcv_pst_a[0]
[pc_pst_b, pcv_pst_b] = sp.stats.pearsonr(l_num_pst_b, l_num_pst_b_r)
pc_pst_b, pcv_pst_b = pc_pst_b[0], pcv_pst_b[0]
[pc_pst_c, pcv_pst_c] = sp.stats.pearsonr(l_num_pst_c, l_num_pst_c_r)
pc_pst_c, pcv_pst_c = pc_pst_c[0], pcv_pst_c[0]
[pc_ampar, pcv_ampar] = sp.stats.pearsonr(l_num_ampar, l_num_ampar_r)
pc_ampar, pcv_ampar = pc_ampar[0], pcv_ampar[0]
[pc_nmdar, pcv_nmdar] = sp.stats.pearsonr(l_num_nmdar, l_num_nmdar_r)
pc_nmdar, pcv_nmdar = pc_nmdar[0], pcv_nmdar[0]
print('\t\t\t\t-Pearson coefficient PST [p, t]: ' + str([pc_pst, pcv_pst]))
print('\t\t\t\t-Pearson coefficient PST_A [p, t]: ' + str([pc_pst_a, pcv_pst_a]))
print('\t\t\t\t-Pearson coefficient PST_B [p, t]: ' + str([pc_pst_b, pcv_pst_b]))
print('\t\t\t\t-Pearson coefficient PST_C [p, t]: ' + str([pc_pst_c, pcv_pst_c]))
print('\t\t\t\t-Pearson coefficient AMPAR [p, t]: ' + str([pc_ampar, pcv_ampar]))
print('\t\t\t\t-Pearson coefficient NMDAR [p, t]: ' + str([pc_nmdar, pcv_nmdar]))
# Second figure: AMPAR/NMDAR counts vs tethered vesicles.
plt.figure()
# plt.title('Colocalization respect ' + key_ref)
plt.ylabel('Number of particles')
plt.xlabel('Number of tethered vesicles')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.plot(l_ves, l_num_ampar, color='red', marker='s', markersize=10, label='AMPAR', linestyle='')
plt.plot(l_ves, l_num_nmdar, color='k', marker='*', markersize=10, label='NMDAR', linestyle='')
plt.plot(l_ves, l_num_ampar_r, color='r', label='AMPAR-LR', linestyle='-', linewidth=2.0)
plt.plot(l_ves, l_num_nmdar_r, color='k', label='NMDAR-LR', linestyle='-', linewidth=2.0)
plt.xlim((-3., l_ves.max()*1.1))
# plt.ylim((0, p_max_np*1.1))
plt.xticks((0, 2, 4, 6, 8, 10, 12))
plt.legend(loc=3)
plt.tight_layout()
if fig_fmt is None:
    plt.show(block=True)
else:
    plt.savefig(out_tomos_dir + '/den_by_ntet_an.png', dpi=600)
plt.close()
print('\t\t-Plotting densities by pre-syanptic membrane volume...')
plt.figure()
# plt.title('Colocalization respect ' + key_ref)
plt.ylabel('Number of particles')
plt.xlabel('Membrane Area [nm]')
plt.ticklabel_format(style='sci', axis='both', scilimits=(0,0))
# Rebuild the per-tomogram samples, this time against membrane area:
# vol is rescaled by 1/ana_res^3 (presumably voxel -> physical units --
# TODO confirm) and the area approximated as vol / MB_THICK.
# NOTE(review): l_ves is NOT rebuilt in this section although the code
# right after this loop reuses it -- looks like a copy-paste slip from
# the tethered-vesicles section; verify.
l_areas, l_vols, l_num_pst, l_num_pst_a, l_num_pst_b, l_num_pst_c = list(), list(), list(), list(), list(), list()
l_wnum_pst, l_wnum_pst_a, l_wnum_pst_b, l_wnum_pst_c = list(), list(), list(), list()
l_num_ampar, l_num_nmdar, l_wnum_ampar, l_wnum_nmdar = list(), list(), list(), list()
for tkey in tomos_den.keys():
    vol = vols[tkey] * (1. / (ana_res * ana_res * ana_res))
    area = (vol / MB_THICK)
    # Keep only tomograms that actually contain particles.
    if np.asarray(list(tomos_np[tkey].values())).sum() > 0:
        l_vols.append(vol)
        l_areas.append(area)
        l_num_pst.append(tomos_np[tkey]['PST'])
        l_wnum_pst.append(1.)
        l_num_pst_a.append(tomos_np[tkey]['PST_A'])
        l_wnum_pst_a.append(1.)
        l_num_pst_b.append(tomos_np[tkey]['PST_B'])
        l_wnum_pst_b.append(1.)
        l_num_pst_c.append(tomos_np[tkey]['PST_C'])
        l_wnum_pst_c.append(1.)
        l_num_ampar.append(tomos_np[tkey]['AMPAR'])
        l_wnum_ampar.append(1.)
        l_num_nmdar.append(tomos_np[tkey]['NMDAR'])
        l_wnum_nmdar.append(1.)
l_ves = np.asarray(l_ves, dtype=np.float).reshape(-1, 1)
l_areas | |
+ 1):
s = pickle.dumps(a, proto)
b = pickle.loads(s)
self.assertEqual(b.year, 2003)
self.assertEqual(b.month, 2)
self.assertEqual(b.day, 7)
    def test_pickling_subclass_datetime(self):
        """A datetime subclass must pickle round-trip to an equal object
        of the same subclass (not plain datetime)."""
        args = 6, 7, 23, 20, 59, 1, 64**2
        orig = SubclassDatetime(*args)
        # pickle_choices pairs picklers/unpicklers with every protocol.
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)
            self.assertTrue(isinstance(derived, SubclassDatetime))
    def test_compat_unpickle(self):
        """Unpickle datetime payloads produced by Python 2 (protocols 0-2).

        The byte strings below are pre-baked Python 2 pickles of
        datetime(2015, 11, 27, 20, 59, 1, 64**2); they must decode to the
        same value under Python 3 with encoding='latin1'.
        """
        tests = [
            b'cdatetime\ndatetime\n('
            b"S'\\x07\\xdf\\x0b\\x1b\\x14;\\x01\\x00\\x10\\x00'\ntR.",
            b'cdatetime\ndatetime\n('
            b'U\n\x07\xdf\x0b\x1b\x14;\x01\x00\x10\x00tR.',
            b'\x80\x02cdatetime\ndatetime\n'
            b'U\n\x07\xdf\x0b\x1b\x14;\x01\x00\x10\x00\x85R.',
        ]
        args = 2015, 11, 27, 20, 59, 1, 64**2
        expected = self.theclass(*args)
        for data in tests:
            for loads in pickle_loads:
                # latin1 maps each 2.x 8-bit string byte to the same code point.
                derived = loads(data, encoding='latin1')
                self.assertEqual(derived, expected)
    def test_more_compare(self):
        """Comparisons must order lexicographically across all fields,
        including the time members that date lacks."""
        # The test_compare() inherited from TestDate covers the error cases.
        # We just want to test lexicographic ordering on the members datetime
        # has that date lacks.
        args = [2000, 11, 29, 20, 58, 16, 999998]
        t1 = self.theclass(*args)
        t2 = self.theclass(*args)
        # Equal instances satisfy every non-strict relation.
        self.assertEqual(t1, t2)
        self.assertTrue(t1 <= t2)
        self.assertTrue(t1 >= t2)
        self.assertFalse(t1 != t2)
        self.assertFalse(t1 < t2)
        self.assertFalse(t1 > t2)
        # Bumping any single field by one must make t2 strictly larger.
        for i in range(len(args)):
            newargs = args[:]
            newargs[i] = args[i] + 1
            t2 = self.theclass(*newargs)   # this is larger than t1
            self.assertTrue(t1 < t2)
            self.assertTrue(t2 > t1)
            self.assertTrue(t1 <= t2)
            self.assertTrue(t2 >= t1)
            self.assertTrue(t1 != t2)
            self.assertTrue(t2 != t1)
            self.assertFalse(t1 == t2)
            self.assertFalse(t2 == t1)
            self.assertFalse(t1 > t2)
            self.assertFalse(t2 < t1)
            self.assertFalse(t1 >= t2)
            self.assertFalse(t2 <= t1)
# A helper for timestamp constructor tests.
def verify_field_equality(self, expected, got):
self.assertEqual(expected.tm_year, got.year)
self.assertEqual(expected.tm_mon, got.month)
self.assertEqual(expected.tm_mday, got.day)
self.assertEqual(expected.tm_hour, got.hour)
self.assertEqual(expected.tm_min, got.minute)
self.assertEqual(expected.tm_sec, got.second)
def test_fromtimestamp(self):
import time
ts = time.time()
expected = time.localtime(ts)
got = self.theclass.fromtimestamp(ts)
self.verify_field_equality(expected, got)
def test_utcfromtimestamp(self):
import time
ts = time.time()
expected = time.gmtime(ts)
got = self.theclass.utcfromtimestamp(ts)
self.verify_field_equality(expected, got)
    # Run with US-style DST rules: DST begins 2 a.m. on second Sunday in
    # March (M3.2.0) and ends 2 a.m. on first Sunday in November (M11.1.0).
    @support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
    def test_timestamp_naive(self):
        """timestamp() on naive instances interprets them as local time."""
        t = self.theclass(1970, 1, 1)
        # Midnight 1970-01-01 EST is 05:00 UTC -> 5 * 3600 = 18000 s.
        self.assertEqual(t.timestamp(), 18000.0)
        t = self.theclass(1970, 1, 1, 1, 2, 3, 4)
        self.assertEqual(t.timestamp(),
                         18000.0 + 3600 + 2*60 + 3 + 4*1e-6)
        # Missing hour (spring-forward gap): with fold the round-trip lands
        # one hour away on the other side of the gap.
        t0 = self.theclass(2012, 3, 11, 2, 30)
        t1 = t0.replace(fold=1)
        self.assertEqual(self.theclass.fromtimestamp(t1.timestamp()),
                         t0 - timedelta(hours=1))
        self.assertEqual(self.theclass.fromtimestamp(t0.timestamp()),
                         t1 + timedelta(hours=1))
        # Ambiguous hour defaults to DST
        t = self.theclass(2012, 11, 4, 1, 30)
        self.assertEqual(self.theclass.fromtimestamp(t.timestamp()), t)
        # Timestamp may raise an overflow error on some platforms
        # XXX: Do we care to support the first and last year?
        for t in [self.theclass(2,1,1), self.theclass(9998,12,12)]:
            try:
                s = t.timestamp()
            except OverflowError:
                pass
            else:
                self.assertEqual(self.theclass.fromtimestamp(s), t)
    def test_timestamp_aware(self):
        """timestamp() on aware instances is independent of the local zone."""
        # The UTC epoch itself.
        t = self.theclass(1970, 1, 1, tzinfo=timezone.utc)
        self.assertEqual(t.timestamp(), 0.0)
        t = self.theclass(1970, 1, 1, 1, 2, 3, 4, tzinfo=timezone.utc)
        self.assertEqual(t.timestamp(),
                         3600 + 2*60 + 3 + 4*1e-6)
        # The same wall-clock time in EST (UTC-5) is 5 h later as a timestamp.
        t = self.theclass(1970, 1, 1, 1, 2, 3, 4,
                          tzinfo=timezone(timedelta(hours=-5), 'EST'))
        self.assertEqual(t.timestamp(),
                         18000 + 3600 + 2*60 + 3 + 4*1e-6)
    @support.run_with_tz('MSK-03')  # Something east of Greenwich
    def test_microsecond_rounding(self):
        """fromtimestamp()/utcfromtimestamp() must round fractional seconds
        to the nearest microsecond, never truncate."""
        for fts in [self.theclass.fromtimestamp,
                    self.theclass.utcfromtimestamp]:
            zero = fts(0)
            self.assertEqual(zero.second, 0)
            self.assertEqual(zero.microsecond, 0)
            one = fts(1e-6)
            try:
                minus_one = fts(-1e-6)
            except OSError:
                # localtime(-1) and gmtime(-1) is not supported on Windows
                pass
            else:
                self.assertEqual(minus_one.second, 59)
                self.assertEqual(minus_one.microsecond, 999999)
                # Values within half a microsecond of a boundary round to it.
                t = fts(-1e-8)
                self.assertEqual(t, zero)
                t = fts(-9e-7)
                self.assertEqual(t, minus_one)
                t = fts(-1e-7)
                self.assertEqual(t, zero)
                # -1/128 s is exactly representable in binary floating point.
                t = fts(-1/2**7)
                self.assertEqual(t.second, 59)
                self.assertEqual(t.microsecond, 992188)
            t = fts(1e-7)
            self.assertEqual(t, zero)
            t = fts(9e-7)
            self.assertEqual(t, one)
            t = fts(0.99999949)
            self.assertEqual(t.second, 0)
            self.assertEqual(t.microsecond, 999999)
            # Just under one second rounds UP to the next whole second.
            t = fts(0.9999999)
            self.assertEqual(t.second, 1)
            self.assertEqual(t.microsecond, 0)
            t = fts(1/2**7)
            self.assertEqual(t.second, 0)
            self.assertEqual(t.microsecond, 7812)
    def test_timestamp_limits(self):
        """timestamp()/fromtimestamp() round-trips at datetime.min/max and
        rejection just beyond those limits."""
        # minimum timestamp
        min_dt = self.theclass.min.replace(tzinfo=timezone.utc)
        min_ts = min_dt.timestamp()
        try:
            # date 0001-01-01 00:00:00+00:00: timestamp=-62135596800
            self.assertEqual(self.theclass.fromtimestamp(min_ts, tz=timezone.utc),
                             min_dt)
        except (OverflowError, OSError) as exc:
            # the date 0001-01-01 doesn't fit into 32-bit time_t,
            # or platform doesn't support such very old date
            self.skipTest(str(exc))
        # maximum timestamp: set seconds to zero to avoid rounding issues
        max_dt = self.theclass.max.replace(tzinfo=timezone.utc,
                                           second=0, microsecond=0)
        max_ts = max_dt.timestamp()
        # date 9999-12-31 23:59:00+00:00: timestamp 253402300740
        self.assertEqual(self.theclass.fromtimestamp(max_ts, tz=timezone.utc),
                         max_dt)
        # number of seconds greater than 1 year: make sure that the new date
        # is not valid in datetime.datetime limits
        delta = 3600 * 24 * 400
        # too small
        ts = min_ts - delta
        # converting a Python int to C time_t can raise a OverflowError,
        # especially on 32-bit platforms.
        with self.assertRaises((ValueError, OverflowError)):
            self.theclass.fromtimestamp(ts)
        with self.assertRaises((ValueError, OverflowError)):
            self.theclass.utcfromtimestamp(ts)
        # too big
        ts = max_dt.timestamp() + delta
        with self.assertRaises((ValueError, OverflowError)):
            self.theclass.fromtimestamp(ts)
        with self.assertRaises((ValueError, OverflowError)):
            self.theclass.utcfromtimestamp(ts)
def test_insane_fromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(OverflowError, self.theclass.fromtimestamp,
insane)
def test_insane_utcfromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(OverflowError, self.theclass.utcfromtimestamp,
insane)
    @unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
    def test_negative_float_fromtimestamp(self):
        """fromtimestamp() on a negative float must not raise."""
        # The result is tz-dependent; at least test that this doesn't
        # fail (like it did before bug 1646728 was fixed).
        self.theclass.fromtimestamp(-1.05)
    @unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
    def test_negative_float_utcfromtimestamp(self):
        """utcfromtimestamp() rounds a negative float to the microsecond."""
        # -1.05 s before the epoch is 1969-12-31 23:59:58.950000 UTC.
        d = self.theclass.utcfromtimestamp(-1.05)
        self.assertEqual(d, self.theclass(1969, 12, 31, 23, 59, 58, 950000))
    def test_utcnow(self):
        """utcnow() and utcfromtimestamp(time.time()) must roughly agree."""
        import time
        # Call it a success if utcnow() and utcfromtimestamp() are within
        # a second of each other.
        tolerance = timedelta(seconds=1)
        # Retry a few times to tolerate scheduling hiccups between the calls.
        for dummy in range(3):
            from_now = self.theclass.utcnow()
            from_timestamp = self.theclass.utcfromtimestamp(time.time())
            if abs(from_timestamp - from_now) <= tolerance:
                break
            # Else try again a few times.
        self.assertLessEqual(abs(from_timestamp - from_now), tolerance)
    def test_strptime(self):
        """strptime() must match _strptime's reference implementation and
        handle %z/%Z offsets, surrogates, and invalid offsets."""
        string = '2004-12-01 13:02:47.197'
        format = '%Y-%m-%d %H:%M:%S.%f'
        expected = _strptime._strptime_datetime(self.theclass, string, format)
        got = self.theclass.strptime(string, format)
        self.assertEqual(expected, got)
        self.assertIs(type(expected), self.theclass)
        self.assertIs(type(got), self.theclass)
        # bpo-34482: Check that surrogates are handled properly.
        inputs = [
            ('2004-12-01\ud80013:02:47.197', '%Y-%m-%d\ud800%H:%M:%S.%f'),
            ('2004\ud80012-01 13:02:47.197', '%Y\ud800%m-%d %H:%M:%S.%f'),
            ('2004-12-01 13:02\ud80047.197', '%Y-%m-%d %H:%M\ud800%S.%f'),
        ]
        for string, format in inputs:
            with self.subTest(string=string, format=format):
                expected = _strptime._strptime_datetime(self.theclass, string,
                                                        format)
                got = self.theclass.strptime(string, format)
                self.assertEqual(expected, got)
        # %z accepts HHMM and extended HH:MM:SS.ffffff forms.
        strptime = self.theclass.strptime
        self.assertEqual(strptime("+0002", "%z").utcoffset(), 2 * MINUTE)
        self.assertEqual(strptime("-0002", "%z").utcoffset(), -2 * MINUTE)
        self.assertEqual(
            strptime("-00:02:01.000003", "%z").utcoffset(),
            -timedelta(minutes=2, seconds=1, microseconds=3)
        )
        # Only local timezone and UTC are supported
        for tzseconds, tzname in ((0, 'UTC'), (0, 'GMT'),
                                  (-_time.timezone, _time.tzname[0])):
            if tzseconds < 0:
                sign = '-'
                seconds = -tzseconds
            else:
                sign ='+'
                seconds = tzseconds
            hours, minutes = divmod(seconds//60, 60)
            dtstr = "{}{:02d}{:02d} {}".format(sign, hours, minutes, tzname)
            dt = strptime(dtstr, "%z %Z")
            self.assertEqual(dt.utcoffset(), timedelta(seconds=tzseconds))
            self.assertEqual(dt.tzname(), tzname)
        # Can produce inconsistent datetime
        dtstr, fmt = "+1234 UTC", "%z %Z"
        dt = strptime(dtstr, fmt)
        self.assertEqual(dt.utcoffset(), 12 * HOUR + 34 * MINUTE)
        self.assertEqual(dt.tzname(), 'UTC')
        # yet will roundtrip
        self.assertEqual(dt.strftime(fmt), dtstr)
        # Produce naive datetime if no %z is provided
        self.assertEqual(strptime("UTC", "%Z").tzinfo, None)
        # Malformed offsets must be rejected.
        with self.assertRaises(ValueError): strptime("-2400", "%z")
        with self.assertRaises(ValueError): strptime("-000", "%z")
        with self.assertRaises(ValueError): strptime("z", "%z")
    def test_strptime_single_digit(self):
        """strptime() accepts single-digit values for most numeric
        directives (%d, %m, %H, ...), but not for %y."""
        # bpo-34903: Check that single digit dates and times are allowed.
        strptime = self.theclass.strptime
        with self.assertRaises(ValueError):
            # %y does require two digits.
            newdate = strptime('01/02/3 04:05:06', '%d/%m/%y %H:%M:%S')
        dt1 = self.theclass(2003, 2, 1, 4, 5, 6)
        dt2 = self.theclass(2003, 1, 2, 4, 5, 6)
        dt3 = self.theclass(2003, 2, 1, 0, 0, 0)
        dt4 = self.theclass(2003, 1, 25, 0, 0, 0)
        # (directive under test, input string, format, expected datetime)
        inputs = [
            ('%d', '1/02/03 4:5:6', '%d/%m/%y %H:%M:%S', dt1),
            ('%m', '01/2/03 4:5:6', '%d/%m/%y %H:%M:%S', dt1),
            ('%H', '01/02/03 4:05:06', '%d/%m/%y %H:%M:%S', dt1),
            ('%M', '01/02/03 04:5:06', '%d/%m/%y %H:%M:%S', dt1),
            ('%S', '01/02/03 04:05:6', '%d/%m/%y %H:%M:%S', dt1),
            ('%j', '2/03 04am:05:06', '%j/%y %I%p:%M:%S',dt2),
            ('%I', '02/03 4am:05:06', '%j/%y %I%p:%M:%S',dt2),
            ('%w', '6/04/03', '%w/%U/%y', dt3),
            # %u requires a single digit.
            ('%W', '6/4/2003', '%u/%W/%Y', dt3),
            ('%V', '6/4/2003', '%u/%V/%G', dt4),
        ]
        for reason, string, format, target in inputs:
            reason = 'test single digit ' + reason
            with self.subTest(reason=reason,
                              string=string,
                              format=format,
                              target=target):
                newdate = strptime(string, format)
                self.assertEqual(newdate, target, msg=reason)
def test_more_timetuple(self):
# This tests fields beyond those tested by the TestDate.test_timetuple.
t = self.theclass(2004, 12, 31, 6, 22, 33)
self.assertEqual(t.timetuple(), (2004, 12, 31, 6, 22, 33, 4, 366, -1))
self.assertEqual(t.timetuple(),
(t.year, t.month, t.day,
t.hour, t.minute, t.second,
t.weekday(),
t.toordinal() - date(t.year, 1, 1).toordinal() + 1,
-1))
tt = t.timetuple()
self.assertEqual(tt.tm_year, t.year)
self.assertEqual(tt.tm_mon, t.month)
self.assertEqual(tt.tm_mday, t.day)
self.assertEqual(tt.tm_hour, t.hour)
self.assertEqual(tt.tm_min, t.minute)
self.assertEqual(tt.tm_sec, t.second)
self.assertEqual(tt.tm_wday, t.weekday())
self.assertEqual(tt.tm_yday, t.toordinal() -
| |
-1, -1, 0, 1, 1]
safeguard 1.6 0.4899 [2, 2, 1, 1, 1, 2, 2, 2, 2, 1]
safeguarded 1.5 0.92195 [1, 2, 2, 0, 2, 0, 3, 1, 2, 2]
safeguarding 1.1 0.7 [2, 1, 1, 0, 1, 0, 2, 1, 1, 2]
safeguards 1.4 0.66332 [1, 2, 1, 1, 0, 2, 2, 2, 2, 1]
safekeeping 1.4 0.66332 [3, 1, 1, 2, 1, 1, 2, 1, 1, 1]
safelight 1.1 1.22066 [0, 3, 0, 2, 0, 3, 0, 2, 1, 0]
safelights 0.8 1.07703 [0, 3, 1, 0, 0, 2, 0, 2, 0, 0]
safely 2.2 0.74833 [2, 2, 2, 3, 4, 2, 2, 1, 2, 2]
safeness 1.5 0.67082 [1, 1, 1, 1, 3, 1, 2, 2, 2, 1]
safer 1.8 0.6 [2, 1, 2, 3, 2, 2, 1, 1, 2, 2]
safes 0.4 0.8 [0, 0, 2, 0, 0, 0, 0, 0, 2, 0]
safest 1.7 1.61555 [2, 2, 2, 2, -3, 3, 3, 2, 2, 2]
safeties 1.5 1.0247 [2, 0, 1, 3, 2, 1, 3, 1, 0, 2]
safety 1.8 0.6 [2, 2, 2, 2, 1, 1, 2, 3, 1, 2]
safetyman 0.3 0.64031 [0, 0, 0, 0, 2, 0, 1, 0, 0, 0]
salient 1.1 1.22066 [1, 3, 0, -1, 0, 1, 2, 1, 1, 3]
sappy -1.0 1.18322 [-2, -1, 2, -2, -2, 0, -1, -1, -2, -1]
sarcasm -0.9 0.7 [0, -2, -1, -1, 0, 0, -1, -1, -2, -1]
sarcasms -0.9 0.7 [0, -1, 0, -1, -1, -2, 0, -1, -1, -2]
sarcastic -1.0 0.7746 [-1, -1, -1, -1, -1, -1, -1, -2, 1, -2]
sarcastically -1.1 1.37477 [-1, -4, 1, -1, -1, -2, 1, -2, -1, -1]
satisfaction 1.9 0.9434 [1, 3, 2, 4, 2, 1, 1, 1, 2, 2]
satisfactions 2.1 0.7 [3, 3, 3, 1, 2, 2, 1, 2, 2, 2]
satisfactorily 1.6 1.11355 [1, 2, 2, -1, 2, 1, 3, 3, 1, 2]
satisfactoriness 1.5 0.5 [1, 2, 1, 2, 2, 1, 2, 1, 2, 1]
satisfactory 1.5 0.67082 [2, 3, 1, 1, 1, 2, 2, 1, 1, 1]
satisfiable 1.9 0.83066 [3, 1, 2, 1, 2, 3, 1, 2, 3, 1]
satisfied 1.8 0.6 [2, 2, 2, 1, 1, 2, 3, 1, 2, 2]
satisfies 1.8 0.6 [3, 1, 2, 1, 1, 2, 2, 2, 2, 2]
satisfy 2.0 0.63246 [3, 2, 2, 2, 2, 1, 1, 2, 3, 2]
satisfying 2.0 1.48324 [2, 3, 2, 1, 3, 3, 3, 2, -2, 3]
satisfyingly 1.9 0.9434 [1, 2, 2, 1, 2, 1, 4, 1, 3, 2]
savage -2.0 1.73205 [-3, -4, -3, 1, -2, -1, -3, -4, -2, 1]
savaged -2.0 1.34164 [-1, 0, -4, -3, -3, -3, -2, 0, -1, -3]
savagely -2.2 0.74833 [-2, -1, -3, -2, -2, -1, -2, -3, -3, -3]
savageness -2.6 1.0198 [-3, -1, -2, -3, -2, -1, -4, -4, -3, -3]
savagenesses -0.9 1.86815 [-2, 3, -1, -3, -2, 2, -3, -1, -1, -1]
savageries -1.9 1.75784 [-3, 1, -3, -4, -3, 1, -2, -3, 0, -3]
savagery -2.5 1.62788 [-2, -3, -3, -3, 2, -3, -3, -4, -4, -2]
savages -2.4 1.0198 [-2, -2, -3, -4, -3, -3, -2, 0, -2, -3]
save 2.2 1.16619 [1, 3, 3, 1, 2, 1, 2, 4, 1, 4]
saved 1.8 0.6 [1, 2, 2, 2, 1, 3, 2, 2, 1, 2]
scam -2.7 0.64031 [-2, -3, -3, -3, -2, -2, -4, -3, -3, -2]
scams -2.8 0.87178 [-3, -1, -3, -4, -4, -3, -2, -2, -3, -3]
scandal -1.9 1.81384 [-3, -2, -2, -4, 3, -3, -3, -1, -2, -2]
scandalous -2.4 0.8 [-2, -1, -3, -2, -4, -2, -3, -3, -2, -2]
scandals -2.2 0.9798 [-2, -3, -3, -2, -1, 0, -3, -3, -2, -3]
scapegoat -1.7 0.64031 [-3, -2, -2, -2, -1, -1, -1, -2, -1, -2]
scapegoats -1.4 0.8 [-1, -2, -2, -1, 0, -2, -2, 0, -2, -2]
scare -2.2 0.87178 [-2, -2, -4, -2, -3, -1, -2, -1, -2, -3]
scarecrow -0.8 0.9798 [-1, 0, -1, 0, 0, 0, -2, -3, -1, 0]
scarecrows -0.7 1.1 [2, 0, -1, -1, -1, -2, -1, -2, 0, -1]
scared -1.9 0.7 [-1, -1, -2, -3, -2, -3, -1, -2, -2, -2]
scaremonger -2.1 0.53852 [-1, -2, -2, -3, -2, -2, -3, -2, -2, -2]
scaremongers -2.0 1.0 [-2, -2, 0, -4, -2, -2, -1, -2, -3, -2]
scarer -1.7 0.78102 [-2, -1, -1, -2, -3, -1, -3, -1, -2, -1]
scarers -1.3 0.9 [-1, -2, -1, 0, 0, -1, -3, -2, -1, -2]
scares -1.4 0.4899 [-1, -1, -2, -1, -1, -2, -1, -2, -2, -1]
scarey -1.7 0.64031 [-1, -1, -2, -2, -1, -2, -1, -2, -3, -2]
scaring -1.9 1.22066 [-3, -2, -1, -3, -1, -3, -2, -2, 1, -3]
scary -2.2 0.87178 [-2, -1, -4, -3, -3, -2, -2, -2, -2, -1]
sceptic -1.0 0.89443 [-3, 0, -1, -1, -1, 0, 0, -2, -1, -1]
sceptical -1.2 0.4 [-1, -1, -1, -1, -1, -1, -1, -2, -2, -1]
scepticism -0.8 0.87178 [-1, -2, -2, 0, 0, -1, 1, -1, -1, -1]
sceptics -0.7 0.78102 [0, 0, 0, -1, -2, 0, -1, -1, 0, -2]
scold -1.7 0.78102 [-2, -1, -1, -1, -3, -3, -2, -2, -1, -1]
scoop 0.6 0.8 [0, 0, 1, 0, 2, 0, 2, 0, 1, 0]
scorn -1.7 0.64031 [-2, -3, -2, -1, -1, -1, -1, -2, -2, -2]
scornful -1.8 1.16619 [-3, -3, -2, -1, -4, 0, -2, -1, -1, -1]
scream -1.7 0.78102 [0, -3, -1, -1, -2, -2, -2, -2, -2, -2]
screamed -1.3 1.1 [-2, -3, -2, -1, -1, -2, -2, -1, 1, 0]
screamers -1.5 0.92195 [-2, -1, -2, -2, -2, -2, -1, -2, 1, -2]
screaming -1.6 0.8 [0, -1, -1, -2, -3, -1, -2, -2, -2, -2]
screams -1.2 0.9798 [-1, -2, -2, -1, -1, -2, 1, -2, 0, -2]
screw -0.4 0.91652 [-1, -2, -1, 0, 0, 1, 0, -1, 1, -1]
screwball -0.2 0.87178 [0, -1, 0, 0, 1, 1, -1, 0, -2, 0]
screwballs -0.3 1.00499 [-2, 0, -2, -1, 0, 1, 0, 1, 0, 0]
screwbean 0.3 0.64031 [0, 0, 0, 0, 0, 1, 0, 2, 0, 0]
screwdriver 0.3 0.45826 [1, 0, 0, 0, 0, 1, 0, 0, 1, 0]
screwdrivers 0.1 0.53852 [-1, 0, 0, 1, 0, 0, 0, 1, 0, 0]
screwed -2.2 0.4 [-2, -2, -2, -2, -2, -3, -3, -2, -2, -2]
screwed up -1.5 0.67082 [-2, -2, -2, -1, -1, 0, -2, -1, -2, -2]
screwer -1.2 0.87178 [-1, -2, -1, -2, 0, 0, -2, 0, -2, -2]
screwers -0.5 1.5 [-2, -2, 0, -2, 0, 2, 2, -1, 0, -2]
screwier -0.6 1.2 [0, -1, 2, -2, -1, -2, -1, -1, 1, -1]
screwiest -2.0 0.89443 [-3, -2, -2, -4, -1, -2, -1, -1, -2, -2]
screwiness -0.5 1.80278 [-2, 0, -2, 3, -2, -1, -2, 1, 2, -2]
screwing -0.9 0.9434 [-1, 0, 0, 0, -1, -1, -3, -2, 0, -1]
screwlike 0.1 1.04403 [2, -1, 0, 0, 0, 1, 0, 1, -2, 0]
screws -1.0 1.09545 [0, -3, 0, 0, -1, 0, -2, -2, -2, 0]
screwup -1.7 0.9 [-2, -2, -2, 1, -2, -2, -2, -2, -2, -2]
screwups -1.0 1.61245 [-2, -2, -2, -2, 0, 2, -2, -2, 2, -2]
screwworm -0.4 0.66332 [0, -1, 0, 0, 0, 0, -2, 0, -1, 0]
screwworms -0.1 1.22066 [-3, 0, -1, 0, 0, 0, 0, 2, 1, 0]
screwy -1.4 0.8 [-2, -2, -1, -1, -1, 0, -2, -1, -3, -1]
scrumptious 2.1 1.22066 [3, 3, 0, 3, 2, 3, 1, 3, 0, 3]
scrumptiously 1.5 1.43178 [2, 3, 3, -2, 1, 1, 2, 3, 1, 1]
scumbag -3.2 0.6 [-4, -3, -3, -2, -3, -4, -3, -3, -4, -3]
secure 1.4 0.4899 [1, 2, 1, 1, 2, 1, 1, 2, 2, 1]
secured 1.7 0.78102 [2, 2, 3, 1, 1, 2, 2, 0, 2, 2]
securely 1.4 0.8 [2, 0, 1, 2, 1, 1, 1, 3, 2, 1]
securement 1.1 0.7 [0, 2, 1, 1, 1, 0, 2, 2, 1, 1]
secureness 1.4 0.66332 [2, 1, 1, 3, 1, 1, 1, 1, 2, 1]
securer 1.5 0.67082 [1, 2, 2, 2, 1, 2, 1, 0, 2, 2]
securers 0.6 0.91652 [1, 3, 0, 0, 1, 0, 0, 0, 0, 1]
secures 1.3 0.64031 [1, 2, 2, 1, 1, 2, 1, 0, 2, 1]
securest 2.6 0.8 [3, 3, 2, 3, 1, 4, 3, 2, 2, 3]
securing 1.3 1.00499 [0, 3, 1, 1, 1, 3, 1, 1, 2, 0]
securities 1.2 0.6 [1, 2, 2, 2, 1, 1, 1, 0, 1, 1]
securitization 0.2 1.07703 [0, 0, 1, -1, 1, 0, -2, 0, 2, 1]
securitizations 0.1 0.9434 [0, 0, 0, 0, 2, 0, -2, 0, 1, 0]
securitize 0.3 1.34536 [2, 1, 0, 0, 2, 0, 1, 0, -3, 0]
securitized 1.4 1.0198 [3, 0, 2, 2, 0, 1, 2, 2, 0, 2]
securitizes 1.6 1.0198 [3, 0, 2, 2, 0, 1, 3, 2, 1, 2]
securitizing 0.7 0.9 [2, 0, 0, 1, 2, 0, 2, 0, 0, 0]
security 1.4 0.8 [1, 2, 3, 2, 1, 1, 2, 0, 1, 1]
sedition -1.8 1.249 [-3, -4, -2, -2, -2, -2, -1, -1, -2, 1]
seditious -1.7 0.64031 [-1, -2, -2, -1, -1, -1, -3, -2, -2, -2]
seduced -1.5 0.67082 [0, -1, -2, -2, -2, -2, -1, -1, -2, -2]
self-confident 2.5 0.80623 [1, 3, 3, 3, 2, 3, 1, 3, 3, 3]
selfish -2.1 0.7 [-1, -2, -2, -3, -1, -2, -2, -2, -3, -3]
selfishly -1.4 0.91652 [-3, 0, -1, -1, -1, -2, -3, -1, -1, -1]
selfishness -1.7 0.64031 [-1, -1, -1, -2, -2, -1, -2, -2, -3, -2]
selfishnesses -2.0 1.94936 [-4, -3, -1, -3, -2, -4, 2, 1, -3, -3]
sentence 0.3 0.64031 [0, 0, 0, 0, 1, 2, 0, 0, 0, 0]
sentenced -0.1 1.3 [0, -2, 2, -1, 0, -2, 2, 0, 0, 0]
sentences 0.2 1.07703 [0, 0, 2, 0, 0, -2, 2, 0, 0, 0]
sentencing -0.6 1.8 [-2, 0, -3, -2, 2, 3, 0, -1, -1, -2]
sentimental 1.3 0.64031 [2, 1, 1, 2, 1, 0, 1, 2, 2, 1]
sentimentalise 1.2 0.87178 [2, 1, 0, 3, 2, 1, 1, 0, 1, 1]
sentimentalised 0.8 1.16619 [2, 1, 1, 0, 0, 2, 3, -1, 0, 0]
sentimentalising 0.4 0.91652 [0, 2, 0, 0, 0, -1, 1, 0, 2, 0]
sentimentalism 1.0 0.63246 [2, 1, 0, 2, 1, 1, 1, 0, 1, 1]
sentimentalisms 0.4 0.8 [0, 1, 1, 0, 0, 2, 1, 0, 0, -1]
sentimentalist 0.8 0.87178 [2, 1, 0, 2, 0, 1, 2, 0, 0, 0]
sentimentalists 0.7 0.78102 [0, 0, 1, 0, 0, 1, 1, 2, 0, 2]
sentimentalities 0.9 0.83066 [2, 1, 1, 2, 1, 0, 0, 2, 0, 0]
sentimentality 1.2 1.46969 [-2, 1, 1, 2, 2, 0, 4, 2, 1, 1]
sentimentalization 1.2 0.87178 [0, 1, 2, 0, 1, 1, 3, 1, 2, 1]
sentimentalizations 0.4 0.8 [0, 1, 0, 1, 0, 0, 0, 2, -1, 1]
sentimentalize 0.8 1.07703 [2, 0, 0, 2, 0, 2, 1, -1, 2, 0]
sentimentalized 1.1 1.22066 [3, 0, 2, 0, 1, 3, | |
3), padding="same")(conv7)
conv7 = Activation("relu")(fun(conv7))
up8 = concatenate(
[
Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv7),
conv1,
],
axis=4,
)
conv8 = Conv3D(64, (3, 3, 3), padding="same")(up8)
conv8 = Activation("relu")(fun(conv8))
conv8 = Conv3D(64, (3, 3, 3), padding="same")(conv8)
conv8 = Activation("relu")(fun(conv8))
conv10 = Conv3D(feature_num, last_kern_size, activation="sigmoid")(conv8)
if include_top:
model = Model(inputs=[inputs], outputs=[conv10])
else:
model = Model(inputs=[inputs], outputs=[conv8])
model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=["mse"])
return model
def unet3d_big_IN_BN(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    batch_norm=False,
    instance_norm=False,
    include_top=True,
    last_kern_size=(1, 1, 1),
    gridsize=None,
):
    """Build a large 3D U-Net whose first encoder block uses a configurable
    normalization (batch / instance / none) while every deeper block applies
    batch normalization unconditionally.

    Args:
        lossfunc: loss function handed to model.compile.
        lr: learning rate for the Adam optimizer.
        input_dim: number of channels per camera view.
        feature_num: number of channels of the final sigmoid output.
        num_cams: number of cameras; input has input_dim * num_cams channels.
        batch_norm: if True (and instance_norm is False), the first block
            uses BatchNormalization.
        instance_norm: if True, the first block uses InstanceNormalization
            (takes precedence over batch_norm).
        include_top: if True the model outputs the final sigmoid conv layer;
            otherwise it outputs the last 64-channel feature map.
        last_kern_size: kernel size of the output convolution.
        gridsize: unused, kept for argument consistency with other nets.

    Returns:
        A compiled keras Model.
    """
    # Gridsize unused, necessary for argument consistency with other nets
    if batch_norm and not instance_norm:
        print("using batch normalization")

        def fun(inputs):
            return BatchNormalization()(inputs)

    elif instance_norm:
        print("using instance normalization")

        def fun(inputs):
            return ops.InstanceNormalization()(inputs)

    else:
        # No normalization requested: identity pass-through.
        def fun(inputs):
            return inputs

    inputs = Input((None, None, None, input_dim * num_cams))
    # Encoder block 1 — the only block routed through fun().
    # NOTE(review): every block below hard-codes BatchNormalization()
    # regardless of the batch_norm/instance_norm flags. Presumably
    # intentional given the "_IN_BN" name — confirm.
    conv1 = Conv3D(64, (3, 3, 3), padding="same")(inputs)
    conv1 = Activation("relu")(fun(conv1))
    conv1 = Conv3D(64, (3, 3, 3), padding="same")(conv1)
    conv1 = Activation("relu")(fun(conv1))
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)
    # Encoder block 2.
    conv2 = Conv3D(128, (3, 3, 3), padding="same")(pool1)
    conv2 = Activation("relu")(BatchNormalization()(conv2))
    conv2 = Conv3D(128, (3, 3, 3), padding="same")(conv2)
    conv2 = Activation("relu")(BatchNormalization()(conv2))
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
    # Encoder block 3.
    conv3 = Conv3D(256, (3, 3, 3), padding="same")(pool2)
    conv3 = Activation("relu")(BatchNormalization()(conv3))
    conv3 = Conv3D(256, (3, 3, 3), padding="same")(conv3)
    conv3 = Activation("relu")(BatchNormalization()(conv3))
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)
    # Bottleneck.
    conv4 = Conv3D(512, (3, 3, 3), padding="same")(pool3)
    conv4 = Activation("relu")(BatchNormalization()(conv4))
    conv4 = Conv3D(512, (3, 3, 3), padding="same")(conv4)
    conv4 = Activation("relu")(BatchNormalization()(conv4))
    # Decoder: upsample and concatenate the matching encoder feature map
    # (skip connection) along the channel axis (axis=4).
    up6 = concatenate(
        [
            Conv3DTranspose(256, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv4),
            conv3,
        ],
        axis=4,
    )
    conv6 = Conv3D(256, (3, 3, 3), padding="same")(up6)
    conv6 = Activation("relu")(BatchNormalization()(conv6))
    conv6 = Conv3D(256, (3, 3, 3), padding="same")(conv6)
    conv6 = Activation("relu")(BatchNormalization()(conv6))
    up7 = concatenate(
        [
            Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv6),
            conv2,
        ],
        axis=4,
    )
    conv7 = Conv3D(128, (3, 3, 3), padding="same")(up7)
    conv7 = Activation("relu")(BatchNormalization()(conv7))
    conv7 = Conv3D(128, (3, 3, 3), padding="same")(conv7)
    conv7 = Activation("relu")(BatchNormalization()(conv7))
    up8 = concatenate(
        [
            Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv7),
            conv1,
        ],
        axis=4,
    )
    conv8 = Conv3D(64, (3, 3, 3), padding="same")(up8)
    conv8 = Activation("relu")(BatchNormalization()(conv8))
    conv8 = Conv3D(64, (3, 3, 3), padding="same")(conv8)
    conv8 = Activation("relu")(BatchNormalization()(conv8))
    # Per-voxel sigmoid output projection.
    conv10 = Conv3D(feature_num, last_kern_size, activation="sigmoid")(conv8)
    if include_top:
        model = Model(inputs=[inputs], outputs=[conv10])
    else:
        model = Model(inputs=[inputs], outputs=[conv8])
    model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=["mse"])
    return model
def unet3d_big_regularized(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    batch_norm=False,
    instance_norm=False,
    include_top=True,
    last_kern_size=(1, 1, 1),
    gridsize=None,
    regularizer=None,
):
    """Build a large 3D U-Net with an L2 kernel regularizer on every conv
    except the first encoder block and the transpose/output convs.

    Args:
        lossfunc: loss function handed to model.compile.
        lr: learning rate for the Adam optimizer.
        input_dim: number of channels per camera view.
        feature_num: number of channels of the final sigmoid output.
        num_cams: number of cameras; input has input_dim * num_cams channels.
        batch_norm: if True (and instance_norm is False), use batch norm.
        instance_norm: if True, use instance norm (takes precedence).
        include_top: if True the model outputs the final sigmoid conv layer;
            otherwise it outputs the last 64-channel feature map.
        last_kern_size: kernel size of the output convolution.
        gridsize: unused, kept for argument consistency with other nets.
        regularizer: keras kernel regularizer applied to the deeper conv
            layers; defaults to l2(0.005) when None.

    Returns:
        A compiled keras Model.
    """
    # Gridsize unused, necessary for argument consistency with other nets
    # Fix: the previous default `regularizer=regularizers.l2(0.005)` was a
    # mutable default argument — created once at import time and shared by
    # every call. Create a fresh default per call instead.
    if regularizer is None:
        regularizer = regularizers.l2(0.005)
    if batch_norm and not instance_norm:
        print("using batch normalization")

        def fun(inputs):
            return BatchNormalization()(inputs)

    elif instance_norm:
        print("using instance normalization")

        def fun(inputs):
            return ops.InstanceNormalization()(inputs)

    else:
        # No normalization requested: identity pass-through.
        def fun(inputs):
            return inputs

    inputs = Input((None, None, None, input_dim * num_cams))
    # Encoder block 1 (unregularized).
    conv1 = Conv3D(64, (3, 3, 3), padding="same")(inputs)
    conv1 = Activation("relu")(fun(conv1))
    conv1 = Conv3D(64, (3, 3, 3), padding="same")(conv1)
    conv1 = Activation("relu")(fun(conv1))
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)
    # Encoder block 2 (regularized from here down).
    conv2 = Conv3D(128, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(
        pool1
    )
    conv2 = Activation("relu")(fun(conv2))
    conv2 = Conv3D(128, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(
        conv2
    )
    conv2 = Activation("relu")(fun(conv2))
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
    # Encoder block 3.
    conv3 = Conv3D(256, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(
        pool2
    )
    conv3 = Activation("relu")(fun(conv3))
    conv3 = Conv3D(256, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(
        conv3
    )
    conv3 = Activation("relu")(fun(conv3))
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)
    # Bottleneck.
    conv4 = Conv3D(512, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(
        pool3
    )
    conv4 = Activation("relu")(fun(conv4))
    conv4 = Conv3D(512, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(
        conv4
    )
    conv4 = Activation("relu")(fun(conv4))
    # Decoder: upsample and concatenate the matching encoder feature map
    # (skip connection) along the channel axis (axis=4).
    up6 = concatenate(
        [
            Conv3DTranspose(256, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv4),
            conv3,
        ],
        axis=4,
    )
    conv6 = Conv3D(256, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(up6)
    conv6 = Activation("relu")(fun(conv6))
    conv6 = Conv3D(256, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(
        conv6
    )
    conv6 = Activation("relu")(fun(conv6))
    up7 = concatenate(
        [
            Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv6),
            conv2,
        ],
        axis=4,
    )
    conv7 = Conv3D(128, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(up7)
    conv7 = Activation("relu")(fun(conv7))
    conv7 = Conv3D(128, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(
        conv7
    )
    conv7 = Activation("relu")(fun(conv7))
    up8 = concatenate(
        [
            Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv7),
            conv1,
        ],
        axis=4,
    )
    conv8 = Conv3D(64, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(up8)
    conv8 = Activation("relu")(fun(conv8))
    conv8 = Conv3D(64, (3, 3, 3), padding="same", kernel_regularizer=regularizer)(conv8)
    conv8 = Activation("relu")(fun(conv8))
    # Per-voxel sigmoid output projection.
    conv10 = Conv3D(feature_num, last_kern_size, activation="sigmoid")(conv8)
    if include_top:
        model = Model(inputs=[inputs], outputs=[conv10])
    else:
        model = Model(inputs=[inputs], outputs=[conv8])
    model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=["mse"])
    return model
def finetune_AVG(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    new_last_kern_size,
    new_n_channels_out,
    weightspath,
    num_layers_locked=2,
    batch_norm=False,
    instance_norm=False,
    gridsize=(64, 64, 64),
):
    """
    makes necessary calls to network constructors to set up nets for fine-tuning
    the spatial average version of the network.

    num_layers_locked (int) is the number of layers, starting from the input layer,
    that will be locked (non-trainable) during fine-tuning.

    weightspath (str) points to the pretrained weights file (HDF5); layer
    names are reconciled via renameLayers before loading.

    Returns an uncompiled Model taking [volume, grid_centers] and producing
    per-channel 3D coordinates.
    """
    # Build the pretrained trunk without its top so a new head can be added.
    model = unet3d_big_expectedvalue(
        lossfunc,
        lr,
        input_dim,
        feature_num,
        num_cams,
        gridsize,
        batch_norm,
        instance_norm,
        include_top=False,
    )
    pre = model.get_weights()
    # Load weights
    model = renameLayers(model, weightspath)
    post = model.get_weights()
    # Debug output: a nonzero delta confirms the checkpoint actually
    # overwrote the randomly initialized weights.
    print("evaluating weight deltas in the first conv layer")
    print("pre-weights")
    print(pre[1][0])
    print("post-weights")
    print(post[1][0])
    print("delta:")
    print(np.sum(pre[1][0] - post[1][0]))
    # Lock desired number of layers
    for layer in model.layers[:num_layers_locked]:
        layer.trainable = False
    # Do forward pass all the way until end
    input_ = Input((*gridsize, input_dim * num_cams))
    old_out = model(input_)
    # Add new output conv. layer
    new_conv = Conv3D(
        new_n_channels_out, new_last_kern_size, activation="linear", padding="same"
    )(old_out)
    grid_centers = Input((None, 3))
    # Spatial-softmax + expected-value readout over the supplied grid
    # centers (see ops module for the exact semantics).
    new_conv2 = Lambda(lambda x: ops.spatial_softmax(x))(new_conv)
    output = Lambda(lambda x: ops.expected_value_3d(x[0], x[1]))(
        [new_conv2, grid_centers]
    )
    # NOTE: unlike the unet3d_big_* constructors, this model is returned
    # uncompiled; the caller is expected to compile it.
    model = Model(inputs=[input_, grid_centers], outputs=[output])
    return model
def load_attributes_from_hdf5_group(group, name):
    """Loads attributes of the specified name from the HDF5 group.

    This method deals with an inherent problem of HDF5 file which is not
    able to store data larger than HDF5_OBJECT_HEADER_LIMIT bytes.

    Arguments:
        group: A pointer to a HDF5 group.
        name: A name of the attributes to load.
    Returns:
        data: Attributes data, as a list of str.

    From the TF/keras hdf5_format.py
    """
    # Keras checkpoints saved with a top-level "model_weights" group nest
    # the attributes one level down.
    if name not in group.attrs:
        group = group["model_weights"]
    # Fix: h5py < 3 returns attribute strings as bytes, h5py >= 3 returns
    # str. Unconditionally calling .decode() crashed on h5py >= 3; decode
    # only when the element is bytes-like.
    data = [
        n.decode("utf8") if hasattr(n, "decode") else n
        for n in group.attrs[name]
    ]
    return data
def renameLayers(model, weightspath):
    """
    Reconcile the model's layer names with those stored in the weights file.

    TF1-era checkpoints use different layer names than TF2-built models, so
    loading by name would silently skip layers. Temporarily rename each
    mismatched layer to the saved name, load the weights by name, then
    restore the original (TF2) names so the model can still be saved.
    """
    with h5py.File(weightspath, "r") as f:
        saved_names = load_attributes_from_hdf5_group(f, "layer_names")

    # Remember the model's current (TF2) names before touching anything.
    current_names = [layer.name for layer in model.layers]

    for i, layer in enumerate(model.layers):
        expected = saved_names[i]
        if layer.name != expected:
            print(
                "Correcting mismatch in layer name, model: {}, weights: {}".format(
                    layer.name, expected
                )
            )
            layer._name = expected

    model.load_weights(weightspath, by_name=True)

    # Restore the TF2 names; a no-op when nothing was renamed above.
    for i, layer in enumerate(model.layers):
        layer._name = current_names[i]
    return model
def finetune_MAX(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    new_last_kern_size,
    new_n_channels_out,
    weightspath,
    num_layers_locked=2,
    batch_norm=False,
    instance_norm=False,
    gridsize=(64, 64, 64),
):
    """
    makes necessary calls to network constructors to set up nets for fine-tuning
    the argmax version of the network.

    num_layers_locked (int) is the number of layers, starting from the input
    layer, that will be locked (non-trainable) during fine-tuning.
    gridsize is accepted but unused here: the trunk takes variable spatial
    dimensions (None, None, None).

    Returns an uncompiled Model ending in a new sigmoid conv head with
    new_n_channels_out channels.
    """
    # Build the pretrained trunk without its top so a new head can be added.
    model = unet3d_big(
        lossfunc,
        lr,
        input_dim,
        feature_num,
        num_cams,
        batch_norm,
        instance_norm,
        include_top=False,
    )
    # If a model was created with TF1, it will not load by name into a TF2
    # model because TF2 changing the naming convention.
    # here, we call a function to change the names of the layers in the model
    # to match what's contained in the weights file
    model = renameLayers(model, weightspath)
    # Lock desired number of layers
    for layer in model.layers[:num_layers_locked]:
        layer.trainable = False
    # Do forward pass all the way until end
    input_ = Input((None, None, None, input_dim * num_cams))
    old_out = model(input_)
    # Add new output conv. layer
    new_conv = Conv3D(
        new_n_channels_out, new_last_kern_size, activation="sigmoid", padding="same"
    )(old_out)
    model = Model(inputs=[input_], outputs=[new_conv])
    return model
def finetune_MAX_IN_BN(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    new_last_kern_size,
    new_n_channels_out,
    weightspath,
    num_layers_locked=2,
    batch_norm=False,
    instance_norm=False,
    gridsize=(64, 64, 64),
):
    """
    makes necessary calls to network constructors to set up nets for fine-tuning
    the argmax version of the network.

    Same as finetune_MAX but builds the unet3d_big_IN_BN trunk.
    num_layers_locked (int) is the number of layers, starting from the input
    layer, that will be locked (non-trainable) during fine-tuning.
    gridsize is accepted but unused here.
    """
    # Build the pretrained trunk without its top so a new head can be added.
    model = unet3d_big_IN_BN(
        lossfunc,
        lr,
        input_dim,
        feature_num,
        num_cams,
        batch_norm,
        instance_norm,
        include_top=False,
    )
    # Load weights
    # NOTE(review): unlike finetune_MAX, this loads directly instead of
    # going through renameLayers; by_name=True silently skips any layer
    # whose name does not match. Confirm these checkpoints are TF2-named.
    model.load_weights(weightspath, by_name=True)
    # Lock desired number of layers
    for layer in model.layers[:num_layers_locked]:
        layer.trainable = False
    # Do forward pass all the way until end
    input_ = Input((None, None, None, input_dim * num_cams))
    old_out = model(input_)
    # Add new output conv. layer
    new_conv = Conv3D(
        new_n_channels_out, new_last_kern_size, activation="sigmoid", padding="same"
    )(old_out)
    model = Model(inputs=[input_], outputs=[new_conv])
    return model
def finetune_MAX_regularized(
lossfunc,
lr,
input_dim,
feature_num,
num_cams,
new_last_kern_size,
new_n_channels_out,
weightspath,
num_layers_locked=2,
batch_norm=False,
instance_norm=False,
gridsize=(64, 64, 64),
):
"""
makes necessary calls to network constructors to | |
TransDParams:
"""This class defines the hyperameters and its ranges for tuning TranD algorithm.
TransDParams defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
learning_rate (list): List of floating point values.
L1_flag (list): List of boolean values.
hidden_size (list): List of integer values.
batch_size (list): List of integer values.
epochs (list): List of integer values.
margin (list): List of floating point values.
optimizer (list): List of strings defining the optimization algorithm to be used.
sampling (list): List of string defining the sampling to be used for generating negative examples.
"""
    def __init__(self):
        # Hyperopt search space for TransD: continuous values use
        # log-uniform / uniform priors; integer-valued sizes are quantized
        # log-uniform samples cast to int via scope.int.
        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
            'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        }
# self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
# self.L1_flag = [True, False]
# self.hidden_size = [8, 16, 32, 64, 128, 256]
# self.batch_size = [128, 256, 512]
# self.epochs = [2, 5, 10]
# self.margin = [0.4, 1.0, 2.0]
# self.optimizer = ["adam", "sgd", 'rms']
# self.sampling = ["uniform", "bern"]
class TransRParams:
    """This class defines the hyperparameters and their ranges for tuning the TransR algorithm.

    TransRParams defines all the possible values to be tuned for the algorithm. User may
    change these values directly for performing the bayesian optimization of the hyper-parameters.

    Attributes:
        search_space (dict): hyperopt search space sampled during tuning.
            Keys: learning_rate (log-uniform float), L1_flag (bool choice),
            ent_hidden_size / rel_hidden_size (quantized log-uniform int),
            batch_size (quantized log-uniform int), margin (uniform float),
            optimizer ('adam' | 'sgd' | 'rms'), epochs (fixed at 10).
    """

    def __init__(self):
        # Integer-valued dimensions are drawn log-uniformly and cast to int;
        # continuous values use log-uniform / uniform priors.
        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'ent_hidden_size': scope.int(hp.qloguniform('ent_hidden_size', np.log(8), np.log(512),1)),
            'rel_hidden_size': scope.int(hp.qloguniform('rel_hidden_size', np.log(8), np.log(512),1)),
            'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        }
        # Former grid-search candidate lists, kept for reference:
        # self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
        # self.L1_flag = [True, False]
        # self.ent_hidden_size = [8, 16, 32, 64, 128, 256]
        # self.rel_hidden_size = [8, 16, 32, 64, 128, 256]
        # self.batch_size = [128, 256, 512]
        # self.epochs = [2, 5, 10]
        # self.margin = [0.4, 1.0, 2.0]
        # self.optimizer = ["adam", "sgd", 'rms']
        # self.sampling = ["uniform", "bern"]
class NTNParams:
    """This class defines the hyperparameters and their ranges for tuning the NTN algorithm.

    NTNParams defines all the possible values to be tuned for the algorithm. User may
    change these values directly for performing the bayesian optimization of the hyper-parameters.

    Attributes:
        search_space (dict): hyperopt search space sampled during tuning.
            Keys: learning_rate (log-uniform float), L1_flag (bool choice),
            ent_hidden_size / rel_hidden_size (quantized log-uniform int,
            capped at 64 — NTN is memory-hungry), batch_size (quantized
            log-uniform int), margin (uniform float),
            optimizer ('adam' | 'sgd' | 'rms'), epochs (fixed at 10).
    """

    def __init__(self):
        # Hidden sizes are capped at 64 (vs 512 for the other models).
        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'ent_hidden_size': scope.int(hp.qloguniform('ent_hidden_size', np.log(8), np.log(64),1)),
            'rel_hidden_size': scope.int(hp.qloguniform('rel_hidden_size', np.log(8), np.log(64),1)),
            'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        }
        # Former grid-search candidate lists, kept for reference:
        # self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
        # self.L1_flag = [True, False]
        # self.ent_hidden_size = [8, 16, 32]
        # self.rel_hidden_size = [8, 16, 32]
        # self.batch_size = [128, 256, 512]
        # self.epochs = [2, 5, 10]
        # self.margin = [0.4, 1.0, 2.0]
        # self.optimizer = ["adam", "sgd", 'rms']
        # self.sampling = ["uniform", "bern"]
class SLMParams:
    """This class defines the hyperparameters and their ranges for tuning the SLM algorithm.

    SLMParams defines all the possible values to be tuned for the algorithm. User may
    change these values directly for performing the bayesian optimization of the hyper-parameters.

    Attributes:
        search_space (dict): hyperopt search space sampled during tuning.
            Keys: learning_rate (log-uniform float), L1_flag (bool choice),
            ent_hidden_size / rel_hidden_size (quantized log-uniform int),
            batch_size (quantized log-uniform int), margin (uniform float),
            optimizer ('adam' | 'sgd' | 'rms'), epochs (fixed at 10).
    """

    def __init__(self):
        # Integer-valued dimensions are drawn log-uniformly and cast to int;
        # continuous values use log-uniform / uniform priors.
        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'ent_hidden_size': scope.int(hp.qloguniform('ent_hidden_size', np.log(8), np.log(512),1)),
            'rel_hidden_size': scope.int(hp.qloguniform('rel_hidden_size', np.log(8), np.log(512),1)),
            'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        }
        # Former grid-search candidate lists, kept for reference:
        # self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
        # self.L1_flag = [True, False]
        # self.ent_hidden_size = [8, 16, 32, 64, 128, 256]
        # self.rel_hidden_size = [8, 16, 32, 64, 128, 256]
        # self.batch_size = [128, 256, 512]
        # self.epochs = [2, 5, 10]
        # self.margin = [0.4, 1.0, 2.0]
        # self.optimizer = ["adam", "sgd", 'rms']
        # self.sampling = ["uniform", "bern"]
class HoLEParams:
    """This class defines the hyperparameters and their ranges for tuning the HoLE algorithm.

    HoLEParams defines all the possible values to be tuned for the algorithm. User may
    change these values directly for performing the bayesian optimization of the hyper-parameters.

    Attributes:
        search_space (dict): hyperopt search space sampled during tuning.
            Keys: learning_rate (log-uniform float), L1_flag (bool choice),
            hidden_size (quantized log-uniform int), batch_size (quantized
            log-uniform int), margin (uniform float),
            optimizer ('adam' | 'sgd' | 'rms'), epochs (fixed at 10).
    """

    def __init__(self):
        # Integer-valued dimensions are drawn log-uniformly and cast to int;
        # continuous values use log-uniform / uniform priors.
        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
            'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        }
        # Former grid-search candidate lists, kept for reference:
        # self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
        # self.L1_flag = [True, False]
        # self.hidden_size = [8, 16, 32, 64, 128, 256]
        # self.batch_size = [128, 256, 512]
        # self.epochs = [2, 5, 10]
        # self.margin = [0.4, 1.0, 2.0]
        # self.optimizer = ["adam", "sgd", 'rms']
        # self.sampling = ["uniform", "bern"]
class RotatEParams:
    """This class defines the hyperparameters and their ranges for tuning the RotatE algorithm.

    RotatEParams defines all the possible values to be tuned for the algorithm. User may
    change these values directly for performing the bayesian optimization of the hyper-parameters.

    Attributes:
        search_space (dict): hyperopt search space sampled during tuning.
            Keys: learning_rate (log-uniform float), L1_flag (bool choice),
            hidden_size (quantized log-uniform int), batch_size (quantized
            log-uniform int), margin (uniform float),
            optimizer ('adam' | 'sgd' | 'rms'), epochs (fixed at 10).
    """

    def __init__(self):
        # Integer-valued dimensions are drawn log-uniformly and cast to int;
        # continuous values use log-uniform / uniform priors.
        self.search_space = {
            'learning_rate': hp.loguniform('learning_rate', np.log(0.00001), np.log(0.1)),
            'L1_flag': hp.choice('L1_flag', [True, False]),
            'hidden_size': scope.int(hp.qloguniform('hidden_size', np.log(8), np.log(512),1)),
            'batch_size': scope.int(hp.qloguniform('batch_size', np.log(8), np.log(4096),1)),
            'margin': hp.uniform('margin', 0.0, 2.0),
            'optimizer': hp.choice('optimizer', ["adam", "sgd", 'rms']),
            'epochs': hp.choice('epochs', [10]) # always choose 10 training epochs.
        }
        # Former grid-search candidate lists, kept for reference:
        # self.learning_rate = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
        # self.L1_flag = [True, False]
        # self.hidden_size = [8, 16, 32, 64, 128, 256]
        # self.batch_size = [128, 256, 512]
        # self.epochs = [2, 5, 10]
        # self.margin = [0.4, 1.0, 2.0]
        # self.optimizer = ["adam", "sgd", 'rms']
        # self.sampling = ["uniform", "bern"]
class ConvEParams:
"""This class defines the hyperameters and its ranges for tuning ConvE algorithm.
ConvEParams defines all the possibel values to be tuned for the algorithm. User may
change these values directly for performing the bayesian optimization of the hyper-parameters
Args:
lambda | |
import json
import os
import random
import string
import urllib

import httplib2
import requests
from flask import Flask, render_template, request, redirect, jsonify, \
    url_for, flash
from flask import make_response
from flask import session as login_session
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker

from database_setup import Base, Category, Book, User
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))
json_url = os.path.join(PROJECT_ROOT, 'client_secrets.json')
app = Flask(__name__)
app.config['SECRET_KEY'] = 'super-secret-key'
CLIENT_ID = json.load(open(json_url))['web']['client_id']
APPLICATION_NAME = "Library App"
# Connect to Database and create database session
engine = create_engine('postgresql://catalog:<password>@localhost/library')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# Create anti-forgery state token
@app.route('/login')
def showLogin():
    """Render the login page with a fresh anti-forgery state token.

    The token is stored in the session and must be echoed back by the
    OAuth callback (/gconnect) to prove the request originated here.
    """
    # Fix: use SystemRandom (os.urandom-backed) instead of the default
    # Mersenne Twister — an anti-CSRF token must be unpredictable.
    rng = random.SystemRandom()
    state = ''.join(rng.choice(string.ascii_uppercase + string.digits)
                    for x in xrange(32))
    login_session['state'] = state
    categories = session.query(Category).order_by(asc(Category.name))
    return render_template('login.html', categories=categories, STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
    """Complete Google OAuth2 sign-in.

    Exchanges the one-time authorization code POSTed by the client for
    an access token, validates that token against Google's tokeninfo
    endpoint, stores the user's profile in the Flask session (creating
    a local User row if needed), and returns an HTML welcome snippet.
    """
    # Validate state token (anti-forgery: must match the value handed
    # out by showLogin()).
    if request.args.get('state') != login_session['state']:
        response = make_response(json.dumps('Invalid state parameter.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Obtain authorization code
    code = request.data
    try:
        # Upgrade the authorization code into a credentials object
        oauth_flow = flow_from_clientsecrets(json_url, scope='')
        oauth_flow.redirect_uri = 'postmessage'
        credentials = oauth_flow.step2_exchange(code)
    except FlowExchangeError:
        response = make_response(
            json.dumps('Failed to upgrade the authorization code.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Check that the access token is valid.
    access_token = credentials.access_token
    url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
           % access_token)
    h = httplib2.Http()
    result = json.loads(h.request(url, 'GET')[1])
    # If there was an error in the access token info, abort.
    if result.get('error') is not None:
        response = make_response(json.dumps(result.get('error')), 500)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is used for the intended user.
    gplus_id = credentials.id_token['sub']
    if result['user_id'] != gplus_id:
        response = make_response(
            json.dumps("Token's user ID doesn't match given user ID."), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Verify that the access token is valid for this app.
    if result['issued_to'] != CLIENT_ID:
        response = make_response(
            json.dumps("Token's client ID does not match app's."), 401)
        print "Token's client ID does not match app's."
        response.headers['Content-Type'] = 'application/json'
        return response
    # Short-circuit when this user already has an active session.
    stored_access_token = login_session.get('access_token')
    stored_gplus_id = login_session.get('gplus_id')
    if stored_access_token is not None and gplus_id == stored_gplus_id:
        response = make_response(
            json.dumps('Current user is already connected.'), 200)
        response.headers['Content-Type'] = 'application/json'
        return response
    # Store the access token in the session for later use.
    login_session['access_token'] = credentials.access_token
    login_session['gplus_id'] = gplus_id
    # Get user info
    userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
    params = {'access_token': credentials.access_token, 'alt': 'json'}
    answer = requests.get(userinfo_url, params=params)
    data = answer.json()
    login_session['username'] = data['name']
    login_session['picture'] = data['picture']
    login_session['email'] = data['email']
    # See if a user exists, if it doesn't make a new one
    user_id = getUserID(login_session['email'])
    if not user_id:
        user_id = createUser(login_session)
    login_session['user_id'] = user_id
    # Build the HTML welcome fragment returned to the sign-in callback.
    output = ''
    output += '<h1>Welcome, '
    output += login_session['username']
    output += '!</h1>'
    output += '<img src="'
    output += login_session['picture']
    output += ' " style = "width: 200px; height: 200px;border-radius: 100px;\
    -webkit-border-radius: 100px;-moz-border-radius: 100px;"> '
    flash("you are now logged in as %s" % login_session['username'])
    print "done!"
    return output
# User Helper Functions
def createUser(login_session):
    """Persist a new User from the login session and return its id."""
    account = User(
        name=login_session['username'],
        email=login_session['email'],
        picture=login_session['picture'],
    )
    session.add(account)
    session.commit()
    # Re-query so the id comes from the committed row.
    stored = session.query(User).filter_by(email=login_session['email']).one()
    return stored.id
def getUserInfo(user_id):
    """Return the User row with the given primary key."""
    return session.query(User).filter_by(id=user_id).one()
def getUserID(email):
    """Return the id of the User with *email*, or None when absent."""
    match = session.query(User).filter_by(email=email).first()
    return match.id if match is not None else None
# DISCONNECT - Revoke a current user's token and reset their login_session
@app.route('/gdisconnect')
def gdisconnect():
    """Revoke the current user's Google token and clear their session.

    Redirects to the category list on success; returns a JSON error
    response (401 when nobody is signed in, 400 when revocation fails).
    """
    # Only disconnect a connected user.
    access_token = login_session.get('access_token')
    if access_token is None:
        response = make_response(
            json.dumps('Current user not connected.'), 401)
        response.headers['Content-Type'] = 'application/json'
        return response
    url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
    h = httplib2.Http()
    result = h.request(url, 'GET')[0]
    if result['status'] == '200':
        # Reset the user's session.
        del login_session['access_token']
        del login_session['gplus_id']
        del login_session['username']
        del login_session['email']
        del login_session['picture']
        flash('You are now logged out.')
        return redirect(url_for('showCategories'))
    else:
        # For whatever reason, the given token was invalid.
        # BUG FIX: 400 was previously passed to json.dumps() (where it
        # landed on the 'skipkeys' parameter) instead of
        # make_response(), so this failure was returned with HTTP 200.
        response = make_response(
            json.dumps('Failed to revoke token for given user.'), 400)
        response.headers['Content-Type'] = 'application/json'
        return response
# JSON APIs to view Books in the Library
# Return all books for a given category
@app.route('/category/<int:category_id>/books/JSON')
def categoryBooksJSON(category_id):
    """JSON list of every book in the given category."""
    category = session.query(Category).filter_by(id=category_id).first()
    if category is None:
        return "{None found}"
    books = session.query(Book).filter_by(category_id=category_id).all()
    return jsonify(Books=[book.serialize for book in books])
# Return a specific book in a given category
@app.route('/category/<int:category_id>/book/<int:book_id>/JSON')
def bookJSON(category_id, book_id):
    """JSON representation of a single book (category_id is unused)."""
    book = session.query(Book).filter_by(id=book_id).first()
    if book is None:
        return "{None found}"
    return jsonify(Book=book.serialize)
# Return all available book categories
@app.route('/categories/JSON')
def categoriesJSON():
    """JSON list of all available book categories."""
    all_categories = session.query(Category).all()
    return jsonify(Categories=[cat.serialize for cat in all_categories])
# Return all the books that are currently in the database
@app.route('/library/JSON')
def libraryJSON():
    """JSON list of every book currently in the database."""
    every_book = session.query(Book).all()
    return jsonify(Book=[book.serialize for book in every_book])
# Show all categories
@app.route('/')
@app.route('/category/')
def showCategories():
    """Home page: all categories, with profile info when logged in."""
    categories = session.query(Category).order_by(asc(Category.name))
    context = {'categories': categories}
    if 'username' in login_session:
        context['picture'] = login_session['picture']
        context['name'] = login_session['username']
    return render_template('library.html', **context)
# Create a new category
@app.route('/category/new/', methods=['GET', 'POST'])
def newCategory():
    """Create a new category (login required)."""
    if 'username' not in login_session:
        return redirect('/login')
    if request.method != 'POST':
        # GET: show the creation form.
        categories = session.query(Category).order_by(asc(Category.name))
        return render_template('newCategory.html', categories=categories,
                               name=login_session['username'],
                               picture=login_session['picture'])
    created = Category(
        name=request.form['name'].capitalize(),
        user_id=login_session['user_id'])
    session.add(created)
    flash("New Category '%s' Successfully Created" % created.name)
    session.commit()
    return redirect(url_for('showCategories'))
# Edit a category
@app.route('/category/<int:category_id>/edit/', methods=['GET', 'POST'])
def editCategory(category_id):
    """Rename a category; only its owner may do so."""
    if 'username' not in login_session:
        return redirect('/login')
    categories = session.query(Category).order_by(asc(Category.name))
    target = session.query(Category).filter_by(id=category_id).one()
    if target.user_id != login_session['user_id']:
        return redirect('/login')
    if request.method == 'POST':
        if request.form['name']:
            target.name = request.form['name'].capitalize()
            flash("Category '%s' successfully edited." % target.name)
        return redirect(url_for('showBooks', category_id=category_id))
    return render_template('editCategory.html', category=target,
                           categories=categories,
                           name=login_session['username'],
                           picture=login_session['picture'])
# Delete a category
@app.route('/category/<int:category_id>/delete/', methods=['GET', 'POST'])
def deleteCategory(category_id):
    """Delete an empty category; only its owner may do so."""
    if 'username' not in login_session:
        return redirect('/login')
    categories = session.query(Category).order_by(asc(Category.name))
    doomed = session.query(Category).filter_by(id=category_id).one()
    if doomed.user_id != login_session['user_id']:
        return "<script>function myFunction() {alert('You are not authorized \
        to delete this category. Please create your own category in order \
        to delete.');}</script><body onload='myFunction()''>"
    # Refuse to delete a category that still contains books.
    numbooks = session.query(Book).filter_by(category_id=category_id).count()
    if numbooks > 0:
        flash("Cannot delete category with books in it! "
              "Delete all books in '%s' category first."
              % doomed.name)
        return redirect(url_for('showBooks', category_id=category_id))
    if request.method == 'POST':
        session.delete(doomed)
        flash("Category '%s' successfully deleted." % doomed.name)
        session.commit()
        return redirect(url_for('showCategories'))
    return render_template('deleteCategory.html',
                           category=doomed,
                           categories=categories,
                           name=login_session['username'],
                           picture=login_session['picture'])
# Show books
@app.route('/category/<int:category_id>/')
@app.route('/category/<int:category_id>/books/')
def showBooks(category_id):
    """List every book in one category."""
    categories = session.query(Category).order_by(asc(Category.name))
    category = session.query(Category).filter_by(id=category_id).one()
    creator = getUserInfo(category.user_id)
    books = session.query(Book).filter_by(category_id=category_id).all()
    context = dict(books=books, category=category,
                   creator=creator, categories=categories)
    if 'username' in login_session:
        context.update(picture=login_session['picture'],
                       name=login_session['username'],
                       user=login_session['user_id'])
    return render_template('books.html', **context)
# Preview a book
@app.route('/category/<int:category_id>/books/<int:book_id>/')
def previewBook(category_id, book_id):
    """Show a preview of one book, enriched via the Google Books API.

    Looks the book up in the Google Books volumes API and renders its
    cover, author(s) and description.  Falls back to a flash message
    plus a redirect when the book or the API data is unavailable.
    """
    categories = session.query(Category).order_by(asc(Category.name))
    book = session.query(Book).filter_by(id=book_id).first()
    if book is None:
        flash("There is no book with that ID.")
        return redirect(url_for('showCategories'))
    creator = getUserInfo(book.user_id)
    url = ("https://www.googleapis.com/books/v1/volumes?q=id:{0}"
           "&key=<KEY>".format(book.isbn))
    h = httplib2.Http()
    result = json.loads(h.request(url, 'GET')[1])
    # BUG FIX: result['items'] raised KeyError when the API returned an
    # error payload (searchResults() and newBook() already guard for
    # this); use .get() so a missing key falls through to the flash
    # message below.
    if result.get('items'):
        thebook = result['items'][0]
        # BUG FIX: guard missing cover art exactly as newBook() does,
        # instead of letting a KeyError escape.
        if ('imageLinks' in thebook['volumeInfo']
                and 'thumbnail' in thebook['volumeInfo']['imageLinks']):
            cover = thebook['volumeInfo']['imageLinks']['thumbnail']
        else:
            cover = '#'
        if 'authors' in thebook['volumeInfo']:
            authors = thebook['volumeInfo']['authors']
        else:
            authors = 'No author available.'
        if 'description' in thebook['volumeInfo']:
            description = thebook['volumeInfo']['description']
        else:
            description = 'No Description available for this book.'
        # (The previously computed 'title' local was never passed to
        # the template, so it has been dropped.)
        if 'username' not in login_session:
            return render_template('previewBook.html', book=book,
                                   cover=cover,
                                   authors=authors, description=description,
                                   creator=creator, categories=categories)
        return render_template('previewBook.html', book=book,
                               cover=cover,
                               authors=authors, description=description,
                               picture=login_session['picture'],
                               name=login_session['username'],
                               creator=creator, categories=categories)
    flash("Cannot retrieve the information for '%s'." % book.title)
    return redirect(url_for('showBooks', category_id=category_id))
# Search for a new book
@app.route('/search/', methods=['GET'])
def searchResults():
    """Search the Google Books API and show matching volumes."""
    # Route only accepts GET, so no method branch is needed.
    query = request.args.get('search')
    if query is None:
        query = ''
    categories = session.query(Category).order_by(asc(Category.name))
    url = ("https://www.googleapis.com/books/v1/volumes?q={0}"
           "&key="
           "<KEY>".format(urllib.quote(query, safe='')))
    h = httplib2.Http()
    result = json.loads(h.request(url, 'GET')[1])
    if 'error' in result or result['totalItems'] < 1:
        books = []
    else:
        books = result['items']
    context = dict(books=books, categories=categories, entry=query)
    if 'username' in login_session:
        context['picture'] = login_session['picture']
        context['name'] = login_session['username']
    return render_template('searchresults.html', **context)
# Add a new book
@app.route('/book/<isbn>/new/', methods=['GET', 'POST'])
def newBook(isbn):
    """Add a book by ISBN.

    GET: look the ISBN up in the Google Books API and show a
    confirmation form.  POST: persist the submitted book and redirect
    to its category page.
    """
    categories = session.query(Category).order_by(asc(Category.name))
    if request.method == 'POST':
        newBook = Book(title=request.form['title'], isbn=request.form['isbn'],
                       category_id=request.form['category_id'],
                       user_id=login_session['user_id'])
        session.add(newBook)
        session.commit()
        flash("'%s' successfully added." % (newBook.title))
        return redirect(url_for('showBooks',
                                category_id=request.form['category_id']))
    else:
        url = ("https://www.googleapis.com/books/v1/volumes?q=isbn:{0}"
               "&key=<KEY>".format(isbn))
        h = httplib2.Http()
        result = json.loads(h.request(url, 'GET')[1])
        if 'error' in result or result['totalItems'] < 1:
            flash('No Book was found with ISBN %s' % isbn)
            return redirect(url_for('showCategories'))
        else:
            thebook = result['items'][0]
            # 'bookadded' lets the template warn when this ISBN is
            # already in the library.
            bookadded = session.query(Book).filter_by(isbn=isbn).first()
            if 'subtitle' in thebook['volumeInfo']:
                title = (thebook['volumeInfo']['title'] + ': ' +
                         thebook['volumeInfo']['subtitle'])
            else:
                title = thebook['volumeInfo']['title']
            # Cover art is optional in the API payload; fall back to '#'.
            if ('imageLinks' in thebook['volumeInfo']
                    and 'thumbnail' in thebook['volumeInfo']['imageLinks']):
                cover = thebook['volumeInfo']['imageLinks']['thumbnail']
            else:
                cover = '#'
            if 'authors' in thebook['volumeInfo']:
                authors = thebook['volumeInfo']['authors']
            else:
                authors = ''
            if 'description' in thebook['volumeInfo']:
                description = thebook['volumeInfo']['description']
            else:
                description = 'No Description available for this book.'
            if 'username' not in login_session:
                return render_template('newbook.html', title=title, cover=cover,
                                       authors=authors, description=description,
                                       categories=categories,
                                       bookadded=bookadded, isbn=isbn)
            else:
                return render_template('newbook.html', title=title, cover=cover,
                                       authors=authors, description=description,
                                       picture=login_session['picture'],
                                       name=login_session['username'],
                                       categories=categories, bookadded=bookadded,
                                       isbn=isbn)
# Delete a book
@app.route('/book/<book_id>/delete/', methods=['GET', 'POST'])
def deleteBook(book_id):
if 'username' not in login_session:
return redirect('/login')
categories = session.query(Category).order_by(asc(Category.name))
bookToDelete | |
constructs "
f"spanning axes {names}"
)
if log:
logger.info("\n".join(log))
if not _return_axis_map:
return False
else:
# Map item axes in the two instances
axes0_to_axes1[axes0] = axes1
for axes0, axes1 in axes0_to_axes1.items():
for axis0, axis1 in zip(axes0, axes1):
if axis0 in axis0_to_axis1 and axis1 != axis0_to_axis1[axis0]:
logger.info(
f"{self.__class__.__name__}: Ambiguous axis mapping "
f"({self.domain_axis_identity(axes0)} -> both "
f"{other.domain_axis_identity(axis1)} and "
f"{other.domain_axis_identity(axis0_to_axis1[axis0])})"
) # pragma: no cover
if not _return_axis_map:
return False
elif (
axis1 in axis1_to_axis0 and axis0 != axis1_to_axis0[axis1]
):
logger.info(
f"{self.__class__.__name__}: Ambiguous axis mapping "
f"({self.domain_axis_identity(axis0)} -> both "
f"{self.domain_axis_identity(axis1_to_axis0[axis0])} "
f"and {other.domain_axis_identity(axes1)})"
) # pragma: no cover
if not _return_axis_map:
return False
axis0_to_axis1[axis0] = axis1
axis1_to_axis0[axis1] = axis0
if _return_axis_map:
return axis0_to_axis1
# ------------------------------------------------------------
# Constructs with no arrays
# ------------------------------------------------------------
for construct_type in self._non_array_constructs:
if not getattr(self, "_equals_" + construct_type)(
other,
rtol=rtol,
atol=atol,
verbose=verbose,
ignore_type=_ignore_type,
axis1_to_axis0=axis1_to_axis0,
key1_to_key0=key1_to_key0,
):
return False
# ------------------------------------------------------------
# Still here? Then the two objects are equal
# ------------------------------------------------------------
return True
def filter(
self,
axis_mode="and",
property_mode="and",
todict=False,
cached=None,
_identity_config={},
**filters,
):
"""Select metadata constructs by a chain of filters.
This method allows multiple filters defined by the
"filter_by_*" methods to be chained in an alternative manner
to calling the individual methods in sequence.
For instance, to select the domain axis constructs with size
73 or 96
>>> c2 = c.filter(filter_by_type=['domain_axis'],
... filter_by_size=[73, 96])
is equivalent to
>>> c2 = c.filter_by_type('domain_axis')
>>> c2 = c2.filter_by_size(73, 96)
When the results are requested as a dictionary as opposed to a
`Constructs` object (see the *todict* parameter), using the
`filter` method to call two or more filters is faster than
calling the individual methods in sequence. For instance
>>> d = c.filter(filter_by_type=['dimension_coordinate'],
... filter_by_identity=['time'],
... todict=True)
is equivalent to, but faster than
>>> c2 = c.filter_by_type('dimension_coordinate')
>>> d = c2.filter_by_identity('time', todict=True)
.. versionadded:: (cfdm) 1.8.9.0
.. seealso:: `filter_by_axis`, `filter_by_data`,
`filter_by_identity`, `filter_by_key`,
`filter_by_measure`, `filter_by_method`,
`filter_by_identity`, `filter_by_naxes`,
`filter_by_ncdim`, `filter_by_ncvar`,
`filter_by_property`, `filter_by_type`,
`filters_applied`, `inverse_filter`, `unfilter`,
`clear_filters_applied`
:Parameters:
filters: optional
Keyword arguments defining the filters to apply. Each
filter keyword defines a filter method, and its value
provides the arguments for that method.
For instance, ``filter_by_type=['domain_axis']`` will
cause the `filter_by_type` method to be called with
positional arguments ``*['domain_axis']``.
The filters are applied in the same order that the
keyword arguments appear.
Valid keywords and their values are:
====================== ==============================
Keyword Value
====================== ==============================
``filter_by_axis`` A sequence as expected by the
*axes* parameter of
`filter_by_axis`
``filter_by_identity`` A sequence as expected by the
*identities* parameter of
`filter_by_identity`
``filter_by_key`` A sequence as expected by the
*keys* parameter of
`filter_by_key`
``filter_by_measure`` A sequence as expected by the
*measures* parameter of
`filter_by_measure`
``filter_by_method`` A sequence as expected by the
*methods* parameter of
`filter_by_method`
``filter_by_naxes`` A sequence as expected by the
*naxes* parameter of
`filter_by_naxes`
``filter_by_ncdim`` A sequence as expected by the
*ncdims* parameter of
`filter_by_ncdim`
``filter_by_ncvar`` A sequence as expected by the
*ncvars* parameter of
`filter_by_ncvar`
``filter_by_size`` A sequence as expected by the
*sizes* parameter of
`filter_by_size`
``filter_by_type`` A sequence as expected by the
*types* parameter of
`filter_by_type`
``filter_by_property`` A dictionary as expected by
the *properties* parameter of
`filter_by_property`
``filter_by_data`` Any value is allowed which
will be ignored, as
`filter_by_data` does not have
any positional arguments.
====================== ==============================
axis_mode: `str`, optional
Provide a value for the *axis_mode* parameter of the
`filter_by_axis` method. By default *axis_mode* is
``'and'``.
property_mode: `str`, optional
Provide a value for the *property_mode* parameter of
the `filter_by_property` method. By default
*property_mode* is ``'and'``.
{{todict: `bool`, optional}}
{{cached: optional}}
_identity_config: optional
Provide a value for the *_config* parameter of the
`filter_by_identity` method.
:Returns:
`Constructs` or `dict` or *cached*
The selected constructs, or a cached valued.
"""
if cached is not None:
return cached
if not filters:
out, _ = self._filter_preprocess(self, todict=todict)
return out
out = self
for method, args in filters.items():
try:
filter_method = getattr(self, "_" + method)
except AttributeError:
raise TypeError(
f"{self.__class__.__name__}.filter() has an unexpected "
f"keyword argument {method!r}"
)
args = self._filter_parse_args(
method,
args,
todict=todict,
axis_mode=axis_mode,
property_mode=property_mode,
_identity_config=_identity_config,
)
out = filter_method(out, *args)
return out
    def _filter_by_axis(
        self,
        arg,
        axes,
        todict,
        axis_mode,
    ):
        """Worker function for `filter_by_axis` and `filter`.

        See `filter_by_axis` for details.

        :Parameters:

            arg: `Constructs` or `dict`
                The constructs to be filtered.

            axes: sequence
                The domain axis identifiers/identities to match.

            todict: `bool`
                Whether to return a `dict` rather than `Constructs`.

            axis_mode: `str` or `None`
                One of ``'and'``, ``'or'``, ``'exact'``, ``'subset'``
                (or `None`, treated as ``'and'``).

        .. versionadded:: (cfdm) 1.8.9.0

        """
        # Parse the axis_mode parameter
        _or = False
        _exact = False
        _subset = False
        if axis_mode in ("and", None):
            pass
        elif axis_mode == "or":
            _or = True
        elif axis_mode == "exact":
            _exact = True
        elif axis_mode == "subset":
            _subset = True
        elif axes:
            # An unrecognised mode is only an error when axes were
            # actually given.
            raise ValueError(
                f"{self.__class__.__name__}.filter_by_axis() has incorrect "
                f"'axis_mode' value {axis_mode!r}. "
                "Expected one of 'or', 'and', 'exact', 'subset'"
            )
        filter_applied = {"filter_by_axis": (axes, {"axis_mode": axis_mode})}
        if not axes:
            # Return all constructs that could have data if no axes
            # have been provided
            return self._filter_by_data(
                arg, None, todict, filter_applied=filter_applied
            )
        out, pop = self._filter_preprocess(
            arg,
            filter_applied=filter_applied,
            todict=todict,
        )
        # Convert values to domain axis construct identifiers, if any
        # can be.
        axes2 = self._filter_convert_to_domain_axis(
            axes, check_axis_identities=True
        )
        if not axes2:
            # No arguments found unique domain axis constructs
            if isinstance(out, dict):
                out = {}
            else:
                out._clear()
            return out
        axes = set(axes2)
        data_axes = self._construct_axes
        for cid in tuple(out):
            x = data_axes.get(cid)
            if x is None:
                # A construct with no data axes can never match.
                pop(cid)
                continue
            ok = True
            if _exact:
                # Must span exactly the given axes.
                if set(x) != axes:
                    ok = False
            elif _subset:
                # Must span a subset of the given axes.
                if not set(x).issubset(axes):
                    ok = False
            else:
                # 'and'/'or' modes: test each requested axis in turn.
                for axis_key in axes:
                    ok = axis_key in x
                    if _or:
                        if ok:
                            break
                    elif not ok:
                        break
            if not ok:
                pop(cid)
        return out
def filter_by_axis(
self,
*axes,
axis_mode="and",
todict=False,
cached=None,
):
"""Select metadata constructs by axes spanned by their data.
.. versionadded:: (cfdm) 1.7.0
.. seealso:: `filter`, `filters_applied`, `inverse_filter`,
`clear_filters_applied`, `unfilter`
:Parameters:
mode: `str`
Deprecated at version 1.8.9.0. Use the *axis_mode*
parameter instead.
axes: optional
Select constructs that whose data spans the domain
axis constructs specified by the given values. A value
may be:
* A domain axis construct identifier, with or without
the ``'key%'`` prefix.
* The unique domain axis construct spanned by all of
the 1-d coordinate constructs returned by, for a
given ``value``,
``c.filter(filter_by_type=["dimension_coordinate",
"auxiliary_coordinate"], filter_by_naxes=(1,),
filter_by_identity=(value,))``. See `filter` for
details.
* If there is an associated `Field` data array and a
value matches the integer position of an array
dimension, then the corresponding domain axis
construct is specified.
* A unique domain axis construct identity, defined by
its `!identities` methods. In this case a value may
be any object that can match via the ``==``
operator, or a `re.Pattern` object that matches via
its `~re.Pattern.search` method.
If no axes are provided then all constructs that do,
or could have data, spanning any domain axes
constructs, are selected.
axis_mode: `str`
Define the relationship between the given domain axes
and the constructs' data.
=========== =========================================
*axis_mode* Description
=========== =========================================
``'and'`` A construct is selected if it spans *all*
of the given domain axes, *and possibly
others*.
``'or'`` A construct is selected if it spans *any*
of the domain axes, *and possibly
others*.
``exact`` A construct is selected if it spans *all*
of the given domain axes, *and no
others*.
``subset`` A construct is selected if it spans *a
subset* of the given domain axes, *and no
others*.
=========== =========================================
By default *axis_mode* is ``'and'``.
{{todict: `bool`, optional}}
.. versionadded:: (cfdm) 1.8.9.0
{{cached: optional}}
.. versionadded:: (cfdm) 1.8.9.0
:Returns:
`Constructs` or `dict` or *cached*
The selected constructs, or a cached valued.
**Examples:**
Select constructs whose data spans the "domainaxis1" domain
axis construct:
>>> d = c.filter_by_axis('domainaxis1')
Select constructs whose data does not span the "domainaxis2"
domain axis construct:
>>> d = c.filter_by_axis('domainaxis2').inverse_filter()
Select constructs whose data spans the "domainaxis1", but not
the "domainaxis2" domain axis constructs:
>>> d = c.filter_by_axis('domainaxis1')
>>> d = d.filter_by_axis('domainaxis2')
>>> d = d.inverse_filter(1)
Select constructs whose data spans the "domainaxis1" or the
"domainaxis2" domain axis constructs:
>>> d = c.filter_by_axis('domainaxis1', 'domainaxis2', axis_mode="or")
"""
if cached is not None:
return cached
return self._filter_by_axis(self, axes, todict, axis_mode)
def _filter_by_data(self, arg, | |
<gh_stars>1-10
#!/usr/bin/env python
"""A variety of methods to solve first order ordinary differential equations.
AUTHOR:
<NAME> <<EMAIL>>
Gordon College
    Based on Octave functions written in the spring of 1999
Python version: March 2008, October 2008
"""
import numpy
#-----------------------------------------------------------------------------
def euler( f, x0, t ):
    """Euler's method to solve x' = f(x,t) with x(t[0]) = x0.

    USAGE:
        x = euler(f, x0, t)

    INPUT:
        f     - function of x and t equal to dx/dt.  x may be
                multivalued, in which case it should be a list or a
                NumPy array, and f must return a NumPy array with the
                same dimension as x.
        x0    - the initial condition(s): the value of x at t = t[0].
                Scalar, list, or NumPy array for a system.
        t     - list or NumPy array of t values to compute the solution
                at.  t[0] is the initial point, and the difference
                h = t[i+1] - t[i] determines the (possibly non-uniform)
                step size.

    OUTPUT:
        x     - NumPy array of solution values, one entry per entry of
                t.  For a system, x is an array of arrays.
    """
    n = len( t )
    x = numpy.array( [x0] * n )
    # Forward Euler: advance using the slope at the left endpoint.
    # BUG FIX: use range() rather than the Python-2-only xrange() so
    # the function also runs on Python 3 (behavior is identical).
    for i in range( n - 1 ):
        x[i+1] = x[i] + ( t[i+1] - t[i] ) * f( x[i], t[i] )
    return x
#-----------------------------------------------------------------------------
def heun( f, x0, t ):
    """Heun's method to solve x' = f(x,t) with x(t[0]) = x0.

    USAGE:
        x = heun(f, x0, t)

    INPUT:
        f     - function of x and t equal to dx/dt.  x may be
                multivalued, in which case it should be a list or a
                NumPy array, and f must return a NumPy array with the
                same dimension as x.
        x0    - the initial condition(s): the value of x at t = t[0].
                Scalar, list, or NumPy array for a system.
        t     - list or NumPy array of t values to compute the solution
                at.  t[0] is the initial point; h = t[i+1] - t[i] is
                the step size.

    OUTPUT:
        x     - NumPy array of solution values, one entry per entry of
                t.  For a system, x is an array of arrays.
    """
    n = len( t )
    x = numpy.array( [x0] * n )
    # Predictor-corrector (trapezoidal) update: average the slopes at
    # the two endpoints, with the right slope taken at the Euler
    # predictor.
    # BUG FIX: range() instead of the Python-2-only xrange().
    for i in range( n - 1 ):
        h = t[i+1] - t[i]
        k1 = h * f( x[i], t[i] )
        k2 = h * f( x[i] + k1, t[i+1] )
        x[i+1] = x[i] + ( k1 + k2 ) / 2.0
    return x
#-----------------------------------------------------------------------------
def rk2a( f, x0, t ):
    """Second-order Runge-Kutta (midpoint) method for x' = f(x,t),
    x(t[0]) = x0.

    USAGE:
        x = rk2a(f, x0, t)

    INPUT:
        f     - function of x and t equal to dx/dt.  x may be
                multivalued, in which case it should be a list or a
                NumPy array, and f must return a NumPy array with the
                same dimension as x.
        x0    - the initial condition(s): the value of x at t = t[0].
                Scalar, list, or NumPy array for a system.
        t     - list or NumPy array of t values to compute the solution
                at.  t[0] is the initial point; h = t[i+1] - t[i] is
                the step size.

    OUTPUT:
        x     - NumPy array of solution values, one entry per entry of
                t.  For a system, x is an array of arrays.

    NOTES:
        This version is based on the algorithm presented in "Numerical
        Analysis", 6th Edition, by Burden and Faires, Brooks-Cole,
        1997.
    """
    n = len( t )
    x = numpy.array( [ x0 ] * n )
    # Midpoint rule: take a half Euler step, then use the slope at the
    # interval midpoint for the full step.
    # BUG FIX: range() instead of the Python-2-only xrange().
    for i in range( n - 1 ):
        h = t[i+1] - t[i]
        k1 = h * f( x[i], t[i] ) / 2.0
        x[i+1] = x[i] + h * f( x[i] + k1, t[i] + h / 2.0 )
    return x
#-----------------------------------------------------------------------------
def rk2b( f, x0, t ):
    """Second-order Runge-Kutta (trapezoidal) method for x' = f(x,t),
    x(t[0]) = x0.

    USAGE:
        x = rk2b(f, x0, t)

    INPUT:
        f     - function of x and t equal to dx/dt.  x may be
                multivalued, in which case it should be a list or a
                NumPy array, and f must return a NumPy array with the
                same dimension as x.
        x0    - the initial condition(s): the value of x at t = t[0].
                Scalar, list, or NumPy array for a system.
        t     - list or NumPy array of t values to compute the solution
                at.  t[0] is the initial point; h = t[i+1] - t[i] is
                the step size.

    OUTPUT:
        x     - NumPy array of solution values, one entry per entry of
                t.  For a system, x is an array of arrays.

    NOTES:
        This version is based on the algorithm presented in "Numerical
        Mathematics and Computing" 4th Edition, by Cheney and Kincaid,
        Brooks-Cole, 1999.  It is algebraically identical to heun().
    """
    n = len( t )
    x = numpy.array( [ x0 ] * n )
    # BUG FIX: range() instead of the Python-2-only xrange().
    for i in range( n - 1 ):
        h = t[i+1] - t[i]
        k1 = h * f( x[i], t[i] )
        k2 = h * f( x[i] + k1, t[i+1] )
        x[i+1] = x[i] + ( k1 + k2 ) / 2.0
    return x
#-----------------------------------------------------------------------------
def rk4( f, x0, t ):
    """Fourth-order Runge-Kutta method to solve x' = f(x,t) with
    x(t[0]) = x0.

    USAGE:
        x = rk4(f, x0, t)

    INPUT:
        f     - function of x and t equal to dx/dt.  x may be
                multivalued, in which case it should be a list or a
                NumPy array, and f must return a NumPy array with the
                same dimension as x.
        x0    - the initial condition(s): the value of x at t = t[0].
                Scalar, list, or NumPy array for a system.
        t     - list or NumPy array of t values to compute the solution
                at.  t[0] is the initial point; h = t[i+1] - t[i] is
                the step size.

    OUTPUT:
        x     - NumPy array of solution values, one entry per entry of
                t.  For a system, x is an array of arrays.
    """
    n = len( t )
    x = numpy.array( [ x0 ] * n )
    # Classical RK4: weighted average of four slope samples per step.
    # BUG FIX: range() instead of the Python-2-only xrange().
    for i in range( n - 1 ):
        h = t[i+1] - t[i]
        k1 = h * f( x[i], t[i] )
        k2 = h * f( x[i] + 0.5 * k1, t[i] + 0.5 * h )
        k3 = h * f( x[i] + 0.5 * k2, t[i] + 0.5 * h )
        k4 = h * f( x[i] + k3, t[i+1] )
        x[i+1] = x[i] + ( k1 + 2.0 * ( k2 + k3 ) + k4 ) / 6.0
    return x
#-----------------------------------------------------------------------------
def rk45( f, x0, t ):
"""Fourth-order Runge-Kutta method with error estimate.
USAGE:
x, err = rk45(f, x0, t)
INPUT:
f - function of x and t equal to dx/dt. x may be multivalued,
in which case it should a list or a NumPy array. In this
case f must return a NumPy array with the same dimension
as x.
x0 - the initial condition(s). Specifies the value of x when
t = t[0]. Can be either a scalar or a list or NumPy array
if a system of equations is being solved.
t - list or NumPy array of t values to compute solution at.
t[0] is the the initial condition point, and the difference
h=t[i+1]-t[i] determines the step size h.
OUTPUT:
x - NumPy array containing solution values corresponding to each
| |
import pygame
class Board:
    """Model of a chess game: 8x8 grid, turn state and rule checking.

    Every cell of ``self.board`` is a list ``[(x, y), piece]`` where
    ``(x, y)`` is the pixel position used when drawing and ``piece`` is
    ``0`` for an empty square or a piece object (Rook, Knight, Bishop,
    Queen, King, Pawn - defined elsewhere in this file).  Squares are
    addressed as ``board[row][column]`` and moves are ``(row, column)``
    tuples.
    """

    def __init__(self):
        self.turn = 0  # 0 = white to move, 1 = black to move
        self.check = 0  # number of enemy moves currently hitting the king
        self.checkmate = False
        self.stalemate = False
        self.draw = False
        self.last_move = None  # (row, column) of the last moved-to square
        self.promotion = False
        self.black_promotion = pygame.image.load(".\\Pieces\\black_promotion.png")
        self.white_promotion = pygame.image.load(".\\Pieces\\white_promotion.png")
        self.image = pygame.image.load(".\\Pieces\\board.png")
        self.board = self.start()

    def start(self):
        """Build and return the initial 8x8 position.

        Rows 0/1 hold the black pieces (team 1), rows 6/7 the white pieces
        (team 0); pixel coordinates are derived from the column/row index.
        """
        board = []
        for i in range(8):
            line = []
            for j in range(8):
                x = 25 + j * 72
                y = 10 + i * 71
                to_append = [(x, y), 0]
                if i == 0 or i == 7:
                    if i == 0:
                        color = ".\\Pieces\\black"
                        team = 1
                    else:
                        color = ".\\Pieces\\white"
                        team = 0
                    if j == 0 or j == 7:
                        # j % 6 distinguishes the two rooks (0 vs 1),
                        # presumably for castling bookkeeping - confirm in Rook
                        to_append = [(x, y), Rook(team, f"{color}_rook.png", j % 6)]
                    elif j == 1 or j == 6:
                        to_append = [(x, y), Knight(team, f"{color}_knight.png")]
                    elif j == 2 or j == 5:
                        to_append = [(x, y), Bishop(team, f"{color}_bishop.png")]
                    elif j == 3:
                        to_append = [(x, y), Queen(team, f"{color}_queen.png")]
                    else:
                        to_append = [(x, y), King(team, f"{color}_king.png")]
                elif i == 1:
                    to_append = [(x, y), Pawn(1, ".\\Pieces\\black_pawn.png")]
                elif i == 6:
                    to_append = [(x, y), Pawn(0, ".\\Pieces\\white_pawn.png")]
                line.append(to_append)
            board.append(line)
        return board

    def get_all_moves(self, team):
        """Collect every square *team* can move to (attack map).

        Non-king pieces contribute their get_moves() results; for the king
        every one of the 8 adjacent squares is appended instead.
        """
        moves = []
        king_sequences = ((1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1))
        y = 0
        for line in self.board:
            x = 0
            for _, row in line:
                if row != 0 and row.team == team:
                    if type(row).__name__ != "King":
                        for move in row.get_moves(x, y, self.board):
                            moves.append(move)
                    else:
                        index_x, index_y = x, y
                x += 1
            y += 1
        for move in king_sequences:
            try:
                # indexing only probes that the square exists; note negative
                # indices wrap around in Python, so edge squares may map oddly
                self.board[index_y + move[0]][index_x + move[1]]
                moves.append((index_y + move[0], index_x + move[1]))
            except AttributeError:
                moves.append((index_y + move[0], index_x + move[1]))
            except IndexError:
                pass
        return moves

    def reset_en_passant(self):
        """Clear the en-passant flag on every pawn on the board."""
        for line in self.board:
            for _, row in line:
                if row != 0 and type(row).__name__ == "Pawn":
                    row.en_passant = False

    def check_check(self, all_moves):
        """Increment self.check once per move in *all_moves* that hits the
        current player's king (so a double check yields check == 2)."""
        y = 0
        for line in self.board:
            x = 0
            for _, row in line:
                if row != 0 and row.team == self.turn and type(row).__name__ == "King":
                    index_x, index_y = x, y
                x += 1
            y += 1
        for move in all_moves:
            if move == (index_y, index_x):
                self.check += 1

    def get_king_legal_moves(self, index_x, index_y):
        """Return the legal destination squares for the king at
        (index_x, index_y), filtering out every attacked square.

        Pawn attacks need special handling: a pawn's forward pushes (which
        do not capture) are removed from the enemy attack map, and its
        diagonal attack squares are added instead.  Each surviving move is
        then verified by temporarily playing it and re-running check_check.
        """
        all_moves = self.get_all_moves((self.turn + 1) % 2)
        moves = self.board[index_y][index_x][1].get_moves(index_x, index_y, self.board, all_moves)
        y = 0
        remove = []
        # squares enemy pawns can push to (not attacks) - to be removed
        for line in self.board:
            x = 0
            for _, row in line:
                if row != 0 and row.team != self.turn and type(row).__name__ == "Pawn":
                    if row.team == 0:
                        remove.append((y - 1, x))
                        if y == 6:
                            remove.append((y - 2, x))
                    else:
                        remove.append((y + 1, x))
                        if y == 1:
                            remove.append((y + 2, x))
                x += 1
            y += 1
        i = 0
        new_all_moves = []
        # drop one occurrence of each pawn-push square from the attack map
        while i < len(all_moves):
            if all_moves[i] in remove:
                remove.remove(all_moves[i])
            else:
                new_all_moves.append(all_moves[i])
            i += 1
        y = 0
        # add the diagonal squares enemy pawns actually attack
        for line in self.board:
            x = 0
            for _, row in line:
                if row != 0 and row.team != self.turn:
                    if type(row).__name__ == "Pawn":
                        if self.turn == 0:
                            if x + 1 < 8:
                                new_all_moves.append((y + 1, x + 1))
                            if x - 1 >= 0:
                                new_all_moves.append((y + 1, x - 1))
                        else:
                            if x + 1 < 8:
                                new_all_moves.append((y - 1, x + 1))
                            if x - 1 >= 0:
                                new_all_moves.append((y - 1, x - 1))
                x += 1
            y += 1
        moves = [x for x in moves if x not in new_all_moves]
        aux_all_moves = []
        # keep squares that are empty (indexing 0 raises AttributeError on
        # .team) or occupied by an enemy piece
        for move in moves:
            try:
                if self.board[move[0]][move[1]][1].team != self.turn:
                    aux_all_moves.append(move)
            except AttributeError:
                aux_all_moves.append(move)
        king = self.board[index_y][index_x][1]
        self.board[index_y][index_x][1] = 0
        aux_check = self.check
        final = []
        # play each candidate move and make sure the king is not in check
        for move in aux_all_moves:
            self.check = 0
            previous = self.board[move[0]][move[1]][1]
            self.board[move[0]][move[1]][1] = king
            self.check_check(self.get_all_moves((self.turn + 1) % 2))
            if self.check == 0:
                final.append(move)
            self.board[move[0]][move[1]][1] = previous
        self.check = aux_check
        self.board[index_y][index_x][1] = king
        return final

    def get_legal_moves(self, x, y, selected):
        """Legal moves for the non-king piece *selected* standing at (x, y).

        When in single check only moves that land on the attack line (see
        get_attack_direction) are considered; in double check (check > 1)
        only the king may move, so an empty list is returned.  Every
        candidate is validated by playing it and re-running check_check.
        """
        piece_moves = selected.get_moves(x, y, self.board)
        aux_moves = []
        # keep empty squares (AttributeError on .team) and enemy squares
        for move in piece_moves:
            try:
                if self.board[move[0]][move[1]][1].team != self.turn:
                    aux_moves.append(move)
            except AttributeError:
                aux_moves.append(move)
        if self.check == 0:
            moves = []
            self.board[y][x][1] = 0
            for move in aux_moves:
                previous = self.board[move[0]][move[1]][1]
                self.board[move[0]][move[1]][1] = selected
                self.check_check(self.get_all_moves((self.turn + 1) % 2))
                if self.check == 0:
                    moves.append(move)
                self.check = 0
                self.board[move[0]][move[1]][1] = previous
            self.board[y][x][1] = selected
        elif self.check == 1:
            moves = []
            direction = self.get_attack_direction()
            # only blocking the line or capturing the checker can help
            aux_moves = [x for x in aux_moves if x in direction]
            self.board[y][x][1] = 0
            for move in aux_moves:
                previous = self.board[move[0]][move[1]][1]
                self.check = 0
                self.board[move[0]][move[1]][1] = selected
                self.check_check(self.get_all_moves((self.turn + 1) % 2))
                if self.check == 0:
                    moves.append(move)
                self.board[move[0]][move[1]][1] = previous
            self.check = 1
            self.board[y][x][1] = selected
        else:
            moves = []
        return moves

    def get_attack_direction(self):
        """Return the checking piece's square (self.last_move) plus every
        square between it and the current player's king, walking along the
        shared row, column or diagonal."""
        moves = [self.last_move]
        y = 0
        for line in self.board:
            x = 0
            for _, row in line:
                if row != 0 and row.team == self.turn and type(row).__name__ == "King":
                    index_x, index_y = x, y
                x += 1
            y += 1
        horizontal_distance = self.last_move[1] - index_x
        vertical_distance = self.last_move[0] - index_y
        if vertical_distance == 0:
            # same row: step left or right toward the checker
            if horizontal_distance > 0:
                for i in range(1, horizontal_distance):
                    moves.append((index_y, index_x + i))
            else:
                for i in range(1, abs(horizontal_distance)):
                    moves.append((index_y, index_x - i))
        elif horizontal_distance == 0:
            # same column
            if vertical_distance > 0:
                for i in range(1, vertical_distance):
                    moves.append((index_y + i, index_x))
            else:
                for i in range(1, abs(vertical_distance)):
                    moves.append((index_y - i, index_x))
        else:
            # diagonal: pick the quadrant by the signs of both distances
            if vertical_distance > 0 and horizontal_distance > 0:
                for i in range(1, vertical_distance):
                    moves.append((index_y + i, index_x + i))
            elif vertical_distance < 0 and horizontal_distance < 0:
                for i in range(1, abs(vertical_distance)):
                    moves.append((index_y - i, index_x - i))
            elif vertical_distance > 0 and horizontal_distance < 0:
                for i in range(1, vertical_distance):
                    moves.append((index_y + i, index_x - i))
            else:
                # vertical < 0, horizontal > 0
                for i in range(1, horizontal_distance):
                    moves.append((index_y - i, index_x + i))
        return moves

    def check_checkmate_or_stalemate(self):
        """Count all legal moves for the side to move; with none available,
        set checkmate when in check, stalemate otherwise."""
        y = 0
        available_moves = 0
        for line in self.board:
            x = 0
            for _, row in line:
                if row != 0 and row.team == self.turn:
                    if type(row).__name__ == "King":
                        available_moves += len(self.get_king_legal_moves(x, y))
                    else:
                        available_moves += len(self.get_legal_moves(x, y, self.board[y][x][1]))
                x += 1
            y += 1
        if available_moves == 0:
            if self.check > 0:
                self.checkmate = True
            else:
                self.stalemate = True

    def check_draw(self):
        """Set self.draw on insufficient material.

        Any queen, rook or pawn on the board rules a draw out; otherwise the
        (black, white) piece counts (1,1), (2,1) or (1,2) - i.e. king vs
        king, possibly with one extra minor piece - are a draw.
        """
        white = 0
        black = 0
        draws = ((1, 1), (2, 1), (1, 2))
        for line in self.board:
            for _, row in line:
                if row != 0:
                    if type(row).__name__ in ("Queen", "Rook", "Pawn"):
                        return
                    if row.team == 0:
                        white += 1
                    else:
                        black += 1
        if (black, white) in draws:
            self.draw = True
class Piece:
    """Base class for chess pieces: holds a point value, a team and a sprite."""

    def __init__(self, value, team, image):
        # relative point value of the piece (pawn = 1, ...)
        self.value = value
        # 0 = white, 1 = black
        self.team = team
        self.image = pygame.image.load(image)

    def remove_negatives(self, moves):
        """Return only the (row, col) moves whose coordinates are both >= 0."""
        return [move for move in moves if move[0] >= 0 and move[1] >= 0]
class Pawn(Piece):
    def __init__(self, team, image):
        """Create a pawn (point value 1) for *team* with the sprite *image*."""
        super().__init__(1, team, image)
        # True only while this pawn may be captured en passant; cleared by
        # Board.reset_en_passant()
        self.en_passant = False
    def get_moves(self, pos_x, pos_y, board):
        """Pseudo-legal pawn moves from (pos_x, pos_y) as (row, col) tuples.

        Covers the single push, the double push from the start rank,
        diagonal captures, and en-passant captures (neighbouring enemy
        pawn with its en_passant flag set).  The result is filtered
        through remove_negatives().

        NOTE(review): the capture checks below index
        board[pos_y + increment] without re-checking the 0..7 range, so a
        pawn on its last rank would raise IndexError or wrap around -
        confirm promotion always happens before that situation.
        """
        moves = []
        # white (team 0) moves toward smaller row indices, black larger
        increment = -1
        if self.team == 1:
            increment = 1
        if 0 <= pos_y + increment < 8:
            if board[pos_y + increment][pos_x][1] == 0:
                moves.append((pos_y + increment, pos_x))
        # double push from the initial rank (landing square must be empty)
        if self.team == 0 and pos_y == 6 and board[pos_y - 2][pos_x][1] == 0:
            moves.append((pos_y - 2, pos_x))
        if self.team == 1 and pos_y == 1 and board[pos_y + 2][pos_x][1] == 0:
            moves.append((pos_y + 2, pos_x))
        # diagonal capture / en passant to the left
        if pos_x - 1 >= 0:
            if (board[pos_y + increment][pos_x - 1][1] != 0 and board[pos_y + increment][pos_x - 1][1].team != self.team) or \
                    (board[pos_y][pos_x - 1][1] != 0 and board[pos_y][pos_x - 1][1].team != self.team and \
                    type(board[pos_y][pos_x - 1][1]).__name__ == "Pawn" and board[pos_y][pos_x - 1][1].en_passant):
                moves.append((pos_y + increment, pos_x - 1))
        # diagonal capture / en passant to the right
        if pos_x + 1 < 8:
            if (board[pos_y + increment][pos_x + 1][1] != 0 and board[pos_y + increment][pos_x + 1][1].team != self.team) or \
                    (board[pos_y][pos_x + 1][1] != 0 and board[pos_y][pos_x + 1][1].team != self.team and \
                    type(board[pos_y][pos_x + 1][1]).__name__ == "Pawn" and board[pos_y][pos_x + 1][1].en_passant):
                moves.append((pos_y + increment, pos_x + 1))
        return super().remove_negatives(moves)
def do_en_passant(self, pos_x, pos_y, board):
if self.team == 0:
| |
- {'.notdef'})
return [g for g in diff[:] if g != '.notdef']
class NbspAndSpaceSameWidth(Fixer):
    """Ensures the font has both 'space' (U+0020) and 'nbsp' (U+00A0)
    glyphs, that neither has ink, and that their advance widths agree.

    Fix applied: removed a leftover ``import ipdb; ipdb.set_trace()``
    debugger breakpoint in addGlyph() that halted any non-interactive run
    reaching the CFF branch.
    """

    def get_shell_command(self):
        return "fontbakery-fix-nbsp.py {}".format(self.fontpath)

    def getGlyph(self, uchar):
        """Return the glyph name mapped to *uchar* in a Windows (3,1)/(3,10)
        cmap subtable, or None when unmapped."""
        for table in self.font['cmap'].tables:
            if not (table.platformID == 3 and table.platEncID in [1, 10]):
                continue
            if uchar in table.cmap:
                return table.cmap[uchar]
        return None

    def addCFFGlyph(self, glyphName=None, program=None, private=None,
      globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
        """Append an empty CFF charstring for *glyphName* to the given
        charstring index/charset (CFF-flavoured fonts only)."""
        from fontTools.misc.psCharStrings import T2CharString
        charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
        charStringsIndex.append(charString)
        glyphID = len(topDict.charset)
        charStrings.charStrings[glyphName] = glyphID
        topDict.charset.append(glyphName)

    def addGlyph(self, uchar, glyph):
        """Add an empty glyph named *glyph* mapped to codepoint *uchar*.

        Registers the name in the glyph order, zeroes its metrics, maps it
        in every Windows cmap subtable, and creates an empty outline in
        either the 'glyf' or the 'CFF ' table.  Returns the glyph name.
        """
        # Add to glyph list
        glyphOrder = self.font.getGlyphOrder()
        glyphOrder.append(glyph)
        self.font.setGlyphOrder(glyphOrder)
        # Add horizontal metrics (to zero)
        self.font['hmtx'][glyph] = [0, 0]
        # Add to cmap
        for table in self.font['cmap'].tables:
            if not (table.platformID == 3 and table.platEncID in [1, 10]):
                continue
            if not table.cmap:  # Skip UVS cmaps
                continue
            assert uchar not in table.cmap
            table.cmap[uchar] = glyph
        # Add empty glyph outline
        if 'glyf' in self.font:
            self.font['glyf'].glyphs[glyph] = ttLib.getTableModule('glyf').Glyph()
        else:
            cff = self.font['CFF '].cff
            self.addCFFGlyph(
                glyphName=glyph,
                private=cff.topDictIndex[0].Private,
                globalSubrs=cff.GlobalSubrs,
                charStringsIndex=cff.topDictIndex[0].CharStrings.charStringsIndex,
                topDict=cff.topDictIndex[0],
                charStrings=cff.topDictIndex[0].CharStrings
            )
        return glyph

    def getWidth(self, glyph):
        """Advance width of *glyph* from the hmtx table."""
        return self.font['hmtx'][glyph][0]

    def setWidth(self, glyph, width):
        """Set *glyph*'s advance width, preserving its left side bearing."""
        self.font['hmtx'][glyph] = (width, self.font['hmtx'][glyph][1])

    def glyphHasInk(self, name):
        """Checks if specified glyph has any ink.

        That is, that it has at least one defined contour associated.
        Composites are considered to have ink if any of their components
        have ink.

        Args:
            name: The name of the glyph to check for ink.

        Returns:
            True if the font has at least one contour associated with it.
        """
        glyph = self.font['glyf'].glyphs[name]
        glyph.expand(self.font['glyf'])
        if not glyph.isComposite():
            if glyph.numberOfContours == 0:
                return False
            (coords, _, _) = glyph.getCoordinates(self.font['glyf'])
            # you need at least 3 points to draw
            return len(coords) > 2
        # composite is blank if composed of blanks
        # if you setup a font with cycles you are just a bad person
        for glyph_name in glyph.getComponentNames(glyph.components):
            if self.glyphHasInk(glyph_name):
                return True
        return False

    def fix(self, check=False):
        """Create/clean space and nbsp and reconcile their advance widths.

        With check=True only log what would change.  Returns True when the
        widths were (or would be) adjusted, otherwise whether any glyph was
        emptied of ink; returns False when adding a missing glyph failed.
        """
        retval = False
        fontfile = os.path.basename(self.fontpath)
        space = self.getGlyph(0x0020)
        nbsp = self.getGlyph(0x00A0)
        if space not in ["space", "uni0020"]:
            logger.error('ER: {}: Glyph 0x0020 is called "{}": Change to "space" or "uni0020"'.format(fontfile, space))
        if nbsp not in ["nbsp", "uni00A0"]:
            logger.error('ER: {}: Glyph 0x00A0 is called "{}": Change to "nbsp" or "uni00A0"'.format(fontfile, nbsp))
        isNbspAdded = isSpaceAdded = False
        if not nbsp:
            isNbspAdded = True
            try:
                nbsp = self.addGlyph(0x00A0, 'nbsp')
            except Exception as ex:
                logger.error('ER: {}'.format(ex))
                return False
        if not space:
            isSpaceAdded = True
            try:
                space = self.addGlyph(0x0020, 'space')
            except Exception as ex:
                logger.error('ER: {}'.format(ex))
                return False
        for g in [space, nbsp]:
            if self.glyphHasInk(g):
                if check:
                    logger.error('ER: {}: Glyph "{}" has ink. Delete any contours or components'.format(fontfile, g))
                else:
                    logger.error('ER: {}: Glyph "{}" has ink. Fixed: Overwritten by an empty glyph'.format(fontfile, g))
                    # overwrite existing glyph with an empty one
                    self.font['glyf'].glyphs[g] = ttLib.getTableModule('glyf').Glyph()
                    retval = True
        spaceWidth = self.getWidth(space)
        nbspWidth = self.getWidth(nbsp)
        if spaceWidth != nbspWidth or nbspWidth < 0:
            # NOTE(review): both widths are forced to the *smaller* value,
            # while the messages below describe widening to the larger one;
            # also the widths are written even in check mode.  Behaviour
            # kept unchanged - verify before relying on check=True.
            self.setWidth(nbsp, min(nbspWidth, spaceWidth))
            self.setWidth(space, min(nbspWidth, spaceWidth))
            if isNbspAdded:
                if check:
                    msg = 'ER: {} space {} nbsp None: Add nbsp with advanceWidth {}'
                else:
                    msg = 'ER: {} space {} nbsp None: Added nbsp with advanceWidth {}'
                logger.error(msg.format(fontfile, spaceWidth, spaceWidth))
            if isSpaceAdded:
                if check:
                    msg = 'ER: {} space None nbsp {}: Add space with advanceWidth {}'
                else:
                    msg = 'ER: {} space None nbsp {}: Added space with advanceWidth {}'
                logger.error(msg.format(fontfile, nbspWidth, nbspWidth))
            if nbspWidth > spaceWidth and spaceWidth >= 0:
                if check:
                    msg = 'ER: {} space {} nbsp {}: Change space advanceWidth to {}'
                else:
                    msg = 'ER: {} space {} nbsp {}: Fixed space advanceWidth to {}'
                logger.error(msg.format(fontfile, spaceWidth, nbspWidth, nbspWidth))
            else:
                if check:
                    msg = 'ER: {} space {} nbsp {}: Change nbsp advanceWidth to {}'
                else:
                    msg = 'ER: {} space {} nbsp {}: Fixed nbsp advanceWidth to {}'
                logger.error(msg.format(fontfile, spaceWidth, nbspWidth, spaceWidth))
            return True
        logger.info('OK: {} space {} nbsp {}'.format(fontfile, spaceWidth, nbspWidth))
        return retval
class GaspFixer(Fixer):
    """Fixer for the 'gasp' table: forces behaviour value 15 (grid-fit +
    smoothing + symmetric variants) for the sentinel ppem range 0xFFFF."""

    def get_shell_command(self):
        SCRIPTPATH = 'fontbakery-fix-gasp.py'
        return "$ {0} --set={1} {2}".format(SCRIPTPATH, 15, self.fontpath)

    def fix(self, path, value=15):
        """Set gaspRange[65535] = value; returns True on success.

        Fix applied: the original used a bare ``except:``; when the font
        has no 'gasp' table, ``self.font.get('gasp')`` returns None and the
        attribute access raises AttributeError, which is what we catch.
        """
        try:
            table = self.font.get('gasp')
            table.gaspRange[65535] = value
            return True
        except AttributeError:
            logger.error('ER: {}: no table gasp'.format(path))
            return

    def show(self, path):
        """Log the gasp behaviour configured for ppem range 0xFFFF.

        Fix applied: ``gaspRange`` is a dict, so a missing 65535 key raises
        KeyError (and a missing table raises AttributeError); the original
        only caught IndexError and used a bare ``except:`` above.
        """
        try:
            table = self.font.get('gasp')
        except AttributeError:
            logger.error('ER: {}: no table gasp'.format(path))
            return
        try:
            logger.info(self.font.get('gasp').gaspRange[65535])
        except (AttributeError, KeyError, IndexError):
            logger.error('ER: {}: no index 65535'.format(path))
class Vmet(Fixer):
    """Harmonises vertical metrics across a whole font family.

    Unlike the other fixers it operates on every binary in the upstream
    directory at once, delegating the per-font write to VmetFixer.
    """

    SCRIPTPATH = 'fontbakery-fix-vertical-metrics.py'

    def loadfont(self, fontpath):
        # the real fonts are loaded per file inside apply(); the base-class
        # slot just gets a dummy
        return ttLib.TTFont()  # return for this fixer empty TTFont

    def __init__(self, testcase, fontpath):
        super(Vmet, self).__init__(testcase, fontpath)
        d = os.path.dirname(fontpath)
        directory = UpstreamDirectory(d)
        # all binary font files belonging to the family
        self.fonts = [os.path.join(d, f) for f in directory.BIN]

    def get_shell_command(self):
        return "{} --autofix {}".format(Vmet.SCRIPTPATH, ' '.join(self.fonts))

    def apply(self, override_origin=False):
        """Compute the family-wide vertical bounds and write them into
        every font, then log the resulting metrics view.

        NOTE(review): if self.fonts is empty, `metrics` below is never
        bound; and `import StringIO` is the Python 2 module (io.StringIO
        is the Python 3 equivalent) - confirm the target interpreter.
        """
        from bakery_cli.ttfont import Font
        ymin = 0
        ymax = 0
        # family-wide bounding box over all fonts (clamped at 0)
        for f in self.fonts:
            metrics = Font(f)
            font_ymin, font_ymax = metrics.get_bounding()
            ymin = min(font_ymin, ymin)
            ymax = max(font_ymax, ymax)
        for f in self.fonts:
            fixer = VmetFixer(self.testcase, f)
            fixer.apply(ymin, ymax, override_origin=override_origin)
        command = "$ {0} {1}".format(Vmet.SCRIPTPATH, ' '.join(self.fonts))
        logger.debug(command)
        import StringIO
        for l in StringIO.StringIO(metricview(self.fonts)):
            logger.debug(l)
class VmetFixer(Fixer):
    """Writes the supplied family-wide vertical-metric bounds into one font."""

    def fix(self, ymin, ymax):
        """Set ascent to *ymax*, descent to *ymin* and the line gap to zero."""
        from bakery_cli.ttfont import AscentGroup, DescentGroup, LineGapGroup
        for group_cls, bound in ((AscentGroup, ymax),
                                 (DescentGroup, ymin),
                                 (LineGapGroup, 0)):
            group_cls(self.font).set(bound)
        return True
def fontTools_to_dict(font):
    """Flatten the naming/style tables of a fontTools font into a plain dict.

    Collects every 'name' record (decoding UTF-16-BE for unicode-flagged
    records), selected OS/2, head and post fields, and - for CFF-flavoured
    fonts - the CFF Weight.
    """
    def _record_string(rec):
        # unicode-flagged name records are stored as UTF-16 big-endian bytes
        if rec.isUnicode():
            return rec.string.decode("utf_16_be")
        return rec.string

    names = []
    for rec in font['name'].names:
        names.append({
            'nameID': rec.nameID,
            'platformID': rec.platformID,
            'langID': rec.langID,
            'string': _record_string(rec),
            'platEncID': rec.platEncID,
        })

    fontdata = {
        'names': names,
        'OS/2': {
            'fsSelection': font['OS/2'].fsSelection,
            'usWeightClass': font['OS/2'].usWeightClass,
        },
        'head': {
            'macStyle': font['head'].macStyle,
        },
        'post': {
            'italicAngle': font['post'].italicAngle,
        },
    }
    if 'CFF ' in font:
        fontdata['CFF'] = {'Weight': font['CFF '].cff.topDictIndex[0].Weight}
    return fontdata
class FamilyAndStyleNameFixer(Fixer):
    """Logs (and was apparently meant to rewrite) the OpenType family and
    style name records."""

    def get_shell_command(self):
        return "fontbakery-fix-opentype-names.py {}".format(self.fontpath)

    def getOrCreateNameRecord(self, nameId, val):
        """Log the intended value of name record *nameId*.

        NOTE(review): the early ``return`` right after the log call makes
        everything below unreachable, so this method currently only logs
        and never touches the font.  The update/create code is preserved
        but dead - confirm whether it was disabled deliberately.
        """
        logger.error('NAMEID {}: "{}"'.format(nameId, val))
        return
        result_namerec = None
        for k, p in [[1, 0], [3, 1]]:
            result_namerec = self.font['name'].getName(nameId, k, p)
            if result_namerec:
                result_namerec.string = (val or '').encode(result_namerec.getEncoding())
        if result_namerec:
            return result_namerec
        ot_namerecord = NameRecord()
        ot_namerecord.nameID = nameId
        ot_namerecord.platformID = 3
        ot_namerecord.langID = 0x409
        # When building a Unicode font for Windows, the platform ID
        # should be 3 and the encoding ID should be 1
        ot_namerecord.platEncID = 1
        ot_namerecord.string = (val or '').encode(ot_namerecord.getEncoding())
        self.font['name'].names.append(ot_namerecord)
        return ot_namerecord

    def fix(self):
        """Compute fixed name values and log them wrapped in a ``` block."""
        # Convert huge and complex fontTools to config python dict
        fontdata = fontTools_to_dict(self.font)
        fontdata = clean_name_values(fontdata)
        familyname = ''
        for rec in fontdata['names']:
            if rec['nameID'] == 1:
                familyname = rec['string']
                break
        fontdata = fix_all_names(fontdata, familyname)
        logger.error('```')
        logger.error(os.path.basename(self.fontpath))
        logger.error('')
        for field in fontdata['names']:
            self.getOrCreateNameRecord(field['nameID'], field['string'])
        logger.error('```')
        return True
class RemoveNameRecordWithOpyright(Fixer):
    """Drops nameID 10 (description) records mentioning a copyright.

    The substring 'opyright' matches both 'Copyright' and 'copyright'.
    """

    def containsSubstr(self, namerecord, substr):
        """True if *substr* occurs in the record's decoded string."""
        decoded = namerecord.string.decode(namerecord.getEncoding())
        return substr in decoded

    def fix(self):
        name_table = self.font['name']
        # decode every record (matching the original's evaluation order)
        # and keep all but copyright-bearing description entries
        name_table.names = [
            record for record in name_table.names
            if not (self.containsSubstr(record, 'opyright') and record.nameID == 10)
        ]
        return True
class RemoveItemsWithPlatformID1(Fixer):
    """Strips every Macintosh-platform (platformID 1) name record."""

    def fix(self):
        name_table = self.font['name']
        name_table.names = [
            record for record in name_table.names if record.platformID != 1
        ]
        return True
# Absolute path to the repository's bundled Data directory
# (../Data relative to this module).
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'Data'))
class ReplaceLicenseURL(Fixer):
    """Replaces the license-URL name record (nameID 14) with a canonical URL.

    Subclasses supply the data files holding the canonical URL and the full
    license text (see ReplaceOFLLicenseURL / ReplaceApacheLicenseURL).
    """

    def get_licenseurl_filename(self):
        # overridden by subclasses: file in DATA_DIR holding the URL
        return None

    def get_licensecontent_filename(self):
        # overridden by subclasses: file in DATA_DIR holding the license text
        return None

    def validate(self):
        """Return the canonical URL when the font needs fixing, else None.

        Fixing is needed when the nameID 14 record differs from the
        canonical URL, or when the nameID 13 record contains the full
        license text.
        """
        path = os.path.join(DATA_DIR, self.get_licensecontent_filename())
        licenseText = open(path).read()
        path = os.path.join(DATA_DIR, self.get_licenseurl_filename())
        placeholder = open(path).read().strip()
        for field in self.font['name'].names:
            if field.nameID == 14:
                string = field.string.decode(field.getEncoding())
                if string != placeholder:
                    return placeholder
            if field.nameID == 13:
                string = field.string.decode(field.getEncoding())
                if licenseText.strip() in string:
                    return placeholder
        return

    def fix(self):
        """Write the canonical URL into every nameID 14 record."""
        placeholder = self.validate()
        if not placeholder:
            return
        for nameRecord in self.font['name'].names:
            if nameRecord.nameID == 14:
                # fix: encode with the *record's* encoding; `placeholder` is
                # a plain str and has no getEncoding(), so the original line
                # raised AttributeError whenever a record needed replacing
                nameRecord.string = placeholder.encode(nameRecord.getEncoding())
        return True
class ReplaceOFLLicenseURL(ReplaceLicenseURL):
    """ReplaceLicenseURL specialised for the SIL Open Font License."""

    URL_FILENAME = 'OFL.url'
    LICENSE_FILENAME = 'OFL.license'

    def get_licenseurl_filename(self):
        return self.URL_FILENAME

    def get_licensecontent_filename(self):
        return self.LICENSE_FILENAME
class ReplaceApacheLicenseURL(ReplaceLicenseURL):
    """ReplaceLicenseURL specialised for the Apache license."""

    URL_FILENAME = 'APACHE.url'
    LICENSE_FILENAME = 'APACHE.license'

    def get_licenseurl_filename(self):
        return self.URL_FILENAME

    def get_licensecontent_filename(self):
        return self.LICENSE_FILENAME
class ReplaceLicenseWithShortline(Fixer):
    """Replaces the full license text record (nameID 13) with a short
    placeholder line read from DATA_DIR (see subclasses)."""

    def get_placeholder(self):
        """Read and strip the placeholder text for this license flavour."""
        path = self.get_placeholder_filename()
        with open(os.path.join(DATA_DIR, path)) as fp:
            return fp.read().strip()

    def fix(self):
        placeholder = self.get_placeholder()
        for nameRecord in self.font['name'].names:
            if nameRecord.nameID == 13:
                # fix: encode with the *record's* encoding; `placeholder` is
                # a plain str and has no getEncoding(), so the original line
                # raised AttributeError on every nameID 13 record
                nameRecord.string = placeholder.encode(nameRecord.getEncoding())
        return True
class ReplaceOFLLicenseWithShortLine(ReplaceLicenseWithShortline):
    """Short-line replacement using the OFL placeholder text."""

    PLACEHOLDER_FILENAME = 'OFL.placeholder'

    def get_placeholder_filename(self):
        return self.PLACEHOLDER_FILENAME
class ReplaceApacheLicenseWithShortLine(ReplaceLicenseWithShortline):
    """Short-line replacement using the Apache placeholder text."""

    PLACEHOLDER_FILENAME = 'APACHE.placeholder'

    def get_placeholder_filename(self):
        return self.PLACEHOLDER_FILENAME
class RenameFileWithSuggestedName(Fixer):
    """Renames the font file to '<Family>-<Subfamily>.<ext>'."""

    def validate(self):
        """Build the expected file name from the font's suggested names.

        NOTE(review): `family_name` and `subfamily_name` are not defined
        anywhere in this method - as written this raises NameError.  They
        presumably should be extracted from `suggestedvalues`; confirm the
        correct keys and fix upstream.
        """
        suggestedvalues = getSuggestedFontNameValues(self.font)
        expectedname = '{0}-{1}'.format(family_name.replace(' ', ''),
                                        subfamily_name.replace(' ', ''))
        # only the extension of the current path is reused; `actualname`
        # itself is unused
        actualname, extension = os.path.splitext(self.fontpath)
        return '{0}{1}'.format(expectedname, extension)

    def fix(self):
        """Move the file to its suggested name and update the test case."""
        newfilename = self.validate()
        new_targetpath = os.path.join(os.path.dirname(self.fontpath),
                                      newfilename)
        shutil.move(self.fontpath, new_targetpath)
        from bakery_cli.logger import logger
        logger.info('$ mv {} {}'.format(self.fontpath, os.path.basename(new_targetpath)))
        self.testcase.operator.path = new_targetpath
        from bakery_cli.utils import ProcessedFile
        f = ProcessedFile()
        f.filepath = newfilename
        # the renamed file is final; the base class must not re-save the font
        self.save_after_fix = False
        return True
class SpaceIndentationWriter(Fixer):
    def loadfont(self, path):
        """No-op: this fixer rewrites the source text file, not a font binary."""
        return  # this fixer does not operate with font
def apply(self, *args, **kwargs):
string = ''
for line in open(self.fontpath):
string += | |
import tkinter
from abc import ABC, abstractmethod
from tkinter import *
import math
class ConvexPolygon(ABC):
    """Abstract base for the drawable convex polygons in this module.

    Subclasses must implement area(), perimeter() and draw().

    NOTE(review): the attribute name 'otuline_colour' is a typo for
    'outline_colour', but every subclass reads it under this exact name,
    so it is part of the de-facto interface and kept unchanged.
    """

    @abstractmethod
    def __init__(self, fill_color, outline_colour):
        self.fill_colour = fill_color
        self.otuline_colour = outline_colour
        super(ConvexPolygon, self).__init__()

    @abstractmethod
    def area(self):
        """Return the polygon's area."""
        pass

    @abstractmethod
    def perimeter(self):
        """Return the polygon's perimeter."""
        pass

    @abstractmethod
    def draw(self):
        """Render the polygon in the application window."""
        pass

    def __get__(self, instance, owner):
        # NOTE(review): highly suspicious - this looks up an attribute named
        # after the outline colour *on the fill-colour string*, which cannot
        # succeed for ordinary colour strings.  Looks like a leftover
        # descriptor experiment; verify before relying on it.
        return getattr(self.fill_colour, self.otuline_colour)
# Descriptor enforcing positive values
class Quantity:
    """Descriptor that only accepts strictly positive values.

    Each descriptor instance stores its value on the managed object under
    a unique backing name ('_Quantity#<n>'), so several Quantity
    attributes can coexist on one class without colliding.
    """

    _count = 0  # number of Quantity instances created so far

    def __init__(self):
        descriptor_cls = type(self)
        serial = descriptor_cls._count
        # unique backing-attribute name for this descriptor instance
        self.storage_name = f'_{descriptor_cls.__name__}#{serial}'
        descriptor_cls._count += 1

    def __get__(self, instance, owner):
        # the backing name differs from the managed attribute name, so a
        # plain getattr on the instance cannot recurse into __get__
        return getattr(instance, self.storage_name)

    def __set__(self, instance, value):
        if value > 0:
            setattr(instance, self.storage_name, value)
        else:
            raise ValueError("wartość musi być większa od zera!")
# Triangle
class Triangle(ConvexPolygon):
    """Right triangle built from a base and a height entered in a Tk form.

    base, height and sideC are Quantity descriptors, so assigning a
    non-positive number raises ValueError.
    """

    base = Quantity()
    height = Quantity()
    sideC = Quantity()

    def __init__(self, window):
        super(Triangle, self).__init__("yellow", "red")
        self.tuplePoints = ()

    def getData(self):
        """Open a small Tk form asking for the base and the height."""
        print("Tworzenie trójkąta")
        root = tkinter.Tk()
        self.label_1 = Label(root, text="Podaj długość podstawy: ")
        self.entry_1 = Entry(root)
        self.label_2 = Label(root, text="Podaj wysokość:")
        self.entry_2 = Entry(root)
        self.button_1 = Button(root, text="Rysuj!", command=self.draw)
        self.label_1.grid(row=0, column=0)
        self.entry_1.grid(row=0, column=1)
        self.label_2.grid(row=1, column=0)
        self.entry_2.grid(row=1, column=1)
        self.button_1.grid(row=3, column=1)

    def area(self):
        """Area of the right triangle: base * height / 2."""
        return round((self.base * self.height) / 2, 2)

    def perimeter(self):
        """Perimeter: base + height + hypotenuse (sideC)."""
        return round(self.base + self.height + self.sideC, 2)

    def draw(self):
        """Read the form, compute the vertices and hand them to the window.

        NOTE(review): `window` is a module-level global expected to exist
        at call time (defined elsewhere in this file); the `window`
        constructor argument is never stored.
        """
        marginX = 100
        marginY = 200
        self.base = float(self.entry_1.get())
        self.height = float(self.entry_2.get())
        # hypotenuse of the right triangle with legs base and height
        self.sideC = math.sqrt(math.pow(self.height, 2) + math.pow(self.base, 2))
        # self.tuplePoints = (
        #     marginX, marginY, self.base + marginX, marginY, (self.base / 2) + marginX, -self.height + marginY, marginX,
        #     marginY)
        self.tuplePoints = (
            marginX, marginY,
            marginX, marginY + self.height,
            marginX + self.base, marginY + self.height,
        )
        window.initUI(self.tuplePoints, self.fill_colour, self.otuline_colour, self.area(), self.perimeter())
# Isosceles triangle
class IsoscelesTriangle(Triangle):
    """Triangle entered as base + height, drawn with the apex centred above
    the base midpoint; area and perimeter are computed from the drawn
    vertices rather than from the raw inputs."""

    base = Quantity()
    height = Quantity()

    def __init__(self, window):
        super(IsoscelesTriangle, self).__init__(window)
        self.tuplePoints = ()

    def getData(self):
        """Open a Tk form asking for the base length and the height."""
        root = tkinter.Tk()
        self.label_1 = Label(root, text="podaj długość boku: ")
        self.entry_1 = Entry(root)
        self.label_2 = Label(root, text="podaj długość wysokości: ")
        self.entry_2 = Entry(root)
        self.button_1 = Button(root, text="Rysuj!", command=self.draw)
        self.label_1.grid(row=0, column=0)
        self.entry_1.grid(row=0, column=1)
        self.label_2.grid(row=1, column=0)
        self.entry_2.grid(row=1, column=1)
        self.button_1.grid(row=3, column=1)

    def draw(self):
        """Build vertices: both base endpoints plus the apex above the
        midpoint, then hand them to the module-level `window`."""
        marginX = 100
        marginY = 200
        self.base = float(self.entry_1.get())
        self.height = float(self.entry_2.get())
        self.tuplePoints = (
            marginX, marginY, self.base + marginX, marginY, (self.base / 2) + marginX, -self.height + marginY, marginX,
            marginY)
        window.initUI(self.tuplePoints, self.fill_colour, self.otuline_colour, self.area(), self.perimeter())

    def area(self):
        """Shoelace formula |x1(y2-y3) + x2(y3-y1) + x3(y1-y2)| / 2 over
        the first three stored vertices."""
        triangleArea = math.fabs(
            self.tuplePoints[0] * (self.tuplePoints[3] - self.tuplePoints[5]) + self.tuplePoints[2] *
            (self.tuplePoints[5] - self.tuplePoints[1]) + self.tuplePoints[4] * (
                    self.tuplePoints[1] - self.tuplePoints[3])) / 2
        return round(triangleArea, 2)

    def perimeter(self):
        """Sum of the three side lengths computed from the stored vertices."""
        AB = math.sqrt(
            math.pow(self.tuplePoints[2] - self.tuplePoints[0], 2) + math.pow(self.tuplePoints[3] - self.tuplePoints[1],
                                                                              2))
        BC = math.sqrt(
            math.pow(self.tuplePoints[4] - self.tuplePoints[2], 2) + math.pow(self.tuplePoints[5] - self.tuplePoints[3],
                                                                              2))
        CA = math.sqrt(
            math.pow(self.tuplePoints[0] - self.tuplePoints[4], 2) + math.pow(self.tuplePoints[1] - self.tuplePoints[5],
                                                                              2))
        return round(AB + BC + CA, 2)
# Equilateral triangle
class EquilateralTriangle(Triangle):
    """Equilateral triangle from a single side length; the height is
    derived as side * sqrt(3) / 2."""

    Sidea = Quantity()

    def __init__(self, window):
        super(EquilateralTriangle, self).__init__(window)

    def getData(self):
        """Open a Tk form asking for the side length.

        NOTE(review): the console message says 'równoramiennego'
        (isosceles) although this class draws the equilateral triangle;
        the runtime string is left untouched here.
        """
        print("Tworzenie trójkąta równoramiennego")
        root = tkinter.Tk()
        self.label_1 = Label(root, text="Podaj długość podstawy: ")
        self.entry_1 = Entry(root)
        self.button_1 = Button(root, text="Rysuj!", command=self.draw)
        self.label_1.grid(row=0, column=0)
        self.entry_1.grid(row=0, column=1)
        self.button_1.grid(row=3, column=1)

    def draw(self):
        """Derive the height from the side and render the triangle."""
        marginX = 200
        marginY = 300
        self.base = float(self.entry_1.get())
        self.height = (self.base * math.sqrt(3)) / 2
        self.tuplePoints = (
            marginX, marginY, self.base + marginX, marginY, (self.base / 2) + marginX, -self.height + marginY, marginX,
            marginY)
        window.initUI(self.tuplePoints, self.fill_colour, self.otuline_colour, self.area(), self.perimeter())

    def perimeter(self):
        """Perimeter: 3 * side (not rounded, unlike the parent class)."""
        return self.base * 3

    def area(self):
        """Area: base * height / 2 (not rounded, unlike the parent class)."""
        return (self.base * self.height) / 2
# done
class RegularPentagon(ConvexPolygon):
    """Regular pentagon entered as a single side length.

    NOTE(review): draw() places the vertices on a circle of radius Side,
    i.e. it treats the input as the circumradius, so the drawn side length
    differs from Side; kept as in the original.
    """

    Side = Quantity()

    def __init__(self, window):
        super(RegularPentagon, self).__init__("green", "blue")
        self.tuplePoints = ()

    def area(self):
        """Area of a regular pentagon: (5/4) * s**2 * cot(36 deg).

        Fix applied: the original used the coefficient 3 instead of 5
        (regular n-gon area is (n/4) * s**2 * cot(pi/n), n = 5 here),
        understating the area by a factor of 5/3.
        """
        return round(5 * (self.Side ** 2 / 4) * (1 / math.tan(math.radians(36))), 2)

    def perimeter(self):
        """Perimeter: 5 * side."""
        return round(5 * self.Side, 2)

    def draw(self):
        """Read the side from the form and render the pentagon."""
        marginX = 300
        marginY = 150
        self.Side = float(self.entry_1.get())
        tab = list()
        for i in range(5):
            x = marginX + self.Side * math.cos(math.radians(360 / 5 * i))
            y = marginY + self.Side * math.sin(math.radians(360 / 5 * i))
            tab.append([x, y])
        self.tuplePoints = tuple(tab)
        window.initUI(self.tuplePoints, self.fill_colour, self.otuline_colour, self.area(), self.perimeter())

    def getData(self):
        """Open a Tk form asking for the side length."""
        print("tworzenie pięciokąta foremnego")
        root = tkinter.Tk()
        self.label_1 = Label(root, text="Podaj długość boku: ")
        self.entry_1 = Entry(root)
        self.button_1 = Button(root, text="Rysuj!", command=self.draw)
        self.label_1.grid(row=0, column=0)
        self.entry_1.grid(row=0, column=1)
        self.button_1.grid(row=3, column=1)
# done
class RegularHexagon(ConvexPolygon):
    """Regular hexagon entered as a single side length.

    Fix applied: the class attribute was ``Side = Quantity`` (the
    descriptor *class*, not an instance), so assignments to ``self.Side``
    were plain attribute writes and silently bypassed the positive-value
    validation; it must be ``Quantity()`` as in the other shapes.
    """

    Side = Quantity()

    def __init__(self, window):
        super(RegularHexagon, self).__init__("pink", "red")
        self.tuplePoints = ()

    def area(self):
        """Area of a regular hexagon: (3 * sqrt(3) / 2) * s**2."""
        return round((3 * math.pow(self.Side, 2) * math.sqrt(3)) / 2, 2)

    def perimeter(self):
        """Perimeter: 6 * side."""
        return round(6 * self.Side, 2)

    def draw(self):
        """Read the side from the form and render the hexagon (the side is
        used as the circumradius, which is exact for a hexagon)."""
        marginX = 300
        marginY = 150
        self.Side = float(self.entry_1.get())
        tab = list()
        for i in range(6):
            x = marginX + self.Side * math.cos(math.radians(360 / 6 * i))
            y = marginY + self.Side * math.sin(math.radians(360 / 6 * i))
            tab.append([x, y])
        self.tuplePoints = tuple(tab)
        window.initUI(self.tuplePoints, self.fill_colour, self.otuline_colour, self.area(), self.perimeter())

    def getData(self):
        """Open a Tk form asking for the side length."""
        print("tworzenie sześciokąta foremnego")
        root = tkinter.Tk()
        self.label_1 = Label(root, text="Podaj długość boku: ")
        self.entry_1 = Entry(root)
        self.button_1 = Button(root, text="Rysuj!", command=self.draw)
        self.label_1.grid(row=0, column=0)
        self.entry_1.grid(row=0, column=1)
        self.button_1.grid(row=3, column=1)
# done
class RegularOctagon(ConvexPolygon):
    """Regular octagon entered as a single side length.

    Fix applied: ``Side = Quantity`` assigned the descriptor *class*
    instead of an instance, silently disabling the positive-value
    validation; changed to ``Quantity()`` as in the other shapes.

    NOTE(review): draw() uses the side as the circumradius, so the drawn
    side differs from Side; kept as in the original.
    """

    Side = Quantity()

    def __init__(self, window):
        super(RegularOctagon, self).__init__("pink", "red")
        self.tuplePoints = ()

    def area(self):
        """Area of a regular octagon: 2 * (1 + sqrt(2)) * s**2."""
        return round(2 * (1 + math.sqrt(2)) * math.pow(self.Side, 2), 2)

    def perimeter(self):
        """Perimeter: 8 * side."""
        return round(8 * self.Side, 2)

    def draw(self):
        """Read the side from the form and render the octagon."""
        marginX = 300
        marginY = 150
        self.Side = float(self.entry_1.get())
        tab = list()
        for i in range(8):
            x = marginX + self.Side * math.cos(math.radians(360 / 8 * i))
            y = marginY + self.Side * math.sin(math.radians(360 / 8 * i))
            tab.append([x, y])
        self.tuplePoints = tuple(tab)
        window.initUI(self.tuplePoints, self.fill_colour, self.otuline_colour, self.area(), self.perimeter())

    def getData(self):
        """Open a Tk form asking for the side length."""
        print("tworzenie ośmiokąta foremnego")
        root = tkinter.Tk()
        self.label_1 = Label(root, text="Podaj długość boku: ")
        self.entry_1 = Entry(root)
        self.button_1 = Button(root, text="Rysuj!", command=self.draw)
        self.label_1.grid(row=0, column=0)
        self.entry_1.grid(row=0, column=1)
        self.button_1.grid(row=3, column=1)
# done
class Rhombus(ConvexPolygon):
    """Rhombus defined by a side length and an angle alpha given in degrees."""

    Side = Quantity()
    angle = Quantity()

    def __init__(self, window):
        super(Rhombus, self).__init__("#FF337A", "#33C1FF")
        self.tuplePoints = ()

    def area(self):
        """Area: side**2 * sin(alpha), alpha entered in degrees."""
        return round(math.pow(self.Side, 2) * math.sin(math.radians(self.angle)), 2)

    def perimeter(self):
        """Perimeter: 4 * side."""
        return round(4 * self.Side, 2)

    def draw(self):
        """Read side and angle from the form and render the rhombus.

        Fix applied: the angle is entered in degrees (area() already
        converts with math.radians, as does Parallelogram.draw), but the
        original passed the raw degree value straight to cos/sin here,
        producing wrong vertex offsets.
        """
        marginX = 200
        marginY = 150
        self.Side = float(self.entry_1.get())
        self.angle = float(self.entry_2.get())
        cos = math.cos(math.radians(self.angle)) * self.Side
        sin = math.sin(math.radians(self.angle)) * self.Side
        self.tuplePoints = (marginX, marginY,
                            marginX + self.Side, marginY,
                            marginX + self.Side + cos, marginY + sin,
                            marginX + cos, marginY + sin)
        window.initUI(self.tuplePoints, self.fill_colour, self.otuline_colour, self.area(), self.perimeter())

    def getData(self):
        """Open a Tk form asking for the side and the angle."""
        print("tworzenie rąbu")
        root = tkinter.Tk()
        self.label_1 = Label(root, text="Podaj długość a: ")
        self.entry_1 = Entry(root)
        self.label_2 = Label(root, text="Podaj kąt α:: ")
        self.entry_2 = Entry(root)
        self.button_1 = Button(root, text="Rysuj!", command=self.draw)
        self.label_1.grid(row=0, column=0)
        self.entry_1.grid(row=0, column=1)
        self.label_2.grid(row=1, column=0)
        self.entry_2.grid(row=1, column=1)
        self.button_1.grid(row=3, column=1)
# done
class Square(Rhombus):
    """Square: a rhombus specialisation with a single side-length input."""

    Side = Quantity()

    def __init__(self, window):
        super(Square, self).__init__(window)

    def area(self):
        """Area: side squared."""
        return round(pow(self.Side, 2), 2)

    def perimeter(self):
        """Perimeter: four times the side."""
        return round(4 * self.Side, 2)

    def draw(self):
        """Read the side length from the form and render the square."""
        left = 150
        bottom = 250
        self.Side = float(self.entry_1.get())
        right = self.Side + left
        top = bottom - self.Side
        # closed polygon: bottom-left -> bottom-right -> top-right ->
        # top-left -> back to bottom-left
        self.tuplePoints = (left, bottom,
                            right, bottom,
                            right, top,
                            left, top,
                            left, bottom)
        window.initUI(self.tuplePoints, self.fill_colour, self.otuline_colour, self.area(), self.perimeter())

    def getData(self):
        """Open a Tk form asking for the side length."""
        print("Tworzenie kwadratu")
        root = tkinter.Tk()
        self.label_1 = Label(root, text="Podaj długość boku a: ")
        self.entry_1 = Entry(root)
        self.button_1 = Button(root, text="Rysuj!", command=self.draw)
        self.label_1.grid(row=0, column=0)
        self.entry_1.grid(row=0, column=1)
        self.button_1.grid(row=3, column=1)
# done
# Parallelogram
class Parallelogram(Rhombus):
    """Parallelogram from two sides and the angle between them (degrees)."""

    SideA = Quantity()
    SideB = Quantity()
    angle = Quantity()

    def __init__(self, window):
        super(Parallelogram, self).__init__(window)

    def perimeter(self):
        """Perimeter: 2a + 2b."""
        return 2 * self.SideA + 2 * self.SideB

    def area(self):
        """Area: a * b * sin(alpha), alpha entered in degrees."""
        return round(self.SideA * self.SideB * math.sin(math.radians(self.angle)), 2)

    def draw(self):
        """Read a, b and alpha from the form and render the parallelogram.

        NOTE(review): the x-coordinates of the two right-hand vertices add
        SideB on top of SideA (e.g. marginX + SideA + SideB), which does
        not match the usual parallelogram construction from two sides and
        an angle - verify the intended geometry before relying on the
        drawn shape.
        """
        marginX = 50
        marginY = 100
        self.SideA = float(self.entry_1.get())
        self.SideB = float(self.entry_2.get())
        self.angle = float(self.entry_3.get())
        a_cos = math.cos(math.radians(self.angle)) * self.SideA
        b_sin = math.sin(math.radians(self.angle)) * self.SideB
        self.tuplePoints = (marginX, marginY,
                            marginX + self.SideA + self.SideB, marginY,
                            marginX + self.SideA + a_cos + self.SideB, marginY + b_sin,
                            marginX + a_cos, marginY + b_sin)
        window.initUI(self.tuplePoints, self.fill_colour, self.otuline_colour, self.area(), self.perimeter())

    def getData(self):
        """Open a Tk form asking for both sides and the angle."""
        print("tworzenie równoległoboku")
        root = tkinter.Tk()
        self.label_1 = Label(root, text="Podaj długość boku a: ")
        self.entry_1 = Entry(root)
        self.label_2 = Label(root, text="Podaj długość boku b: ")
        self.entry_2 = Entry(root)
        self.label_3 = Label(root, text="Podaj kąt α")
        self.entry_3 = Entry(root)
        self.button_1 = Button(root, text="Rysuj!", command=self.draw)
        self.label_1.grid(row=0, column=0)
        self.entry_1.grid(row=0, column=1)
        self.label_2.grid(row=1, column=0)
        self.entry_2.grid(row=1, column=1)
        self.label_3.grid(row=2, column=0)
        self.entry_3.grid(row=2, column=1)
        self.button_1.grid(row=3, column=1)
# done
# Rectangle (the original comment said "kwadrat"/square, which is wrong)
class Rectangle(Parallelogram):
SideA = Quantity()
SideB = Quantity()
    def __init__(self, window):
        """Create a rectangle form; colours are inherited via Parallelogram."""
        super(Rectangle, self).__init__(window)
def perimeter(self):
    """Return the perimeter of the rectangle: 2a + 2b."""
    return self.SideA * 2 + self.SideB * 2
def area(self):
    """Return the area of the rectangle: a * b."""
    side_a = self.SideA
    side_b = self.SideB
    return side_a * side_b
def draw(self):
marginX = 150
marginY = 100
self.SideA | |
# coding:utf-8
'''
@author = super_fazai
@File : sql_utils.py
@Time : 2016/7/14 14:36
@connect : <EMAIL>
'''
"""
sql utils
"""
import better_exceptions
better_exceptions.hook()
from gc import collect
import sqlite3
# from pymssql import *
from pymssql import connect as pymssql_connect
from pymssql import IntegrityError
from time import sleep
from redis import (
ConnectionPool,
StrictRedis,)
from pickle import dumps as pickle_dumps
from .common_utils import _print
# Public API of this module.
# NOTE(review): 'BaseSqlite3Cli' and 'create_dcs_tasks_in_redis' are exported
# but not defined in this part of the file -- presumably defined further down;
# verify they exist.
__all__ = [
    'BaseSqlServer',  # client for sql_server
    'BaseRedisCli',  # client for redis
    'BaseSqlite3Cli',  # client for sqlite3
    'pretty_table',  # pretty-print a table
    'create_dcs_tasks_in_redis',  # build distributed tasks from target_list and insert them into redis
]
class BaseSqlServer(object):
    """Thin SQL Server client built on pymssql.

    All helpers are defensive: they never raise to the caller but report
    failure through their return value (``None`` for selects, ``False`` for
    writes) and log the exception.  The update helpers retry
    ``dead_lock_retry_num`` times when a dead lock is detected.
    """

    def __init__(self, host, user, passwd, db, port):
        """Store the connection parameters and open the connection.

        :param host: server host name or address
        :param user: login name
        :param passwd: login password
        :param db: database to use
        :param port: TCP port of the server
        """
        super(BaseSqlServer, self).__init__()
        # Dead-lock retry count used by the _update_table* helpers.
        self.dead_lock_retry_num = 3
        self.host = host
        self.user = user
        # BUG FIX: this line previously read ``self.passwd = <PASSWORD>``
        # (a redaction placeholder that is a syntax error); restore the
        # obvious assignment of the constructor argument.
        self.passwd = passwd
        self.db = db
        self.port = port
        self._init_conn()

    def _init_conn(self):
        """Open the pymssql connection and record the outcome.

        Sets ``self.is_connect_success``; every other helper checks that flag
        before touching ``self.conn``.
        """
        self.is_connect_success = True
        try:
            self.conn = pymssql_connect(
                host=self.host,
                user=self.user,
                password=self.passwd,
                database=self.db,
                port=self.port,
                charset='utf8')
        except Exception:
            self.is_connect_success = False
            print('数据库连接失败!!')

    def _select_table(self,
                      sql_str,
                      params=None,
                      lock_timeout=20000,
                      logger=None,):
        """Run a SELECT and return ``cursor.fetchall()``, or None on error.

        :param sql_str: SQL text, optionally with placeholders
        :param params: placeholder values (converted to a tuple if needed)
        :param lock_timeout: client lock timeout in milliseconds
        :param logger: optional logger handed through to ``_print``
        """
        res = None
        try:
            if not self.is_connect_success:
                raise AssertionError('sql_server连接失败! 执行操作终止!')
            else:
                pass
            cs = self.conn.cursor()
        except (AttributeError, AssertionError) as e:
            _print(
                msg='遇到错误:',
                logger=logger,
                log_level=2,
                exception=e,)
            return res

        try:
            # Dirty reads: do not block on concurrent writers.
            cs.execute('set tran isolation level read uncommitted;')
            # Give up waiting for locks after ``lock_timeout`` milliseconds.
            cs.execute('set lock_timeout {0};'.format(lock_timeout))
            if params is not None:
                if not isinstance(params, tuple):
                    params = tuple(params)
                cs.execute(sql_str, params)
            else:
                cs.execute(sql_str)
            res = cs.fetchall()
        except Exception as e:
            _print(
                msg='遇到错误:',
                logger=logger,
                log_level=2,
                exception=e,)
        finally:
            try:
                cs.close()
            except Exception:
                pass

        return res

    def _insert_into_table(self,
                           sql_str,
                           params: tuple,
                           repeat_insert_default_res: bool=None) -> bool:
        """Insert one row and commit.

        :param sql_str: INSERT statement with placeholders
        :param params: placeholder values (must be a tuple)
        :param repeat_insert_default_res: result returned on duplicate-key
            inserts; ``None`` means a duplicate counts as success (True)
        :return: True on success (or tolerated duplicate), False otherwise
        """
        _ = False
        try:
            if not self.is_connect_success:
                raise AssertionError('sql_server连接失败! 执行操作终止!')
            else:
                pass
            cs = self.conn.cursor()
        except (AttributeError, AssertionError) as e:
            _print(msg='遇到错误:', exception=e,)
            return _

        try:
            # Prefer to be the dead-lock victim rather than block others.
            cs.execute('set deadlock_priority low;')
            cs.execute(sql_str.encode('utf-8'), params)  # params must be a tuple
            self.conn.commit()
            print('[+] add to db!')
            _ = True
        except IntegrityError:
            print('重复插入...')
            if repeat_insert_default_res is None:
                _ = True
            else:
                _ = repeat_insert_default_res
        except Exception as e:
            print('-' * 9 + '| 修改信息失败, 未能将该页面信息存入到sqlserver中 |')
            _print(
                msg='遇到错误:',
                exception=e,)
        finally:
            try:
                cs.close()
            except Exception:
                pass

        return _

    def _insert_into_table_2(self,
                             sql_str,
                             params: tuple,
                             logger,
                             set_deadlock_priority_low=True,
                             repeat_insert_default_res: bool=None) -> bool:
        """Insert one row, logging through ``logger``.

        :param sql_str: INSERT statement with placeholders
        :param params: placeholder values (tuple); ``params[0]`` is used as
            the goods_id in log messages
        :param logger: logger receiving success/duplicate/error messages
        :param set_deadlock_priority_low: lower this session's dead-lock priority
        :param repeat_insert_default_res: result returned on duplicate-key
            inserts; ``None`` means a duplicate counts as success (True)
        :return: True on success (or tolerated duplicate), False otherwise
        """
        _ = False
        try:
            if not self.is_connect_success:
                raise AssertionError('sql_server连接失败! 执行操作终止!')
            else:
                pass
            cs = self.conn.cursor()
        except (AttributeError, AssertionError) as e:
            _print(
                msg='遇到错误:',
                logger=logger,
                log_level=2,
                exception=e, )
            return _

        try:
            if set_deadlock_priority_low:
                # Prefer to be the dead-lock victim rather than block others.
                cs.execute('set deadlock_priority low;')
            cs.execute(sql_str.encode('utf-8'), params)  # params must be a tuple
            self.conn.commit()
            logger.info('[+] add to db!')
            _ = True
        except IntegrityError:
            logger.info('重复插入goods_id[%s], 此处跳过!' % params[0])
            if repeat_insert_default_res is None:
                _ = True
            else:
                _ = repeat_insert_default_res
        except Exception:
            logger.error('| 修改信息失败, 未能将该页面信息存入到sqlserver中 | 出错goods_id: %s' % params[0], exc_info=True)
        finally:
            try:
                cs.close()
            except Exception:
                pass

        return _

    async def _insert_into_table_3(self,
                                   sql_str,
                                   params: tuple,
                                   logger,
                                   error_msg_dict=None,
                                   repeat_insert_default_res: bool=None) -> bool:
        """Async variant of ``_insert_into_table_2`` with custom log fields.

        ``error_msg_dict`` layout::

            {
                # duplicate insert
                'repeat_error': {
                    'field_name': 'field to name in the duplicate log line',
                    'field_value': 'its value',
                },
                # any other failure
                'other_error': [{
                    'field_name': 'field name',
                    'field_value': 'field value',
                }, ...]
            }

        :param sql_str: INSERT statement with placeholders
        :param params: placeholder values (tuple)
        :param logger: logger receiving the messages
        :param error_msg_dict: extra fields to embed in log records
        :param repeat_insert_default_res: result returned on duplicate-key
            inserts; ``None`` means a duplicate counts as success (True)
        :return: True on success (or tolerated duplicate), False otherwise
        :raises TypeError: if ``error_msg_dict`` is set but not a dict
        """
        _ = False
        try:
            if not self.is_connect_success:
                raise AssertionError('sql_server连接失败! 执行操作终止!')
            else:
                pass
            cs = self.conn.cursor()
        except (AttributeError, AssertionError) as e:
            _print(
                msg='遇到错误:',
                logger=logger,
                log_level=2,
                exception=e, )
            return _

        try:
            # Prefer to be the dead-lock victim rather than block others.
            cs.execute('set deadlock_priority low;')
            cs.execute(sql_str.encode('utf-8'), params)  # params must be a tuple
            self.conn.commit()
            logger.info('[+] add to db!')
            _ = True
        except IntegrityError:
            if repeat_insert_default_res is None:
                _ = True
            else:
                _ = repeat_insert_default_res
            if not error_msg_dict:
                logger.info('重复插入goods_id[%s], 此处跳过!' % params[0])
            else:
                if isinstance(error_msg_dict, dict):
                    msg = '重复插入{0}[{1}], 此处跳过!'.format(
                        error_msg_dict.get('repeat_error', {}).get('field_name', ''),
                        error_msg_dict.get('repeat_error', {}).get('field_value', '')
                    )
                    logger.info(msg)
                else:
                    raise TypeError('传入的error_msg_dict类型错误, 请核对需求参数!')
        except Exception:
            if not error_msg_dict:
                logger.error('| 修改信息失败, 未能将该页面信息存入到sqlserver中 | 出错goods_id: {0}'.format(params[0]), exc_info=True)
            else:
                if isinstance(error_msg_dict, dict):
                    msg = '| 修改信息失败, 未能将该页面信息存入到sqlserver中 | '
                    for item in error_msg_dict.get('other_error', []):
                        msg += '出错{0}: {1} '.format(
                            item.get('field_name', ''),
                            item.get('field_value', '')
                        )
                    logger.error(msg, exc_info=True)
                else:
                    raise TypeError('传入的error_msg_dict类型错误, 请核对需求参数!')
        finally:
            try:
                cs.close()
            except Exception:
                pass

        return _

    def _update_table(self, sql_str, params: tuple) -> bool:
        """Execute an UPDATE with dead-lock retry.

        :param sql_str: UPDATE statement with placeholders
        :param params: placeholder values (tuple)
        :return: True on success, False otherwise
        """
        ERROR_NUMBER = 0
        RETRY_NUM = self.dead_lock_retry_num  # dead-lock retries left
        _ = False
        try:
            if not self.is_connect_success:
                raise AssertionError('sql_server连接失败! 执行操作终止!')
            else:
                pass
            cs = self.conn.cursor()
        except (AttributeError, AssertionError) as e:
            _print(msg='遇到错误:', exception=e,)
            return _

        while RETRY_NUM > 0:
            try:
                # Prefer to be the dead-lock victim rather than block others.
                cs.execute('set deadlock_priority low;')
                cs.execute(sql_str, params)
                self.conn.commit()  # commit is required, otherwise nothing changes
                print('[+] add to db!')
                _ = True
                RETRY_NUM = 0
            except Exception as e:
                # pymssql attaches ``number`` only to server-side errors.
                ERROR_NUMBER = getattr(e, 'number', ERROR_NUMBER)
                # NOTE(review): SQL Server reports dead locks as error 1205
                # (1222 is lock time-out); 1025 looks wrong -- confirm.
                if ERROR_NUMBER == 1025:
                    print('遇到死锁!!进入等待...')
                    sleep(1)
                    RETRY_NUM -= 1
                else:
                    print('-' * 9 + '| 修改信息失败, 未能将该页面信息存入到sqlserver中 |')
                    print('--------------------| 错误如下: ', e)
                    RETRY_NUM = 0
            finally:
                # NOTE(review): the cursor is closed on every loop iteration,
                # so a dead-lock retry re-executes on a closed cursor --
                # confirm whether the retry path ever worked as intended.
                try:
                    cs.close()
                except Exception:
                    pass
        return _

    def _update_table_2(self, sql_str, params: tuple, logger) -> bool:
        """Execute an UPDATE with dead-lock retry, logging through ``logger``.

        :param sql_str: UPDATE statement with placeholders
        :param params: placeholder values (tuple); ``params[-1]`` is used as
            the goods_id in error messages
        :param logger: logger receiving success/error messages
        :return: True on success, False otherwise
        """
        ERROR_NUMBER = 0
        RETRY_NUM = self.dead_lock_retry_num  # dead-lock retries left
        _ = False
        try:
            if not self.is_connect_success:
                raise AssertionError('sql_server连接失败! 执行操作终止!')
            else:
                pass
            cs = self.conn.cursor()
        except (AttributeError, AssertionError) as e:
            _print(
                msg='遇到错误:',
                logger=logger,
                log_level=2,
                exception=e, )
            return _

        while RETRY_NUM > 0:
            try:
                # Prefer to be the dead-lock victim rather than block others.
                cs.execute('set deadlock_priority low;')
                cs.execute(sql_str, params)
                self.conn.commit()  # commit is required, otherwise nothing changes
                logger.info('[+] add to db!')
                _ = True
                RETRY_NUM = 0
            except Exception as e:
                # pymssql attaches ``number`` only to server-side errors.
                ERROR_NUMBER = getattr(e, 'number', ERROR_NUMBER)
                # NOTE(review): SQL Server dead-lock error number is 1205 --
                # 1025 looks wrong, confirm.
                if ERROR_NUMBER == 1025:
                    logger.error('遇到死锁!!进入等待...')
                    sleep(1)
                    RETRY_NUM -= 1
                else:
                    logger.error('| 修改信息失败, 未能将该页面信息存入到sqlserver中 出错goods_id: %s|' % params[-1])
                    logger.exception(e)
                    RETRY_NUM = 0
            finally:
                # NOTE(review): see _update_table -- cursor closed per
                # iteration, which defeats the retry; confirm.
                try:
                    cs.close()
                except Exception:
                    pass
        return _

    async def _update_table_3(self, sql_str, params: tuple, logger, error_msg_dict=None) -> bool:
        """Async UPDATE with dead-lock retry and customizable log fields.

        ``error_msg_dict`` layout::

            {
                # any other failure
                'other_error': [{
                    'field_name': 'field name',
                    'field_value': 'field value',
                }, ...]
            }

        :param sql_str: UPDATE statement with placeholders
        :param params: placeholder values (tuple)
        :param logger: logger receiving the messages
        :param error_msg_dict: extra fields to embed in log records
        :return: True on success, False otherwise
        :raises TypeError: if ``error_msg_dict`` is set but not a dict
        """
        ERROR_NUMBER = 0
        RETRY_NUM = self.dead_lock_retry_num  # dead-lock retries left
        _ = False
        try:
            if not self.is_connect_success:
                raise AssertionError('sql_server连接失败! 执行操作终止!')
            else:
                pass
            cs = self.conn.cursor()
        except (AttributeError, AssertionError) as e:
            _print(
                msg='遇到错误:',
                logger=logger,
                log_level=2,
                exception=e,)
            return _

        while RETRY_NUM > 0:
            try:
                # Prefer to be the dead-lock victim rather than block others.
                cs.execute('set deadlock_priority low;')
                cs.execute(sql_str, params)
                self.conn.commit()
                logger.info('[+] add to db!')
                _ = True
                RETRY_NUM = 0
            except Exception as e:
                # pymssql attaches ``number`` only to server-side errors.
                ERROR_NUMBER = getattr(e, 'number', ERROR_NUMBER)
                # NOTE(review): SQL Server dead-lock error number is 1205 --
                # 1025 looks wrong, confirm.
                if ERROR_NUMBER == 1025:
                    sleep(1)
                    RETRY_NUM -= 1
                    logger.error('遇到死锁!!进入等待...')
                else:
                    RETRY_NUM = 0
                    if not error_msg_dict:
                        logger.error('-' * 9 + '| 修改信息失败, 未能将该页面信息存入到sqlserver中 | 出错goods_id: {0}'.format(params[-1]), exc_info=True)
                    else:
                        if isinstance(error_msg_dict, dict):
                            msg = '-' * 9 + '| 修改信息失败, 未能将该页面信息存入到sqlserver中 | '
                            for item in error_msg_dict.get('other_error', []):
                                msg += '出错{0}: {1} '.format(
                                    item.get('field_name', ''),
                                    item.get('field_value', '')
                                )
                            logger.error(msg, exc_info=True)
                        else:
                            raise TypeError('传入的error_msg_dict类型错误, 请核对需求参数!')
            finally:
                # NOTE(review): see _update_table -- cursor closed per
                # iteration, which defeats the retry; confirm.
                try:
                    cs.close()
                except Exception:
                    pass
        return _

    def _delete_table(self, sql_str, params=None, lock_timeout=20000) -> bool:
        """Execute a DELETE (or other write) and commit.

        :param sql_str: SQL text with optional placeholders
        :param params: placeholder values (converted to a tuple if needed)
        :param lock_timeout: client lock timeout in milliseconds
        :return: True on success, False otherwise
        """
        _ = False
        try:
            if not self.is_connect_success:
                raise AssertionError('sql_server连接失败! 执行操作终止!')
            else:
                pass
            cs = self.conn.cursor()
        except (AttributeError, AssertionError) as e:
            _print(
                msg='遇到错误:',
                exception=e, )
            return _

        try:
            # Give up waiting for locks after ``lock_timeout`` milliseconds.
            cs.execute('set lock_timeout {0};'.format(lock_timeout))
            if params is not None:
                if not isinstance(params, tuple):
                    params = tuple(params)
                cs.execute(sql_str, params)
            else:
                cs.execute(sql_str)
            self.conn.commit()
            _ = True
        except Exception as e:
            _print(
                msg='遇到错误:',
                exception=e,)
        finally:
            try:
                cs.close()
            except Exception:
                pass

        return _

    def _get_one_select_cursor(self, sql_str, params=None, lock_timeout=20000):
        """Execute a SELECT and hand back the open cursor (for pretty_table).

        :param sql_str: SELECT statement with optional placeholders
        :param params: placeholder values (converted to a tuple if needed)
        :param lock_timeout: client lock timeout in milliseconds
        :return: None on failure, otherwise the cursor with pending results
        """
        cursor = None
        try:
            if not self.is_connect_success:
                raise AssertionError('sql_server连接失败! 执行操作终止!')
            else:
                pass
            cursor = self.conn.cursor()
        except (AttributeError, AssertionError) as e:
            _print(
                msg='遇到错误:',
                exception=e,)
            return cursor

        try:
            # Give up waiting for locks after ``lock_timeout`` milliseconds.
            cursor.execute('set lock_timeout {0};'.format(lock_timeout))
            if params is not None:
                if not isinstance(params, tuple):
                    params = tuple(params)
                cursor.execute(sql_str, params)
            else:
                cursor.execute(sql_str)
        except Exception as e:
            _print(
                msg='遇到错误:',
                exception=e,)
            cursor = None
        # BUG FIX: the original contained two consecutive ``return cursor``
        # statements; the unreachable duplicate was removed.
        return cursor

    def __del__(self):
        """Close the connection if it was opened and drop our references."""
        try:
            if self.is_connect_success:
                # Only a successful connect leaves something to release.
                self.conn.close()
            else:
                pass
            del self.is_connect_success
            del self.host
            del self.user
            del self.passwd
            del self.db
            del self.port
        except Exception:
            pass
        try:
            collect()
        except Exception:
            pass
def pretty_table(cursor):
    """Pretty-print the result set behind a database cursor (SELECT only).

    :param cursor: DB-API cursor on which a query has already been executed
    :return: None
    """
    from prettytable import from_db_cursor

    table = from_db_cursor(cursor=cursor)  # build a PrettyTable from the rows
    table.align = 'l'  # left-align every column
    print(table)
    return
class BaseRedisCli():
'''redis客户端'''
def __init__(self, host='127.0.0.1', port=6379, db=0):
    """
    :param host: redis server address (default localhost)
    :param port: redis server port
    :param db: redis logical database number
    """
    # One connection pool per client instance; StrictRedis borrows
    # connections from it for each command.
    self.pool = ConnectionPool(
        host=host,
        port=port,
        db=db,)
    self.redis_cli = StrictRedis(connection_pool=self.pool)
def set(self, name, value):
    """Write or overwrite the value stored at ``name``."""
    client = self.redis_cli
    return client.set(name=name, value=value)
def get(self, name):
    """Read the value stored at ``name``."""
    client = self.redis_cli
    return client.get(name=name)
def delete(self, name):
    """Delete the key ``name``."""
    client = self.redis_cli
    return client.delete(name)
def __del__(self):
try:
del self.pool
del self.redis_cli
except:
| |
import sqlalchemy
from uuid import UUID
import json
from datetime import datetime
from systemcheck.models.meta.systemcheck_choices import InclusionChoice, ComponentChoice, OperatorChoice
from inspect import isclass
from sqlalchemy import ForeignKey, Table, DateTime, Integer, CHAR, inspect, String, Date, Time
from sqlalchemy.orm import relationship, class_mapper, ColumnProperty, RelationshipProperty
from sqlalchemy.orm.collections import InstrumentedDict, InstrumentedList, InstrumentedSet
from sqlalchemy import event, ForeignKeyConstraint, UniqueConstraint
from sqlalchemy.types import TypeDecorator
from sqlalchemy.sql import functions
from sqlalchemy.ext import declarative
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy_utils.types import UUIDType, ChoiceType
from typing import Any, Union, List
import keyring
import uuid
from pprint import pprint
import systemcheck
from functools import reduce
from typing import Union
import yaml
import logging
logger = logging.getLogger(__name__)
def bool_or_str(type_):
    # NOTE(review): ``is_string`` and ``is_boolean`` are neither defined nor
    # imported in this module as shown -- calling this raises NameError
    # unless they are provided elsewhere (they look like sqlalchemy_utils
    # helpers).  TODO confirm and add the missing import.
    return is_string(type_) or is_boolean(type_)
def deepgetattr(obj, attr):
    """Recurses through an attribute chain to get the ultimate value.

    ``attr`` is a dotted path, e.g. ``"a.b.c"`` resolves ``obj.a.b.c``.
    """
    target = obj
    for part in attr.split('.'):
        target = getattr(target, part)
    return target
class Column(sqlalchemy.Column):
    """sqlalchemy.Column subclass carrying UI metadata for dynamic Qt forms.

    The following keyword arguments are lifted out of ``**kwargs`` and folded
    into the standard ``info`` dict:

    * ``choices`` -- possible values
    * ``qt_label`` -- label text displayed in the UI
    * ``qt_description`` -- text shown when hovering over the field
    * ``qt_show`` -- whether the column appears in generated UIs
    * ``qt_enabled`` -- whether the generated widget is enabled
    * ``rel_class`` -- related class, for relationship columns
    """

    # (info key, default) pairs moved from **kwargs into kwargs['info'].
    _UI_INFO_DEFAULTS = (
        ('choices', None),
        ('qt_label', ''),
        ('qt_description', ''),
        ('qt_show', True),
        ('qt_enabled', True),
        ('rel_class', None),
    )

    def __init__(self, *args, **kwargs):
        kwargs.setdefault('info', {})
        info = kwargs['info']
        for key, default in self._UI_INFO_DEFAULTS:
            info.setdefault(key, kwargs.pop(key, default))
        sqlalchemy.Column.__init__(self, *args, **kwargs)

    @property
    def choices(self):
        """The configured choices; an empty list when none were supplied."""
        return self.info.get('choices', [])
class QtModelMixin(object):
    """Mixin that adapts a SQLAlchemy model to Qt's row/column item API.

    ``__qtmap__`` lists the Column objects exposed to Qt, in display order.
    Tree navigation assumes the concrete model defines ``children`` and
    ``parent_node`` relationships; persistence goes through the global
    ``systemcheck.session.SESSION``.
    """
    __qtmap__ = []   # Columns shown in Qt views, in column order
    __icon__ = None  # optional icon for tree items

    def _qt_column_count(self) -> int:
        """ Return the number of columns """
        column_count = len(self.__qtmap__)
        return column_count

    def _qt_colnr_is_valid(self, colnr: int) -> bool:
        """ Return True when ``colnr`` addresses a column of __qtmap__ """
        column_count = self._qt_column_count()
        return 0 <= colnr < column_count

    def _qt_set_value_by_colnr(self, colnr: int, value: object):
        """ Set the value of a column by its visible number and commit.

        :param colnr: The Qt column number
        :param value: The value to be set in the column
        :return: True when the column number was valid, False otherwise
        """
        # TODO: The implementation here is quite ugly. Need to find a better way to do this, but for now it's acceptable
        if self._qt_colnr_is_valid(colnr):
            column = self.__qtmap__[colnr]
            setattr(self, column.name, value)
            self._commit()
            return True
        else:
            return False

    def _qt_headers(self):
        """ Return the qt_label of every mapped column, in column order """
        # BUG FIX: this used to read ``type(column).info`` -- the *class*
        # attribute -- instead of the instance's info dict, which raised
        # AttributeError; _qt_header() below shows the intended access.
        headers = []
        for column in self.__qtmap__:
            headers.append(column.info.get('qt_label'))
        return headers

    def _qt_header(self, column):
        """ Return one column's qt_label, or False for an invalid number """
        if self._qt_colnr_is_valid(column):
            col = self.__qtmap__[column]
            header = col.info.get('qt_label')
            return header
        return False

    def _qt_data_colnr(self, colnr: int) -> object:
        """ Get the value of a column by its visible number.

        :param colnr: The Qt column number
        :return: the attribute value, or False for an invalid column number
        """
        # TODO: The implementation here is quite ugly. Need to find a better way to do this, but for now it's acceptable
        if self._qt_colnr_is_valid(colnr):
            try:
                column = self.__qtmap__[colnr]
                value = getattr(self, column.name)
                return value
            except Exception as err:
                pprint(err)
        else:
            return False

    def _qt_columns(self) -> List[Any]:
        """ Return the list of Qt-visible columns (__qtmap__) """
        return self.__qtmap__

    def _dump(self, _indent: int=0) -> str:
        """ Recursively return the structure of the node and all its children as text """
        return " " * _indent + repr(self) + \
               "\n" + \
               "".join([
                   c._dump(_indent + 1)
                   for c in self.children
               ])

    def _qt_child(self, childnr: int) -> Any:
        """ Return the child object at a specific position, or False """
        if self._qt_child_count() > 0:
            if childnr >= 0 and childnr < self._qt_child_count():
                return self.children[childnr]
        return False

    def _qt_child_count(self) -> int:
        """ Return the number of children """
        return len(self.children)

    def _qt_insert_child(self, position: int, node) -> bool:
        """ Insert ``node`` into the children at ``position`` and commit """
        self.children.insert(position, node)
        self._commit()
        return True

    def _qt_row(self):
        """ Return this node's index within its parent (None at the root) """
        if self.parent_node is not None:
            return self.parent_node.children.index(self)

    def _qt_remove_child(self, position: int) -> bool:
        """ Remove a child item at a particular position

        :param position: The position within the list of children
        """
        if 0 <= position < self._qt_child_count():
            child = self._qt_child(position)
            session = inspect(child).session
            # Since we are using SQLAlchemy, we can't simply delete objects. If an object is part of a change that was
            # not committed yet, we need to use 'Session.expunge()' instead of 'Session.delete()'.
            if child in session.new:
                session.expunge(child)
            else:
                session.delete(child)
            session.commit()
            return True

    def _commit(self):
        """ Commit the global systemcheck session """
        session = systemcheck.session.SESSION
        session.commit()

    def _flush(self):
        """ Flush the global systemcheck session """
        session = systemcheck.session.SESSION
        session.flush()
class RestrictionsMixin:
    # One restriction row: include/exclude a named component using an
    # operator over the [low, high] range.  Choice lists come from the
    # systemcheck choice enums; the qt_* metadata drives generated forms.
    inclusion = Column(String,
                       name='inclusion',
                       qt_label='Incl./Excl.',
                       default=InclusionChoice.INCLUDE, choices=InclusionChoice.CHOICES)
    component = Column(String, name='component', qt_label='Component for restrictions', choices=ComponentChoice.CHOICES)
    component_name = Column(String, name='component_name', qt_label='Component Name')
    operator = Column(String, name='operator', qt_label='Restriction', choices=OperatorChoice.CHOICES)
    low = Column(String, name='low', qt_label='Lower', qt_description='Lower range value')
    high = Column(String, name='high', qt_label='High', qt_description='Higher range value')
    # Column order as displayed in generated Qt views.
    __qtmap__ = [inclusion, component, component_name, operator, low, high]
class OperatorMixin:
    # Single comparison-operator column, hidden in generated UIs (qt_show=False).
    # NOTE(review): the column type is Integer while the default is
    # OperatorChoice.EQ -- confirm the choice values are integers.
    operator = Column(Integer,
                      nullable=False, name='operator',
                      default=OperatorChoice.EQ,
                      qt_description='Comparison Operator',
                      qt_label='Comparison Operator',
                      qt_show=False,
                      choices=OperatorChoice.CHOICES
                      )
class TableNameMixin(object):
    """ MixIn to automatically return the table name """
    @declarative.declared_attr
    def __tablename__(cls):
        # The table name defaults to the lower-cased class name.
        return cls.__name__.lower()
class BaseMixin(object):
    """ Standard mixin: naming conventions, relationship introspection and
    dict/JSON/YAML serialization for SQLAlchemy models. """

    @property
    def __relationships__(self):
        """
        Return a list of relationships name which are not as a backref
        name in model
        """
        back_ref_relationships = list()
        items = self.__mapper__.relationships.items()
        for (key, value) in items:
            if isinstance(value.backref, tuple):
                back_ref_relationships.append(key)
        return back_ref_relationships

    @declarative.declared_attr
    def __tablename__(cls):
        # Table name defaults to the lower-cased class name.
        return cls.__name__.lower()

    @declarative.declared_attr
    def saClassName(cls):
        # Exposed alongside __tablename__ for logging/serialization.
        return cls.__name__.lower()

    def __iter__(self):
        # BUG FIX: __iter__ must return an iterator; ``dict.items()`` is only
        # an iterable view, so ``iter(instance)`` used to raise TypeError.
        return iter(self.to_dict().items())

    def to_dict(self, *args, backref: tuple=None, include_relationships=True, ignore_keys=True,
                ignore_attrs: Union[list, None]=None,
                override_default_ignore_attrs: bool=False,
                rel_ignore_list=None,
                ignore_parents=True, **kwargs):
        """ Convert the values of a SQLAlchemy ORM object into a dictionary

        :param backref: tuple (table object, primary key id) acting as a back
            reference to stop recursion between two related tables
        :param include_relationships: if False, only columns are included
        :param ignore_keys: if True, primary and foreign key values are omitted
        :param ignore_attrs: schema attributes that should be skipped
        :param override_default_ignore_attrs: if True, the default ignore list
            is not added to ``ignore_attrs``
        :param rel_ignore_list: currently unused; kept for interface
            compatibility
        :param ignore_parents: if True, 'parent_node' is not traversed -- the
            systemcheck relationships are bidirectional, so walking upwards
            would loop
        """
        logger.debug('Converting to dictionary: %s', self.saClassName)
        default_ignore_attrs = []
        if ignore_attrs is None:
            ignore_attrs = list(default_ignore_attrs)
        else:
            # BUG FIX: work on a copy so the caller's list is not mutated by
            # the extend/append calls below.
            ignore_attrs = list(ignore_attrs)
            if override_default_ignore_attrs is False:
                ignore_attrs.extend(default_ignore_attrs)
        if ignore_parents:
            ignore_attrs.append('parent_node')

        primary_keys = [item.name for item in inspect(self.__class__).primary_key]
        foreign_keys = [item.name
                        for item in inspect(self.__class__).columns
                        if len(item.foreign_keys) > 0]

        if ignore_keys:
            result = {str(column.key): getattr(self, attr)
                      for attr, column in self.__mapper__.c.items()
                      if str(column.key) not in primary_keys and column.key not in foreign_keys}
        else:
            result = {str(column.key): getattr(self, attr)
                      for attr, column in self.__mapper__.c.items()}

        if include_relationships:
            for attr, relation in self.__mapper__.relationships.items():
                # Avoid a recursive loop between two tables.
                if relation.key not in ignore_attrs:
                    if backref is not None:
                        if relation.table._is_join:
                            if relation.table.right == backref[0] or relation.table.left == backref[0]:
                                continue
                        elif backref[0] == relation.target:
                            if backref[1] == self.id:
                                continue
                    value = getattr(self, attr)
                    if value is None:
                        result[str(relation.key)] = None
                    elif isinstance(value.__class__, declarative.DeclarativeMeta):
                        # Scalar (one-to-one / many-to-one) related object.
                        result[str(relation.key)] = value.to_dict(backref=(self.__table__, self.id))
                    else:
                        # Collection of related objects.
                        result[str(relation.key)] = [i.to_dict(backref=(self.__table__, self.id))
                                                     for i in value]
        return result

    def to_json(self, *args, **kwargs):
        """ Serialize to_dict() as JSON; datetime and UUID values are encoded
        via extended_encoder. All arguments are forwarded to to_dict(). """
        def extended_encoder(x):
            if isinstance(x, datetime):
                return x.isoformat()
            if isinstance(x, UUID):
                return str(x)
        # BUG FIX: extended_encoder was defined but never handed to
        # json.dumps, so datetime/UUID values raised TypeError.
        return json.dumps(self.to_dict(*args, **kwargs), default=extended_encoder)

    def to_yaml(self, *args, **kwargs):
        """ Converts the SQLAlchemy object to yaml
        """
        return yaml.dump(self.to_dict(*args, **kwargs))
class StandardAbapAuthSelectionOptionMixin:
    """ Columns modeling one ABAP-style selection option row
    (SIGN / OPTION / LOW / HIGH). """

    CHOICE_SIGN = [('I', 'Include'),
                   ('E', 'Exclude')]

    CHOICE_OPTION = [('EQ', 'Equal'),
                     ('NE', 'Not Equal'),
                     ('GT', 'Greater Than'),
                     ('GE', 'Greater or Equal'),
                     ('LT', 'Lower Than'),
                     ('LE', 'Lower or Equal')]

    SIGN = Column(ChoiceType(CHOICE_SIGN),
                  nullable = False,
                  default = 'I',
                  qt_label = 'Incl./Excl.',
                  qt_description = 'Should the specified items be included or excluded? Default is to include them',
                  choices = CHOICE_SIGN,
                  )

    # BUG FIX: OPTION was declared as ChoiceType(CHOICE_SIGN); its default
    # 'EQ' and ``choices=CHOICE_OPTION`` show CHOICE_OPTION is intended
    # ('EQ' is not a valid CHOICE_SIGN value).
    OPTION = Column(ChoiceType(CHOICE_OPTION),
                    nullable = False,
                    default = 'EQ',
                    qt_label = 'Sel. Option',
                    qt_description = 'Selection option',
                    choices = CHOICE_OPTION,
                    )

    LOW = Column(String(12),
                 nullable=False,
                 qt_label='Lower Range Value',
                 qt_description='Lower Range Value. Must be specified.',
                 )

    HIGH = Column(String(12),
                  nullable=True,
                  qt_label='Higher Range Value',
                  qt_description='Higher Range Value. Optional.',
                  )
class PasswordKeyringMixin():
keyring_uuid = Column(String(32), qt_show=False)
@hybrid_property
def password(self):
    # The password is never stored in the database: it is fetched from the
    # OS keyring under the 'systemcheck' namespace, keyed by this record's
    # keyring_uuid.
    namespace='systemcheck'
    keyring_user=self.keyring_uuid
    pwd = keyring.get_password(namespace, username=keyring_user)
    return pwd
@password.setter
def password(self, pwd):
if pwd:
namespace='systemcheck'
keyring_username=self.keyring_uuid
keyring.set_password(namespace, | |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import MagicMock, call
from mock.mock import patch
from unittest import TestCase
import sys
import os
import unittest
import upgradeHelper
import json
import copy
from StringIO import StringIO
class UpgradeCatalogFactoryMock(upgradeHelper.UpgradeCatalogFactory):
    """Catalog factory that loads its JSON from a string instead of a file."""

    def __init__(self, data):
        self._load(data)

    def _load(self, data):
        # Serve the in-memory catalog through a patched open() so the real
        # _load() believes it read a file.
        catalog_stream = StringIO(data)
        with patch("__builtin__.open", return_value=catalog_stream):
            super(UpgradeCatalogFactoryMock, self)._load("")
class TestUpgradeHelper(TestCase):
original_curl = None
out = None
catalog_from = "1.3"
catalog_to = "2.2"
catalog_cfg_type = "my type"
required_service = "TEST"
curl_response = "{}"
test_catalog = """{
"version": "1.0",
"stacks": [
{
"name": "HDP",
"old-version": "%s",
"target-version": "%s",
"options": {
"config-types": {
"%s": {
"merged-copy": "yes"
}
}
},
"properties": {
"%s": {
"my property": {
"value": "my value",
"required-services": [\"%s\"]
}
}
},
"property-mapping": {
"my replace property": "my property 2"
}
}
]
}
"""
def setUp(self):
    """Fill the catalog template and stub out curl, logging and stdout."""
    # replace original curl call to mock
    self.test_catalog = self.test_catalog % (self.catalog_from, self.catalog_to,
                                             self.catalog_cfg_type, self.catalog_cfg_type,
                                             self.required_service)
    self.original_curl = upgradeHelper.curl
    upgradeHelper.curl = self.magic_curl
    # mock logging methods
    upgradeHelper.logging.getLogger = MagicMock()
    upgradeHelper.logging.FileHandler = MagicMock()
    # Capture everything the helper prints.
    self.out = StringIO()
    sys.stdout = self.out
def magic_curl(self, *args, **kwargs):
    """Stand-in for upgradeHelper.curl: serve curl_response once, then '{}'."""
    response = self.curl_response
    self.curl_response = "{}"
    wants_json = kwargs.get("parse") == True
    if isinstance(response, str) and wants_json:
        response = json.loads(response)
    return response
def tearDown(self):
    # Restore the real stdout that setUp() replaced with a StringIO.
    sys.stdout = sys.__stdout__
@patch("optparse.OptionParser")
@patch("upgradeHelper.modify_configs")
@patch("__builtin__.open")
def test_ParseOptions(self, open_mock, modify_action_mock, option_parser_mock):
class options(object):
user = "test_user"
hostname = "127.0.0.1"
clustername = "test1"
password = "<PASSWORD>"
upgrade_json = "catalog_file"
from_stack = "0.0"
to_stack = "1.3"
logfile = "test.log"
report = "report.txt"
https = False
port = "8080"
warnings = []
printonly = False
args = ["update-configs"]
modify_action_mock.return_value = MagicMock()
test_mock = MagicMock()
test_mock.parse_args = lambda: (options, args)
option_parser_mock.return_value = test_mock
upgradeHelper.main()
self.assertEqual("8080", upgradeHelper.Options.API_PORT)
self.assertEqual("http", upgradeHelper.Options.API_PROTOCOL)
self.assertEqual(1, modify_action_mock.call_count)
self.assertEqual({"user": options.user, "pass": options.password}, upgradeHelper.Options.API_TOKENS)
self.assertEqual(options.clustername, upgradeHelper.Options.CLUSTER_NAME)
def test_is_services_exists(self):
    """is_services_exists() is True for a subset and for two empty sets."""
    old_services = upgradeHelper.Options.SERVICES
    upgradeHelper.Options.SERVICES = set(['TEST1', 'TEST2'])
    actual_result = upgradeHelper.is_services_exists(['TEST1'])

    # check for situation with two empty sets
    upgradeHelper.Options.SERVICES = set()
    actual_result_1 = upgradeHelper.is_services_exists([])

    # Restore the global before asserting.
    upgradeHelper.Options.SERVICES = old_services
    self.assertEqual(True, actual_result)
    self.assertEqual(True, actual_result_1)
@patch("__builtin__.open")
@patch.object(os.path, "isfile")
@patch("os.remove")
def test_write_mapping(self, remove_mock, isfile_mock, open_mock):
test_data = {
"test_field": "test_value"
}
test_result = json.dumps(test_data)
output = StringIO()
isfile_mock.return_value = True
open_mock.return_value = output
# execute testing function
upgradeHelper.write_mapping(test_data)
self.assertEquals(1, isfile_mock.call_count)
self.assertEquals(1, remove_mock.call_count)
self.assertEquals(1, open_mock.call_count)
# check for content
self.assertEquals(test_result, output.getvalue())
@patch("__builtin__.open")
@patch.object(os.path, "isfile")
def test_read_mapping(self, isfile_mock, open_mock):
test_data = {
"test_field": "test_value"
}
test_result = json.dumps(test_data)
isfile_mock.return_value = True
output = StringIO(test_result)
open_mock.return_value = output
# execute testing function
actual_mapping = upgradeHelper.read_mapping()
self.assertEquals(1, isfile_mock.call_count)
self.assertEquals(1, open_mock.call_count)
self.assertEquals(test_data, actual_mapping)
@patch.object(upgradeHelper, "curl")
@patch.object(upgradeHelper, "write_mapping")
def test_get_mr1_mapping(self, write_mapping_mock, curl_mock):
return_data = [
{
"host_components": [ # MAPREDUCE_CLIENT
{
"HostRoles": {
"host_name": "test.host.vm"
}
}
]
},
{
"host_components": [ # JOBTRACKER
{
"HostRoles": {
"host_name": "test1.host.vm"
}
}
]
},
{
"host_components": [ # TASKTRACKER
{
"HostRoles": {
"host_name": "test2.host.vm"
}
}
]
},
{
"host_components": [ # HISTORYSERVER
{
"HostRoles": {
"host_name": "test3.host.vm"
}
}
]
}
]
expect_data = {
"MAPREDUCE_CLIENT": ["test.host.vm"],
"JOBTRACKER": ["test1.host.vm"],
"TASKTRACKER": ["test2.host.vm"],
"HISTORYSERVER": ["test3.host.vm"]
}
tricky_mock = MagicMock(side_effect=return_data)
curl_mock.side_effect = tricky_mock
# execute testing function
upgradeHelper.get_mr1_mapping()
self.assertEquals(write_mapping_mock.call_count, 1)
self.assertEquals(expect_data, write_mapping_mock.call_args[0][0])
@patch.object(upgradeHelper, "get_choice_string_input")
def test_get_YN_input(self, get_choice_string_input_mock):
yes = set(['yes', 'ye', 'y'])
no = set(['no', 'n'])
prompt = "test prompt"
default = "default value"
# execute testing function
upgradeHelper.get_YN_input(prompt, default)
expect_args = (prompt, default, yes, no)
self.assertEquals(expect_args, get_choice_string_input_mock.call_args[0])
@patch("__builtin__.raw_input")
def test_get_choice_string_input(self, raw_input_mock):
yes = set(['yes', 'ye', 'y'])
no = set(['no', 'n'])
input_answers = ["yes", "no", ""]
tricky_mock = MagicMock(side_effect=input_answers)
raw_input_mock.side_effect = tricky_mock
default = "default value"
expect_result = [True, False, default]
actual_result = []
for i in range(0, len(input_answers)):
actual_result.append(upgradeHelper.get_choice_string_input("test prompt", default, yes, no))
self.assertEquals(expect_result, actual_result)
@patch.object(upgradeHelper, "get_YN_input")
@patch.object(upgradeHelper, "read_mapping")
@patch.object(upgradeHelper, "curl")
def test_delete_mr(self, curl_mock, read_mapping_mock, get_YN_mock):
COMPONENT_URL_FORMAT = upgradeHelper.Options.CLUSTER_URL + '/hosts/%s/host_components/%s'
SERVICE_URL_FORMAT = upgradeHelper.Options.CLUSTER_URL + '/services/MAPREDUCE'
NON_CLIENTS = ["JOBTRACKER", "TASKTRACKER", "HISTORYSERVER"]
PUT_IN_DISABLED = {
"HostRoles": {
"state": "DISABLED"
}
}
mr_mapping = {
"MAPREDUCE_CLIENT": ["test.host.vm"],
"JOBTRACKER": ["test1.host.vm"],
"TASKTRACKER": ["test2.host.vm"],
"HISTORYSERVER": ["test3.host.vm"]
}
expected_curl_exec_args = []
for key, hosts in mr_mapping.items():
if key in NON_CLIENTS:
for host in hosts:
expected_curl_exec_args.append(
[
(COMPONENT_URL_FORMAT % (host, key),),
{
"request_type": "PUT",
"data": PUT_IN_DISABLED,
"validate": True
}
]
)
expected_curl_exec_args.append(
[
(SERVICE_URL_FORMAT,),
{
"request_type": "DELETE",
"validate": True
}
]
)
get_YN_mock.return_value = True
read_mapping_mock.return_value = mr_mapping
# execute testing function
upgradeHelper.delete_mr()
self.assertEqual(expected_curl_exec_args, curl_mock.call_args_list)
pass
@patch.object(upgradeHelper, "curl")
def test_get_cluster_stackname(self, curl_mock):
expected_result = "test version"
actual_result = ""
curl_mock.return_value = {
"Clusters": {
"version": expected_result
}
}
# execute testing function
actual_result = upgradeHelper.get_cluster_stackname()
self.assertEqual(expected_result, actual_result)
@patch.object(upgradeHelper, "curl")
def test_has_component_in_stack_def(self, curl_mock):
    """A curl call that succeeds means the component exists in the stack
    definition; a FatalException from curl means it does not."""
    # First call returns an empty (successful) body, second call raises.
    canned_responses = MagicMock(
        side_effect=["", upgradeHelper.FatalException(1, "some reason")])
    curl_mock.side_effect = canned_responses
    # execute testing function twice: once per canned response
    result_ok = upgradeHelper.has_component_in_stack_def("-", "", "")
    result_fail = upgradeHelper.has_component_in_stack_def("-", "", "")
    self.assertEqual(True, result_ok)
    self.assertEqual(False, result_fail)
@patch.object(upgradeHelper, "get_cluster_stackname")
@patch.object(upgradeHelper, "has_component_in_stack_def")
@patch.object(upgradeHelper, "read_mapping")
@patch.object(upgradeHelper, "curl")
def test_add_services(self, curl_mock, read_mapping_mock, has_component_mock, get_stack_name_mock):
    """add_services should POST each new service, then each of its components,
    then one host component per host taken from the old MR component layout."""
    host_mapping = {
        "MAPREDUCE_CLIENT": ["test.host.vm"],
        "JOBTRACKER": ["test1.host.vm"],
        "TASKTRACKER": ["test2.host.vm"],
        "HISTORYSERVER": ["test3.host.vm"]
    }
    SERVICE_URL_FORMAT = upgradeHelper.Options.CLUSTER_URL + '/services/{0}'
    COMPONENT_URL_FORMAT = SERVICE_URL_FORMAT + '/components/{1}'
    HOST_COMPONENT_URL_FORMAT = upgradeHelper.Options.CLUSTER_URL + '/hosts/{0}/host_components/{1}'
    # New services and the components each one brings in.
    service_comp = {
        "YARN": ["NODEMANAGER", "RESOURCEMANAGER", "YARN_CLIENT"],
        "MAPREDUCE2": ["HISTORYSERVER", "MAPREDUCE2_CLIENT"]}
    # Which old MR component each new component inherits its hosts from.
    new_old_host_map = {
        "NODEMANAGER": "TASKTRACKER",
        "HISTORYSERVER": "HISTORYSERVER",
        "RESOURCEMANAGER": "JOBTRACKER",
        "YARN_CLIENT": "MAPREDUCE_CLIENT",
        "MAPREDUCE2_CLIENT": "MAPREDUCE_CLIENT"}
    get_stack_name_mock.return_value = ""
    has_component_mock.return_value = False
    read_mapping_mock.return_value = host_mapping
    # Every expected call is a validated POST; only the URL varies.
    expected_curl_args = []

    def expect_post(url):
        expected_curl_args.append([
            (url,),
            {
                "validate": True,
                "request_type": "POST"
            }
        ])

    for service, components in service_comp.items():
        expect_post(SERVICE_URL_FORMAT.format(service))
        for component in components:
            expect_post(COMPONENT_URL_FORMAT.format(service, component))
            for host in host_mapping[new_old_host_map[component]]:
                expect_post(HOST_COMPONENT_URL_FORMAT.format(host, component))
    # execute testing function
    upgradeHelper.add_services()
    self.assertEqual(expected_curl_args, curl_mock.call_args_list)
@patch.object(upgradeHelper, "get_config_resp_all")
def test_coerce_tag(self, get_config_resp_all_mock):
    """A property mapped with coerce-to: yaml-array should land under its
    map-to name, rewritten as a yaml-style list literal."""
    test_catalog = """
    {
      "version": "1.0",
      "stacks": [
        {
          "name": "HDP",
          "old-version": "1.0",
          "target-version": "1.1",
          "options": {
            "config-types": {
              "test": {
                "merged-copy": "yes"
              }
            }
          },
          "properties": {
            "test": {
              "test": "host1.com"
            }
          },
          "property-mapping": {
            "test": {
              "map-to": "test-arr",
              "coerce-to": "yaml-array"
            }
          }
        }
      ]
    }
    """
    old_opt = upgradeHelper.Options.OPTIONS
    # A lambda is used only as a cheap attribute container for the fake CLI options.
    options = lambda: ""
    options.from_stack = "1.0"
    options.to_stack = "1.1"
    options.upgrade_json = ""
    upgradeHelper.Options.OPTIONS = options
    upgradeHelper.Options.SERVICES = [self.required_service]
    get_config_resp_all_mock.return_value = {
        "test": {
            "properties": {}
        }
    }
    try:
        ucf = UpgradeCatalogFactoryMock(test_catalog)
        scf = upgradeHelper.ServerConfigFactory()
        cfg = scf.get_config("test")
        ucfg = ucf.get_catalog("1.0", "1.1")
        cfg.merge(ucfg)
        scf.process_mapping_transformations(ucfg)
    finally:
        # Always restore the global options so a failure above cannot leak
        # state into other tests (the original only restored on success).
        upgradeHelper.Options.OPTIONS = old_opt
    self.assertEqual(True, "test-arr" in cfg.properties)
    self.assertEqual("['host1.com']", cfg.properties["test-arr"])
@patch.object(upgradeHelper, "get_config_resp_all")
def test_override_tag(self, get_config_resp_all_mock):
    """A catalog property declared with override: no must NOT replace a value
    that already exists on the server."""
    test_catalog = """
    {
      "version": "1.0",
      "stacks": [
        {
          "name": "HDP",
          "old-version": "1.0",
          "target-version": "1.1",
          "options": {
            "config-types": {
              "test": {
                "merged-copy": "yes"
              }
            }
          },
          "properties": {
            "test": {
              "test_property": {
                "value": "host1.com",
                "override": "no"
              }
            }
          },
          "property-mapping": {}
        }
      ]
    }
    """
    old_opt = upgradeHelper.Options.OPTIONS
    # A lambda is used only as a cheap attribute container for the fake CLI options.
    options = lambda: ""
    options.from_stack = "1.0"
    options.to_stack = "1.1"
    options.upgrade_json = ""
    upgradeHelper.Options.OPTIONS = options
    upgradeHelper.Options.SERVICES = [self.required_service]
    # The server already holds a value for test_property.
    get_config_resp_all_mock.return_value = {
        "test": {
            "properties": {
                "test_property": "test host"
            }
        }
    }
    try:
        ucf = UpgradeCatalogFactoryMock(test_catalog)
        scf = upgradeHelper.ServerConfigFactory()
        cfg = scf.get_config("test")
        ucfg = ucf.get_catalog("1.0", "1.1")
        cfg.merge(ucfg)
        scf.process_mapping_transformations(ucfg)
    finally:
        # Always restore the global options so a failure above cannot leak
        # state into other tests (the original only restored on success).
        upgradeHelper.Options.OPTIONS = old_opt
    self.assertEqual(True, "test_property" in cfg.properties)
    self.assertEqual("test host", cfg.properties["test_property"])
@patch.object(upgradeHelper, "get_config_resp_all")
def test_replace_tag(self, get_config_resp_all_mock):
    """A property mapped with replace-from/replace-to should land under its
    map-to name with the substring substituted."""
    test_catalog = """
    {
      "version": "1.0",
      "stacks": [
        {
          "name": "HDP",
          "old-version": "1.0",
          "target-version": "1.1",
          "options": {
            "config-types": {
              "test": {
                "merged-copy": "yes"
              }
            }
          },
          "properties": {
            "test": {
              "test": "host1.com"
            }
          },
          "property-mapping": {
            "test": {
              "map-to": "test-arr",
              "replace-from": "com",
              "replace-to": "org"
            }
          }
        }
      ]
    }
    """
    old_opt = upgradeHelper.Options.OPTIONS
    # A lambda is used only as a cheap attribute container for the fake CLI options.
    options = lambda: ""
    options.from_stack = "1.0"
    options.to_stack = "1.1"
    options.upgrade_json = ""
    upgradeHelper.Options.OPTIONS = options
    upgradeHelper.Options.SERVICES = [self.required_service]
    get_config_resp_all_mock.return_value = {
        "test": {
            "properties": {}
        }
    }
    try:
        ucf = UpgradeCatalogFactoryMock(test_catalog)
        scf = upgradeHelper.ServerConfigFactory()
        cfg = scf.get_config("test")
        ucfg = ucf.get_catalog("1.0", "1.1")
        cfg.merge(ucfg)
        scf.process_mapping_transformations(ucfg)
    finally:
        # Always restore the global options so a failure above cannot leak
        # state into other tests (the original only restored on success).
        upgradeHelper.Options.OPTIONS = old_opt
    self.assertEqual(True, "test-arr" in cfg.properties)
    self.assertEqual("host1.org", cfg.properties["test-arr"])
@patch.object(upgradeHelper, "curl")
@patch("time.time")
def test_update_config(self, time_mock, curl_mock):
time_pass = | |
0, -1, -1, -1, -1]
carelessness -1.4 0.4899 [-2, -1, -1, -2, -1, -2, -1, -1, -2, -1]
carelessnesses -1.6 1.11355 [-4, -2, -2, -3, -1, 0, -1, -1, -1, -1]
cares 2.0 0.7746 [2, 3, 1, 3, 1, 2, 2, 2, 3, 1]
caring 2.2 0.4 [2, 3, 2, 2, 2, 2, 2, 3, 2, 2]
casual 0.8 0.74833 [1, 1, 0, 1, 0, 2, 0, 1, 2, 0]
casually 0.7 1.00499 [1, 0, 0, 0, 1, 0, 0, 3, 2, 0]
casualty -2.4 0.91652 [-4, -3, -3, -2, -1, -2, -3, -1, -2, -3]
catastrophe -3.4 0.4899 [-3, -3, -3, -4, -4, -3, -3, -3, -4, -4]
catastrophic -2.2 2.22711 [-3, -2, -4, -4, -4, -3, -2, -4, 2, 2]
cautious -0.4 0.66332 [0, 1, -1, 0, 0, 0, -1, -1, -1, -1]
celebrate 2.7 1.00499 [4, 4, 3, 2, 4, 2, 2, 2, 3, 1]
celebrated 2.7 0.78102 [2, 3, 3, 2, 3, 4, 3, 3, 1, 3]
celebrates 2.7 0.64031 [2, 3, 3, 2, 2, 3, 3, 3, 4, 2]
celebrating 2.7 0.64031 [3, 3, 4, 2, 2, 2, 3, 3, 2, 3]
censor -2.0 1.34164 [0, -3, -2, -3, -3, 0, -4, -1, -1, -3]
censored -0.6 1.68523 [-1, -1, -1, 2, -3, -2, -1, -1, -1, 3]
censors -1.2 1.07703 [-1, 0, -3, 0, -1, 0, -1, -2, -1, -3]
certain 1.1 0.7 [1, 0, 2, 0, 2, 2, 1, 1, 1, 1]
certainly 1.4 1.0198 [3, 2, 0, 1, 3, 1, 0, 1, 1, 2]
certainties 0.9 1.44568 [0, -2, 4, 0, 1, 1, 1, 1, 2, 1]
certainty 1.0 0.89443 [2, 1, 0, 1, 0, 0, 2, 2, 2, 0]
chagrin -1.9 0.53852 [-1, -2, -3, -2, -1, -2, -2, -2, -2, -2]
chagrined -1.4 1.2 [-1, -2, 2, -1, -2, -2, -2, -2, -2, -2]
challenge 0.3 1.00499 [1, 0, -1, 1, 1, -1, 1, 0, 2, -1]
challenged -0.4 1.62481 [0, -2, 1, -1, -3, -1, 3, -1, 1, -1]
challenger 0.5 1.43178 [0, 0, 2, -1, -2, 1, 3, 0, 2, 0]
challengers 0.4 1.56205 [0, -2, -1, 1, 1, 2, 3, 2, -1, -1]
challenges 0.3 1.48661 [0, -1, 2, -1, -2, 0, 3, 0, 2, 0]
challenging 0.6 0.91652 [0, 0, 0, 1, 1, -1, 0, 2, 2, 1]
challengingly -0.6 1.68523 [0, -1, -2, 1, -3, 2, -2, -1, 2, -2]
champ 2.1 0.83066 [2, 2, 2, 3, 2, 3, 2, 0, 3, 2]
champac -0.2 0.6 [0, 0, -2, 0, 0, 0, 0, 0, 0, 0]
champagne 1.2 1.07703 [1, 2, 2, 3, 0, 2, 0, 0, 2, 0]
champagnes 0.5 0.92195 [0, 0, 0, 0, 0, 1, 1, 3, 0, 0]
champaign 0.2 0.6 [0, 0, 0, 0, 2, 0, 0, 0, 0, 0]
champaigns 0.5 0.67082 [1, 0, 0, 0, 0, 0, 0, 1, 2, 1]
champaks -0.2 0.6 [0, 0, 0, 0, 0, 0, -2, 0, 0, 0]
champed 1.0 0.63246 [1, 1, 2, 1, 1, 2, 1, 0, 0, 1]
champer -0.1 0.53852 [0, -1, 1, 0, 0, 0, 0, -1, 0, 0]
champers 0.5 0.67082 [1, 0, 0, 0, 0, 0, 0, 1, 1, 2]
champerties -0.1 0.83066 [0, -1, 1, 1, 0, 0, 0, 0, -2, 0]
champertous 0.3 0.78102 [0, 0, 0, 1, -1, 2, 1, 0, 0, 0]
champerty -0.2 1.32665 [-2, -1, 0, -1, 0, 0, 0, -2, 2, 2]
champignon 0.4 0.8 [0, 0, 0, 0, 0, 2, 0, 2, 0, 0]
champignons 0.2 0.6 [0, 2, 0, 0, 0, 0, 0, 0, 0, 0]
champing 0.7 1.34536 [0, 2, 0, 3, 1, 1, 2, 0, -2, 0]
champion 2.9 0.83066 [3, 2, 3, 4, 4, 3, 2, 2, 4, 2]
championed 1.2 1.53623 [2, 1, 3, 1, 1, -3, 2, 2, 1, 2]
championing 1.8 0.9798 [1, 3, 2, 0, 3, 1, 2, 2, 1, 3]
champions 2.4 1.42829 [4, 0, 0, 3, 1, 3, 4, 3, 3, 3]
championship 1.9 1.04403 [3, 1, 1, 3, 2, 1, 3, 3, 0, 2]
championships 2.2 0.74833 [2, 2, 1, 2, 3, 2, 4, 2, 2, 2]
champs 1.8 0.4 [2, 2, 2, 2, 1, 2, 1, 2, 2, 2]
champy 1.0 1.0 [3, 0, 0, 0, 0, 2, 1, 2, 1, 1]
chance 1.0 0.7746 [1, 1, 0, 0, 0, 2, 1, 2, 1, 2]
chances 0.8 0.4 [0, 1, 1, 0, 1, 1, 1, 1, 1, 1]
chaos -2.7 0.9 [-2, -2, -3, -1, -4, -3, -3, -2, -3, -4]
chaotic -2.2 1.4 [-3, -2, -1, -2, -3, 1, -2, -2, -4, -4]
charged -0.8 0.87178 [-1, -2, -2, -1, -1, 0, -1, 1, 0, -1]
charges -1.1 0.7 [-2, -2, -2, -1, -1, 0, -1, -1, 0, -1]
charitable 1.7 0.64031 [1, 2, 1, 2, 2, 1, 2, 1, 3, 2]
charitableness 1.9 0.9434 [3, 1, 1, 3, 1, 3, 3, 2, 1, 1]
charitablenesses 1.6 1.74356 [2, 2, 3, 4, 1, -1, -2, 3, 2, 2]
charitably 1.4 0.66332 [1, 2, 1, 2, 2, 1, 0, 1, 2, 2]
charities 2.2 0.6 [3, 3, 2, 2, 1, 2, 2, 3, 2, 2]
charity 1.8 0.87178 [1, 3, 2, 2, 2, 1, 2, 0, 2, 3]
charm 1.7 0.78102 [3, 1, 1, 3, 2, 2, 1, 1, 1, 2]
charmed 2.0 0.63246 [3, 1, 2, 2, 2, 3, 1, 2, 2, 2]
charmer 1.9 0.53852 [3, 2, 2, 2, 2, 2, 1, 1, 2, 2]
charmers 2.1 0.83066 [2, 1, 2, 2, 4, 3, 2, 1, 2, 2]
charmeuse 0.3 0.78102 [0, 0, 0, 1, 0, 2, 1, 0, -1, 0]
charmeuses 0.4 0.66332 [0, 0, 1, 0, 1, 0, 0, 0, 0, 2]
charming 2.8 0.4 [3, 3, 3, 3, 3, 3, 2, 3, 2, 3]
charminger 1.5 0.67082 [2, 3, 1, 2, 1, 1, 2, 1, 1, 1]
charmingest 2.4 0.66332 [2, 3, 3, 1, 3, 2, 3, 3, 2, 2]
charmingly 2.2 0.87178 [2, 2, 2, 1, 2, 2, 3, 3, 4, 1]
charmless -1.8 0.87178 [-3, -1, -3, -1, -1, -1, -2, -1, -3, -2]
charms 1.9 0.7 [1, 2, 3, 2, 1, 2, 3, 1, 2, 2]
chastise -2.5 0.92195 [-4, -3, -2, -1, -4, -3, -2, -2, -2, -2]
chastised -2.2 1.16619 [-2, -3, -2, -4, -1, -1, -3, 0, -3, -3]
chastises -1.7 1.61555 [-3, -3, -3, -1, 1, -2, 1, -1, -2, -4]
chastising -1.7 0.78102 [-2, -3, -2, -2, -2, 0, -1, -1, -2, -2]
cheat -2.0 0.7746 [-2, -3, -3, -2, -2, -1, -1, -1, -2, -3]
cheated -2.3 0.64031 [-2, -4, -2, -2, -2, -2, -3, -2, -2, -2]
cheater -2.5 0.67082 [-2, -4, -2, -3, -2, -2, -3, -2, -3, -2]
cheaters -1.9 0.83066 [-2, -2, -2, -1, -1, -4, -2, -1, -2, -2]
cheating -2.6 0.91652 [-2, -3, -3, -2, -4, -4, -3, -2, -1, -2]
cheats -1.8 0.6 [-3, -1, -2, -1, -2, -1, -2, -2, -2, -2]
cheer 2.3 0.64031 [2, 1, 2, 2, 2, 3, 3, 3, 2, 3]
cheered 2.3 0.78102 [2, 3, 3, 4, 2, 1, 2, 2, 2, 2]
cheerer 1.7 0.45826 [1, 2, 2, 2, 1, 1, 2, 2, 2, 2]
cheerers 1.8 0.87178 [2, 2, 3, 2, 1, 2, 0, 1, 3, 2]
cheerful 2.5 0.67082 [3, 2, 3, 2, 2, 2, 4, 2, 3, 2]
cheerfuller 1.9 0.83066 [3, 3, 2, 3, 2, 1, 1, 2, 1, 1]
cheerfullest 3.2 0.87178 [4, 4, 4, 4, 3, 2, 2, 3, 2, 4]
cheerfully 2.1 0.83066 [3, 2, 2, 2, 1, 3, 1, 3, 1, 3]
cheerfulness 2.1 0.9434 [3, 2, 1, 2, 3, 4, 1, 2, 1, 2]
cheerier 2.6 0.4899 [2, 2, 3, 3, 2, 3, 3, 2, 3, 3]
cheeriest 2.2 0.6 [3, 2, 3, 1, 2, 2, 3, 2, 2, 2]
cheerily 2.5 0.67082 [3, 3, 2, 3, 2, 4, 2, 2, 2, 2]
cheeriness 2.5 0.67082 [3, 2, 4, 2, 3, 2, 3, 2, 2, 2]
cheering 2.3 0.64031 [3, 3, 2, 1, 3, 2, 2, 2, 3, 2]
cheerio 1.2 0.6 [2, 1, 1, 1, 2, 1, 1, 1, 2, 0]
cheerlead 1.7 0.78102 [1, 2, 0, 2, 2, 2, 2, 3, 1, 2]
cheerleader 0.9 0.9434 [1, 1, 0, 2, 1, 0, 0, 1, 0, 3]
cheerleaders 1.2 1.07703 [2, 0, 0, 1, 1, 0, 3, 3, 1, 1]
cheerleading 1.2 1.07703 [2, 2, 0, 0, 1, 0, 3, 2, 0, 2]
cheerleads 1.2 1.07703 [2, 3, 0, 3, 1, 0, 0, 1, 1, 1]
cheerled 1.5 1.11803 [0, 2, 1, 4, 2, 2, 2, 1, 1, 0]
cheerless -1.7 1.1 [-2, -3, -2, -2, -3, -2, -1, -1, 1, -2]
cheerlessly -0.8 1.98997 [-2, 4, -1, -2, -1, -2, -2, -2, 2, -2]
cheerlessness -1.7 1.48661 [-2, -1, -2, -3, -2, -4, -1, 2, -2, -2]
cheerly 2.4 0.66332 [2, 2, 3, 2, 2, 3, 4, 2, 2, 2]
cheers 2.1 1.3 [2, 2, 1, 3, 2, 3, 3, 4, -1, 2]
cheery 2.6 0.66332 [3, 2, 2, 3, 2, 3, 4, 2, 3, 2]
cherish 1.6 1.49666 [0, 3, 3, 3, 2, 2, 2, 1, -2, 2]
cherishable 2.0 1.41421 [-2, 2, 2, 2, 3, 2, 3, 3, 2, 3]
cherished 2.3 0.64031 [3, 2, 2, 3, 2, 2, 1, 3, 2, 3]
cherisher 2.2 0.4 [2, 2, 3, 2, 2, 2, 2, 3, 2, 2]
cherishers 1.9 0.7 [3, 3, 2, 2, 1, 1, 2, 2, 2, 1]
cherishes 2.2 0.74833 [2, 2, 3, 2, 2, 2, 2, 4, 2, 1]
cherishing 2.0 0.7746 [3, 3, 2, 2, 1, 2, 1, 3, 2, 1]
chic 1.1 1.3 [1, 2, 2, -2, 2, 0, 1, 1, 3, 1]
childish -1.2 0.74833 [-1, -1, -2, -3, -1, 0, -1, -1, -1, -1]
chilling -0.1 1.92094 [3, -2, 0, 1, -2, -2, -1, -2, 1, 3]
choke -2.5 0.92195 [-1, -2, -3, -3, -2, -4, -2, -4, -2, -2]
choked -2.1 1.3 [-4, -3, 0, -2, -1, -3, -3, -2, 0, -3]
chokes -2.0 0.89443 [-4, -3, -1, -2, -1, -2, -2, -2, -1, -2]
choking -2.0 1.26491 [-4, -2, -2, -3, -2, -2, -3, -1, 1, -2]
chuckle 1.7 0.45826 [2, 1, 2, 2, 2, 2, 1, 1, 2, 2]
chuckled 1.2 0.9798 [2, 2, 1, 1, 2, 0, 1, 2, -1, 2]
chucklehead -1.9 0.53852 [-2, -2, -1, -3, -2, -2, -2, -2, -1, -2]
chuckleheaded -1.3 1.84662 [-3, -4, -2, 0, 3, -1, -2, 0, -2, -2]
chuckleheads -1.1 0.9434 [-1, -2, 0, -1, -1, -3, 0, 0, -2, -1]
chuckler 0.8 1.07703 [2, 1, -1, 0, 2, 1, 1, 2, -1, 1]
chucklers 1.2 0.87178 [1, 1, 2, 3, 1, 0, 1, 0, 2, 1]
chuckles 1.1 1.13578 [2, 2, -1, 1, 2, 1, 1, 2, -1, 2]
chucklesome 1.1 0.53852 [1, 1, 2, 1, 1, 1, 0, 2, 1, 1]
chuckling 1.4 0.4899 [1, 2, 1, 2, 1, 1, 2, 2, 1, 1]
chucklingly 1.2 0.4 [1, 1, 1, 1, 2, 1, 1, 1, 2, 1]
clarifies 0.9 1.13578 [-2, 1, 0, 2, 1, 2, 2, 1, 1, 1]
clarity 1.7 0.78102 [2, 1, 2, 3, 3, 1, 1, | |
# coding: utf-8
# Plotting script for TREACTMECH files written by <NAME> - hr0392 at bristol.ac.uk
#
# Run the script within the directory containing the flowdata, flowvector, stress strain, displacement files. Output by default is within same directory.
#
# Displacement gives the corner nodes, everything else gives the centre of the cells.
#
#
#
import pandas as pd
import os
import numpy as np
import matplotlib.dates as mdates
import datetime
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.backends.backend_pdf import PdfPages
import sys
from trexoptions import * #import the option file from within the same folder
cwd = os.getcwd()
def flowdata_import():
    """
    Import flowdata.tec from the current working directory, keeping only the
    last time step.  Column names are largely preserved.

    Returns a dictionary 'flowfaces' containing one DataFrame per default face
    (top, bottom, side walls and the half-way cross sections) plus, when
    enabled in trexoptions, one per user-specified cross section, together
    with the coordinate value of each face.
    """
    flowdata = pd.read_csv(cwd+'/flowdata.tec', sep=r"\s*", skiprows=[0], engine='python')
    # The Tecplot header carries one extra leading token: shift names left by one.
    flowdata_modified = flowdata[flowdata.columns[:-1]]
    flowdata_modified.columns = flowdata.columns[1:]
    flowdata = flowdata_modified.rename(index=str, columns={'"X(m)"':"X", '"Y(m)"':"Y", '"Z(m)"':"Z", '"P(Pa)"':"Pressure(Pa)", '"T(C)"':"Temperature(C)",
                '"SatGas"':"SatGas",'"SatLiq"':"SatLiq",'"X1"':"X1", '"X2"':"X2", '"Pcap(Pa)"':"Pcap", '"DGas_kg/m3"':"DGas_kg_m3",
                '"DLiq_kg/m3"':"DLiq_kg_m3", '"Porosity"':"Porosity", '"Perm_X(m2)"':"Perm_X(m2)", '"Perm_Y(m2)"':"Perm_Y(m2)",
                '"Perm_Z(m2)"':"Perm_Z(m2)", '"Krel_Gas"':"Krel_Gas", '"Krel_Liq"':"Krel_Liq", '"HGas(J/kg)"':"HGas(J_kg)",
                '"HLiq(J/kg)"':"HLiq(J_kg)", '"Cp(J/kg/C)"':"Cp(J_kg_C)", '"TC(W/m/C)"':"TC(W_m_C)", '"DBlk_kg/m3"':"DBlk_kg_m3",
                '"Tdif(m2/s)"':"Tdif(m2_s)"})
    # Last time step - top, bottom, side walls
    val = int(flowdata.loc[flowdata["X"] == 'Zone'][-1:].index[0])  # row index of the last 'Zone' (time step) marker
    lastval = int(flowdata.index[-1])
    zone = flowdata[val+1:lastval+1]  # rows belonging to the last time step only
    zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
    top, tpval = zone.loc[zone["Z"] == max(zone.Z)], max(zone.Z)  # 2D array of the top surface
    bot, btval = zone.loc[zone["Z"] == min(zone.Z)], min(zone.Z)  # bottom surface
    MaxY, MxYval = zone.loc[zone["Y"] == max(zone.Y)], max(zone.Y)  # MaxY face
    MinY, MnYval = zone.loc[zone["Y"] == min(zone.Y)], min(zone.Y)  # MinY face
    MaxX, MxXval = zone.loc[zone["X"] == max(zone.X)], max(zone.X)  # MaxX face
    MinX, MnXval = zone.loc[zone["X"] == min(zone.X)], min(zone.X)  # MinX face
    # Half-way cross sections in each axis.
    # NOTE(review): xsec_z is computed but never placed in the returned dict -
    # confirm whether an 'xsec_z_half' entry was intended.
    xsec_x, xsec_x_val = zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)]], zone.Y.unique()[int(len(zone.Y.unique())/2)]
    xsec_y, xsec_y_val = zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)]], zone.X.unique()[int(len(zone.X.unique())/2)]
    xsec_z, xsec_z_val = zone.loc[zone["Z"] == zone.Z.unique()[int(len(zone.Z.unique())/2)]], zone.Z.unique()[int(len(zone.Z.unique())/2)]
    flowfaces = {'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,'Max-X':MaxX,'Min-X':MinX,
                 'tpval' : tpval, 'btval' : btval, 'MxYval' : MxYval, 'MnYval' : MnYval,
                 'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
                 'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
    # Optional user-requested cross sections (index lists supplied by trexoptions).
    if op_xsec_X_user == True:
        for idx in xsec_user_xvals:
            xsec_x_user, xsec_x_user_val = zone.loc[zone["Y"] == zone.Y.unique()[idx]], zone.Y.unique()[idx]
            flowfaces.update({'xsec_x_user_'+str(idx): xsec_x_user, 'xsec_x_user_val'+str(idx): xsec_x_user_val})
    if op_xsec_Y_user == True:
        for idx in xsec_user_yvals:
            xsec_y_user, xsec_y_user_val = zone.loc[zone["X"] == zone.X.unique()[idx]], zone.X.unique()[idx]
            flowfaces.update({'xsec_y_user_'+str(idx): xsec_y_user, 'xsec_y_user_val'+str(idx): xsec_y_user_val})
    if op_xsec_Z_user == True:
        for idx in xsec_user_zvals:
            xsec_z_user, xsec_z_user_val = zone.loc[zone["Z"] == zone.Z.unique()[idx]], zone.Z.unique()[idx]
            flowfaces.update({'xsec_z_user_'+str(idx): xsec_z_user, 'xsec_z_user_val'+str(idx): xsec_z_user_val})
    return flowfaces
def flowvector_import():
    """
    Import flowvector.tec from the current working directory, keeping only the
    last time step.  Column names are largely preserved.

    Returns a dictionary 'vecfaces' containing one DataFrame per default face
    plus, when enabled in trexoptions, one per user-specified cross section,
    together with the coordinate value of each face.
    """
    flowvector = pd.read_csv(cwd+'/flowvector.tec', sep=r"\s*", skiprows=[0], engine='python')
    # The Tecplot header carries one extra leading token: shift names left by one.
    flowvector_modified = flowvector[flowvector.columns[:-1]]
    flowvector_modified.columns = flowvector.columns[1:]
    flowvector = flowvector_modified.rename(index=str, columns={'"X(m)"':"X", '"Y(m)"':"Y",'"Z(m)"':"Z",
                '"FluxLiq"':"FluxLiq", '"FluxLiq_X"':"FluxLiq_X",'"FluxLiq_Y"':"FluxLiq_Y", '"FluxLiq_Z"':"FluxLiq_Z",
                '"PorVelLiq"':"PorVelLiq", '"PorVelLiqX"':"PorVelLiqX",'"PorVelLiqY"':"PorVelLiqY", '"PorVelLiqZ"':"PorVelLiqZ",
                '"FluxGas"':"FluxGas",'"FluxGas_X"':"FluxGas_X",'"FluxGas_Y"':"FluxGas_Y", '"FluxGas_Z"':"FluxGas_Z",
                '"PorVelGas"':"PorVelGas",'"PorVelGasX"':"PorVelGasX",'"PorVelGasY"':"PorVelGasY", '"PorVelGasZ"':"PorVelGasZ",
                '"HeatFlux"':"HeatFlux", '"HeatFlux_X"':"HeatFlux_X",'"HeatFlux_Y"':"HeatFlux_Y", '"HeatFlux_Z"':"HeatFlux_Z"})
    val = int(flowvector.loc[flowvector["X"] == 'Zone'][-1:].index[0])  # row index of the last 'Zone' (time step) marker
    lastval = int(flowvector.index[-1])
    zone = flowvector[val+1:lastval+1]  # rows belonging to the last time step only
    zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
    top, tpval = zone.loc[zone["Z"] == max(zone.Z)], max(zone.Z)  # top surface
    bot, btval = zone.loc[zone["Z"] == min(zone.Z)], min(zone.Z)  # bottom surface
    MaxY, MxYval = zone.loc[zone["Y"] == max(zone.Y)], max(zone.Y)
    MinY, MnYval = zone.loc[zone["Y"] == min(zone.Y)], min(zone.Y)
    MaxX, MxXval = zone.loc[zone["X"] == max(zone.X)], max(zone.X)
    MinX, MnXval = zone.loc[zone["X"] == min(zone.X)], min(zone.X)
    # Half-way cross sections in each axis.
    # NOTE(review): xsec_z is computed but never placed in the returned dict -
    # confirm whether an 'xsec_z_half' entry was intended.
    xsec_x, xsec_x_val = zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)]], zone.Y.unique()[int(len(zone.Y.unique())/2)]
    xsec_y, xsec_y_val = zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)]], zone.X.unique()[int(len(zone.X.unique())/2)]
    xsec_z, xsec_z_val = zone.loc[zone["Z"] == zone.Z.unique()[int(len(zone.Z.unique())/2)]], zone.Z.unique()[int(len(zone.Z.unique())/2)]
    vecfaces = {'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,'Max-X':MaxX,'Min-X':MinX,
                'tpval' : tpval, 'btval' : btval, 'MxYval' : MxYval, 'MnYval' : MnYval,
                'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
                'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
    # Optional user-requested cross sections (index lists supplied by trexoptions).
    if op_xsec_X_user == True:
        for idx in xsec_user_xvals:
            xsec_x_user, xsec_x_user_val = zone.loc[zone["Y"] == zone.Y.unique()[idx]], zone.Y.unique()[idx]
            vecfaces.update({'xsec_x_user_'+str(idx): xsec_x_user, 'xsec_x_user_val'+str(idx): xsec_x_user_val})
    if op_xsec_Y_user == True:
        for idx in xsec_user_yvals:
            xsec_y_user, xsec_y_user_val = zone.loc[zone["X"] == zone.X.unique()[idx]], zone.X.unique()[idx]
            vecfaces.update({'xsec_y_user_'+str(idx): xsec_y_user, 'xsec_y_user_val'+str(idx): xsec_y_user_val})
    if op_xsec_Z_user == True:
        for idx in xsec_user_zvals:
            xsec_z_user, xsec_z_user_val = zone.loc[zone["Z"] == zone.Z.unique()[idx]], zone.Z.unique()[idx]
            vecfaces.update({'xsec_z_user_'+str(idx): xsec_z_user, 'xsec_z_user_val'+str(idx): xsec_z_user_val})
    return vecfaces
def displace_import():
    """
    Import displacement.tec from the current working directory, keeping only
    the last time step.  Displacement is reported at corner nodes, so there is
    one extra data point per axis compared with the cell-centred files; the
    half-way cross-section indices are therefore offset by one.

    Returns a dictionary 'dispfaces' keyed like the other *_import functions.
    """
    column_names = ["X","Y","Z","Disp_x","Disp_y","Disp_z"]
    displace = pd.read_csv(cwd+'/displacement.tec', sep=r"\s+", skiprows=[0,1], usecols=[0,1,2,3,4,5],
                           names=column_names, engine='python')
    val = int(displace.loc[displace["X"] == 'Zone'][-1:].index[0])  # row index of the last 'Zone' (time step) marker
    lastval = int(displace.index[-1])
    zone = displace[val+1:lastval+1]  # rows belonging to the last time step only
    zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
    top, tpval = zone.loc[zone["Z"] == max(zone.Z)], max(zone.Z)
    bot, btval = zone.loc[zone["Z"] == min(zone.Z)], min(zone.Z)
    MaxY, MxYval = zone.loc[zone["Y"] == max(zone.Y)], max(zone.Y)
    MinY, MnYval = zone.loc[zone["Y"] == min(zone.Y)], min(zone.Y)
    MaxX, MxXval = zone.loc[zone["X"] == max(zone.X)], max(zone.X)
    MinX, MnXval = zone.loc[zone["X"] == min(zone.X)], min(zone.X)
    # Half-way cross sections, offset by one for the extra corner-node point.
    # NOTE(review): xsec_z is computed but never placed in the returned dict.
    xsec_x, xsec_x_val = zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)+1]], zone.Y.unique()[int(len(zone.Y.unique())/2)+1]
    xsec_y, xsec_y_val = zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)+1]], zone.X.unique()[int(len(zone.X.unique())/2)+1]
    xsec_z, xsec_z_val = zone.loc[zone["Z"] == zone.Z.unique()[int(len(zone.Z.unique())/2)+1]], zone.Z.unique()[int(len(zone.Z.unique())/2)+1]
    dispfaces = {'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,'Max-X':MaxX,'Min-X':MinX,
                 'tpval' : tpval, 'btval' : btval, 'MxYval' : MxYval, 'MnYval' : MnYval,
                 'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
                 'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
    # Optional user-requested cross sections.
    # NOTE(review): for X and Y the DataFrame slice uses index idx but the
    # recorded coordinate uses idx+1 (and the Z branch uses no offset) - the
    # face and its label disagree by one node; confirm which was intended.
    if op_xsec_X_user == True:
        for idx in xsec_user_xvals:
            xsec_x_user, xsec_x_user_val = zone.loc[zone["Y"] == zone.Y.unique()[idx]], zone.Y.unique()[idx+1]
            dispfaces.update({'xsec_x_user_'+str(idx): xsec_x_user, 'xsec_x_user_val'+str(idx): xsec_x_user_val})
    if op_xsec_Y_user == True:
        for idx in xsec_user_yvals:
            xsec_y_user, xsec_y_user_val = zone.loc[zone["X"] == zone.X.unique()[idx]], zone.X.unique()[idx+1]
            dispfaces.update({'xsec_y_user_'+str(idx): xsec_y_user, 'xsec_y_user_val'+str(idx): xsec_y_user_val})
    if op_xsec_Z_user == True:
        for idx in xsec_user_zvals:
            xsec_z_user, xsec_z_user_val = zone.loc[zone["Z"] == zone.Z.unique()[idx]], zone.Z.unique()[idx]
            dispfaces.update({'xsec_z_user_'+str(idx): xsec_z_user, 'xsec_z_user_val'+str(idx): xsec_z_user_val})
    return dispfaces
def aq_conc_import():
    """
    Import aqconc.tec (aqueous concentrations) from the current working
    directory, keeping only the last time step.  Column names come from the
    aqconc_name dictionary defined in trexoptions.

    Returns a dictionary 'aqconcfaces' keyed like the other *_import functions.
    """
    aqconcdata = pd.read_csv(cwd+'/aqconc.tec', sep=r"\s*", skiprows=[0], engine='python')
    # The Tecplot header carries one extra leading token: shift names left by one.
    aqconcdata_modified = aqconcdata[aqconcdata.columns[:-1]]
    aqconcdata_modified.columns = aqconcdata.columns[1:]
    aqconcdata = aqconcdata_modified.rename(index=str, columns=aqconc_name)
    # Last time step - top, bottom, side walls
    val = int(aqconcdata.loc[aqconcdata["X"] == 'Zone'][-1:].index[0])  # row index of the last 'Zone' (time step) marker
    lastval = int(aqconcdata.index[-1])
    zone = aqconcdata[val+1:lastval+1]  # rows belonging to the last time step only
    zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
    top, tpval = zone.loc[zone["Z"] == max(zone.Z)], max(zone.Z)  # 2D array of the top surface
    bot, btval = zone.loc[zone["Z"] == min(zone.Z)], min(zone.Z)  # bottom surface
    MaxY, MxYval = zone.loc[zone["Y"] == max(zone.Y)], max(zone.Y)  # MaxY face
    MinY, MnYval = zone.loc[zone["Y"] == min(zone.Y)], min(zone.Y)  # MinY face
    MaxX, MxXval = zone.loc[zone["X"] == max(zone.X)], max(zone.X)  # MaxX face
    MinX, MnXval = zone.loc[zone["X"] == min(zone.X)], min(zone.X)  # MinX face
    # Half-way cross sections in each axis.
    # NOTE(review): xsec_z is computed but never placed in the returned dict.
    xsec_x, xsec_x_val = zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)]], zone.Y.unique()[int(len(zone.Y.unique())/2)]
    xsec_y, xsec_y_val = zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)]], zone.X.unique()[int(len(zone.X.unique())/2)]
    xsec_z, xsec_z_val = zone.loc[zone["Z"] == zone.Z.unique()[int(len(zone.Z.unique())/2)]], zone.Z.unique()[int(len(zone.Z.unique())/2)]
    aqconcfaces = {'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,'Max-X':MaxX,'Min-X':MinX,
                   'tpval' : tpval, 'btval' : btval, 'MxYval' : MxYval, 'MnYval' : MnYval,
                   'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
                   'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
    # Optional user-requested cross sections (index lists supplied by trexoptions).
    if op_xsec_X_user == True:
        for idx in xsec_user_xvals:
            xsec_x_user, xsec_x_user_val = zone.loc[zone["Y"] == zone.Y.unique()[idx]], zone.Y.unique()[idx]
            aqconcfaces.update({'xsec_x_user_'+str(idx): xsec_x_user, 'xsec_x_user_val'+str(idx): xsec_x_user_val})
    if op_xsec_Y_user == True:
        for idx in xsec_user_yvals:
            xsec_y_user, xsec_y_user_val = zone.loc[zone["X"] == zone.X.unique()[idx]], zone.X.unique()[idx]
            aqconcfaces.update({'xsec_y_user_'+str(idx): xsec_y_user, 'xsec_y_user_val'+str(idx): xsec_y_user_val})
    if op_xsec_Z_user == True:
        for idx in xsec_user_zvals:
            xsec_z_user, xsec_z_user_val = zone.loc[zone["Z"] == zone.Z.unique()[idx]], zone.Z.unique()[idx]
            aqconcfaces.update({'xsec_z_user_'+str(idx): xsec_z_user, 'xsec_z_user_val'+str(idx): xsec_z_user_val})
    return aqconcfaces
def gas_volfrac_import():
    """
    Import gas_volfrac.tec from the current working directory, keeping only
    the last time step.

    The raw file's time-step lines have a different field count, which pandas
    rejects as 'bad lines' (see
    https://stackoverflow.com/questions/18039057/python-pandas-error-tokenizing-data),
    so a header built from the gas_volfrac_name dictionary is written into the
    file in place the first time this runs.  If gas_volfrac_name changes, wipe
    the previously written header from the .tec file before re-running.
    NOTE(review): despite the original comment, this function does NOT create
    a backup copy before rewriting the file - confirm before relying on one.

    Returns a dictionary 'gas_volfracfaces' keyed like the other *_import
    functions.
    """
    with open(cwd+'/gas_volfrac.tec', 'r') as original:
        data = original.read()
    header = str([i for i in gas_volfrac_name.values()]).strip('[]').replace(',', '')
    print(header)
    print(data[0:len(header)])
    # Only prepend the header if the file does not already start with it.
    if data[0:len(header)] != header:
        with open(cwd+'/gas_volfrac.tec', 'w') as modified:
            modified.write(header + "\n" + data)
    gas_volfracdata = pd.read_csv(cwd+'/gas_volfrac.tec', sep=r"\s*", skiprows=[2], engine='python')
    gas_volfracdata = gas_volfracdata.rename(columns=gas_volfrac_name)  # fit the column name values with the dictionary
    # Last time step - top, bottom, side walls
    val = int(gas_volfracdata.loc[gas_volfracdata["X"] == 'Zone'][-1:].index[0])  # row index of the last 'Zone' (time step) marker
    lastval = int(gas_volfracdata.index[-1])
    zone = gas_volfracdata[val+1:lastval+1]  # rows belonging to the last time step only
    zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
    top, tpval = zone.loc[zone["Z"] == max(zone.Z)], max(zone.Z)  # 2D array of the top surface
    bot, btval = zone.loc[zone["Z"] == min(zone.Z)], min(zone.Z)  # bottom surface
    MaxY, MxYval = zone.loc[zone["Y"] == max(zone.Y)], max(zone.Y)  # MaxY face
    MinY, MnYval = zone.loc[zone["Y"] == min(zone.Y)], min(zone.Y)  # MinY face
    MaxX, MxXval = zone.loc[zone["X"] == max(zone.X)], max(zone.X)  # MaxX face
    MinX, MnXval = zone.loc[zone["X"] == min(zone.X)], min(zone.X)  # MinX face
    # Half-way cross sections in each axis.
    # NOTE(review): xsec_z is computed but never placed in the returned dict.
    xsec_x, xsec_x_val = zone.loc[zone["Y"] == zone.Y.unique()[int(len(zone.Y.unique())/2)]], zone.Y.unique()[int(len(zone.Y.unique())/2)]
    xsec_y, xsec_y_val = zone.loc[zone["X"] == zone.X.unique()[int(len(zone.X.unique())/2)]], zone.X.unique()[int(len(zone.X.unique())/2)]
    xsec_z, xsec_z_val = zone.loc[zone["Z"] == zone.Z.unique()[int(len(zone.Z.unique())/2)]], zone.Z.unique()[int(len(zone.Z.unique())/2)]
    gas_volfracfaces = {'Top':top,'Bot':bot,'Max-Y':MaxY,'Min-Y':MinY,'Max-X':MaxX,'Min-X':MinX,
                        'tpval' : tpval, 'btval' : btval, 'MxYval' : MxYval, 'MnYval' : MnYval,
                        'MxXval' : MxXval, 'MnXval' : MnXval,'xsec_x_half':xsec_x,'xsec_x_half_val':xsec_x_val,
                        'xsec_y_half':xsec_y,'xsec_y_val_half':xsec_y_val}
    # Optional user-requested cross sections (index lists supplied by trexoptions).
    if op_xsec_X_user == True:
        for idx in xsec_user_xvals:
            xsec_x_user, xsec_x_user_val = zone.loc[zone["Y"] == zone.Y.unique()[idx]], zone.Y.unique()[idx]
            gas_volfracfaces.update({'xsec_x_user_'+str(idx): xsec_x_user, 'xsec_x_user_val'+str(idx): xsec_x_user_val})
    if op_xsec_Y_user == True:
        for idx in xsec_user_yvals:
            xsec_y_user, xsec_y_user_val = zone.loc[zone["X"] == zone.X.unique()[idx]], zone.X.unique()[idx]
            gas_volfracfaces.update({'xsec_y_user_'+str(idx): xsec_y_user, 'xsec_y_user_val'+str(idx): xsec_y_user_val})
    if op_xsec_Z_user == True:
        for idx in xsec_user_zvals:
            xsec_z_user, xsec_z_user_val = zone.loc[zone["Z"] == zone.Z.unique()[idx]], zone.Z.unique()[idx]
            gas_volfracfaces.update({'xsec_z_user_'+str(idx): xsec_z_user, 'xsec_z_user_val'+str(idx): xsec_z_user_val})
    return gas_volfracfaces
def mineral_ab_import():
    """
    Import the mineral.tec file (mineral abundances) and extract the last
    time step's boundary faces and mid-domain cross sections.

    Relies on module-level globals: ``pd``, ``cwd``, ``min_ab_name``, the
    ``op_xsec_[XYZ]_user`` flags and the ``xsec_user_[xyz]vals`` index lists.

    :returns: dict mapping face / cross-section labels to 2D DataFrame
              slices and their coordinate values.
    """
    mineral_ab_data = pd.read_csv(cwd + '/mineral.tec', sep=r"\s*", skiprows=[0], engine='python')
    # The tecplot header is offset by one column: drop the spurious last
    # column, shift the names left, then apply the friendly column names.
    mineral_ab_data_modified = mineral_ab_data[mineral_ab_data.columns[:-1]]
    mineral_ab_data_modified.columns = mineral_ab_data.columns[1:]
    mineral_ab_data = mineral_ab_data_modified.rename(index=str, columns=min_ab_name)
    # Index of the last 'Zone' marker row; rows after it are the final time step.
    val = int(mineral_ab_data.loc[mineral_ab_data["X"] == 'Zone'][-1:].index[0])
    lastval = int(mineral_ab_data.index[-1])
    zone = mineral_ab_data[val + 1:lastval + 1]
    zone[zone.columns] = zone[zone.columns].apply(pd.to_numeric, errors='ignore', downcast='float')
    # Boundary faces of the model domain (2D slices of the final time step).
    top, tpval = zone.loc[zone["Z"] == max(zone.Z)], max(zone.Z)    # top surface
    bot, btval = zone.loc[zone["Z"] == min(zone.Z)], min(zone.Z)    # bottom surface
    MaxY, MxYval = zone.loc[zone["Y"] == max(zone.Y)], max(zone.Y)  # max-Y face
    MinY, MnYval = zone.loc[zone["Y"] == min(zone.Y)], min(zone.Y)  # min-Y face
    MaxX, MxXval = zone.loc[zone["X"] == max(zone.X)], max(zone.X)  # max-X face
    MinX, MnXval = zone.loc[zone["X"] == min(zone.X)], min(zone.X)  # min-X face
    # Mid-domain cross sections, sliced at the middle unique coordinate.
    # (Hoisted: the original recomputed .unique() four times per axis.)
    y_unique = zone.Y.unique()
    x_unique = zone.X.unique()
    y_mid = y_unique[int(len(y_unique) / 2)]
    x_mid = x_unique[int(len(x_unique) / 2)]
    xsec_x, xsec_x_val = zone.loc[zone["Y"] == y_mid], y_mid
    xsec_y, xsec_y_val = zone.loc[zone["X"] == x_mid], x_mid
    mineral_ab_faces = {
        'Top': top, 'Bot': bot, 'Max-Y': MaxY, 'Min-Y': MinY,
        'Max-X': MaxX, 'Min-X': MinX,
        'tpval': tpval, 'btval': btval, 'MxYval': MxYval, 'MnYval': MnYval,
        'MxXval': MxXval, 'MnXval': MnXval,
        'xsec_x_half': xsec_x, 'xsec_x_half_val': xsec_x_val,
        # NOTE: key is 'xsec_y_val_half' (not *_half_val) -- kept as-is for
        # backward compatibility with existing callers.
        'xsec_y_half': xsec_y, 'xsec_y_val_half': xsec_y_val,
    }
    # Optional user-requested cross sections, keyed by the raw index value.
    if op_xsec_X_user == True:
        for idx in xsec_user_xvals:
            yval = y_unique[idx]
            mineral_ab_faces.update({
                'xsec_x_user_' + str(idx): zone.loc[zone["Y"] == yval],
                'xsec_x_user_val' + str(idx): yval,
            })
    if op_xsec_Y_user == True:
        for idx in xsec_user_yvals:
            xval = x_unique[idx]
            mineral_ab_faces.update({
                'xsec_y_user_' + str(idx): zone.loc[zone["X"] == xval],
                'xsec_y_user_val' + str(idx): xval,
            })
    if op_xsec_Z_user == True:
        z_unique = zone.Z.unique()
        for idx in xsec_user_zvals:
            zval = z_unique[idx]
            mineral_ab_faces.update({
                'xsec_z_user_' + str(idx): zone.loc[zone["Z"] == zval],
                'xsec_z_user_val' + str(idx): zval,
            })
    return mineral_ab_faces
def mineral_si_import():
"""
Imports the min_SI.tec file - mineral saturation index
"""
mineral_si_data=pd.read_csv(cwd+'/mineral.tec',sep=r"\s*",skiprows=[0],engine='python')
mineral_si_data_modified= mineral_si_data[mineral_si_data.columns[:-1]]
mineral_si_data_modified.columns = mineral_si_data.columns[1:]
mineral_si_data=mineral_si_data_modified.rename(index=str,columns=min_si_name)
#Last time step - top, bottom, side walls
val=int(mineral_si_data.loc[mineral_si_data["X"] == 'Zone'][-1:].index[0])#value of the last time zone
lastval=int(mineral_si_data.index[-1])
length=lastval - val #length | |
<filename>general/apero-drs/database_test/database/test_calibdb.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2020-05-21
@author: cook
"""
from astropy.table import Table
import numpy as np
import pandas as pd
import sqlite3
import sys
import shutil
class Database:
'''A wrapper for an SQLite database.'''
def __init__(self, path, verbose=False):
'''
Create an object for reading and writing to a SQLite database.
:param path: the location on disk of the database.
This may be :memory: to create a temporary in-memory
database which will not be saved when the program closes.
'''
self._verbose_ = verbose
self._conn_ = sqlite3.connect(path)
self._cursor_ = self._conn_.cursor()
self.tables = []
self.path = path
self._update_table_list_()
return
def _infer_table_(self, table):
if table is None:
amsg = ("There are multiple tables in the database, "
"so you must set 'table' to pick one.")
assert len(self.tables) == 1, amsg
return self.tables[0]
return table
    def _commit(self):
        # Flush pending writes to the database via the underlying connection.
        self._conn_.commit()
def execute(self, command, return_cursor=False):
'''
Directly execute an SQL command on the database and return
any results.
:param command: The SQL command to be run.
Returns:
The outputs of the command, if any, as a list.
'''
if self._verbose_:
print("SQL INPUT: ", command)
cursor = self._cursor_.execute(command)
result = self._cursor_.fetchall()
if self._verbose_:
print("SQL OUTPUT:", result)
if return_cursor:
return result, cursor
else:
return result
def _update_table_list_(self):
'''
Reads the database for tables and updates the class members
accordingly.
'''
# Get the new list of tables
command = 'SELECT name from sqlite_master where type= "table"'
self.tables = self.execute(command)
self.tables = [i[0] for i in self.tables]
self._commit()
return
def add_table(self, name, field_names, field_types):
'''
Adds a table to the database file.
:param name: The name of the table to create. This must not already be
in use or a SQL keyword.
:param field_names: The names of the fields (columns) in the table as a
list of str objects. These can't be SQL keywords.
:param field_types: The data types of the fields as a list. The list
can contain either SQL type specifiers or the
python int, str, and float types.
Examples:
# "REAL" does the same thing as float
db.addTable('planets', ['name', 'mass', 'radius'],
[str, float, "REAL"])
'''
translator = {str: "TEXT", int: "INTEGER", float: "REAL"}
fields = []
for n, t in zip(field_names, field_types):
assert type(n) is str
if type(t) is type:
t = translator[t]
else:
assert type(t) is str
fields.append(n + ' ' + t)
cargs = [name, ", ".join(fields)]
command = "CREATE TABLE IF NOT EXISTS {}({});".format(*cargs)
self.execute(command)
self._update_table_list_()
self._commit()
return
def delete_table(self, name):
'''
Deletes a table from the database, erasing all contained data
permenantly!
:param name: The name of the table to be deleted.
See Database.tables for a list of eligible tables.
'''
self.execute("DROP TABLE {}".format(name))
self._update_table_list_()
self._commit()
return
def rename_table(self, oldName, newName):
'''Renames a table.
:param oldName: The name of the table to be deleted. See Database.tables
for a list of eligible tables.
:param newName: The new name of the table. This must not be already
taken or an SQL keyword.
'''
self.execute("ALTER TABLE {} RENAME TO {}".format(oldName, newName))
self._commit()
return
def get(self, columns='*', table=None, condition=None, sort_by=None,
sort_descending=True, max_rows=None, return_array=False,
return_table=False, return_pandas=False):
'''
Retrieves data from the database with a variety of options.
:param columns: a string containing the comma-separated columns to
retrieve from the database. You may also apply basic
math functions and aggregators to the columns
( see examples below).
"*" retrieves all available columns.
:param table: A str which specifies which table within the database to
retrieve data from. If there is only one table to pick
from, this may be left as None to use it automatically.
:param condition: Filter results using a SQL conditions string
-- see examples, and possibly this
useful tutorial:
https://www.sqlitetutorial.net/sqlite-where/.
If None, no results will be filtered out.
:param sort_by: A str to sort the results by, which may be a column name
or simple functions thereof. If None, the results are not
sorted.
:param sort_descending: Whether to sort the outputs ascending or
descending. This has no effect if sortBy is
set to None.
:param max_rows: The number of rows to truncate the output to.
If this is None, all matching rows are returned.
:param returnAray: Whether to transform the results into a numpy array.
This works well only when the outputs all have the
same type, so it is off by default.
:returns:
The requested data (if any) filtered, sorted, and truncated
according to the arguments. The format is a list of rows containing
a tuple of columns, unless returnArray is True, in which case the
output is a numpy array.
Examples:
# Returns the entire table (if there is only one)
db.get()
# Returns the planet density, sorted descending by radius.
db.get("mass / (radius*radius*radius)", sortBy="radius")
# Returns the full row of the largest planet.
db.get(sortBy="radius", maxRows=1)
# Returns all planets of sufficient mass and radius.
db.get(condition="mass > 1 and radius > 1")
# Returns the names of the five largest planets.
db.get("name", sortBy="radius", maxRows=5)
'''
table = self._infer_table_(table)
command = "SELECT {} from {}".format(columns, table)
if condition is not None:
assert type(condition) is str
command += " WHERE {} ".format(condition)
if sort_by is not None:
command += " ORDER BY {} ".format(sort_by)
if sort_descending:
command += "DESC"
else:
command += "ASC"
if max_rows is not None:
assert type(max_rows) is int
command += " LIMIT {}".format(max_rows)
if return_pandas:
return self._to_pandas(command)
else:
result, cursor = self.execute(command, return_cursor=True)
if return_array:
return np.asarray(result)
if return_table:
return self._to_table(cursor, result)
return result
def add_row(self, values, table=None, columns="*", commit=True):
'''
Adds a row to the specified tables with the given values.
:param values: an iterable of the values to fill into the new row.
:param table: A str which specifies which table within the database
to retrieve data from. If there is only one table to
pick from, this may be left as None to use it
automatically.
:param columns: If you only want to initialize some of the columns,
you may list them here. Otherwise, '*' indicates that
all columns will be initialized.
'''
table = self._infer_table_(table)
_values = []
for i in values:
if not isinstance(i, str):
_values.append(str(i))
else:
_values.append('"{0}"'.format(i))
if columns == '*':
columns = ''
else:
columns = "(" + ", ".join(columns) + ")"
cargs = [table, columns, ', '.join(_values)]
command = "INSERT INTO {}{} VALUES({})".format(*cargs)
self.execute(command)
if commit:
self._commit()
return
def set(self, columns, values, condition, table=None):
'''
Changes the data in existing rows.
:param columns: The names of the columns to be changed, as a list of
strings. If there is only one column to change, it
can just be a string.
:param values: The values to set the columns to as a list. This must be
the same length as columns, and can consist either of a
str, float or int. Alternatively, a bytestring can be
given to set the value to the result of a SQL statement
given by the bytestring. If there is only one value,
putting in a list is optional.
:param condition: An SQL condition string to identify the rows to be
modified. This may be set to None to apply the
modification to all rows.
Examples:
# Sets the mass of a particular row identified by name.
db.set('mass', 1., 'name="HD 80606 b"')
# Increments the value of 'counts' for all rows
db.set('counts', b'counts+1', None)
# Resets all mass and radius values to null
db.set(['mass', 'radius'], [b'null', b'null'], None)
'''
if type(columns) is str:
columns = [columns]
if type(values) is not list:
values = [values]
table = self._infer_table_(table)
setStr = []
assert len(columns) == len(values)
for c, v in zip(columns, values):
assert type(c) is str, "The column to set must be a string."
if type(v) is bytes:
setStr.append(c + " = " + v.decode("utf-8"))
elif type(v) is str:
setStr.append(c + ' = "' + v + '"')
else:
setStr.append(c + " = " + str(v))
command = "UPDATE {} SET {}".format(table, | |
left_strat,
right_sym,
right_strat,
right_is_scalar,
res_name,
data,
):
left = data.draw(left_strat, label=left_sym)
right = data.draw(right_strat, label=right_sym)
res = func(left, right)
assert_binary_param_dtype(func_name, left, right, right_is_scalar, res, res_name)
if not right_is_scalar:
# TODO: generate indices without broadcasting arrays (see test_equal comment)
shape = broadcast_shapes(left.shape, right.shape)
ph.assert_shape(func_name, res.shape, shape, repr_name=f"{res_name}.shape")
_left = xp.broadcast_to(left, shape)
_right = xp.broadcast_to(right, shape)
# Compare against the Python & operator.
if res.dtype == xp.bool:
for idx in sh.ndindex(res.shape):
s_left = bool(_left[idx])
s_right = bool(_right[idx])
s_res = bool(res[idx])
assert (s_left and s_right) == s_res
else:
for idx in sh.ndindex(res.shape):
s_left = int(_left[idx])
s_right = int(_right[idx])
s_res = int(res[idx])
s_and = ah.int_to_dtype(
s_left & s_right,
dh.dtype_nbits[res.dtype],
dh.dtype_signed[res.dtype],
)
assert s_and == s_res
@pytest.mark.parametrize(
    binary_argnames, make_binary_params("bitwise_left_shift", dh.all_int_dtypes)
)
@given(data=st.data())
def test_bitwise_left_shift(
    func_name,
    func,
    left_sym,
    left_strat,
    right_sym,
    right_strat,
    right_is_scalar,
    res_name,
    data,
):
    """bitwise_left_shift (and its operator form) matches Python's <<."""
    left = data.draw(left_strat, label=left_sym)
    right = data.draw(right_strat, label=right_sym)
    # Shifting by a negative amount is undefined; reject those draws.
    if right_is_scalar:
        assume(right >= 0)
    else:
        assume(not ah.any(ah.isnegative(right)))
    res = func(left, right)
    assert_binary_param_dtype(func_name, left, right, right_is_scalar, res, res_name)
    if right_is_scalar:
        return
    # TODO: generate indices without broadcasting arrays (see test_equal comment)
    shape = broadcast_shapes(left.shape, right.shape)
    ph.assert_shape(func_name, res.shape, shape, repr_name=f"{res_name}.shape")
    _left = xp.broadcast_to(left, shape)
    _right = xp.broadcast_to(right, shape)
    # Check each element against the Python << operator.
    nbits = dh.dtype_nbits[res.dtype]
    signed = dh.dtype_signed[res.dtype]
    for idx in sh.ndindex(res.shape):
        s_left = int(_left[idx])
        s_right = int(_right[idx])
        # Avoid constructing enormous Python ints for out-of-range shifts;
        # such shifts always reduce to 0 after truncation anyway.
        raw = s_left << s_right if s_right < nbits else 0
        assert ah.int_to_dtype(raw, nbits, signed) == int(res[idx])
@pytest.mark.parametrize(
    unary_argnames, make_unary_params("bitwise_invert", dh.bool_and_all_int_dtypes)
)
@given(data=st.data())
def test_bitwise_invert(func_name, func, strat, data):
    """bitwise_invert (and its operator form) matches Python's ~ / not."""
    x = data.draw(strat, label="x")
    out = func(x)
    ph.assert_dtype(func_name, x.dtype, out.dtype)
    ph.assert_shape(func_name, out.shape, x.shape)
    if out.dtype == xp.bool:
        # Boolean inversion is logical negation.
        for idx in sh.ndindex(out.shape):
            assert bool(out[idx]) == (not bool(x[idx]))
    else:
        # Integer inversion, truncated to the output dtype's width.
        nbits = dh.dtype_nbits[out.dtype]
        signed = dh.dtype_signed[out.dtype]
        for idx in sh.ndindex(out.shape):
            assert ah.int_to_dtype(~int(x[idx]), nbits, signed) == int(out[idx])
@pytest.mark.parametrize(
    binary_argnames, make_binary_params("bitwise_or", dh.bool_and_all_int_dtypes)
)
@given(data=st.data())
def test_bitwise_or(
    func_name,
    func,
    left_sym,
    left_strat,
    right_sym,
    right_strat,
    right_is_scalar,
    res_name,
    data,
):
    """bitwise_or (and its operator form) matches Python's | / or."""
    left = data.draw(left_strat, label=left_sym)
    right = data.draw(right_strat, label=right_sym)
    res = func(left, right)
    assert_binary_param_dtype(func_name, left, right, right_is_scalar, res, res_name)
    if right_is_scalar:
        return
    # TODO: generate indices without broadcasting arrays (see test_equal comment)
    shape = broadcast_shapes(left.shape, right.shape)
    ph.assert_shape(func_name, res.shape, shape, repr_name=f"{res_name}.shape")
    _left = xp.broadcast_to(left, shape)
    _right = xp.broadcast_to(right, shape)
    if res.dtype == xp.bool:
        # Boolean OR is Python's logical `or`.
        for idx in sh.ndindex(res.shape):
            assert bool(res[idx]) == (bool(_left[idx]) or bool(_right[idx]))
    else:
        # Integer OR, truncated to the result dtype's width.
        nbits = dh.dtype_nbits[res.dtype]
        signed = dh.dtype_signed[res.dtype]
        for idx in sh.ndindex(res.shape):
            expected = ah.int_to_dtype(int(_left[idx]) | int(_right[idx]),
                                       nbits, signed)
            assert expected == int(res[idx])
@pytest.mark.parametrize(
    binary_argnames, make_binary_params("bitwise_right_shift", dh.all_int_dtypes)
)
@given(data=st.data())
def test_bitwise_right_shift(
    func_name,
    func,
    left_sym,
    left_strat,
    right_sym,
    right_strat,
    right_is_scalar,
    res_name,
    data,
):
    """bitwise_right_shift (and its operator form) matches Python's >>."""
    left = data.draw(left_strat, label=left_sym)
    right = data.draw(right_strat, label=right_sym)
    # Shifting by a negative amount is undefined; reject those draws.
    if right_is_scalar:
        assume(right >= 0)
    else:
        assume(not ah.any(ah.isnegative(right)))
    res = func(left, right)
    assert_binary_param_dtype(func_name, left, right, right_is_scalar, res, res_name)
    if not right_is_scalar:
        # TODO: generate indices without broadcasting arrays (see test_equal comment)
        shape = broadcast_shapes(left.shape, right.shape)
        # Report under func_name (not a hard-coded literal) so failures in
        # the operator-form parametrizations carry the right name --
        # consistent with every sibling bitwise test.
        ph.assert_shape(func_name, res.shape, shape, repr_name=f"{res_name}.shape")
        _left = xp.broadcast_to(left, shape)
        _right = xp.broadcast_to(right, shape)
        # Compare against the Python >> operator.
        for idx in sh.ndindex(res.shape):
            s_left = int(_left[idx])
            s_right = int(_right[idx])
            s_res = int(res[idx])
            s_shift = ah.int_to_dtype(
                s_left >> s_right, dh.dtype_nbits[res.dtype], dh.dtype_signed[res.dtype]
            )
            assert s_shift == s_res
@pytest.mark.parametrize(
    binary_argnames, make_binary_params("bitwise_xor", dh.bool_and_all_int_dtypes)
)
@given(data=st.data())
def test_bitwise_xor(
    func_name,
    func,
    left_sym,
    left_strat,
    right_sym,
    right_strat,
    right_is_scalar,
    res_name,
    data,
):
    """bitwise_xor (and its operator form) matches Python's ^ operator."""
    left = data.draw(left_strat, label=left_sym)
    right = data.draw(right_strat, label=right_sym)
    res = func(left, right)
    assert_binary_param_dtype(func_name, left, right, right_is_scalar, res, res_name)
    if right_is_scalar:
        return
    # TODO: generate indices without broadcasting arrays (see test_equal comment)
    shape = broadcast_shapes(left.shape, right.shape)
    ph.assert_shape(func_name, res.shape, shape, repr_name=f"{res_name}.shape")
    _left = xp.broadcast_to(left, shape)
    _right = xp.broadcast_to(right, shape)
    if res.dtype == xp.bool:
        # Python's ^ on bools is logical xor.
        for idx in sh.ndindex(res.shape):
            assert bool(res[idx]) == (bool(_left[idx]) ^ bool(_right[idx]))
    else:
        # Integer XOR, truncated to the result dtype's width.
        nbits = dh.dtype_nbits[res.dtype]
        signed = dh.dtype_signed[res.dtype]
        for idx in sh.ndindex(res.shape):
            expected = ah.int_to_dtype(int(_left[idx]) ^ int(_right[idx]),
                                       nbits, signed)
            assert expected == int(res[idx])
@given(xps.arrays(dtype=xps.numeric_dtypes(), shape=hh.shapes()))
def test_ceil(x):
    """ceil() yields the smallest integral value >= x (mirror of test_floor)."""
    out = xp.ceil(x)
    ph.assert_dtype("ceil", x.dtype, out.dtype)
    ph.assert_shape("ceil", out.shape, x.shape)
    finite = ah.isfinite(x)
    ah.assert_integral(out[finite])
    # For every finite element: x <= ceil(x) < x + 1.
    assert ah.all(ah.less_equal(x[finite], out[finite]))
    one = ah.one(x[finite].shape, x.dtype)
    assert ah.all(ah.less_equal(out[finite] - x[finite], one))
    # ceil is the identity on values that are already integral.
    integral = ah.isintegral(x)
    ah.assert_exactly_equal(out[integral], x[integral])
@given(xps.arrays(dtype=xps.floating_dtypes(), shape=hh.shapes()))
def test_cos(x):
    """cos maps (-inf, inf) into [-1, 1]; out-of-domain inputs give nan."""
    out = xp.cos(x)
    ph.assert_dtype("cos", x.dtype, out.dtype)
    ph.assert_shape("cos", out.shape, x.shape)
    one = ah.one(x.shape, x.dtype)
    inf = ah.infinity(x.shape, x.dtype)
    # The nan results themselves are covered by the special-case tests;
    # here we only check that domain and codomain membership agree.
    ah.assert_exactly_equal(
        ah.inrange(x, -inf, inf, open=True),
        ah.inrange(out, -one, one),
    )
@given(xps.arrays(dtype=xps.floating_dtypes(), shape=hh.shapes()))
def test_cosh(x):
    """cosh maps [-inf, inf] to [-inf, inf]; out-of-domain inputs give nan."""
    out = xp.cosh(x)
    ph.assert_dtype("cosh", x.dtype, out.dtype)
    ph.assert_shape("cosh", out.shape, x.shape)
    inf = ah.infinity(x.shape, x.dtype)
    # The nan results themselves are covered by the special-case tests;
    # here we only check that domain and codomain membership agree.
    ah.assert_exactly_equal(
        ah.inrange(x, -inf, inf),
        ah.inrange(out, -inf, inf),
    )
@pytest.mark.parametrize(binary_argnames, make_binary_params("divide", dh.float_dtypes))
@given(data=st.data())
def test_divide(
    func_name,
    func,
    left_sym,
    left_strat,
    right_sym,
    right_strat,
    right_is_scalar,
    res_name,
    data,
):
    """divide(): only the dtype contract can be checked generically."""
    numerator = data.draw(left_strat, label=left_sym)
    denominator = data.draw(right_strat, label=right_sym)
    res = func(numerator, denominator)
    assert_binary_param_dtype(
        func_name, numerator, denominator, right_is_scalar, res, res_name
    )
    # Beyond the special cases the spec mandates no particular values, and
    # few mathematical identities hold exactly for floating-point division,
    # so there is nothing further to verify here (short of asserting full
    # IEEE 754 division semantics, which this suite does not yet do).
@pytest.mark.parametrize(binary_argnames, make_binary_params("equal", dh.all_dtypes))
@given(data=st.data())
def test_equal(
    func_name,
    func,
    left_sym,
    left_strat,
    right_sym,
    right_strat,
    right_is_scalar,
    res_name,
    data,
):
    """equal() matches Python's == on the promoted scalar values."""
    left = data.draw(left_strat, label=left_sym)
    right = data.draw(right_strat, label=right_sym)
    out = func(left, right)
    assert_binary_param_dtype(
        func_name, left, right, right_is_scalar, out, res_name, xp.bool
    )
    # NOTE: ah.assert_exactly_equal() itself uses ah.equal(), so we must be careful
    # not to use it here. Otherwise, the test would be circular and
    # meaningless. Instead, we implement this by iterating every element of
    # the arrays and comparing them. The logic here is also used for the tests
    # for the other elementwise functions that accept any input dtype but
    # always return bool (greater(), greater_equal(), less(), less_equal(),
    # and not_equal()).
    if not right_is_scalar:
        # First we broadcast the arrays so that they can be indexed uniformly.
        # TODO: it should be possible to skip this step if we instead generate
        # indices to x1 and x2 that correspond to the broadcasted shapes. This
        # would avoid the dependence in this test on broadcast_to().
        shape = broadcast_shapes(left.shape, right.shape)
        ph.assert_shape(func_name, out.shape, shape)
        _left = xp.broadcast_to(left, shape)
        _right = xp.broadcast_to(right, shape)
        # Second, manually promote the dtypes. This is important. If the internal
        # type promotion in ah.equal() is wrong, it will not be directly visible
        # in the output type, but it can lead to wrong answers. For example,
        # ah.equal(array(1.0, dtype=xp.float32), array(1.00000001, dtype=xp.float64))
        # will be wrong if the float64 operand is downcast to float32. See the
        # comment on test_elementwise_function_two_arg_bool_type_promotion() in
        # test_type_promotion.py. The type promotion for ah.equal() is not *really*
        # tested in that file, because doing so requires doing the consistency
        # check we do here rather than just checking the res dtype.
        promoted_dtype = dh.promotion_table[left.dtype, right.dtype]
        _left = ah.asarray(_left, dtype=promoted_dtype)
        _right = ah.asarray(_right, dtype=promoted_dtype)
        scalar_type = dh.get_scalar_type(promoted_dtype)
        for idx in sh.ndindex(shape):
            x1_idx = _left[idx]
            x2_idx = _right[idx]
            out_idx = out[idx]
            assert out_idx.shape == x1_idx.shape == x2_idx.shape  # sanity check
            assert bool(out_idx) == (scalar_type(x1_idx) == scalar_type(x2_idx))
@given(xps.arrays(dtype=xps.floating_dtypes(), shape=hh.shapes()))
def test_exp(x):
    """exp maps [-inf, inf] onto [0, inf]; out-of-domain inputs give nan."""
    out = xp.exp(x)
    ph.assert_dtype("exp", x.dtype, out.dtype)
    ph.assert_shape("exp", out.shape, x.shape)
    inf = ah.infinity(x.shape, x.dtype)
    zero = ah.zero(x.shape, x.dtype)
    # The nan results themselves are covered by the special-case tests;
    # here we only check that domain and codomain membership agree.
    ah.assert_exactly_equal(
        ah.inrange(x, -inf, inf),
        ah.inrange(out, zero, inf),
    )
@given(xps.arrays(dtype=xps.floating_dtypes(), shape=hh.shapes()))
def test_expm1(x):
out = xp.expm1(x)
ph.assert_dtype("expm1", x.dtype, out.dtype)
ph.assert_shape("expm1", out.shape, x.shape)
INFINITY = ah.infinity(x.shape, x.dtype)
NEGONE = -ah.one(x.shape, x.dtype)
domain = ah.inrange(x, -INFINITY, INFINITY)
codomain = ah.inrange(out, NEGONE, INFINITY)
# expm1 maps [-inf, inf] | |
TAG_SEQ_CHIP_NAME,
"flows": TAG_SEQ_FLOWS,
"libraryKitName": TAG_SEQ_LIB_KIT_NAME,
"libraryReadLength": TAG_SEQ_LIBRARY_READ_LENGTH,
"reference": TAG_SEQ_REFERENCE,
"sampleGrouping": TAG_SEQ_SAMPLE_GROUPING,
"sequencekitname": TAG_SEQ_SEQ_KIT_NAME,
"templatingKitName": TAG_SEQ_TEMPLATE_KIT_NAME,
"planStatus": PLAN_STATUS,
}
)
sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
return sysTemplate, isCreated, isUpdated, templateParams
def add_or_update_immune_repertoire_clonality_s5_system_templates():
    """Create/refresh the S5 clonality immune-repertoire system templates.

    Four active templates share identical settings apart from the flow
    count; a fifth (the childhood assay) is registered but hidden.
    """
    shared_settings = {
        "applicationGroup": "immune_repertoire",
        "barcodeKitName": "Ion Dual Barcode Kit 1-96",
        "categories": "onco_immune;immunology",
        "chipType": "540",
        "endBarcodeKitName": "",
        "libraryKitName": "Ion AmpliSeq Library Kit Plus",
        "libraryReadLength": 200,
        "reference": "",
        "sampleGrouping": "Self",
        "sequencekitname": "Ion S5 Sequencing Kit",
        "templatingKitName": "Ion Chef S540 V1",
    }
    # These two assays run at 500 flows; the others at 550.
    short_flow_templates = [
        "Oncomine BCR IGHKL-SR - DNA",
        "Oncomine TCR Pan-Clonality Assay",
    ]
    template_names = [
        "Oncomine BCR IGHKL-SR - DNA",
        "Oncomine BCR IGH FR3-J RF1 Assay",
        "Oncomine BCR IGH FR2-J RF2 Assay",
        "Oncomine TCR Pan-Clonality Assay",
    ]
    for template_name in template_names:
        templateParams = TemplateParams(template_name, S5, "MIXED")
        settings = dict(shared_settings)
        settings["flows"] = 500 if template_name in short_flow_templates else 550
        settings["planStatus"] = "planned"
        templateParams.update(settings)
        sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
        finish_sys_template(sysTemplate, isCreated, templateParams)
    # TS-18068 - Hide childhood Assay
    templateParams = TemplateParams("Oncomine BCR Pan-Clonality Childhood Assay", S5, "MIXED")
    settings = dict(shared_settings)
    settings["flows"] = 550
    settings["planStatus"] = "inactive"
    templateParams.update(settings)
    sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
    finish_sys_template(sysTemplate, isCreated, templateParams)
def add_or_update_immune_repertoire_SHM_s5_system_templates():
    """Create/refresh the S5 SHM BCR immune-repertoire system templates.

    Both assays are registered with identical settings.
    """
    settings = {
        "applicationGroup": "immune_repertoire",
        "barcodeKitName": "Ion Dual Barcode Kit 1-96",
        "categories": "onco_immune;immunology",
        "chipType": "530",
        "endBarcodeKitName": "",
        "flows": 1100,
        "libraryKitName": "Ion AmpliSeq Library Kit Plus",
        "libraryReadLength": 200,
        "reference": "",
        "sampleGrouping": "Self",
        "sequencekitname": "Ion S5 Sequencing Kit",
        "templatingKitName": "Ion Chef S530 V2",
        "samplePrepProtocol": "pcr400bp",
        "planStatus": "planned",
    }
    for template_name in (
        "Oncomine BCR IGH FR1-J RF3 Assay",
        "Oncomine BCR IGHV SHM Leader-J Assay",
    ):
        templateParams = TemplateParams(template_name, S5, "MIXED")
        # Pass a fresh copy so a mutation by update() cannot leak between templates.
        templateParams.update(dict(settings))
        sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
        finish_sys_template(sysTemplate, isCreated, templateParams)
def add_or_update_immune_repertoire_s5_system_templates():
    """Create/refresh the S5 immune repertoire system templates.

    Registers the AmpliSeq TCRB Assay Plus template and the Oncomine
    TCR Beta-LR template, both on the 530 chip at 850 flows.
    """
    APPLICATION_GROUP = "immune_repertoire"
    BARCODE_KIT_NAME = "Ion Select BC Set-1"
    BARCODE_KIT_NAME_DUAL = "Ion Dual Barcode Kit 1-96"
    CATEGORIES = "onco_immune;immunology"
    CHIP = "530"
    FLOWS = 850
    LIBRARY_KIT_NAME = "Ion AmpliSeq Library Kit Plus"
    LIBRARY_READ_LENGTH = 400
    REFERENCE = ""
    SAMPLE_GROUPING = "Self"
    SEQ_KIT_NAME = "Ion S5 Sequencing Kit"
    TEMPLATE_KIT_NAME = "Ion Chef S530 V2"
    PLAN_STATUS = "planned"
    SAMPLE_PREP_PROTOCOL = "pcr400bp"
    templateParams = TemplateParams(
        "Ion AmpliSeq Immune Repertoire Assay Plus - TCRB for S5", S5, "AMPS_RNA"
    )
    templateParams.update(
        {
            "applicationGroup": APPLICATION_GROUP,
            "barcodeKitName": BARCODE_KIT_NAME,
            "categories": CATEGORIES,
            "chipType": CHIP,
            "flows": FLOWS,
            "libraryKitName": LIBRARY_KIT_NAME,
            "libraryReadLength": LIBRARY_READ_LENGTH,
            "reference": REFERENCE,
            "sampleGrouping": SAMPLE_GROUPING,
            "sequencekitname": SEQ_KIT_NAME,
            "templatingKitName": TEMPLATE_KIT_NAME,
            "samplePrepProtocol": SAMPLE_PREP_PROTOCOL,
            "planStatus": PLAN_STATUS,
        }
    )
    # BUGFIX: the original built these params but never persisted them --
    # the next TemplateParams assignment overwrote templateParams before
    # add_or_update_sys_template() was called, silently skipping the TCRB
    # template. Every sibling function persists each params object.
    sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
    finish_sys_template(sysTemplate, isCreated, templateParams)
    templateParams = TemplateParams("Oncomine TCR Beta-LR for S5", S5, "AMPS_RNA")
    templateParams.update(
        {
            "applicationGroup": APPLICATION_GROUP,
            "barcodeKitName": BARCODE_KIT_NAME_DUAL,
            "categories": CATEGORIES,
            "chipType": CHIP,
            "flows": FLOWS,
            "libraryKitName": LIBRARY_KIT_NAME,
            "libraryReadLength": LIBRARY_READ_LENGTH,
            "reference": REFERENCE,
            "sampleGrouping": SAMPLE_GROUPING,
            "sequencekitname": SEQ_KIT_NAME,
            "templatingKitName": TEMPLATE_KIT_NAME,
            "samplePrepProtocol": SAMPLE_PREP_PROTOCOL,
            "planStatus": PLAN_STATUS,
        }
    )
    sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
    finish_sys_template(sysTemplate, isCreated, templateParams)
def add_or_update_immune_repertoire_long_igh_s5_system_templates():
    """Create/refresh the S5 long-amplicon IGH immune-repertoire template."""
    templateParams = TemplateParams("Oncomine IGH-LR for S5", S5, "AMPS_RNA")
    templateParams.update(
        {
            "applicationGroup": "immune_repertoire",
            "barcodeKitName": "Ion Dual Barcode Kit 1-96",
            "categories": "onco_immune;immunology",
            "chipType": "530",
            "flows": 1100,
            "libraryKitName": "Ion AmpliSeq Library Kit Plus",
            "libraryReadLength": 400,
            "reference": "",
            "sampleGrouping": "Self",
            "sequencekitname": "Ion S5 Sequencing Kit",
            "templatingKitName": "Ion Chef S530 V2",
            "samplePrepProtocol": "pcr400bp",
            "planStatus": "planned",
        }
    )
    sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
    finish_sys_template(sysTemplate, isCreated, templateParams)
def add_or_update_immune_repertoire_pgm_system_templates():
    """Create/refresh the PGM immune-repertoire template (kept inactive)."""
    # 83
    templateParams = TemplateParams(
        "Ion AmpliSeq Immune Repertoire Assay Plus - TCRB for PGM", PGM, "AMPS_RNA"
    )
    templateParams.update(
        {
            "applicationGroup": "immune_repertoire",
            "barcodeKitName": "Ion Select BC Set-1",
            "categories": "onco_immune;immunology",
            "chipType": "318",
            "flows": 800,
            "libraryKitName": "Ion AmpliSeq Library Kit Plus",
            "libraryReadLength": 400,
            "reference": "",
            "sampleGrouping": "Self",
            "sequencekitname": "IonPGMHiQView",
            "templatingKitName": "Ion PGM Hi-Q View Chef Kit",
            "planStatus": "inactive",
        }
    )
    sysTemplate, isCreated, isUpdated = add_or_update_sys_template(templateParams)
    finish_sys_template(sysTemplate, isCreated, templateParams)
def add_or_update_tagseq_cfdna_550_system_templates():
    """Create or refresh both TagSeq cfDNA 550 templates (tumor and liquid biopsy)."""
    liquid_biopsy_plugins, tumor_plugins = create_tagseq_plugins()
    # pan cancer on 550; templates 84 and 85 share everything but name/plugins.
    for template_name, plugins in (
        ("Oncomine TagSeq S550 Tumor", tumor_plugins),
        ("Oncomine TagSeq S550 Liquid Biopsy", liquid_biopsy_plugins),
    ):
        sys_template, is_created, _is_updated, template_params = (
            add_or_update_tagseq_cfdna_s5_550_chef_system_template(template_name)
        )
        finish_sys_template(sys_template, is_created, template_params, plugins)
def add_or_update_tagseq_cfdna_s5_550_chef_system_template(templateName):
    """Build one TagSeq cfDNA S5/550 Chef system template and upsert it.

    :param templateName: Display name for the system template.
    :return: Tuple of (sysTemplate, isCreated, isUpdated, templateParams).
    """
    template_params = TemplateParams(templateName, S5, "TAG_SEQUENCING")
    template_params.update(
        {
            "applicationGroup": "onco_liquidBiopsy",
            "barcodeKitName": "TagSequencing",
            "categories": "Oncomine;onco_liquidBiopsy;barcodes_8",
            "chipType": "550",
            "flows": 500,
            "libraryKitName": "Oncomine cfDNA Assay",
            "libraryReadLength": 200,
            "reference": "hg19",
            "sampleGrouping": "Self",
            "sequencekitname": "Ion S5 Sequencing Kit",
            "templatingKitName": "Ion Chef S550 V1",
            "planStatus": "planned",
        }
    )
    sys_template, is_created, is_updated = add_or_update_sys_template(template_params)
    return sys_template, is_created, is_updated, template_params
def add_or_update_immune_repertoire_short_s5_system_templates():
    """Create or refresh the Oncomine TCR Beta-SR S5 system template."""
    # 86
    template_params = TemplateParams("Oncomine TCR Beta-SR for S5", S5, "MIXED")
    template_params.update(
        {
            "applicationGroup": "immune_repertoire",
            "barcodeKitName": "Ion Dual Barcode Kit 1-96",
            "categories": "onco_immune;immunology",
            "chipType": "540",
            "endBarcodeKitName": "",  # TODO: replace with the real barcode kit
            "flows": 500,
            "libraryKitName": "Ion AmpliSeq Library Kit Plus",
            "libraryReadLength": 200,
            "reference": "",
            "sampleGrouping": "Self",
            "sequencekitname": "Ion S5 Sequencing Kit",
            "templatingKitName": "Ion Chef S540 V1",
            "planStatus": "planned",
        }
    )
    sys_template, is_created, _is_updated = add_or_update_sys_template(template_params)
    finish_sys_template(sys_template, is_created, template_params)
def add_or_update_immune_repertoire_short_igh_mouse_s5_system_templates():
    """Create or refresh the short-read IGH and mouse immune-repertoire S5 templates."""
    # All three templates below are created with identical settings.
    shared_settings = {
        "applicationGroup": "immune_repertoire",
        "barcodeKitName": "Ion Dual Barcode Kit 1-96",
        "categories": "onco_immune;immunology",
        "chipType": "540",
        "endBarcodeKitName": "",  # TODO: replace with the real barcode kit
        "flows": 500,
        "libraryKitName": "Ion AmpliSeq Library Kit Plus",
        "libraryReadLength": 200,
        "reference": "",
        "sampleGrouping": "Self",
        "sequencekitname": "Ion S5 Sequencing Kit",
        "templatingKitName": "Ion Chef S540 V1",
        "planStatus": "planned",
    }
    for template_name in (
        "Oncomine IGH-SR for S5",
        "Ion Ampliseq Mouse BCR IGH-SR for S5",
        "Ion Ampliseq Mouse TCRB-SR for S5",
    ):
        template_params = TemplateParams(template_name, S5, "MIXED")
        # Pass a fresh copy so a mutating update() cannot leak state between templates.
        template_params.update(dict(shared_settings))
        sys_template, is_created, _is_updated = add_or_update_sys_template(
            template_params
        )
        finish_sys_template(sys_template, is_created, template_params)
def add_or_update_immune_repertoire_short_pgm_system_templates():
    """Create or refresh the short-read immune-repertoire (TCRB) PGM template."""
    # 87 -- ships inactive on PGM.
    template_params = TemplateParams(
        "Ion AmpliSeq Immune Repertoire Assay - TCRB for PGM", PGM, "MIXED"
    )
    template_params.update(
        {
            "applicationGroup": "immune_repertoire",
            "barcodeKitName": "IonXpress",
            "categories": "onco_immune;immunology",
            "chipType": "318",
            "endBarcodeKitName": "",  # TODO: replace with the real barcode kit
            "flows": 500,
            "libraryKitName": "Ion AmpliSeq Library Kit Plus",
            "libraryReadLength": 200,
            "reference": "",
            "sampleGrouping": "Self",
            "sequencekitname": "IonPGMHiQView",
            "templatingKitName": "Ion PGM Hi-Q View Chef Kit",
            "planStatus": "inactive",
        }
    )
    sys_template, is_created, _is_updated = add_or_update_sys_template(template_params)
    finish_sys_template(sys_template, is_created, template_params)
def add_or_update_oncomine_ocav3_550_system_templates():
# OCAv3 550
OCAV3_BARCODE_KIT_NAME = "IonXpress"
OCAV3_CATEGORIES = "Oncomine;onco_solidTumor;oca_s5"
OCAV3_CATEGORIES_2 = "Oncomine;barcodes_16;onco_solidTumor;oca_s5"
OCAV3_CATEGORIES_FUSION = "Oncomine;onco_solidTumor"
OCAV3_CHIP_NAME = "550"
OCAV3_FLOWS = 400
OCAV3_FUSION_LIB_KIT_NAME = "Ion AmpliSeq Library Kit Plus"
OCAV3_LIB_KIT_NAME = "Ion AmpliSeq Library Kit Plus"
OCAV3_LIBRARY_READ_LENGTH = 200
OCAV3_REFERENCE = "hg19"
OCAV3_SEQ_KIT_NAME = "Ion S5 Sequencing Kit"
OCAV3_TEMPLATE_KIT_NAME = "Ion Chef S550 V1"
OCAV3_STATUS = "planned"
# pre-select plugins
plugins | |
current_domain["AdvancedOptions"][
"rest.action.multi.allow_explicit_index"
]
if not current_domain["AdvancedOptions"]:
del current_domain["AdvancedOptions"]
# Compare current configuration with provided configuration
config_diff = salt.utils.data.recursive_diff(current_domain, target_conf)
if config_diff:
action = "update"
# Compare ElasticsearchVersion separately, as the update procedure differs.
if elasticsearch_version and current_domain_version != elasticsearch_version:
action = "upgrade"
if action in ["create", "update"]:
if __opts__["test"]:
ret["result"] = None
ret["comment"].append(
'The Elasticsearch Domain "{}" would have been {}d.'
"".format(name, action)
)
ret["changes"] = config_diff
else:
boto_kwargs = salt.utils.data.filter_falsey(
{
"elasticsearch_version": elasticsearch_version,
"elasticsearch_cluster_config": elasticsearch_cluster_config,
"ebs_options": ebs_options,
"vpc_options": vpc_options,
"access_policies": access_policies,
"snapshot_options": snapshot_options,
"cognito_options": cognito_options,
"encryption_at_rest_options": encryption_at_rest_options,
"node_to_node_encryption_options": node_to_node_encryption_options,
"advanced_options": advanced_options,
"log_publishing_options": log_publishing_options,
"blocking": blocking,
"region": region,
"keyid": keyid,
"key": key,
"profile": profile,
}
)
if action == "update":
# Drop certain kwargs that do not apply to updates.
for item in [
"elasticsearch_version",
"encryption_at_rest_options",
"node_to_node_encryption_options",
]:
if item in boto_kwargs:
del boto_kwargs[item]
res = __salt__[
"boto3_elasticsearch.{}_elasticsearch_domain{}"
"".format(action, "_config" if action == "update" else "")
](name, **boto_kwargs)
if "error" in res:
ret["result"] = False
ret["comment"].append(res["error"])
else:
ret["result"] = True
ret["comment"].append(
'Elasticsearch Domain "{}" has been {}d.'.format(name, action)
)
ret["changes"] = config_diff
elif action == "upgrade":
res = upgraded(
name,
elasticsearch_version,
region=region,
keyid=keyid,
key=key,
profile=profile,
)
ret["result"] = res["result"]
ret["comment"].extend(res["comment"])
if res["changes"]:
salt.utils.dictupdate.set_dict_key_value(
ret, "changes:old:version", res["changes"]["old"]
)
salt.utils.dictupdate.set_dict_key_value(
ret, "changes:new:version", res["changes"]["new"]
)
if tags is not None:
res = tagged(
name,
tags=tags,
replace=True,
region=region,
keyid=keyid,
key=key,
profile=profile,
)
ret["result"] = res["result"]
ret["comment"].extend(res["comment"])
if "old" in res["changes"]:
salt.utils.dictupdate.update_dict_key_value(
ret, "changes:old:tags", res["changes"]["old"]
)
if "new" in res["changes"]:
salt.utils.dictupdate.update_dict_key_value(
ret, "changes:new:tags", res["changes"]["new"]
)
ret = _check_return_value(ret)
return ret
def absent(name, blocking=True, region=None, keyid=None, key=None, profile=None):
    """
    Ensure the Elasticsearch Domain specified does not exist.

    :param str name: The name of the Elasticsearch domain to be made absent.
    :param bool blocking: Whether or not the state should wait for the deletion
        to be completed. Default: ``True``

    .. versionadded:: Natrium

    Example:

    .. code-block:: yaml

        Remove Elasticsearch Domain:
          boto3_elasticsearch.absent:
            - name: my_domain
            - region: eu-west-1
    """
    ret = {"name": name, "result": "oops", "comment": [], "changes": {}}
    res = __salt__["boto3_elasticsearch.exists"](
        name, region=region, keyid=keyid, key=key, profile=profile
    )
    if "error" in res:
        ret["result"] = False
        ret["comment"].append(res["error"])
    elif not res["result"]:
        # Domain is already gone; nothing to do.
        ret["result"] = True
        ret["comment"].append(
            'Elasticsearch domain "{}" is already absent.'.format(name)
        )
    elif __opts__["test"]:
        # Dry-run: report what would happen without deleting anything.
        ret["result"] = None
        ret["comment"].append(
            'Elasticsearch domain "{}" would have been removed.'.format(name)
        )
        ret["changes"] = {"old": name, "new": None}
    else:
        res = __salt__["boto3_elasticsearch.delete_elasticsearch_domain"](
            domain_name=name,
            blocking=blocking,
            region=region,
            keyid=keyid,
            key=key,
            profile=profile,
        )
        if "error" in res:
            ret["result"] = False
            ret["comment"].append(
                'Error deleting Elasticsearch domain "{}": {}'.format(
                    name, res["error"]
                )
            )
        else:
            ret["result"] = True
            ret["comment"].append(
                'Elasticsearch domain "{}" has been deleted.'.format(name)
            )
            ret["changes"] = {"old": name, "new": None}
    return _check_return_value(ret)
def upgraded(
    name,
    elasticsearch_version,
    blocking=True,
    region=None,
    keyid=None,
    key=None,
    profile=None,
):
    """
    Ensures the Elasticsearch domain specified runs on the specified version of
    elasticsearch. Only upgrades are possible as downgrades require a manual snapshot
    and an S3 bucket to store them in.

    Note that this operation is blocking until the upgrade is complete.

    :param str name: The name of the Elasticsearch domain to upgrade.
    :param str elasticsearch_version: String of format X.Y to specify version for
        the Elasticsearch domain eg. "1.5" or "2.3".
    :param bool blocking: Whether to wait for an in-progress (or newly started)
        upgrade to complete. Default: ``True``

    .. versionadded:: Natrium

    Example:

    .. code-block:: yaml

        Upgrade Elasticsearch Domain:
          boto3_elasticsearch.upgraded:
            - name: my_domain
            - elasticsearch_version: '7.2'
            - region: eu-west-1
    """
    # "oops" is a sentinel: a bool (or None in test mode) means a final verdict
    # has been reached and the intermediate isinstance() checks below short-circuit.
    ret = {"name": name, "result": "oops", "comment": [], "changes": {}}
    current_domain = None
    # Step 1: fetch the domain's current configuration; bail if it does not exist.
    res = __salt__["boto3_elasticsearch.describe_elasticsearch_domain"](
        name, region=region, keyid=keyid, key=key, profile=profile
    )
    if not res["result"]:
        ret["result"] = False
        if "ResourceNotFoundException" in res["error"]:
            ret["comment"].append(
                'The Elasticsearch domain "{}" does not exist.'.format(name)
            )
        else:
            ret["comment"].append(res["error"])
    else:
        current_domain = res["response"]
        current_version = current_domain["ElasticsearchVersion"]
        if elasticsearch_version and current_version == elasticsearch_version:
            # Already at the requested version; nothing to do.
            ret["result"] = True
            ret["comment"].append(
                'The Elasticsearch domain "{}" is already '
                "at the desired version {}"
                "".format(name, elasticsearch_version)
            )
        elif LooseVersion(elasticsearch_version) < LooseVersion(current_version):
            # AWS does not support in-place downgrades.
            ret["result"] = False
            ret["comment"].append(
                'Elasticsearch domain "{}" cannot be downgraded '
                'to version "{}".'.format(name, elasticsearch_version)
            )
    if isinstance(ret["result"], bool):
        return ret
    log.debug("%s :upgraded: Check upgrade in progress", __name__)
    # Step 2: check if an upgrade is already in progress.
    res = __salt__["boto3_elasticsearch.get_upgrade_status"](
        name, region=region, keyid=keyid, key=key, profile=profile
    )
    if "error" in res:
        ret["result"] = False
        ret["comment"].append(
            "Error determining current upgrade status "
            'of domain "{}": {}'.format(name, res["error"])
        )
        return ret
    if res["response"].get("StepStatus") == "IN_PROGRESS":
        if blocking:
            # An upgrade is already in progress, wait for it to complete.
            res2 = __salt__["boto3_elasticsearch.wait_for_upgrade"](
                name, region=region, keyid=keyid, key=key, profile=profile
            )
            if "error" in res2:
                ret["result"] = False
                ret["comment"].append(
                    "Error waiting for upgrade of domain "
                    '"{}" to complete: {}'.format(name, res2["error"])
                )
            elif (
                res2["response"].get("UpgradeName", "").endswith(elasticsearch_version)
            ):
                # The in-flight upgrade targeted the version we wanted anyway.
                ret["result"] = True
                ret["comment"].append(
                    'Elasticsearch Domain "{}" is '
                    'already at version "{}".'.format(name, elasticsearch_version)
                )
        else:
            # We are not going to wait for it to complete, so bail.
            ret["result"] = True
            ret["comment"].append(
                'An upgrade of Elasticsearch domain "{}" '
                "is already underway: {}"
                "".format(name, res["response"].get("UpgradeName"))
            )
    if isinstance(ret["result"], bool):
        return ret
    log.debug("%s :upgraded: Check upgrade eligibility", __name__)
    # Step 3: check if the domain is eligible for an upgrade to the target version.
    res = __salt__["boto3_elasticsearch.check_upgrade_eligibility"](
        name,
        elasticsearch_version,
        region=region,
        keyid=keyid,
        key=key,
        profile=profile,
    )
    if "error" in res:
        ret["result"] = False
        ret["comment"].append(
            "Error checking upgrade eligibility for "
            'domain "{}": {}'.format(name, res["error"])
        )
    elif not res["response"]:
        ret["result"] = False
        ret["comment"].append(
            'The Elasticsearch Domain "{}" is not eligible to '
            "be upgraded to version {}."
            "".format(name, elasticsearch_version)
        )
    else:
        log.debug("%s :upgraded: Start the upgrade", __name__)
        # Step 4: start the upgrade (or report it in test mode).
        if __opts__["test"]:
            ret["result"] = None
            # Bugfix: the domain name was previously never interpolated, leaving
            # a literal "{}" placeholder in the returned comment.
            ret["comment"].append(
                'The Elasticsearch version for domain "{}" would have been upgraded.'
                "".format(name)
            )
            ret["changes"] = {
                "old": current_domain["ElasticsearchVersion"],
                "new": elasticsearch_version,
            }
        else:
            res = __salt__["boto3_elasticsearch.upgrade_elasticsearch_domain"](
                name,
                elasticsearch_version,
                blocking=blocking,
                region=region,
                keyid=keyid,
                key=key,
                profile=profile,
            )
            if "error" in res:
                ret["result"] = False
                ret["comment"].append(
                    'Error upgrading Elasticsearch domain "{}": {}'.format(
                        name, res["error"]
                    )
                )
            else:
                ret["result"] = True
                ret["comment"].append(
                    'The Elasticsearch domain "{}" has been '
                    "upgraded to version {}."
                    "".format(name, elasticsearch_version)
                )
                ret["changes"] = {
                    "old": current_domain["ElasticsearchVersion"],
                    "new": elasticsearch_version,
                }
    ret = _check_return_value(ret)
    return ret
def latest(name, minor_only=True, region=None, keyid=None, key=None, profile=None):
"""
Ensures the Elasticsearch domain specifies runs on the latest compatible
version of elasticsearch, upgrading it if it is not.
Note that this operation is blocking until the upgrade is complete.
:param str name: The name of the Elasticsearch domain to upgrade.
:param bool minor_only: Only upgrade to the latest minor version.
.. versionadded:: Natrium
Example:
The following example will ensure the elasticsearch domain ``my_domain`` is
upgraded to the latest minor version. So if it is currently 5.1 it will be
upgraded to 5.6.
.. code-block:: yaml
Upgrade Elasticsearch Domain:
boto3_elasticsearch.latest:
- name: my_domain
- minor_only: True
- region: eu-west-1
"""
ret = {"name": name, "result": "oops", "comment": [], "changes": {}}
# Get current version
res = __salt__["boto3_elasticsearch.describe_elasticsearch_domain"](
domain_name=name, region=region, keyid=keyid, key=key, profile=profile
)
if "error" in res:
ret["result"] = False
ret["comment"].append(
'Error getting information of Elasticsearch domain "{}": {}'
"".format(name, res["error"])
)
else:
current_version = res["response"]["ElasticsearchVersion"]
# Get latest compatible version
latest_version = None
res = __salt__["boto3_elasticsearch.get_compatible_elasticsearch_versions"](
domain_name=name, region=region, keyid=keyid, key=key, profile=profile
)
if "error" in res:
ret["result"] = False
ret["comment"].append(
"Error getting compatible Elasticsearch versions "
'for Elasticsearch domain "{}": {}'
"".format(name, res["error"])
)
if isinstance(ret["result"], bool):
return ret
try:
latest_version = res["response"][0]["TargetVersions"].pop(-1)
except IndexError:
pass
if not current_version:
ret["result"] = True
ret["comment"].append(
'The Elasticsearch domain "{}" can not be upgraded.' "".format(name)
)
elif not latest_version:
ret["result"] = True
ret["comment"].append(
'The Elasticsearch domain "{}" is already at '
'the lastest version "{}".'
"".format(name, current_version)
)
else:
a_current_version = current_version.split(".")
a_latest_version = latest_version.split(".")
if not (minor_only and a_current_version[0] != a_latest_version[0]):
if __opts__["test"]:
ret["result"] = None
ret["comment"].append(
'Elasticsearch domain "{}" would have been updated '
'to version "{}".'.format(name, latest_version)
)
ret["changes"] = {"old": current_version, "new": latest_version}
else:
ret = upgraded(
name,
latest_version,
region=region,
keyid=keyid,
key=key,
profile=profile,
)
else:
ret["result"] = True
ret["comment"].append(
'Elasticsearch domain "{}" is already at its '
"latest minor version {}."
"".format(name, current_version)
)
ret = _check_return_value(ret)
if ret["result"] and ret["changes"] and not minor_only:
# Try and see if we can upgrade again
res = | |
<gh_stars>1-10
from pathlib import Path
import numpy as np
from struct import unpack
import xarray as xr
from datetime import datetime
from glob import glob
def yyyymmddhhmmss_to_datetime(yyyymmdd: int, hhmmss: int) -> datetime:
    """Convert a packed-integer date/time pair (YYYYMMDD, HHMMSS) into a datetime."""
    year, month_day = divmod(yyyymmdd, 10000)
    month, day = divmod(month_day, 100)
    hour, min_sec = divmod(hhmmss, 10000)
    minute, second = divmod(min_sec, 100)
    return datetime(year, month, day, hour, minute, second)
def l2_v5_0_binary_to_dataset(file) -> xr.Dataset:
"""
Read the Level 2 Solar Event Species Profiles for a version 5 SAGE III or SAGE III ISS binary file.
https://eosweb.larc.nasa.gov/sites/default/files/project/sage3/guide/Data_Product_User_Guide.pdf
"""
# Read all the data into memory
with open(file, 'rb') as f:
# Read the File Header
(event_id, yyyyddd, instrument_time, fill_value_int, fill_value_float, mission_id) = \
unpack('>iififi', f.read(6 * 4))
# Read the Version Tracking data
(L0DO_ver, L0_ver, software_ver, dataproduct_ver, spectroscopy_ver, gram95_ver, met_ver) = \
unpack('>fffffff', f.read(7 * 4))
# Read the File Description
(altitude_spacing, num_bins, num_aer_wavelengths, num_ground_tracks, num_aer_bins) = \
unpack('>fiiii', f.read(5 * 4))
# Read the Event Type data
(event_type_spacecraft, event_type_earth, beta_angle, event_status_flags) = unpack('>iifi', f.read(4 * 4))
# Read Data Capture Start Information
(start_date, start_time, start_latitude, start_longitude, start_altitude) = unpack('>iifff', f.read(5 * 4))
# Read Data Capture End Information
(end_date, end_time, end_latitude, end_longitude, end_altitude) = unpack('>iifff', f.read(5 * 4))
# Read Ground Track Information
gt_date = np.array(unpack('>' + 'i' * num_ground_tracks, f.read(num_ground_tracks * 4)), dtype=np.int32)
gt_time = np.array(unpack('>' + 'i' * num_ground_tracks, f.read(num_ground_tracks * 4)), dtype=np.int32)
gt_latitude = np.array(unpack('>' + 'f' * num_ground_tracks, f.read(num_ground_tracks * 4)), dtype=np.float32)
gt_longitude = np.array(unpack('>' + 'f' * num_ground_tracks, f.read(num_ground_tracks * 4)), dtype=np.float32)
gt_ray_dir = np.array(unpack('>' + 'f' * num_ground_tracks, f.read(num_ground_tracks * 4)), dtype=np.float32)
# Read Profile Altitude Levels data
homogeneity = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
altitude = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
potential_altitude = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
# Read the Input Temp/Pres for Retrievals
input_temperature = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
input_temperature_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
input_pressure = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
input_pressure_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
input_tp_source_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read the Derived Tropopause data
(temperature_tropopause, altitude_tropopause) = unpack('>ff', f.read(2 * 4))
# Read the Composite Ozone data
o3_composite = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_composite_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_composite_slant_path = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_composite_slant_path_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_composite_qa_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read the Mesospheric Ozone data
o3_mesospheric = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_mesospheric_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_mesospheric_slant_path = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_mesospheric_slant_path_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_mesospheric_qa_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read the MLR Ozone data
o3_mlr = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_mlr_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_mlr_slant_path = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_mlr_slant_path_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_mlr_qa_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read the Ozone Least Squares data
o3 = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_slant_path = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_slant_path_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
o3_qa_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read Water Vapor data
water_vapor = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
water_vapor_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
water_vapor_qa_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read the NO2 data
no2 = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
no2_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
no2_slant_path = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
no2_slant_path_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
no2_qa_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read the Retrieved T/P data
temperature = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
temperature_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
pressure = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
pressure_error = np.array(unpack('>' + 'f' * num_bins, f.read(num_bins * 4)), dtype=np.float32)
tp_qa_flags = np.array(unpack('>' + 'i' * num_bins, f.read(num_bins * 4)), dtype=np.int32)
# Read the Aerosol Information
aerosol_wavelengths = np.array(unpack('>' + 'f' * num_aer_wavelengths,
f.read(num_aer_wavelengths * 4)), dtype=np.float32)
aerosol_half_bandwidths = np.array(unpack('>' + 'f' * num_aer_wavelengths,
f.read(num_aer_wavelengths * 4)), dtype=np.float32)
stratospheric_optical_depth = np.array(unpack('>' + 'f' * num_aer_wavelengths,
f.read(num_aer_wavelengths * 4)), dtype=np.float32)
stratospheric_optical_depth_error = np.array(unpack('>' + 'f' * num_aer_wavelengths,
f.read(num_aer_wavelengths * 4)), dtype=np.float32)
stratospheric_optical_depth_qa_flags = np.array(unpack('>' + 'i' * num_aer_wavelengths,
f.read(num_aer_wavelengths * 4)), dtype=np.int32)
# Read the Aerosol Extinction data
aerosol_extinction = np.ndarray((num_aer_wavelengths, num_aer_bins), dtype=np.float32)
aerosol_extinction_error = np.ndarray((num_aer_wavelengths, num_aer_bins), dtype=np.float32)
aerosol_extinction_qa_flags = np.ndarray((num_aer_wavelengths, num_aer_bins), dtype=np.int32)
for i in range(num_aer_wavelengths):
aerosol_extinction[i] = np.array(unpack('>' + 'f' * num_aer_bins,
f.read(num_aer_bins * 4)))
aerosol_extinction_error[i] = np.array(unpack('>' + 'f' * num_aer_bins,
f.read(num_aer_bins * 4)))
aerosol_extinction_qa_flags[i] = np.array(unpack('>' + 'i' * num_aer_bins,
f.read(num_aer_bins * 4)))
# Read the Aerosol Extinction Ratio data
aerosol_spectral_dependence_flag = np.array(unpack('>' + 'i' * num_aer_bins,
f.read(num_aer_bins * 4)), dtype=np.int32)
extinction_ratio = np.array(unpack('>' + 'f' * num_aer_bins,
f.read(num_aer_bins * 4)), dtype=np.float32)
extinction_ratio_error = np.array(unpack('>' + 'f' * num_aer_bins,
f.read(num_aer_bins * 4)), dtype=np.float32)
extinction_ratio_qa_flags = np.array(unpack('>' + 'i' * num_aer_bins,
f.read(num_aer_bins * 4)), dtype=np.int32)
# Convert date and time pairs to a single datetime.
start_datetime = yyyymmddhhmmss_to_datetime(start_date, start_time)
end_datetime = yyyymmddhhmmss_to_datetime(end_date, end_time)
gt_datetime = [yyyymmddhhmmss_to_datetime(date, time) if date != fill_value_int else np.datetime64('NaT')
for (date, time) in zip(gt_date, gt_time)]
# Return the data as an xarray dataset
ds = xr.Dataset(
{
'yyyyddd': np.int32(yyyyddd),
'mission_time': np.float32(instrument_time),
'event_type_spacecraft': np.int32(event_type_spacecraft),
'event_type_earth': np.int32(event_type_earth),
'beta_angle': np.float32(beta_angle),
'event_status_flags': np.int32(event_status_flags),
'start_time': start_datetime,
'start_latitude': np.float32(start_latitude),
'start_longitude': np.float32(start_longitude),
'start_altitude': np.float32(start_altitude),
'end_time': end_datetime,
'end_latitude': np.float32(end_latitude),
'end_longitude': np.float32(end_longitude),
'end_altitude': np.float32(end_altitude),
'gt_time': (['num_ground_tracks'], gt_datetime),
'gt_latitude': (['num_ground_tracks'], gt_latitude),
'gt_longitude': (['num_ground_tracks'], gt_longitude),
'gt_ray_dir': (['num_ground_tracks'], gt_ray_dir),
'homogeneity': (['altitude'], np.int32(homogeneity)),
'potential_alt': (['altitude'], potential_altitude),
'input_temperature': (['altitude'], input_temperature),
'input_temperature_error': (['altitude'], input_temperature_error),
'input_pressure': (['altitude'], input_pressure),
'input_pressure_error': (['altitude'], input_pressure_error),
'input_tp_source_flags': (['altitude'], input_tp_source_flags),
'temperature_tropopause': np.float32(temperature_tropopause),
'altitude_tropopause': np.float32(altitude_tropopause),
'o3_composite': (['altitude'], o3_composite),
'o3_composite_error': (['altitude'], o3_composite_error),
'o3_composite_slant_path': (['altitude'], o3_composite_slant_path),
'o3_composite_slant_path_error': (['altitude'], o3_composite_slant_path_error),
'o3_composite_qa_flags': (['altitude'], o3_composite_qa_flags),
'o3_mesospheric': (['altitude'], o3_mesospheric),
'o3_mesospheric_error': (['altitude'], o3_mesospheric_error),
'o3_mesospheric_slant_path': (['altitude'], o3_mesospheric_slant_path),
'o3_mesospheric_slant_path_error': (['altitude'], o3_mesospheric_slant_path_error),
'o3_mesospheric_qa_flags': (['altitude'], o3_mesospheric_qa_flags),
'o3_mlr': (['altitude'], o3_mlr),
'o3_mlr_error': (['altitude'], o3_mlr_error),
'o3_mlr_slant_path': (['altitude'], o3_mlr_slant_path),
'o3_mlr_slant_path_error': (['altitude'], o3_mlr_slant_path_error),
'o3_mlr_qa_flags': (['altitude'], o3_mlr_qa_flags),
'o3': (['altitude'], o3),
'o3_error': (['altitude'], o3_error),
'o3_slant_path': (['altitude'], o3_slant_path),
'o3_slant_path_error': (['altitude'], o3_slant_path_error),
'o3_qa_flags': (['altitude'], o3_qa_flags),
'water_vapor': (['altitude'], water_vapor),
'water_vapor_error': (['altitude'], water_vapor_error),
'water_vapor_qa_flags': (['altitude'], water_vapor_qa_flags),
'no2': (['altitude'], no2),
'no2_error': (['altitude'], no2_error),
'no2_slant_path': (['altitude'], no2_slant_path),
'no2_slant_path_error': (['altitude'], no2_slant_path_error),
'no2_qa_flags': (['altitude'], no2_qa_flags),
'temperature': (['altitude'], temperature),
'temperature_error': (['altitude'], temperature_error),
'pressure': (['altitude'], pressure),
'pressure_error': (['altitude'], pressure_error),
'tp_qa_flags': (['altitude'], tp_qa_flags),
'Half-Bandwidths of Aerosol Channels': (['Aerosol_wavelengths'], aerosol_half_bandwidths),
'stratospheric_optical_depth': (['Aerosol_wavelengths'], stratospheric_optical_depth),
'stratospheric_optical_depth_error': (['Aerosol_wavelengths'], stratospheric_optical_depth_error),
'stratospheric_optical_depth_qa_flags': (['Aerosol_wavelengths'], stratospheric_optical_depth_qa_flags),
'aerosol_extinction': (['Aerosol_wavelengths', 'Aerosol_altitude'], aerosol_extinction),
'aerosol_extinction_error': (['Aerosol_wavelengths', 'Aerosol_altitude'], aerosol_extinction_error),
'aerosol_extinction_qa_flags': (
['Aerosol_wavelengths', 'Aerosol_altitude'], aerosol_extinction_qa_flags),
'aerosol_spectral_dependence_flag': (['Aerosol_altitude'], aerosol_spectral_dependence_flag),
'extinction_ratio': (['Aerosol_altitude'], extinction_ratio),
'extinction_ratio_error': (['Aerosol_altitude'], extinction_ratio_error),
'extinction_ratio_qa_flags': (['Aerosol_altitude'], extinction_ratio_qa_flags)
},
coords={
'event_id': np.int32(event_id),
'altitude': altitude,
'Aerosol_wavelengths': aerosol_wavelengths,
'Aerosol_altitude': altitude[:num_aer_bins]
},
attrs={
'Mission Identification': mission_id,
'Version: Definitive Orbit Processing': np.float32(L0DO_ver),
'Version: Level 0 Processing': np.float32(L0_ver),
'Version: Software Processing': np.float32(software_ver),
'Version: Data Product': np.float32(dataproduct_ver),
'Version: Spectroscopy': np.float32(spectroscopy_ver),
'Version: GRAM 95': np.float32(gram95_ver),
'Version: Meteorological': np.float32(met_ver),
'Altitude-Based Grid Spacing': altitude_spacing,
'_FillValue': fill_value_int
})
# Assert dimension lengths are correct
assert (len(ds.num_ground_tracks) == num_ground_tracks)
assert (len(ds.altitude) == num_bins)
assert (len(ds.Aerosol_wavelengths) == num_aer_wavelengths)
for var in ds.variables:
if np.issubdtype(ds[var].dtype, np.floating) and ds[var].size > 1:
ds[var] = ds[var].where(ds[var] != fill_value_float)
ds | |
#!/usr/bin/env python3
"""This module is used to operate with archives."""
import os # filesystem read
import tarfile # txz/tbz/tgz/tar compression
import zipfile # zip compresssion
from bbarchivist import barutils # zip tester
from bbarchivist import bbconstants # premade stuff
from bbarchivist import decorators # timer
from bbarchivist import iniconfig # config parsing
from bbarchivist import sevenziputils # 7z
from bbarchivist import utilities # platform determination
__author__ = "Thurask"
__license__ = "WTFPL v2"
__copyright__ = "2015-2019 Thurask"
def smart_is_tarfile(filepath):
    """
    :func:`tarfile.is_tarfile` plus error handling.

    Swallows filesystem errors (missing file, permission denied) and reports
    such paths as "not a tarfile" instead of raising.

    :param filepath: Filename.
    :type filepath: str
    """
    try:
        return tarfile.is_tarfile(filepath)
    except OSError:  # IOError is an alias of OSError on Python 3
        return False
def generic_tarfile_verify(filepath, method):
    """
    Verify that a tar/tgz/tbz/txz file is valid and working.

    A file passes only if it is recognized as a tarfile AND contains at least
    one member.

    :param filepath: Filename.
    :type filepath: str

    :param method: Tarfile read method.
    :type method: str
    """
    if not smart_is_tarfile(filepath):
        return False
    with tarfile.open(filepath, method) as thefile:
        # Empty archives are treated as broken.
        return bool(thefile.getmembers())
def generic_tarfile_compress(archivename, filename, method, strength=5):
    """
    Pack a file into an uncompressed/gzip/bzip2/LZMA tarfile.

    Dispatches to the compresslevel-aware or compresslevel-free helper,
    since plain tar and tar.xz writers reject the compresslevel argument.

    :param archivename: Archive name.
    :type archivename: str

    :param filename: Name of file to pack into archive.
    :type filename: str

    :param method: Tarfile compress method.
    :type method: str

    :param strength: Compression strength. 5 is normal, 9 is ultra.
    :type strength: int
    """
    if method in ("w:", "w:xz"):  # methods w/o compresslevel: tar, tar.xz
        generic_nocompresslevel(archivename, filename, method)
    else:
        generic_compresslevel(archivename, filename, method, strength)
def generic_compresslevel(archivename, filename, method, strength=5):
    """
    Pack a file into a gzip/bzip2 tarfile.

    :param archivename: Archive name.
    :type archivename: str

    :param filename: Name of file to pack into archive.
    :type filename: str

    :param method: Tarfile compress method.
    :type method: str

    :param strength: Compression strength. 5 is normal, 9 is ultra.
    :type strength: int
    """
    # Store only the basename so the archive has no directory structure.
    arcname = os.path.basename(filename)
    with tarfile.open(archivename, method, compresslevel=strength) as archive:
        archive.add(filename, filter=None, arcname=arcname)
def generic_nocompresslevel(archivename, filename, method):
    """
    Pack a file into an uncompressed/LZMA tarfile.

    :param archivename: Archive name.
    :type archivename: str

    :param filename: Name of file to pack into archive.
    :type filename: str

    :param method: Tarfile compress method.
    :type method: str
    """
    # Store only the basename so the archive has no directory structure.
    arcname = os.path.basename(filename)
    with tarfile.open(archivename, method) as archive:
        archive.add(filename, filter=None, arcname=arcname)
@decorators.timer
def tar_compress(filepath, filename):
    """
    Pack a file into an uncompressed tarfile named ``<filepath>.tar``.

    :param filepath: Basename of file, no extension.
    :type filepath: str

    :param filename: Name of file to pack.
    :type filename: str
    """
    generic_tarfile_compress(filepath + ".tar", filename, "w:")
def tar_verify(filepath):
    """
    Verify that a tar file is valid and working.

    :param filepath: Filename.
    :type filepath: str
    """
    return generic_tarfile_verify(filepath, method="r:")
@decorators.timer
def tgz_compress(filepath, filename, strength=5):
    """
    Pack a file into a gzip tarfile named ``<filepath>.tar.gz``.

    :param filepath: Basename of file, no extension.
    :type filepath: str

    :param filename: Name of file to pack.
    :type filename: str

    :param strength: Compression strength. 5 is normal, 9 is ultra.
    :type strength: int
    """
    generic_tarfile_compress(filepath + ".tar.gz", filename, "w:gz", strength)
def tgz_verify(filepath):
    """
    Verify that a tar.gz file is valid and working.

    :param filepath: Filename.
    :type filepath: str
    """
    return generic_tarfile_verify(filepath, method="r:gz")
@decorators.timer
def tbz_compress(filepath, filename, strength=5):
    """
    Pack a file into a bzip2 tarfile named ``<filepath>.tar.bz2``.

    :param filepath: Basename of file, no extension.
    :type filepath: str

    :param filename: Name of file to pack.
    :type filename: str

    :param strength: Compression strength. 5 is normal, 9 is ultra.
    :type strength: int
    """
    generic_tarfile_compress(filepath + ".tar.bz2", filename, "w:bz2", strength)
def tbz_verify(filepath):
    """
    Verify that a tar.bz2 file is valid and working.

    :param filepath: Filename.
    :type filepath: str
    """
    return generic_tarfile_verify(filepath, method="r:bz2")
@decorators.timer
def txz_compress(filepath, filename):
    """
    Pack a file into a LZMA tarfile named ``<filepath>.tar.xz``.

    Silently does nothing on interpreters older than Python 3.3, which lack
    LZMA tarfile support.

    :param filepath: Basename of file, no extension.
    :type filepath: str

    :param filename: Name of file to pack.
    :type filename: str
    """
    # Original used the `if not X: pass / else: do()` anti-pattern; inverted
    # to the direct positive test with identical behavior.
    if utilities.new_enough(3, 3):
        generic_tarfile_compress("{0}.tar.xz".format(filepath), filename, "w:xz")
def txz_verify(filepath):
    """
    Verify that a tar.xz file is valid and working.

    Returns None (not False) when the interpreter is older than Python 3.3
    and cannot read LZMA tarfiles at all.

    :param filepath: Filename.
    :type filepath: str
    """
    if not utilities.new_enough(3, 3):
        return None
    return generic_tarfile_verify(filepath, "r:xz")
@decorators.timer
def zip_compress(filepath, filename):
    """
    Pack a file into a DEFLATE zipfile named ``<filepath>.zip``.

    :param filepath: Basename of file, no extension.
    :type filepath: str

    :param filename: Name of file to pack.
    :type filename: str
    """
    target = "{0}.zip".format(filepath)
    # allowZip64 permits archives/members larger than 2 GiB.
    with zipfile.ZipFile(target, 'w', zipfile.ZIP_DEFLATED, allowZip64=True) as zfile:
        zfile.write(filename, arcname=os.path.basename(filename))
def zip_verify(filepath):
    """
    Verify that a .zip file is valid and working.

    :param filepath: Filename.
    :type filepath: str
    """
    if not zipfile.is_zipfile(filepath):
        return False
    # bar_tester returns the file path itself when the archive is broken.
    brokens = barutils.bar_tester(filepath)
    return brokens != filepath
def filter_method(method, szexe=None):
    """
    Make sure methods are OK.

    Downgrades txz to zip on interpreters older than Python 3.3, then
    delegates the 7-Zip availability check.

    :param method: Compression method to use.
    :type method: str

    :param szexe: Path to 7z executable, if needed.
    :type szexe: str
    """
    if not utilities.new_enough(3, 3) and method == "txz":
        method = "zip"  # fallback
    return filter_method_nosz(method, szexe)
def filter_method_nosz(method, szexe=None):
    """
    Make sure 7-Zip is OK, falling back to zip when no 7z executable exists.

    :param method: Compression method to use.
    :type method: str

    :param szexe: Path to 7z executable, if needed.
    :type szexe: str
    """
    # Only probe when 7z was requested without an explicit executable path.
    if method == "7z" and szexe is None:
        if not utilities.prep_seven_zip():  # see if 7z exists
            method = "zip"  # fallback
        else:
            # The original bound this result to a local `szexe` that was never
            # used afterwards (dead assignment). The call is kept in case it
            # has side effects, but the useless rebinding is removed.
            utilities.get_seven_zip(False)
    return method
def calculate_strength():
    """
    Determine zip/gzip/bzip2 strength by OS bit setting.

    64-bit hosts get ultra compression (9); everything else gets normal (5).
    """
    if utilities.is_amd64():
        return 9
    return 5
def filter_with_boolfilt(files, criterion, critargs):
    """
    Return everything that matches criterion.

    :param files: Files to work on.
    :type files: list(str)

    :param criterion: Function to use for evaluation.
    :type criterion: func

    :param critargs: Arguments for function, other than file.
    :type critargs: list
    """
    matches = []
    for entry in files:
        if criterion(entry, *critargs):
            matches.append(entry)
    return matches
def filter_without_boolfilt(files, criterion, critargs):
    """
    Return everything that doesn't match criterion.

    :param files: Files to work on.
    :type files: list(str)

    :param criterion: Function to use for evaluation.
    :type criterion: func

    :param critargs: Arguments for function, other than file.
    :type critargs: list
    """
    rejects = []
    for entry in files:
        if not criterion(entry, *critargs):
            rejects.append(entry)
    return rejects
def filtercomp(files, criterion, critargs, boolfilt=True):
    """
    Filter files by criterion, keeping either matches or non-matches.

    :param files: Files to work on.
    :type files: list(str)

    :param criterion: Function to use for evaluation.
    :type criterion: func

    :param critargs: Arguments for function, other than file.
    :type critargs: list

    :param boolfilt: True if comparing criterion, False if comparing not criterion.
    :type boolfilt: bool
    """
    chooser = filter_with_boolfilt if boolfilt else filter_without_boolfilt
    return chooser(files, criterion, critargs)
def compressfilter_select(filepath, files, selective=False):
    """
    Select files to compress based on the selectivity mode.

    :param filepath: Working directory. Required.
    :type filepath: str

    :param files: List of files in filepath.
    :type files: list(str)

    :param selective: Only compress autoloaders. Default is false.
    :type selective: bool/str
    """
    arx = bbconstants.ARCS
    pfx = bbconstants.PREFIXES
    if selective is None:
        # No filtering at all: everything in the directory.
        return os.listdir(filepath)
    if selective == "arcsonly":
        # Archives only.
        return filtercomp(files, utilities.prepends, ("", arx))
    if selective:
        # Autoloaders: prefixed files, minus archives, plus executables.
        prefixed = filtercomp(files, utilities.prepends, (pfx, ""))
        noarcs = filtercomp(prefixed, utilities.prepends, ("", arx), False)  # pop archives
        return filtercomp(noarcs, utilities.prepends, ("", ".exe"))  # include exes
    # Default: everything except archives.
    return filtercomp(files, utilities.prepends, ("", arx), False)  # pop archives
def compressfilter(filepath, selective=False):
    """
    Filter directory listing of working directory.

    :param filepath: Working directory. Required.
    :type filepath: str

    :param selective: Only compress autoloaders. Default is false.
    :type selective: bool/str
    """
    # BUG FIX: the original tested os.path.isdir(file) on the bare entry name,
    # which resolves against the *current* working directory rather than
    # filepath; subdirectories were only skipped when the process happened to
    # be running inside filepath. Join before testing.
    files = [file for file in os.listdir(filepath)
             if not os.path.isdir(os.path.join(filepath, file))]
    filt2 = compressfilter_select(filepath, files, selective)
    filt3 = [os.path.join(filepath, file) for file in filt2]
    return filt3
def prep_compress_function(method="7z", szexe=None, errors=False):
    """
    Prepare compression function and partial arguments.

    Returns a (callable, extra_args) pair; extra args are ordered
    szexe, strength, errors, with only those relevant to the method.

    :param method: Compression type. Default is "7z".
    :type method: str

    :param szexe: Path to 7z executable, if needed.
    :type szexe: str

    :param errors: Print completion status message. Default is false.
    :type errors: bool
    """
    methods = {"7z": sevenziputils.sz_compress, "tgz": tgz_compress, "txz": txz_compress,
               "tbz": tbz_compress, "tar": tar_compress, "zip": zip_compress}
    args = []
    if method == "7z":
        args.append(szexe)
    if method in ("7z", "tbz", "tgz"):
        args.append(calculate_strength())
    if method == "7z":
        args.append(errors)
    return methods[method], args
def compress(filepath, method="7z", szexe=None, selective=False, errors=False):
"""
Compress all autoloader files in a given folder, with a given method.
:param filepath: Working directory. Required.
:type filepath: str
:param method: Compression type. Default is "7z".
:type method: str
:param szexe: Path to 7z executable, if needed.
:type szexe: str
:param selective: Only compress autoloaders. Default is false.
:type selective: bool
:param errors: Print completion status message. Default is false.
:type errors: bool
"""
method = filter_method(method, szexe)
files = compressfilter(filepath, selective)
for file in files:
fname = | |
last establishment sn = esn
#create receipt msg stream
res = bytearray()
# Event 0 Inception Transferable (nxt digest not empty)
serder = incept(keys=[coeSigners[esn].verfer.qb64],
nxt=Nexter(keys=[coeSigners[<KEY>)
assert sn == int(serder.ked["s"], 16) == 0
coepre = serder.ked["i"]
assert coepre == '<KEY>'
event_digs.append(serder.dig)
# create sig counter
counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1
# sign serialization
siger = coeSigners[esn].sign(serder.raw, index=0) # return Siger if index
# attach to key event stream
kes.extend(serder.raw)
kes.extend(counter.qb64b)
kes.extend(siger.qb64b)
# make copy of kes so can use again for valKevery
coeKevery.process(ims=bytearray(kes)) # create Kever using Kevery
coeKever = coeKevery.kevers[coepre]
assert coeKever.prefixer.qb64 == coepre
valKevery.process(ims=kes)
assert coepre in valKevery.kevers
valKever = valKevery.kevers[coepre]
assert len(kes) == 0
# create receipt from val to coe
reserder = receipt(pre=coeKever.prefixer.qb64,
sn=coeKever.sn,
dig=coeKever.serder.diger.qb64)
# sign event not receipt
valCigar = valSigner.sign(ser=serder.raw) # returns Cigar cause no index
assert valCigar.qb64 == '<KEY>'
recnt = Counter(code=CtrDex.ControllerIdxSigs, count=1)
assert recnt.qb64 == '-AAB'
res.extend(reserder.raw)
res.extend(recnt.qb64b)
res.extend(valPrefixer.qb64b)
res.extend(valCigar.qb64b)
assert res == bytearray(b'{"v":"KERI10JSON000091_","i":"DSuhyBcPZEZLK-fcw5tzHn2N46wRCG_ZOo'
b'eKtWTOunRA","s":"0","t":"rct","d":"EB5PLgogAWw5iniBXk0MKnFU9udCH'
b'a9ez_HJxCuvL_xM"}-AABB<KEY>'
b'<KEY>'
b'Fu_5asnM7m67KlGC9EYaw0KDQ')
coeKevery.process(ims=res) # coe process the receipt from val
# check if in receipt database
result = coeKevery.db.getRcts(key=dgKey(pre=coeKever.prefixer.qb64,
dig=coeKever.serder.diger.qb64))
assert bytes(result[0]) == valPrefixer.qb64b + valCigar.qb64b
# create receipt to escrow use invalid dig and sn so not in db
fake = reserder.dig # some other dig
reserder = receipt(pre=coeKever.prefixer.qb64,
sn=2,
dig=fake)
# sign event not receipt
valCigar = valSigner.sign(ser=serder.raw) # returns Cigar cause no index
recnt = Counter(code=CtrDex.ControllerIdxSigs, count=1)
# attach to receipt msg stream
res.extend(reserder.raw)
res.extend(recnt.qb64b)
res.extend(valPrefixer.qb64b)
res.extend(valCigar.qb64b)
coeKevery.process(ims=res) # coe process the escrow receipt from val
# check if in escrow database
result = coeKevery.db.getUres(key=snKey(pre=coeKever.prefixer.qb64,
sn=2))
assert bytes(result[0]) == fake.encode("utf-8") + valPrefixer.qb64b + valCigar.qb64b
# create receipt stale use invalid dig and valid sn so bad receipt
fake = reserder.dig # some other dig
reserder = receipt(pre=coeKever.prefixer.qb64,
sn=coeKever.sn,
dig=fake)
# sign event not receipt
valCigar = valSigner.sign(ser=serder.raw) # returns Cigar cause no index
recnt = Counter(code=CtrDex.ControllerIdxSigs, count=1)
# attach to receipt msg stream
res.extend(reserder.raw)
res.extend(recnt.qb64b)
res.extend(valPrefixer.qb64b)
res.extend(valCigar.qb64b)
with pytest.raises(ValidationError):
coeKevery.processOne(ims=res) # coe process the escrow receipt from val
# Next Event Rotation Transferable
sn += 1
esn += 1
assert sn == esn == 1
serder = rotate(pre=coeKever.prefixer.qb64,
keys=[coeSigners[esn].verfer.qb64],
dig=coeKever.serder.diger.qb64,
nxt=Nexter(keys=[coeSigners[esn+1].verfer.qb64]).qb64,
sn=sn)
event_digs.append(serder.dig)
# create sig counter
counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1
# sign serialization
siger = coeSigners[esn].sign(serder.raw, index=0) # returns siger
#extend key event stream
kes.extend(serder.raw)
kes.extend(counter.qb64b)
kes.extend(siger.qb64b)
coeKevery.process(ims=bytearray(kes)) # update key event verifier state
valKevery.process(ims=kes)
# Next Event Interaction
sn += 1 # do not increment esn
assert sn == 2
assert esn == 1
serder = interact(pre=coeKever.prefixer.qb64,
dig=coeKever.serder.diger.qb64,
sn=sn)
event_digs.append(serder.dig)
# create sig counter
counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1
# sign serialization
siger = coeSigners[esn].sign(serder.raw, index=0)
#extend key event stream
kes.extend(serder.raw)
kes.extend(counter.qb64b)
kes.extend(siger.qb64b)
coeKevery.process(ims=bytearray(kes)) # update key event verifier state
valKevery.process(ims=kes)
# Next Event Rotation Transferable
sn += 1
esn += 1
assert sn == 3
assert esn == 2
serder = rotate(pre=coeKever.prefixer.qb64,
keys=[coeSigners[esn].verfer.qb64],
dig=coeKever.serder.diger.qb64,
nxt=Nexter(keys=[coeSigners[esn+1].verfer.qb64]).qb64,
sn=sn)
event_digs.append(serder.dig)
# create sig counter
counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1
# sign serialization
siger = coeSigners[esn].sign(serder.raw, index=0)
#extend key event stream
kes.extend(serder.raw)
kes.extend(counter.qb64b)
kes.extend(siger.qb64b)
coeKevery.process(ims=bytearray(kes)) # update key event verifier state
valKevery.process(ims=kes)
# Next Event Interaction
sn += 1 # do not increment esn
assert sn == 4
assert esn == 2
serder = interact(pre=coeKever.prefixer.qb64,
dig=coeKever.serder.diger.qb64,
sn=sn)
event_digs.append(serder.dig)
# create sig counter
counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1
# sign serialization
siger = coeSigners[esn].sign(serder.raw, index=0)
#extend key event stream
kes.extend(serder.raw)
kes.extend(counter.qb64b)
kes.extend(siger.qb64b)
coeKevery.process(ims=bytearray(kes)) # update key event verifier state
valKevery.process(ims=kes)
# Next Event Interaction
sn += 1 # do not increment esn
assert sn == 5
assert esn == 2
serder = interact(pre=coeKever.prefixer.qb64,
dig=coeKever.serder.diger.qb64,
sn=sn)
event_digs.append(serder.dig)
# create sig counter
counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1
# sign serialization
siger = coeSigners[esn].sign(serder.raw, index=0)
#extend key event stream
kes.extend(serder.raw)
kes.extend(counter.qb64b)
kes.extend(siger.qb64b)
coeKevery.process(ims=bytearray(kes)) # update key event verifier state
valKevery.process(ims=kes)
# Next Event Interaction
sn += 1 # do not increment esn
assert sn == 6
assert esn == 2
serder = interact(pre=coeKever.prefixer.qb64,
dig=coeKever.serder.diger.qb64,
sn=sn)
event_digs.append(serder.dig)
# create sig counter
counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1
# sign serialization
siger = coeSigners[esn].sign(serder.raw, index=0)
#extend key event stream
kes.extend(serder.raw)
kes.extend(counter.qb64b)
kes.extend(siger.qb64b)
coeKevery.process(ims=bytearray(kes)) # update key event verifier state
valKevery.process(ims=kes)
assert coeKever.verfers[0].qb64 == coeSigners[esn].verfer.qb64
db_digs = [bytes(val).decode("utf-8") for val in coeKever.baser.getKelIter(coepre)]
assert len(db_digs) == len(event_digs) == 7
assert valKever.sn == coeKever.sn
assert valKever.verfers[0].qb64 == coeKever.verfers[0].qb64 == coeSigners[esn].verfer.qb64
assert not os.path.exists(valKevery.db.path)
assert not os.path.exists(coeKever.baser.path)
""" Done Test """
def test_direct_mode():
"""
Test direct mode with transferable validator event receipts
"""
# manual process to generate a list of secrets
# root = pysodium.randombytes(pysodium.crypto_pwhash_SALTBYTES)
# secrets = generateSecrets(root=root, count=8)
# Direct Mode initiated by coe is controller, val is validator
# but goes both ways once initiated.
# set of secrets (seeds for private keys)
coeSecrets = [
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'<KEY>',
'AxFfJTcSuEE11FINfXMqWttkZGnUZ8KaREhrnyAXTsjw',
'ALq-w1UKkdrppwZzGTtz4PWYEeWm0-sDHzOv5sq96xJY'
]
# create coe signers
coeSigners = [Signer(qb64=secret) for secret in coeSecrets]
assert [signer.qb64 for signer in coeSigners] == coeSecrets
# set of secrets (seeds for private keys)
valSecrets = ['<KEY>',
'<KEY>',
'<KEY>tRNM',
'<KEY>_SLCHQ0pqoBWGk9s4N1brD-4pD_ANbs',
'<KEY>rYMlKAYL8k',
'<KEY>',
'<KEY>',
'<KEY>']
# create val signers
valSigners = [Signer(qb64=secret) for secret in valSecrets]
assert [signer.qb64 for signer in valSigners] == valSecrets
with openDB("controller") as coeLogger, openDB("validator") as valLogger:
# init Keverys
coeKevery = Kevery(db=coeLogger)
valKevery = Kevery(db=valLogger)
coe_event_digs = [] # list of coe's own event log digs to verify against database
val_event_digs = [] # list of val's own event log digs to verify against database
# init sequence numbers for both coe and val
csn = cesn = 0 # sn and last establishment sn = esn
vsn = vesn = 0 # sn and last establishment sn = esn
# Coe Event 0 Inception Transferable (nxt digest not empty)
coeSerder = incept(keys=[coeSigners[cesn].verfer.qb64],
nxt=Nexter(keys=[coeSigners[<KEY>,
code=MtrDex.Blake3_256)
assert csn == int(coeSerder.ked["s"], 16) == 0
coepre = coeSerder.ked["i"]
assert coepre == '<KEY>'
coe_event_digs.append(coeSerder.dig)
# create sig counter
counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1
# sign serialization
siger = coeSigners[cesn].sign(coeSerder.raw, index=0) # return Siger if index
# create serialized message
cmsg = bytearray(coeSerder.raw)
cmsg.extend(counter.qb64b)
cmsg.extend(siger.qb64b)
assert cmsg == bytearray(b'{"v":"KERI10JSON0000e6_","i":"<KEY>'
b'B-6n4WDi7w","s":"0","t":"icp","kt":"1","k":["DSuhyBcPZEZLK-fcw5t'
b'zHn2N46wRCG_ZOoeKtWTOunRA"],"n":"EPYuj8mq_PYYsoBKkzX1kxSPGYBWaIy'
b'a3slgCOyOtlqU","wt":"0","w":[],"c":[]}-AABAAmDoPp9jDio1hznNDO-3T'
b'2KA_FUbY8f_qybT6_FqPAuf89e9AMDXP5wch6jvT4Ev4QRp8HqtTb9t2Y6_KJPYlBw')
# create own Coe Kever in Coe's Kevery
coeKevery.processOne(ims=bytearray(cmsg)) # send copy of cmsg
coeKever = coeKevery.kevers[coepre]
assert coeKever.prefixer.qb64 == coepre
# Val Event 0 Inception Transferable (nxt digest not empty)
valSerder = incept(keys=[valSigners[vesn].verfer.qb64],
nxt=Nexter(keys=[valSigners[vesn+1].verfer.qb64]).qb64,
code=MtrDex.Blake3_256)
assert vsn == int(valSerder.ked["s"], 16) == 0
valpre = valSerder.ked["i"]
assert valpre == '<KEY>'
val_event_digs.append(valSerder.dig)
# create sig counter
counter = Counter(CtrDex.ControllerIdxSigs) # default is count = 1
# sign serialization
siger = valSigners[vesn].sign(valSerder.raw, index=0) # return Siger if index
# create serialized message
vmsg = bytearray(valSerder.raw)
vmsg.extend(counter.qb64b)
vmsg.extend(siger.qb64b)
assert vmsg == bytearray(b'{"v":"KERI10JSON0000e6_","i":"EpDA1n-WiBA0A8YOqnKrB-wWQYYC49i5zY'
b'_qrIZIicQg","s":"0","t":"icp","kt":"1","k":["<KEY>jAiUDdUBP'
b'NPyrSz_ad_Qf9yzhDNZlEKiMc"],"n":"EOWDAJvex5dZzDxeHBANyaIoUG3F4-i'
b'c81G6GwtnC4f4","wt":"0","w":[],"c":[]}-AABAAll_W0_FsjUyJnYokSNPq'
b'q7xdwIBs0ebq2eUez6RKNB-UG_y6fD0e6fb_nANvmNCWjsoFjWv3XP3ApXUabMgyBA')
# create own Val Kever in Val's Kevery
valKevery.processOne(ims=bytearray(vmsg)) # send copy of vmsg
valKever = valKevery.kevers[valpre]
assert valKever.prefixer.qb64 == valpre
# simulate sending of coe's inception message to val
valKevery.process(ims=bytearray(cmsg)) # make copy of msg
assert coepre in valKevery.kevers # creates Kever for coe in val's .kevers
# create receipt of coe's inception
# create seal of val's last est event
seal = SealEvent(i=valpre,
s="{:x}".format(valKever.lastEst.s),
d=valKever.lastEst.d)
coeK = valKevery.kevers[coepre] # lookup coeKever from val's .kevers
# create validator receipt
reserder = chit(pre=coeK.prefixer.qb64,
sn=coeK.sn,
dig=coeK.serder.diger.qb64,
seal=seal)
# sign coe's event not receipt
# look up event to sign from val's kever for coe
coeIcpDig = bytes(valKevery.db.getKeLast(key=snKey(pre=coepre, sn=csn)))
assert coeIcpDig == coeK.serder.diger.qb64b == b'EEnwxEm5Bg5s5aTLsgQCNpubIYzwlvMwZIzdOM0Z3u7o'
coeIcpRaw = bytes(valKevery.db.getEvt(key=dgKey(pre=coepre, dig=coeIcpDig)))
assert coeIcpRaw == (b'{"v":"KERI10JSON0000e6_","i":"EH7Oq9oxCgYa-nnNLvwhp9sFZpALILlRYyB-6n4WDi7w",'
b'"s":"0","t":"icp","kt":"1","k":["DSuhyBcPZEZLK-fcw5tzHn2N46wRCG_ZOoeKtWTOunR'
b'A"],"n":"EPYuj8mq_PYYsoBKkzX1kxSPGYBWaIya3slgCOyOtlqU","wt":"0","w":[],"c":['
b']}')
counter = Counter(CtrDex.ControllerIdxSigs)
assert counter.qb64 == '-AAB'
siger = valSigners[vesn].sign(ser=coeIcpRaw, index=0) # return Siger if index
assert siger.qb64 == '<KEY>'
# process own Val receipt in Val's Kevery so have copy in own log
rmsg = bytearray(reserder.raw)
rmsg.extend(counter.qb64b)
rmsg.extend(siger.qb64b)
assert rmsg == bytearray(b'{"v":"KERI10JSON000105_","i":"EH7Oq9oxCgYa-nnNLvwhp9sFZpALILlRYy'
b'B-6n4WDi7w","s":"0","t":"vrc","d":"EEnwxEm5Bg5s5aTLsgQCNpubIYzwl'
b'vMwZIzdOM0Z3u7o","a":{"i":"EpDA1n-WiBA0A8YOqnKrB-wWQYYC49i5zY_qr'
b'IZIicQg","s":"0","d":"EGFSGYH2BjtKwX1osO0ZvLw98nuuo3lMkveRoPIJzu'
b'po"}}-AABAAb6S-RXeAqUKl8UuNwYpiaFARhMj-95elxmr7uNU8m7buVSPVLbTWc'
b'QYfI_04HoP_A_fvlU_b099fiEJyDSA2Cg')
valKevery.processOne(ims=bytearray(rmsg)) # process copy | |
(opt_isDefault <= recurring) {
action(value, opt_isDefault);
if (!value.children) {
return;
}
/** @type {number} */
var i = 0;
var codeSegments = value.children;
for (;i < codeSegments.length;i++) {
this.eachLevel(codeSegments[i], opt_isDefault + 1, recurring, action);
}
}
},
/**
* @param {?} opt_attributes
* @param {Function} action
* @return {undefined}
*/
each : function(opt_attributes, action) {
this.eachLevel(opt_attributes, 0, Number.MAX_VALUE, action);
}
};
// Transition (easing) namespace. The $extend flag marks this group for flat
// merging when option groups are combined.
$jit.Trans = {
  $extend : true,
  /**
   * Identity easing: progress maps straight through to position.
   * @param {?} t
   * @return {?}
   */
  linear : function(t) {
    return t;
  }
};
// Short local alias for $jit.Trans used by the easing installer below.
// NOTE(review): the "$cookies" name is an artifact of automated
// deobfuscation, not an actual cookie store.
var $cookies = $jit.Trans;
(function() {
  /**
   * Decorate a raw transition curve with easeIn/easeOut/easeInOut variants.
   * @param {Function} transition base curve mapping [0,1] -> [0,1]
   * @param {?} params extra curve parameters (splatted into an array)
   * @return {?} the decorated transition
   */
  var makeTrans = function(transition, params) {
    params = $.splat(params);
    return $.extend(transition, {
      /**
       * @param {?} pos
       * @return {?}
       */
      easeIn : function(pos) {
        return transition(pos, params);
      },
      /**
       * @param {number} pos
       * @return {?}
       */
      easeOut : function(pos) {
        return 1 - transition(1 - pos, params);
      },
      /**
       * Mirror the curve around the midpoint for a symmetric ease.
       * @param {number} pos
       * @return {?}
       */
      easeInOut : function(pos) {
        return pos <= 0.5 ? transition(2 * pos, params) / 2 : (2 - transition(2 * (1 - pos), params)) / 2;
      }
    });
  };
  // Named easing curves; each takes progress p (and optional params x).
  var transitions = {
    Pow : function(p, x) {
      return Math.pow(p, x[0] || 6);
    },
    Expo : function(p) {
      return Math.pow(2, 8 * (p - 1));
    },
    Circ : function(p) {
      return 1 - Math.sin(Math.acos(p));
    },
    Sine : function(p) {
      return 1 - Math.sin((1 - p) * Math.PI / 2);
    },
    Back : function(p, x) {
      x = x[0] || 1.618;
      return Math.pow(p, 2) * ((x + 1) * p - x);
    },
    Bounce : function(p) {
      var value;
      var a = 0;
      var b = 1;
      // Walk the bounce segments until p falls inside one.
      for (;1;a += b, b /= 2) {
        if (p >= (7 - 4 * a) / 11) {
          value = b * b - Math.pow((11 - 6 * a - 11 * p) / 4, 2);
          break;
        }
      }
      return value;
    },
    Elastic : function(p, x) {
      return Math.pow(2, 10 * --p) * Math.cos(20 * p * Math.PI * (x[0] || 1) / 3);
    }
  };
  // Install each named curve on the Trans namespace.
  $.each(transitions, function(value, key) {
    $cookies[key] = makeTrans(value);
  });
  // Quad/Cubic/Quart/Quint are simple power curves with exponents 2..5.
  $.each(["Quad", "Cubic", "Quart", "Quint"], function(key, dataAndEvents) {
    $cookies[key] = makeTrans(function(pos) {
      // BUG FIX: the exponent was wrapped in an array literal
      // (Math.pow(pos, [n + 2])) and only worked through JS's implicit
      // array -> string -> number coercion; pass the number directly.
      return Math.pow(pos, dataAndEvents + 2);
    });
  });
})();
// Frame-timer driven animation runner: calls opt.compute with an eased
// progress value on every tick, then opt.complete when the duration elapses.
var Animation = new Class({
  /**
   * Build an animation; delegates all configuration to setOptions.
   * @param {Object} options
   * @return {undefined}
   */
  initialize : function(options) {
    this.setOptions(options);
  },
  /**
   * Merge caller options over the defaults into this.opt.
   * @param {Object} options
   * @return {?} this, for chaining
   */
  setOptions : function(options) {
    var opt = {
      duration : 2500,
      fps : 40,
      transition : $cookies.Quart.easeInOut,
      /** @type {function (): undefined} */
      compute : $.empty,
      /** @type {function (): undefined} */
      complete : $.empty,
      link : "ignore"
    };
    this.opt = $.merge(opt, options || {});
    return this;
  },
  /**
   * One timer tick: feed eased progress to compute, or finish the run
   * (clear the interval, force progress 1, fire complete).
   * @return {undefined}
   */
  step : function() {
    /** @type {number} */
    var time = $.time();
    var opt = this.opt;
    if (time < this.time + opt.duration) {
      // Still inside the duration window: eased fraction of elapsed time.
      var from = opt.transition((time - this.time) / opt.duration);
      opt.compute(from);
    } else {
      // Done: clearInterval returns undefined, which also resets this.timer.
      this.timer = clearInterval(this.timer);
      opt.compute(1);
      opt.complete();
    }
  },
  /**
   * Start the animation from scratch (unless link policy vetoes it).
   * @return {?} this, for chaining
   */
  start : function() {
    if (!this.check()) {
      return this;
    }
    /** @type {number} */
    this.time = 0;
    this.startTimer();
    return this;
  },
  /**
   * Arm the interval timer. With time previously holding elapsed time
   * (0 on a fresh start), this re-bases it to the wall-clock start stamp.
   * @return {?} false if a timer is already running, else true
   */
  startTimer : function() {
    var self = this;
    var fps = this.opt.fps;
    if (this.timer) {
      return false;
    }
    /** @type {number} */
    this.time = $.time() - this.time;
    /** @type {number} */
    this.timer = setInterval(function() {
      self.step();
    }, Math.round(1E3 / fps));
    return true;
  },
  /**
   * Pause the animation, preserving elapsed time.
   * @return {?} this, for chaining
   */
  pause : function() {
    this.stopTimer();
    return this;
  },
  /**
   * Resume a paused animation from where it left off.
   * @return {?} this, for chaining
   */
  resume : function() {
    this.startTimer();
    return this;
  },
  /**
   * Disarm the timer and convert this.time back to elapsed time so a
   * later startTimer can resume seamlessly.
   * @return {?} false if no timer was running, else true
   */
  stopTimer : function() {
    if (!this.timer) {
      return false;
    }
    /** @type {number} */
    this.time = $.time() - this.time;
    this.timer = clearInterval(this.timer);
    return true;
  },
  /**
   * Decide whether a new run may start: yes when idle, yes (after
   * cancelling) when link == "cancel", otherwise no.
   * @return {?} boolean
   */
  check : function() {
    if (!this.timer) {
      return true;
    }
    if (this.opt.link == "cancel") {
      this.stopTimer();
      return true;
    }
    return false;
  }
});
/**
 * Merge the named option groups (properties of Options) into one object.
 * Groups flagged $extend are merged flat into the result; all others are
 * nested under their own name.
 * @return {?} the merged options object
 */
var Options = function() {
  var merged = {};
  for (var idx = 0, total = arguments.length; idx < total; idx++) {
    var groupName = arguments[idx];
    var group = Options[groupName];
    if (group.$extend) {
      $.extend(merged, group);
    } else {
      merged[groupName] = group;
    }
  }
  return merged;
};
// Default option groups consumed by Options() above. $extend:true groups are
// merged flat; $extend:false groups appear nested under their own name.

// AreaChart visualization defaults.
Options.AreaChart = {
  $extend : true,
  animate : true,
  labelOffset : 3,
  type : "stacked",
  Tips : {
    enable : false,
    /** @type {function (): undefined} */
    onShow : $.empty,
    /** @type {function (): undefined} */
    onHide : $.empty
  },
  Events : {
    enable : false,
    /** @type {function (): undefined} */
    onClick : $.empty
  },
  selectOnHover : true,
  showAggregates : true,
  showLabels : true,
  filterOnClick : false,
  restoreOnRightClick : false
};
// Canvas margins, in pixels.
Options.Margin = {
  $extend : false,
  top : 0,
  left : 0,
  right : 0,
  bottom : 0
};
// Canvas creation defaults (width/height false = auto-size).
Options.Canvas = {
  $extend : true,
  injectInto : "id",
  type : "2D",
  width : false,
  height : false,
  useCanvas : false,
  withLabels : true,
  background : false,
  Scene : {
    Lighting : {
      enable : false,
      ambient : [1, 1, 1],
      directional : {
        direction : {
          x : -100,
          y : -100,
          z : -100
        },
        color : [0.5, 0.3, 0.1]
      }
    }
  }
};
// Tree layout defaults (offsets/indent in pixels).
Options.Tree = {
  $extend : true,
  orientation : "left",
  subtreeOffset : 8,
  siblingOffset : 5,
  indent : 10,
  multitree : false,
  align : "center"
};
// Per-node rendering defaults.
Options.Node = {
  $extend : false,
  overridable : false,
  type : "circle",
  color : "#ccb",
  alpha : 1,
  dim : 3,
  height : 20,
  width : 90,
  autoHeight : false,
  autoWidth : false,
  lineWidth : 1,
  transform : true,
  align : "center",
  angularWidth : 1,
  span : 1,
  CanvasStyles : {}
};
// Per-edge rendering defaults.
Options.Edge = {
  $extend : false,
  overridable : false,
  type : "line",
  color : "#ccb",
  lineWidth : 1,
  dim : 15,
  alpha : 1,
  epsilon : 7,
  CanvasStyles : {}
};
// Animation (effects) defaults.
Options.Fx = {
  $extend : true,
  fps : 40,
  duration : 2500,
  transition : $jit.Trans.Quart.easeInOut,
  clearCanvas : true
};
// Label rendering defaults.
Options.Label = {
  $extend : false,
  overridable : false,
  type : "HTML",
  style : " ",
  size : 10,
  family : "sans-serif",
  textAlign : "center",
  textBaseline : "alphabetic",
  color : "#fff"
};
// Tooltip defaults (offsets in pixels).
Options.Tips = {
  $extend : false,
  enable : false,
  type : "auto",
  offsetX : 20,
  offsetY : 20,
  force : false,
  /** @type {function (): undefined} */
  onShow : $.empty,
  /** @type {function (): undefined} */
  onHide : $.empty
};
// Hover/click node style-change defaults.
Options.NodeStyles = {
  $extend : false,
  enable : false,
  type : "auto",
  stylesHover : false,
  stylesClick : false
};
Options.Events = {
$extend : false,
enable : false,
enableForEdges : false,
type : "auto",
/** @type {function (): undefined} */
onClick : $.empty,
/** @type {function (): undefined} */
onRightClick : $.empty,
/** @type {function (): undefined} */
onMouseMove : $.empty,
/** @type {function (): undefined} */
onMouseEnter : $.empty,
/** @type {function (): undefined} */
onMouseLeave : $.empty,
/** @type {function (): undefined} */
onDragStart : $.empty,
/** @type {function (): undefined} */
onDragMove : $.empty,
/** @type {function (): undefined} */
onDragCancel : $.empty,
/** @type {function (): undefined} */
onDragEnd : $.empty,
/** @type {function (): undefined} | |
(2 * np.prod(PQ), 2 * np.prod(PQ)), dtype = tf.complex64)
SG['S11'] = tensor_utils.expand_and_tile_tf(SG_S11, batchSize, pixelsX, pixelsY)
SG_S12 = tf.eye(num_rows = 2 * np.prod(PQ), dtype = tf.complex64)
SG['S12'] = tensor_utils.expand_and_tile_tf(SG_S12, batchSize, pixelsX, pixelsY)
SG_S21 = tf.eye(num_rows = 2 * np.prod(PQ), dtype = tf.complex64)
SG['S21'] = tensor_utils.expand_and_tile_tf(SG_S21, batchSize, pixelsX, pixelsY)
SG_S22 = tf.zeros(shape = (2 * np.prod(PQ), 2 * np.prod(PQ)), dtype = tf.complex64)
SG['S22'] = tensor_utils.expand_and_tile_tf(SG_S22, batchSize, pixelsX, pixelsY)
### Step 7: Calculate eigenmodes ###
# Build the eigenvalue problem.
P_00 = tf.linalg.matmul(KX, tf.linalg.inv(ERC))
P_00 = tf.linalg.matmul(P_00, KY)
P_01 = tf.linalg.matmul(KX, tf.linalg.inv(ERC))
P_01 = tf.linalg.matmul(P_01, KX)
P_01 = URC - P_01
P_10 = tf.linalg.matmul(KY, tf.linalg.inv(ERC))
P_10 = tf.linalg.matmul(P_10, KY) - URC
P_11 = tf.linalg.matmul(-KY, tf.linalg.inv(ERC))
P_11 = tf.linalg.matmul(P_11, KX)
P_row0 = tf.concat([P_00, P_01], axis = 5)
P_row1 = tf.concat([P_10, P_11], axis = 5)
P = tf.concat([P_row0, P_row1], axis = 4)
Q_00 = tf.linalg.matmul(KX, tf.linalg.inv(URC))
Q_00 = tf.linalg.matmul(Q_00, KY)
Q_01 = tf.linalg.matmul(KX, tf.linalg.inv(URC))
Q_01 = tf.linalg.matmul(Q_01, KX)
Q_01 = ERC - Q_01
Q_10 = tf.linalg.matmul(KY, tf.linalg.inv(URC))
Q_10 = tf.linalg.matmul(Q_10, KY) - ERC
Q_11 = tf.linalg.matmul(-KY, tf.linalg.inv(URC))
Q_11 = tf.linalg.matmul(Q_11, KX)
Q_row0 = tf.concat([Q_00, Q_01], axis = 5)
Q_row1 = tf.concat([Q_10, Q_11], axis = 5)
Q = tf.concat([Q_row0, Q_row1], axis = 4)
  # Compute eigenmodes for the layers in each pixel for the whole batch.
OMEGA_SQ = tf.linalg.matmul(P, Q)
LAM, W = tensor_utils.eig_general(OMEGA_SQ)
LAM = tf.sqrt(LAM)
LAM = tf.linalg.diag(LAM)
V = tf.linalg.matmul(Q, W)
V = tf.linalg.matmul(V, tf.linalg.inv(LAM))
# Scattering matrices for the layers in each pixel for the whole batch.
W_inv = tf.linalg.inv(W)
V_inv = tf.linalg.inv(V)
A = tf.linalg.matmul(W_inv, W0) + tf.linalg.matmul(V_inv, V0)
B = tf.linalg.matmul(W_inv, W0) - tf.linalg.matmul(V_inv, V0)
X = tf.linalg.expm(-LAM * k0 * params['L'])
S = dict({})
A_inv = tf.linalg.inv(A)
S11_left = tf.linalg.matmul(X, B)
S11_left = tf.linalg.matmul(S11_left, A_inv)
S11_left = tf.linalg.matmul(S11_left, X)
S11_left = tf.linalg.matmul(S11_left, B)
S11_left = A - S11_left
S11_left = tf.linalg.inv(S11_left)
S11_right = tf.linalg.matmul(X, B)
S11_right = tf.linalg.matmul(S11_right, A_inv)
S11_right = tf.linalg.matmul(S11_right, X)
S11_right = tf.linalg.matmul(S11_right, A)
S11_right = S11_right - B
S['S11'] = tf.linalg.matmul(S11_left, S11_right)
S12_right = tf.linalg.matmul(B, A_inv)
S12_right = tf.linalg.matmul(S12_right, B)
S12_right = A - S12_right
S12_left = tf.linalg.matmul(S11_left, X)
S['S12'] = tf.linalg.matmul(S12_left, S12_right)
S['S21'] = S['S12']
S['S22'] = S['S11']
# Update the global scattering matrices.
for l in range(Nlay):
S_layer = dict({})
S_layer['S11'] = S['S11'][:, :, :, l, :, :]
S_layer['S12'] = S['S12'][:, :, :, l, :, :]
S_layer['S21'] = S['S21'][:, :, :, l, :, :]
S_layer['S22'] = S['S22'][:, :, :, l, :, :]
SG = rcwa_utils.redheffer_star_product(SG, S_layer)
### Step 8: Reflection side ###
# Eliminate layer dimension for tensors as they are unchanging on this dimension.
KX = KX[:, :, :, 0, :, :]
KY = KY[:, :, :, 0, :, :]
KZref = KZref[:, :, :, 0, :, :]
KZtrn = KZtrn[:, :, :, 0, :, :]
Z = Z[:, :, :, 0, :, :]
I = I[:, :, :, 0, :, :]
W0 = W0[:, :, :, 0, :, :]
V0 = V0[:, :, :, 0, :, :]
Q_ref_00 = tf.linalg.matmul(KX, KY)
Q_ref_01 = params['ur1'] * params['er1'] * I - tf.linalg.matmul(KX, KX)
Q_ref_10 = tf.linalg.matmul(KY, KY) - params['ur1'] * params['er1'] * I
Q_ref_11 = -tf.linalg.matmul(KY, KX)
Q_ref_row0 = tf.concat([Q_ref_00, Q_ref_01], axis = 4)
Q_ref_row1 = tf.concat([Q_ref_10, Q_ref_11], axis = 4)
Q_ref = tf.concat([Q_ref_row0, Q_ref_row1], axis = 3)
W_ref_row0 = tf.concat([I, Z], axis = 4)
W_ref_row1 = tf.concat([Z, I], axis = 4)
W_ref = tf.concat([W_ref_row0, W_ref_row1], axis = 3)
LAM_ref_row0 = tf.concat([-1j * KZref, Z], axis = 4)
LAM_ref_row1 = tf.concat([Z, -1j * KZref], axis = 4)
LAM_ref = tf.concat([LAM_ref_row0, LAM_ref_row1], axis = 3)
V_ref = tf.linalg.matmul(Q_ref, tf.linalg.inv(LAM_ref))
W0_inv = tf.linalg.inv(W0)
V0_inv = tf.linalg.inv(V0)
A_ref = tf.linalg.matmul(W0_inv, W_ref) + tf.linalg.matmul(V0_inv, V_ref)
A_ref_inv = tf.linalg.inv(A_ref)
B_ref = tf.linalg.matmul(W0_inv, W_ref) - tf.linalg.matmul(V0_inv, V_ref)
SR = dict({})
SR['S11'] = tf.linalg.matmul(-A_ref_inv, B_ref)
SR['S12'] = 2 * A_ref_inv
SR_S21 = tf.linalg.matmul(B_ref, A_ref_inv)
SR_S21 = tf.linalg.matmul(SR_S21, B_ref)
SR['S21'] = 0.5 * (A_ref - SR_S21)
SR['S22'] = tf.linalg.matmul(B_ref, A_ref_inv)
### Step 9: Transmission side ###
Q_trn_00 = tf.linalg.matmul(KX, KY)
Q_trn_01 = params['ur2'] * params['er2'] * I - tf.linalg.matmul(KX, KX)
Q_trn_10 = tf.linalg.matmul(KY, KY) - params['ur2'] * params['er2'] * I
Q_trn_11 = -tf.linalg.matmul(KY, KX)
Q_trn_row0 = tf.concat([Q_trn_00, Q_trn_01], axis = 4)
Q_trn_row1 = tf.concat([Q_trn_10, Q_trn_11], axis = 4)
Q_trn = tf.concat([Q_trn_row0, Q_trn_row1], axis = 3)
W_trn_row0 = tf.concat([I, Z], axis = 4)
W_trn_row1 = tf.concat([Z, I], axis = 4)
W_trn = tf.concat([W_trn_row0, W_trn_row1], axis = 3)
LAM_trn_row0 = tf.concat([1j * KZtrn, Z], axis = 4)
LAM_trn_row1 = tf.concat([Z, 1j * KZtrn], axis = 4)
LAM_trn = tf.concat([LAM_trn_row0, LAM_trn_row1], axis = 3)
V_trn = tf.linalg.matmul(Q_trn, tf.linalg.inv(LAM_trn))
W0_inv = tf.linalg.inv(W0)
V0_inv = tf.linalg.inv(V0)
A_trn = tf.linalg.matmul(W0_inv, W_trn) + tf.linalg.matmul(V0_inv, V_trn)
A_trn_inv = tf.linalg.inv(A_trn)
B_trn = tf.linalg.matmul(W0_inv, W_trn) - tf.linalg.matmul(V0_inv, V_trn)
ST = dict({})
ST['S11'] = tf.linalg.matmul(B_trn, A_trn_inv)
ST_S12 = tf.linalg.matmul(B_trn, A_trn_inv)
ST_S12 = tf.linalg.matmul(ST_S12, B_trn)
ST['S12'] = 0.5 * (A_trn - ST_S12)
ST['S21'] = 2 * A_trn_inv
ST['S22'] = tf.linalg.matmul(-A_trn_inv, B_trn)
### Step 10: Compute global scattering matrix ###
SG = rcwa_utils.redheffer_star_product(SR, SG)
SG = rcwa_utils.redheffer_star_product(SG, ST)
### Step 11: Compute source parameters ###
# Compute mode coefficients of the source.
delta = np.zeros((batchSize, pixelsX, pixelsY, np.prod(PQ)))
delta[:, :, :, int(np.prod(PQ) / 2.0)] = 1
# Incident wavevector.
kinc_x0_pol = tf.math.real(kinc_x0[:, :, :, 0, 0])
kinc_y0_pol = tf.math.real(kinc_y0[:, :, :, 0, 0])
kinc_z0_pol = tf.math.real(kinc_z0[:, :, :, 0])
kinc_pol = tf.concat([kinc_x0_pol, kinc_y0_pol, kinc_z0_pol], axis = 3)
# Calculate TE and TM polarization unit vectors.
firstPol = True
for pol in range(batchSize):
if (kinc_pol[pol, 0, 0, 0] == 0.0 and kinc_pol[pol, 0, 0, 1] == 0.0):
ate_pol = np.zeros((1, pixelsX, pixelsY, 3))
ate_pol[:, :, :, 1] = 1
ate_pol = tf.convert_to_tensor(ate_pol, dtype = tf.float32)
else:
# Calculation of `ate` for oblique incidence.
n_hat = np.zeros((1, pixelsX, pixelsY, 3))
n_hat[:, :, :, 0] = 1
n_hat = tf.convert_to_tensor(n_hat, dtype = tf.float32)
kinc_pol_iter = kinc_pol[pol, :, :, :]
kinc_pol_iter = kinc_pol_iter[tf.newaxis, :, :, :]
ate_cross = tf.linalg.cross(n_hat, kinc_pol_iter)
ate_pol = ate_cross / tf.norm(ate_cross, axis = 3, keepdims = True)
if firstPol:
ate = ate_pol
firstPol = False
else:
ate = tf.concat([ate, ate_pol], axis = 0)
atm_cross = tf.linalg.cross(kinc_pol, ate)
atm = atm_cross / tf.norm(atm_cross, axis = 3, keepdims = True)
ate = tf.cast(ate, dtype = tf.complex64)
atm = tf.cast(atm, dtype = tf.complex64)
# Decompose the TE and TM polarization into x and y components.
EP = params['pte'] * ate + params['ptm'] * atm
EP_x = EP[:, :, :, 0]
EP_x = EP_x[:, :, :, tf.newaxis]
EP_y = EP[:, :, :, 1]
EP_y = EP_y[:, :, :, tf.newaxis]
esrc_x = EP_x * delta
esrc_y = EP_y * delta
esrc = tf.concat([esrc_x, esrc_y], axis = 3)
esrc = esrc[:, :, :, :, tf.newaxis]
W_ref_inv = tf.linalg.inv(W_ref)
### Step 12: Compute reflected and transmitted fields ###
csrc = tf.linalg.matmul(W_ref_inv, esrc)
  # Compute transmission and reflection mode coefficients.
cref = tf.linalg.matmul(SG['S11'], csrc)
ctrn = tf.linalg.matmul(SG['S21'], csrc)
eref = tf.linalg.matmul(W_ref, cref)
etrn = tf.linalg.matmul(W_trn, ctrn)
rx = eref[:, :, :, 0 : np.prod(PQ), :]
ry = eref[:, :, :, np.prod(PQ) : 2 * np.prod(PQ), :]
tx = etrn[:, :, :, 0 : np.prod(PQ), :]
ty = etrn[:, :, :, np.prod(PQ) : 2 * np.prod(PQ), :]
# Compute longitudinal components.
KZref_inv = tf.linalg.inv(KZref)
KZtrn_inv = tf.linalg.inv(KZtrn)
rz = tf.linalg.matmul(KX, rx) + tf.linalg.matmul(KY, ry)
rz = tf.linalg.matmul(-KZref_inv, rz)
tz = tf.linalg.matmul(KX, tx) + tf.linalg.matmul(KY, ty)
tz = tf.linalg.matmul(-KZtrn_inv, tz)
  ### Step 13: Compute diffraction efficiencies ###
rx2 = tf.math.real(rx) ** 2 + tf.math.imag(rx) ** 2
ry2 = tf.math.real(ry) ** 2 + tf.math.imag(ry) ** 2
rz2 = tf.math.real(rz) ** 2 + tf.math.imag(rz) ** 2
R2 = rx2 + ry2 + rz2
R = tf.math.real(-KZref / params['ur1']) / tf.math.real(kinc_z0 / params['ur1'])
R = tf.linalg.matmul(R, R2)
R = tf.reshape(R, shape = (batchSize, pixelsX, pixelsY, PQ[0], PQ[1]))
REF = tf.math.reduce_sum(R, axis = [3, 4])
tx2 = tf.math.real(tx) ** 2 + tf.math.imag(tx) ** 2
ty2 = tf.math.real(ty) ** | |
import numbers
from collections.abc import Iterable
from copy import deepcopy
import numpy as np
from qutip.qobjevo import QobjEvo
from qutip.qip.operations import expand_operator
from qutip.qobj import Qobj
from qutip.operators import sigmaz, destroy, num
from qutip.qip.pulse import Pulse
__all__ = ["Noise", "DecoherenceNoise", "RelaxationNoise",
"ControlAmpNoise", "RandomNoise", "process_noise"]
def process_noise(pulses, noise_list, dims, t1=None, t2=None,
                  device_noise=False):
    """
    Apply noise to the input list of pulses. It does not modify the input
    pulse, but return a new one containing the noise.

    Parameters
    ----------
    pulses: list of :class:`.Pulse`
        The input pulses, on which the noise object will be applied.
    noise_list: list of :class:`.Noise`
        A list of noise objects.
    dims: int or list
        Dimension of the system.
        If int, we assume it is the number of qubits in the system.
        If list, it is the dimension of the component systems.
    t1: list or float, optional
        Characterize the decoherence of amplitude damping for
        each qubit. A list of size `N` or a float for all qubits.
    t2: list of float, optional
        Characterize the decoherence of dephasing for
        each qubit. A list of size `N` or a float for all qubits.
    device_noise: bool
        If pulse independent noise such as relaxation are included.
        Default is False.

    Returns
    -------
    noisy_pulses: list of :class:`qutip.qip.Pulse`
        The noisy pulses, including the system noise.
    """
    noisy_pulses = deepcopy(pulses)
    systematic_noise = Pulse(None, None, label="systematic_noise")
    # Build a new list instead of appending to the caller's `noise_list`:
    # the original `noise_list.append(...)` mutated the argument, so calling
    # this function twice with the same list accumulated RelaxationNoise
    # objects as a side effect.
    if (t1 is not None) or (t2 is not None):
        noise_list = noise_list + [RelaxationNoise(t1, t2)]
    for noise in noise_list:
        # Decoherence/relaxation noise is pulse-independent device noise;
        # apply it only when device noise was requested.
        if (isinstance(noise, (DecoherenceNoise, RelaxationNoise))
                and not device_noise):
            continue
        noisy_pulses, systematic_noise = noise._apply_noise(
            dims=dims, pulses=noisy_pulses,
            systematic_noise=systematic_noise)
    if device_noise:
        return noisy_pulses + [systematic_noise]
    else:
        return noisy_pulses
class Noise(object):
    """
    The base class representing noise in a processor.
    The noise object can be added to :class:`.Processor` and
    contributes to evolution.
    """
    def __init__(self):
        pass

    def get_noisy_dynamics(self, dims, pulses, systematic_noise):
        """
        Return a pulses list added with noise and
        the pulse independent noise in a dummy Pulse object.

        Parameters
        ----------
        dims: list, optional
            The dimension of the components system, the default value is
            [2,2...,2] for qubits system.
        pulses: list of :class:`.Pulse`
            The input pulses, on which the noise object is to be applied.
        systematic_noise: :class:`.Pulse`
            The dummy pulse with no ideal control element.

        Returns
        -------
        noisy_pulses: list of :class:`.Pulse`
            Noisy pulses.
        systematic_noise: :class:`.Pulse`
            The dummy pulse representing pulse independent noise.
        """
        # Fix: the original adjacent string literals concatenated to
        # "...needs a method`get_noisy_dynamics`..." (missing space and
        # garbled wording).
        raise NotImplementedError(
            "Each Noise subclass needs a method "
            "`get_noisy_dynamics` to process the noise.")

    def _apply_noise(self, pulses=None, systematic_noise=None, dims=None):
        """
        For backward compatibility, in case the method has no return value
        or only return the pulse.
        """
        result = self.get_noisy_dynamics(
            pulses=pulses, systematic_noise=systematic_noise, dims=dims)
        if result is None:  # in-place change
            pass
        elif isinstance(result, tuple) and len(result) == 2:
            pulses, systematic_noise = result
        # only pulse
        elif isinstance(result, list) and len(result) == len(pulses):
            pulses = result
        else:
            raise TypeError(
                "Returned value of get_noisy_dynamics not understood.")
        return pulses, systematic_noise
class DecoherenceNoise(Noise):
    """
    Decoherence noise in a processor: generates Lindblad noise terms
    from the given collapse operator(s) `c_ops`.

    Parameters
    ----------
    c_ops: :class:`qutip.Qobj` or list
        The Hamiltonian representing the dynamics of the noise.
    targets: int or list, optional
        The indices of qubits that are acted on. Default is on all
        qubits
    coeff: list, optional
        A list of the coefficients for the control Hamiltonians.
    tlist: array_like, optional
        A NumPy array specifies the time of each coefficient.
    all_qubits: bool, optional
        If `c_ops` contains only single qubits collapse operator,
        ``all_qubits=True`` will allow it to be applied to all qubits.

    Attributes
    ----------
    c_ops: :class:`qutip.Qobj` or list
        The Hamiltonian representing the dynamics of the noise.
    targets: int or list
        The indices of qubits that are acted on.
    coeff: list
        A list of the coefficients for the control Hamiltonians.
    tlist: array_like
        A NumPy array specifies the time of each coefficient.
    all_qubits: bool
        If `c_ops` contains only single qubits collapse operator,
        ``all_qubits=True`` will allow it to be applied to all qubits.
    """
    def __init__(self, c_ops, targets=None, coeff=None, tlist=None,
                 all_qubits=False):
        # Normalize a bare Qobj into a one-element list.
        self.c_ops = [c_ops] if isinstance(c_ops, Qobj) else c_ops
        self.coeff = coeff
        self.tlist = tlist
        self.targets = targets
        # `all_qubits` only makes sense for single-qubit collapse operators.
        if all_qubits and not all(
                c_op.dims == [[2], [2]] for c_op in self.c_ops):
            raise ValueError(
                "The operator is not a single qubit operator, "
                "thus cannot be applied to all qubits")
        self.all_qubits = all_qubits

    def get_noisy_dynamics(
            self, dims=None, pulses=None, systematic_noise=None):
        if systematic_noise is None:
            systematic_noise = Pulse(None, None, label="system")
        num_sys = len(dims)
        # Time-independent noise: mark the coefficient as "always on".
        if self.coeff is None and self.tlist is None:
            self.coeff = True
        # Either one entry per component system, or the configured targets.
        if self.all_qubits:
            target_list = list(range(num_sys))
        else:
            target_list = [self.targets]
        for c_op in self.c_ops:
            for tgt in target_list:
                systematic_noise.add_lindblad_noise(
                    c_op, tgt, self.tlist, self.coeff)
        return pulses, systematic_noise
class RelaxationNoise(Noise):
"""
The decoherence on each qubit characterized by two time scales t1 and t2.
Parameters
----------
t1: float or list, optional
Characterize the decoherence of amplitude damping for
each qubit.
t2: float or list, optional
Characterize the decoherence of dephasing for
each qubit.
targets: int or list, optional
The indices of qubits that are acted on. Default is on all
qubits
Attributes
----------
t1: float or list
Characterize the decoherence of amplitude damping for
each qubit.
t2: float or list
Characterize the decoherence of dephasing for
each qubit.
targets: int or list
The indices of qubits that are acted on.
"""
def __init__(self, t1=None, t2=None, targets=None):
self.t1 = t1
self.t2 = t2
self.targets = targets
def _T_to_list(self, T, N):
"""
Check if the relaxation time is valid
Parameters
----------
T: list of float
The relaxation time
N: int
The number of component systems.
Returns
-------
T: list
The relaxation time in Python list form
"""
if (isinstance(T, numbers.Real) and T > 0) or T is None:
return [T] * N
elif isinstance(T, Iterable) and len(T) == N:
return T
else:
raise ValueError(
"Invalid relaxation time T={},"
"either the length is not equal to the number of qubits, "
"or T is not a positive number.".format(T))
def get_noisy_dynamics(
self, dims=None, pulses=None, systematic_noise=None):
if systematic_noise is None:
systematic_noise = Pulse(None, None, label="system")
N = len(dims)
self.t1 = self._T_to_list(self.t1, N)
self.t2 = self._T_to_list(self.t2, N)
if len(self.t1) != N or len(self.t2) != N:
raise ValueError(
"Length of t1 or t2 does not match N, "
"len(t1)={}, len(t2)={}".format(
len(self.t1), len(self.t2)))
if self.targets is None:
targets = range(N)
else:
targets = self.targets
for qu_ind in targets:
t1 = self.t1[qu_ind]
t2 = self.t2[qu_ind]
if t1 is not None:
op = 1/np.sqrt(t1) * destroy(dims[qu_ind])
systematic_noise.add_lindblad_noise(op, qu_ind, coeff=True)
if t2 is not None:
# Keep the total dephasing ~ exp(-t/t2)
if t1 is not None:
if 2*t1 < t2:
raise ValueError(
"t1={}, t2={} does not fulfill "
"2*t1>t2".format(t1, t2))
T2_eff = 1./(1./t2-1./2./t1)
else:
T2_eff = t2
op = 1/np.sqrt(2*T2_eff) * 2 * num(dims[qu_ind])
systematic_noise.add_lindblad_noise(op, qu_ind, coeff=True)
return pulses, systematic_noise
class ControlAmpNoise(Noise):
"""
The noise in the amplitude of the control pulse.
Parameters
----------
coeff: list
A list of the coefficients for the control Hamiltonians.
For available choices, see :class:`qutip.QobjEvo`.
tlist: array_like, optional
A NumPy array specifies the time of each coefficient.
indices: list of int, optional
The indices of target pulse in the list of pulses.
Attributes
----------
coeff: list
A list of the coefficients for the control Hamiltonians.
For available choices, see :class:`qutip.QobjEvo`.
tlist: array_like
A NumPy array specifies the time of each coefficient.
indices: list of int
The indices of target pulse in the list of pulses.
"""
def __init__(self, coeff, tlist=None, indices=None):
self.coeff = coeff
self.tlist = tlist
self.indices = indices
def get_noisy_dynamics(
self, dims=None, pulses=None, systematic_noise=None):
if pulses is None:
pulses = []
if self.indices is None:
indices = range(len(pulses))
else:
indices = self.indices
for i in indices:
pulse = pulses[i]
if isinstance(self.coeff, (int, float)):
coeff = pulse.coeff * self.coeff
else:
coeff = self.coeff
if self.tlist is None:
tlist = pulse.tlist
else:
tlist = self.tlist
pulses[i].add_coherent_noise(
pulse.qobj, pulse.targets, tlist, coeff)
return pulses, systematic_noise
class RandomNoise(ControlAmpNoise):
"""
Random noise in the amplitude of the control pulse. The arguments for
the random generator need to be given as key word arguments.
Parameters
----------
dt: float, optional
The time interval between two random amplitude. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.